Convolutional Layers
Two-dimensional cross-correlation
import torch
from torch import nn
from d2l import torch as d2l

def corr2d(X, K):
    """Compute the 2D cross-correlation of X with kernel K."""
    h, w = K.shape
    Y = torch.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = (X[i : i + h, j : j + w] * K).sum()
    return Y
A quick test:
X = torch.tensor([[0.0, 1.0, 2.0],
                  [3.0, 4.0, 5.0],
                  [6.0, 7.0, 8.0]])
K = torch.tensor([[0.0, 1.0],
                  [2.0, 3.0]])
corr2d(X, K)
Output:
tensor([[19., 25.],
        [37., 43.]])
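To see where the first entry comes from, the top-left 2×2 window of X is multiplied elementwise with K and summed (a worked check added here):

0 × 0 + 1 × 1 + 3 × 2 + 4 × 3 = 19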
A convolution layer class
class Conv2D(nn.Module):
    def __init__(self, kernel_size):
        super().__init__()
        self.weight = nn.Parameter(torch.rand(kernel_size))
        self.bias = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        return corr2d(x, self.weight) + self.bias
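A minimal usage sketch of the class (added for illustration; the kernel is randomly initialized, so the output values themselves are not meaningful yet):

conv = Conv2D(kernel_size=(2, 2))
conv(X).shape  # torch.Size([2, 2]), the same shape corr2d(X, K) produced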
Set the middle columns to 0 so X looks like an image with vertical edges:
X = torch.ones((6, 8))
X[:, 2:6] = 0
X
tensor([[1., 1., 0., 0., 0., 0., 1., 1.],
        [1., 1., 0., 0., 0., 0., 1., 1.],
        [1., 1., 0., 0., 0., 0., 1., 1.],
        [1., 1., 0., 0., 0., 0., 1., 1.],
        [1., 1., 0., 0., 0., 0., 1., 1.],
        [1., 1., 0., 0., 0., 0., 1., 1.]])
A 1×2 convolution kernel:
K = torch.tensor([[1.0, -1.0]])
Apply the kernel to X:
Y = corr2d(X, K)
Y
Output:
tensor([[ 0.,  1.,  0.,  0.,  0., -1.,  0.],
        [ 0.,  1.,  0.,  0.,  0., -1.,  0.],
        [ 0.,  1.,  0.,  0.,  0., -1.,  0.],
        [ 0.,  1.,  0.,  0.,  0., -1.,  0.],
        [ 0.,  1.,  0.,  0.,  0., -1.,  0.],
        [ 0.,  1.,  0.,  0.,  0., -1.,  0.]])
Try again with X transposed:
corr2d(X.t(), K)
Output:
tensor([[0., 0., 0., 0., 0.],
        [0., 0., 0., 0., 0.],
        [0., 0., 0., 0., 0.],
        [0., 0., 0., 0., 0.],
        [0., 0., 0., 0., 0.],
        [0., 0., 0., 0., 0.],
        [0., 0., 0., 0., 0.],
        [0., 0., 0., 0., 0.]])
Evidently this kernel can only detect vertical edges; it finds nothing in the transposed image.
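As an added sanity check (not in the original), transposing the kernel as well recovers the edges of the transposed image:

corr2d(X.t(), K.t())  # +1 along row 1, -1 along row 5: the edges of X.t()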
Now learn the kernel from data with a hand-written training loop:
conv2d = nn.Conv2d(1, 1, kernel_size=(1, 2), bias=False)
X = X.reshape((1, 1, 6, 8))
Y = Y.reshape((1, 1, 6, 7))
for i in range(10):
    Y_hat = conv2d(X)
    l = (Y_hat - Y) ** 2
    conv2d.zero_grad()
    l.sum().backward()
    conv2d.weight.data[:] -= 3e-2 * conv2d.weight.grad
    if (i + 1) % 2 == 0:
        print(f'batch{i + 1}, loss {l.sum():.3f}')
nn.Conv2d(in_channels, out_channels, kernel_size, bias) constructs the convolution layer.
X is reshaped into a four-dimensional tensor of shape (batch size, channels, 6, 8), and Y likewise.
We iterate 10 times to learn the kernel parameters; conv2d.weight.grad holds the gradient of the weights. Output (the exact losses vary with the random initialization):
batch2, loss 5.869
batch4, loss 1.119
batch6, loss 0.243
batch8, loss 0.063
batch10, loss 0.020
Inspect the learned kernel:
conv2d.weight.data.reshape((1, 2))
Output:
tensor([[ 0.9751, -1.0009]])
After 10 iterations the learned kernel is already very close to the target [1.0, -1.0].
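As an added check (numbers vary run to run), the learned layer now nearly reproduces Y:

with torch.no_grad():
    print(float(((conv2d(X) - Y) ** 2).sum()))  # roughly the final loss, ~0.02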
Padding and Stride
Without stride:
output size = input size − kernel size + padding + 1
With stride:
output size = ⌊(input size − kernel size + padding + stride) / stride⌋
Here "padding" counts the rows/columns added on both sides combined.
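A small helper (an addition, using the convention above that padding counts both sides combined) makes this arithmetic easy to check against the examples below:

def conv_out_size(n, k, p=0, s=1):
    """Output size for input n, kernel k, total padding p, stride s."""
    return (n - k + p + s) // s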
import torch
from torch import nn

def comp_conv2d(conv2d, X):
    X = X.reshape((1, 1) + X.shape)
    Y = conv2d(X)
    return Y.reshape(Y.shape[2:])

conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1)
X = torch.rand(size=(8, 8))
comp_conv2d(conv2d, X).shape
X.reshape((1, 1) + X.shape) prepends batch-size and channel dimensions of size 1 each; Y.shape[2:] drops those two leading dimensions again.
Output:
torch.Size([8, 8])
8 - 3 + 2 + 1 = 8
Example 1
conv2d = nn.Conv2d(1, 1, kernel_size=(5, 3), padding=(2, 1))
comp_conv2d(conv2d, X).shape
Output:
torch.Size([8, 8])
8 - 5 + 4 + 1 = 8
8 - 3 + 2 + 1 = 8
Example 2
conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2)
comp_conv2d(conv2d, X).shape
Output:
torch.Size([4, 4])
(8 - 3 + 2 + 2) / 2 = 4.5, rounded down to 4
Example 3
conv2d = nn.Conv2d(1, 1, kernel_size=(3, 4), padding=(0, 1), stride=(3, 4))
comp_conv2d(conv2d, X).shape
Output:
torch.Size([2, 2])
(8 - 3 + 0 + 3) / 3 = 2.67, rounded down to 2
(8 - 4 + 2 + 4) / 4 = 2.5, rounded down to 2
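All three examples agree with the conv_out_size helper sketched earlier:

conv_out_size(8, 3, 2)     # 8, the padding=1 example
conv_out_size(8, 3, 2, 2)  # 4, Example 2
conv_out_size(8, 3, 0, 3)  # 2, Example 3, height
conv_out_size(8, 4, 2, 4)  # 2, Example 3, width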
Multiple Input and Output Channels
Multiple input channels
import torch
from d2l import torch as d2l

def corr2d_multi_in(X, K):
    # Sum the per-channel cross-correlations over the channel dimension
    return sum(d2l.corr2d(x, k) for x, k in zip(X, K))

X = torch.tensor([[[0.0, 1.0, 2.0],
                   [3.0, 4.0, 5.0],
                   [6.0, 7.0, 8.0]],
                  [[1.0, 2.0, 3.0],
                   [4.0, 5.0, 6.0],
                   [7.0, 8.0, 9.0]]])
K = torch.tensor([[[0.0, 1.0],
                   [2.0, 3.0]],
                  [[1.0, 2.0],
                   [3.0, 4.0]]])
corr2d_multi_in(X, K)
X has shape (2, 3, 3): two input channels. K has shape (2, 2, 2): one 2×2 kernel per input channel. Output:
tensor([[ 56.,  72.],
        [104., 120.]])
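Worked check (added): the top-left entry is the sum of both channels' cross-correlations,

(0 × 0 + 1 × 1 + 3 × 2 + 4 × 3) + (1 × 1 + 2 × 2 + 4 × 3 + 5 × 4) = 19 + 37 = 56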
Multiple output channels
With c_o groups of kernels, we get c_o output channels.
def corr2d_multi_in_out(X, K):
    # Iterate over the 0th dimension of K, performing a multi-input
    # cross-correlation with X each time, then stack the results.
    return torch.stack([corr2d_multi_in(X, k) for k in K], 0)
K = torch.stack((K, K + 1, K + 2), 0)
K.shape
torch.Size([3, 2, 2, 2])
K
tensor([[[[0., 1.],
          [2., 3.]],
         [[1., 2.],
          [3., 4.]]],
        [[[1., 2.],
          [3., 4.]],
         [[2., 3.],
          [4., 5.]]],
        [[[2., 3.],
          [4., 5.]],
         [[3., 4.],
          [5., 6.]]]])
corr2d_multi_in_out(X, K)
tensor([[[ 56.,  72.],
         [104., 120.]],
        [[ 76., 100.],
         [148., 172.]],
        [[ 96., 128.],
         [192., 224.]]])
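A shape check (added): three groups of kernels yield three output channels.

corr2d_multi_in_out(X, K).shape  # torch.Size([3, 2, 2])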
1×1 Convolutional Layer
def corr2d_multi_in_out_1x1(X, K):
    c_i, h, w = X.shape
    c_o = K.shape[0]
    X = X.reshape((c_i, h * w))
    K = K.reshape((c_o, c_i))
    # Matrix multiplication, as in a fully connected layer
    Y = torch.matmul(K, X)
    return Y.reshape((c_o, h, w))

X = torch.normal(0, 1, (3, 3, 3))
K = torch.normal(0, 1, (2, 3, 1, 1))
Y1 = corr2d_multi_in_out_1x1(X, K)
Y2 = corr2d_multi_in_out(X, K)
assert float(torch.abs(Y1 - Y2).sum()) < 1e-6
A 1×1 convolution is therefore equivalent to a fully connected layer applied at each pixel position.
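As an added cross-check (not in the original), PyTorch's own convolution layer gives the same answer when loaded with K, whose (2, 3, 1, 1) shape already matches nn.Conv2d's weight layout:

from torch import nn

conv = nn.Conv2d(3, 2, kernel_size=1, bias=False)
conv.weight.data = K
Y3 = conv(X.unsqueeze(0)).squeeze(0)  # add and remove the batch dimension
assert float(torch.abs(Y1 - Y3).sum()) < 1e-6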
Pooling Layers
Pooling alleviates a convolutional layer's sensitivity to location. A pooling layer also has a window size, padding, and stride, but it has no parameters to learn, and it acts on each channel independently: the number of output channels equals the number of input channels.
import torch
from torch import nn
from d2l import torch as d2l
def pool2d(X, pool_size, mode='max'):
    p_h, p_w = pool_size
    Y = torch.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            if mode == 'max':
                Y[i, j] = X[i: i + p_h, j: j + p_w].max()
            elif mode == 'avg':
                Y[i, j] = X[i: i + p_h, j: j + p_w].mean()
    return Y
The structure mirrors the cross-correlation computation of a convolutional layer.
X = torch.tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
pool2d(X, (2, 2))
Output:
tensor([[4., 5.],
        [7., 8.]])
pool2d(X, (2, 2), 'avg')
Output:
tensor([[2., 3.],
        [5., 6.]])
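Worked check (added): the top-left 2×2 window contains {0, 1, 3, 4}, so

max(0, 1, 3, 4) = 4    mean(0, 1, 3, 4) = 2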
Padding and stride
X = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4))
X
Take a look at X:
tensor([[[[ 0.,  1.,  2.,  3.],
          [ 4.,  5.,  6.,  7.],
          [ 8.,  9., 10., 11.],
          [12., 13., 14., 15.]]]])
pool2d = nn.MaxPool2d(3)
pool2d(X)
By default, a framework's pooling stride equals the pooling window size, so a (3, 3) window uses a (3, 3) stride. Here only a single 3×3 window fits inside the 4×4 input, so the result is tensor([[[[10.]]]]).
The stride and padding can also be set manually:

pool2d = nn.MaxPool2d(3, padding=1, stride=2)
pool2d(X)

Output:

tensor([[[[ 5.,  7.],
          [13., 15.]]]])
A rectangular window, with its own stride and padding:

pool2d = nn.MaxPool2d((2, 3), stride=(2, 3), padding=(0, 1))
pool2d(X)

Output:

tensor([[[[ 5.,  7.],
          [13., 15.]]]])
Pooling with multiple input channels
X = torch.cat((X, X + 1), 1)
X
We use cat rather than stack here: stack would create a new axis and concatenate along it, but X was already reshaped to (1, 1, 4, 4), so we concatenate along the existing channel axis (dim 1).
tensor([[[[ 0.,  1.,  2.,  3.],
          [ 4.,  5.,  6.,  7.],
          [ 8.,  9., 10., 11.],
          [12., 13., 14., 15.]],
         [[ 1.,  2.,  3.,  4.],
          [ 5.,  6.,  7.,  8.],
          [ 9., 10., 11., 12.],
          [13., 14., 15., 16.]]]])
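To make the difference concrete (an added illustration; A is a hypothetical stand-in for X before the concatenation):

A = torch.arange(16, dtype=torch.float32).reshape((1, 1, 4, 4))
torch.stack((A, A + 1), 0).shape  # torch.Size([2, 1, 1, 4, 4]): a new leading axis
torch.cat((A, A + 1), 1).shape    # torch.Size([1, 2, 4, 4]): along the channel axis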
pool2d = nn.MaxPool2d(3, padding=1, stride=2)
pool2d(X)
Output:
tensor([[[[ 5.,  7.],
          [13., 15.]],
         [[ 6.,  8.],
          [14., 16.]]]])
The input has 2 channels and the output still has 2: pooling operates on each channel separately.
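A final shape check (added):

pool2d(X).shape  # torch.Size([1, 2, 2, 2]): the channel dimension is unchanged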