PyTorch

Basic Neural Network Skeleton: nn.Module

import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):  # inherit from nn.Module
    def __init__(self):  # initialization
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)  # convolution
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):  # forward pass, input x
        x = F.relu(self.conv1(x))  # convolution + non-linearity
        return F.relu(self.conv2(x))  # another convolution + non-linearity
# e.g.
import torch
import torch.nn as nn
import torch.nn.functional as F

class My_Model(nn.Module):
    def __init__(self):
        super(My_Model, self).__init__()

    def forward(self, input):
        output = input + 1
        return output

# create an instance
my_model = My_Model()
x = torch.tensor(1.0)
output = my_model(x)  # calling the instance runs forward() via __call__

Convolution Layers

nn.Conv2d: 2D convolution

CLASS torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', device=None, dtype=None)
# usually only the first five parameters need to be set by hand
  • in_channels (int) – Number of channels in the input image
  • out_channels (int) – Number of channels produced by the convolution
  • kernel_size (int or tuple) – Size of the convolving kernel; the kernel weights are adjusted continuously during training
  • stride (int or tuple, optional) – Stride of the convolution. Default: 1
  • padding (int, tuple or str, optional) – Padding added to all four sides of the input. Default: 0
  • padding_mode (str, optional) – 'zeros', 'reflect', 'replicate' or 'circular'. Default: 'zeros'
  • dilation (int or tuple, optional) – Spacing between kernel elements. Default: 1
  • groups (int, optional) – Number of blocked connections from input channels to output channels. Default: 1
  • bias (bool, optional) – If True, adds a learnable bias to the output. Default: True

Shape computation

H_out = floor((H_in + 2*padding[0] - dilation[0]*(kernel_size[0] - 1) - 1) / stride[0] + 1)
W_out = floor((W_in + 2*padding[1] - dilation[1]*(kernel_size[1] - 1) - 1) / stride[1] + 1)
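
A quick sanity check of the formula (a minimal sketch; the layer sizes here are arbitrary example values):

import torch
from torch import nn

# H_in = W_in = 32, kernel_size 3, stride 1, padding 0, dilation 1
conv = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3)
x = torch.ones(1, 3, 32, 32)
print(conv(x).shape)  # torch.Size([1, 6, 30, 30])
# formula: floor((32 + 2*0 - 1*(3-1) - 1) / 1 + 1) = 30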

import torch
import torchvision
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("../data", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=64)  # batch_size: take 64 images at a time

class My_Model(nn.Module):
    def __init__(self):
        super(My_Model, self).__init__()
        self.conv1 = Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0)  # 3 input channels, 6 output channels, 3×3 kernel, stride 1 in both directions

    def forward(self, x):
        x = self.conv1(x)
        return x

my_model = My_Model()
# print(my_model)

# writer = SummaryWriter("../logs")  # visualize with TensorBoard
# step = 0
# inspect each batch of images
for data in dataloader:
    imgs, targets = data
    output = my_model(imgs)
    # print(output.shape)
    # writer.add_images("input", imgs, step)
    # output = torch.reshape(output, (-1, 3, 30, 30))  # 6 channels can't be shown as an image; fold into 3
    # writer.add_images("output", output, step)
    # step = step + 1
# in the terminal (pytorch environment): tensorboard --logdir=logs

Commonly Used Convolution Layers

VGG16 — pay attention to the padding sizes it sets

(figure: VGG16 network structure)

Pooling Layers

MaxPool2d: max pooling

Max pooling takes the maximum value within each local region, i.e. downsampling.

It preserves the input's salient features while reducing the amount of data.

(MaxUnpool does the reverse: upsampling.)

CLASS torch.nn.MaxPool2d(kernel_size, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False)
# stride defaults to kernel_size
# ceil_mode=True rounds the output size up; the default (False) rounds down
# usually only kernel_size needs to be set

Slide the pooling window over the input image and take the maximum within each kernel-sized region (the stride defaults to the kernel size).

import torch
from torch import nn
from torch.nn import MaxPool2d

input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]], dtype=torch.float32)
# input must be 4-dimensional: (N (batch size), Channel, Height, Width)
input = torch.reshape(input, (-1, 1, 5, 5))  # batch size -1 is inferred automatically; 1 channel, 5×5
# print(input.shape)

class My_Model(nn.Module):
    def __init__(self):
        super(My_Model, self).__init__()
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        output = self.maxpool1(input)
        return output

my_model = My_Model()
output = my_model(input)
print(output)
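
ceil_mode decides what happens to the incomplete windows at the right and bottom edges. A minimal comparison, reusing the (1, 1, 5, 5) input from above:

# kernel 3, stride defaults to 3: the 5×5 input leaves partial windows at the edges
print(MaxPool2d(kernel_size=3, ceil_mode=True)(input).shape)   # torch.Size([1, 1, 2, 2]) — partial windows kept
print(MaxPool2d(kernel_size=3, ceil_mode=False)(input).shape)  # torch.Size([1, 1, 1, 1]) — partial windows dropped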

Non-linear Activations

Common non-linear activations:

ReLU, Sigmoid

# take ReLU as an example: output = 0 if input < 0, output = input if input >= 0
import torch
from torch import nn
from torch.nn import ReLU

input = torch.tensor([[1, -0.5],
                      [-1, 3]])
input = torch.reshape(input, (-1, 1, 2, 2))
print(input.shape)

class My_Model(nn.Module):
    def __init__(self):
        super(My_Model, self).__init__()
        self.relu1 = ReLU()  # inplace=True overwrites input with output; default is False

    def forward(self, input):
        output = self.relu1(input)
        return output

my_model = My_Model()
output = my_model(input)
print(output)

Linear Layers

Normalization Layers

BatchNorm2d: normalizes activations over the batch (helps prevent overfitting)

Dropout Layers

Dropout randomly zeroes elements during training to prevent overfitting; a construction sketch for these layers follows below.
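
None of these layers is demonstrated above, so here is a minimal construction sketch (the sizes are arbitrary example values, not from a specific model):

from torch import nn

linear = nn.Linear(in_features=1024, out_features=64)  # fully connected layer: y = x·A^T + b
bn = nn.BatchNorm2d(num_features=32)  # num_features must equal the channel count of the input
drop = nn.Dropout(p=0.5)              # during training, zeroes each element with probability p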

Sequential && CIFAR-10 Model Structure

Using the CIFAR-10 dataset and its reference model as an example.

(figure: CIFAR-10 model structure)

import ssl
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader

class my_Model(nn.Module):
    def __init__(self):
        super(my_Model, self).__init__()
        # self.conv1 = Conv2d(3, 32, 5, padding=2)  # in_channels=3, out_channels=32, kernel=5
        # # padding=2 and stride=1 follow from the shape formula
        # self.maxpool1 = MaxPool2d(2)
        # self.conv2 = Conv2d(32, 32, 5, padding=2)
        # self.maxpool2 = MaxPool2d(2)
        # self.conv3 = Conv2d(32, 64, 5, padding=2)
        # self.maxpool3 = MaxPool2d(2)
        # self.flatten = Flatten()
        # self.linear1 = Linear(1024, 64)  # linear layer, 1024 = 64 × 4 × 4
        # self.linear2 = Linear(64, 10)  # 10 classes, so the final output size is 10

        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        # x = self.conv1(x)
        # x = self.maxpool1(x)
        # x = self.conv2(x)
        # x = self.maxpool2(x)
        # x = self.conv3(x)
        # x = self.maxpool3(x)
        # x = self.flatten(x)
        # x = self.linear1(x)
        # x = self.linear2(x)

        x = self.model1(x)
        return x


my_model = my_Model()
print(my_model)

# sanity-check the network with a dummy input
input = torch.ones(64, 3, 32, 32)
output = my_model(input)
print(output.shape)

# visualize with TensorBoard
writer = SummaryWriter("logs_seq")
writer.add_graph(my_model, input)
writer.close()

Visualization

# in cmd, with the same environment activated
tensorboard --logdir=logs_seq --port=6007

(figures: TensorBoard screenshots of the model graph added via add_graph)

Loss Function && Backpropagation && Optimizer

# using the CIFAR10 dataset and the model defined above
dataset = torchvision.datasets.CIFAR10("../data", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=64)
loss = nn.CrossEntropyLoss()  # cross-entropy loss
my_model = my_Model()
optim = torch.optim.SGD(my_model.parameters(), lr=0.01)  # optimizer: stochastic gradient descent, lr = learning rate
for epoch in range(20):  # iterate over the dataset for 20 epochs
    running_loss = 0
    for data in dataloader:
        imgs, targets = data
        output = my_model(imgs)
        result_loss = loss(output, targets)
        # print(result_loss)
        optim.zero_grad()  # clear the gradients
        result_loss.backward()  # compute the gradient at every node
        optim.step()  # one gradient-descent step with the chosen optimizer
        running_loss = running_loss + result_loss.item()  # .item() avoids keeping the graph alive
    print(running_loss)  # total loss for the epoch
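
As a toy illustration of what nn.CrossEntropyLoss computes for one sample (values made up; the loss is -x[class] + log Σⱼ exp(x[j])):

import torch
from torch import nn

x = torch.tensor([[0.1, 0.2, 0.3]])  # raw scores for 3 classes, batch of 1
y = torch.tensor([1])                # target class index
loss_cross = nn.CrossEntropyLoss()
print(loss_cross(x, y))  # -0.2 + log(exp(0.1) + exp(0.2) + exp(0.3)) ≈ 1.1019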

Downloading the dataset in the code above may report an error:

(screenshot: SSL certificate-verification failure while downloading CIFAR10)

Solution:

import ssl
ssl._create_default_https_context = ssl._create_unverified_context  # skip certificate verification for the download

Using and Modifying Existing Models

The VGG16 model

import torchvision
from torch import nn

vgg16_false = torchvision.models.vgg16(pretrained=False)
print(vgg16_false)

# train_data = torchvision.datasets.CIFAR10('../data', train=True, transform=torchvision.transforms.ToTensor(), download=True)

# append a linear layer to VGG16: input=1000, output=10
vgg16_false.classifier.add_module('add_linear', nn.Linear(1000, 10))
# or modify VGG16's last linear layer (the 7th module in classifier)
vgg16_false.classifier[6] = nn.Linear(4096, 10)
print(vgg16_false)

Model Saving

# saving and loading a model
# method 1
torch.save(vgg16_false, "vgg16_1.pth")  # saves model structure + parameters
# corresponding way to load
model_1 = torch.load("vgg16_1.pth")

# method 2 (officially recommended)
torch.save(vgg16_false.state_dict(), "vgg16_2.pth")  # saves only the parameters
# corresponding way to load
vgg16 = torchvision.models.vgg16(pretrained=False)
vgg16.load_state_dict(torch.load("vgg16_2.pth"))
print(vgg16)
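
One caveat with method 1: torch.load unpickles the whole object, so the model's class definition must be importable when loading. A hypothetical sketch with a custom model (file and module names are made up):

# file A: save the full custom model
torch.save(my_model, "my_model.pth")

# file B: loading fails with an AttributeError unless My_Model's class
# definition is in scope, e.g. via `from model_file import My_Model`
model = torch.load("my_model.pth")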

Complete Model Training Recipe

from model import *  # import the model (my_Model is defined in model.py)
import ssl
import torch
import torchvision
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader

ssl._create_default_https_context = ssl._create_unverified_context

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# prepare the datasets
train_data = torchvision.datasets.CIFAR10(root="../data", train=True, transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10(root="../data", train=False, transform=torchvision.transforms.ToTensor())
train_data_size = len(train_data)
test_data_size = len(test_data)
print("Training set size: {}".format(train_data_size))
print("Test set size: {}".format(test_data_size))

# load the datasets with DataLoader
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# create the network model
my_model = my_Model()
my_model = my_model.to(device)

# create the loss function
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.to(device)

# optimizer
learning_rate = 0.01
optimizer = torch.optim.SGD(my_model.parameters(), lr=learning_rate)

# training parameters
# number of training steps so far
total_train_step = 0
# number of test rounds so far
total_test_step = 0
# number of epochs
epoch = 10

# add TensorBoard to visualize the loss
writer = SummaryWriter("logs_train")


for i in range(epoch):
    print("------ Epoch {} ------".format(i+1))
    # training phase
    my_model.train()
    for data in train_dataloader:
        imgs, targets = data
        imgs = imgs.to(device)
        targets = targets.to(device)
        outputs = my_model(imgs)
        loss = loss_fn(outputs, targets)

        optimizer.zero_grad()  # clear the gradients
        loss.backward()
        optimizer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("step: {}, loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # test phase
    my_model.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():  # evaluation only, no gradient updates
        for data in test_dataloader:
            imgs, targets = data
            imgs = imgs.to(device)
            targets = targets.to(device)
            outputs = my_model(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("Total loss on the test set: {}".format(total_test_loss))
    print("Accuracy on the test set: {}".format(total_accuracy/test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    # save the model after every epoch
    # torch.save(my_model, "my_model_{}.pth".format(i+1))
    torch.save(my_model.state_dict(), "my_model_{}.pth".format(i+1))

writer.close()
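
To actually use a checkpoint saved above with method 2, a minimal sketch (the filename assumes the 10-epoch run finished; variables reused from the script):

# rebuild the architecture, then load the saved parameters
model = my_Model().to(device)
model.load_state_dict(torch.load("my_model_10.pth", map_location=device))
model.eval()
with torch.no_grad():
    img, target = test_data[0]                            # one CIFAR10 test image, shape (3, 32, 32)
    output = model(img.reshape(1, 3, 32, 32).to(device))  # the model expects a batch dimension
    print(output.argmax(1).item(), target)                # predicted class vs. ground truth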

See here:

https://pytorch.org/docs/stable/nn.html

https://www.bilibili.com/video/BV1hE411t7RN?p=18&spm_id_from=pageDriver&vd_source=3def6347655903bff36de76eff34d8eb

Google Colab