将一个网络的部分权重加载到另一个网络

权重生成

# 1 加载相关库
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms  # 对数据库和图片预处理


# 2 Hyper-parameters
BATCH_SIZE = 12
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # pick the GPU ("cuda") when available, else CPU
EPOCHS = 10  # one epoch = one full pass over the ~60k MNIST training images


# 3 Preprocessing pipeline applied to every image
pipeline = transforms.Compose([transforms.ToTensor(),  # PIL image -> float tensor scaled to [0, 1]
                               transforms.Normalize((0.1307,), (0.3081, ))])  # standardize with the published MNIST mean/std

# 4 Download and wrap the datasets
from torch.utils.data import DataLoader
train_set = datasets.MNIST(root="./mnist_data/", train=True, transform=pipeline, download=True)  # retry if the download fails
test_set = datasets.MNIST(root="./mnist_data/", train=False, transform=pipeline, download=True)
train_loader = DataLoader(dataset=train_set, batch_size=BATCH_SIZE, shuffle=False)  # NOTE(review): the original comment says "shuffle", but shuffle=False; training usually wants shuffle=True — confirm intent
test_loader = DataLoader(dataset=test_set, batch_size=BATCH_SIZE, shuffle=True)  # train and test batch sizes could be configured separately

# 5 Peek at one batch of MNIST images
import matplotlib.pyplot as plt
images, labels = next(iter(train_loader))  # images.shape == (BATCH_SIZE, 1, 28, 28)

# Variant 2: CNN defined with explicit sub-modules
class Digit2(nn.Module):
    """CNN for 28x28 single-channel MNIST digits.

    Pipeline: conv1 -> ReLU -> 2x2 max-pool -> conv2 -> ReLU -> flatten
    -> fc1 -> ReLU -> fc2 -> softmax over the 10 digit classes.
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: 1 -> 10 channels (5x5 kernel), then 10 -> 20 (3x3).
        self.conv1 = nn.Conv2d(1, 10, 5)
        self.conv2 = nn.Conv2d(10, 20, 3)
        # Classifier head: flattened 20x10x10 feature map -> 500 -> 10 scores.
        self.fc1 = nn.Linear(20 * 10 * 10, 500)
        self.fc2 = nn.Linear(500, 10)
        # Stateless modules (no learnable parameters).
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.relu3 = nn.ReLU()
        self.maxpool2d1 = nn.MaxPool2d(2, 2)
        self.maxpool2d2 = nn.MaxPool2d(2, 2)  # defined but never used in forward()
        self.softmax = nn.Softmax(dim=1)  # normalize each row of 10 class scores

    def forward(self, x):
        batch = x.size(0)
        # (batch, 1, 28, 28) -> (batch, 10, 24, 24): (28 - 5)/1 + 1 = 24
        h = self.relu1(self.conv1(x))
        # Max-pool halves the spatial size: (batch, 10, 12, 12)
        h = self.maxpool2d1(h)
        # (batch, 10, 12, 12) -> (batch, 20, 10, 10): (12 - 3)/1 + 1 = 10
        h = self.relu2(self.conv2(h))
        # Flatten to (batch, 2000) for the fully connected head.
        h = h.view(batch, -1)
        h = self.relu3(self.fc1(h))
        return self.softmax(self.fc2(h))
        
# 7 Build the model and the optimizer
model = Digit2().to(DEVICE)  # move all model parameters onto the selected device
optimizer = optim.Adam(model.parameters())  # Adam (default lr) will update every model parameter

# 8 Training loop for a single epoch
def train_model(model, device, train_loader, optimizer, epoch):
    """Run one training epoch of `model` over `train_loader`.

    Logs the current batch loss every 100 batches.
    NOTE(review): the model's forward() already applies Softmax, while
    F.cross_entropy applies log-softmax internally — softmax is effectively
    applied twice. Returning raw logits is the usual fix; confirm intent.
    """
    model.train()  # enable training-mode behavior
    for batch_index, (data, target) in enumerate(train_loader):
        # Move the batch onto the same device as the model.
        data = data.to(device)
        target = target.to(device)
        optimizer.zero_grad()  # gradients accumulate by default; clear them first
        loss = F.cross_entropy(model(data), target)  # multi-class cross-entropy
        loss.backward()   # back-propagate
        optimizer.step()  # apply the parameter update
        if batch_index % 100 == 0:  # periodic progress log
            print("Train Epoch:{} \t loss:{:.6f}".format(epoch, loss.item()))

# 9 Evaluation loop
def test_model(model, device, test_loader):
    """Evaluate `model` on `test_loader`; print per-sample average loss and accuracy (%).

    NOTE(review): the model's forward() applies Softmax while F.cross_entropy
    expects logits (it applies log-softmax itself) — confirm the intended loss
    scale. Accuracy is unaffected because argmax is monotone under softmax.
    """
    model.eval()  # evaluation mode
    correct = 0.0    # number of correctly classified samples
    test_loss = 0.0  # summed per-sample loss over the whole test set
    with torch.no_grad():  # no gradients / backprop needed during evaluation
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # BUG FIX: accumulate the per-sample loss *sum*. The original added
            # per-batch means and then divided by the dataset size, which
            # under-reports the average loss by a factor of the batch size.
            test_loss += F.cross_entropy(output, target, reduction="sum").item()
            pred = output.argmax(dim=1)  # predicted class index per sample
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)  # per-sample average loss
    print("Test--Average loss:{:.4f}, Accuracy: {:.3f}\n".format(test_loss, 100.0*correct/len(test_loader.dataset)))
   
# 10 Run training followed by evaluation for every epoch
for epoch in range(1, EPOCHS+1):
    train_model(model, DEVICE, train_loader, optimizer, epoch)
    test_model(model, DEVICE, test_loader)

# 11 Save only the learned parameters (state_dict), not the whole module
torch.save(model.state_dict(), "params_w.pth")

权重加载

# 1 加载相关库
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms  # 对数据库和图片预处理


# 2 Hyper-parameters
BATCH_SIZE = 12
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # pick the GPU ("cuda") when available, else CPU
EPOCHS = 10  # one epoch = one full pass over the ~60k MNIST training images


# 3 Preprocessing pipeline applied to every image
pipeline = transforms.Compose([transforms.ToTensor(),  # PIL image -> float tensor scaled to [0, 1]
                               transforms.Normalize((0.1307,), (0.3081, ))])  # standardize with the published MNIST mean/std

# 4 Download and wrap the datasets
from torch.utils.data import DataLoader
train_set = datasets.MNIST(root="./mnist_data/", train=True, transform=pipeline, download=True)  # retry if the download fails
test_set = datasets.MNIST(root="./mnist_data/", train=False, transform=pipeline, download=True)
train_loader = DataLoader(dataset=train_set, batch_size=BATCH_SIZE, shuffle=False)  # NOTE(review): the original comment says "shuffle", but shuffle=False — confirm intent
test_loader = DataLoader(dataset=test_set, batch_size=BATCH_SIZE, shuffle=True)  # train and test batch sizes could be configured separately

# 5 Peek at one batch of MNIST images
import matplotlib.pyplot as plt
images, labels = next(iter(train_loader))  # images.shape == (BATCH_SIZE, 1, 28, 28)

# Variant 2: the source network whose weights were saved to params_w.pth
class Digit2(nn.Module):
    """CNN for 28x28 single-channel MNIST digits (weight-transfer source).

    Pipeline: conv1 -> ReLU -> 2x2 max-pool -> conv2 -> ReLU -> flatten
    -> fc1 -> ReLU -> fc2 -> softmax over the 10 digit classes.
    """
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 10, 5)  # 1 gray input channel -> 10 channels, 5x5 kernel
        self.conv2 = nn.Conv2d(10, 20, 3)
        self.fc1 = nn.Linear(20*10*10, 500)  # flattened 20x10x10 feature map -> 500
        self.fc2 = nn.Linear(500, 10)
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.relu3 = nn.ReLU()
        self.maxpool2d1 = nn.MaxPool2d(2, 2)
        self.maxpool2d2 = nn.MaxPool2d(2, 2)  # defined but never used in forward()
        self.softmax = nn.Softmax(dim=1)  # one row per sample, 10 class columns
    def forward(self, x):
        input_size = x.size(0)  # batch size
        x = self.conv1(x)       # (B, 1, 28, 28) -> (B, 10, 24, 24): (28-5)/1+1 = 24
        x = self.relu1(x)
        x = self.maxpool2d1(x)  # (B, 10, 24, 24) -> (B, 10, 12, 12)
        x = self.conv2(x)       # (B, 10, 12, 12) -> (B, 20, 10, 10): (12-3)/1+1 = 10
        x = self.relu2(x)
        x = x.view(input_size, -1)  # flatten to (B, 2000)
        x = self.fc1(x)
        x = self.relu3(x)
        x = self.fc2(x)
        out = self.softmax(x)
        # BUG FIX: the original forward() dropped the return statement,
        # so calling the model silently produced None.
        return out


# Variant 3: same trunk as Digit2 with a deeper classifier head
class Digit3(nn.Module):
    """Target network for the weight transfer.

    Shares the convolutional trunk and fc1 with Digit2 (identical shapes and
    attribute names, so those weights can be copied over) but replaces the
    final layer with a deeper head: fc1 -> fc3 -> fc4 -> softmax.
    """

    def __init__(self):
        super().__init__()
        # Trunk — identical to Digit2 so conv1/conv2/fc1 state_dict keys match.
        self.conv1 = nn.Conv2d(1, 10, 5)
        self.conv2 = nn.Conv2d(10, 20, 3)
        self.fc1 = nn.Linear(20 * 10 * 10, 500)
        # New head layers (absent from Digit2, so trained from scratch).
        self.fc3 = nn.Linear(500, 50)
        self.fc4 = nn.Linear(50, 10)
        # Stateless modules (no learnable parameters).
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()
        self.relu3 = nn.ReLU()
        self.relu4 = nn.ReLU()
        self.maxpool2d1 = nn.MaxPool2d(2, 2)
        self.maxpool2d2 = nn.MaxPool2d(2, 2)  # defined but never used in forward()
        self.softmax = nn.Softmax(dim=1)  # normalize each row of 10 class scores

    def forward(self, x):
        batch = x.size(0)
        # (batch, 1, 28, 28) -> (batch, 10, 24, 24), pooled to (batch, 10, 12, 12)
        feat = self.maxpool2d1(self.relu1(self.conv1(x)))
        # (batch, 10, 12, 12) -> (batch, 20, 10, 10): (12 - 3)/1 + 1 = 10
        feat = self.relu2(self.conv2(feat))
        flat = feat.view(batch, -1)  # (batch, 2000)
        h = self.relu3(self.fc1(flat))
        h = self.relu4(self.fc3(h))
        return self.softmax(self.fc4(h))


# 7 Build both models and an optimizer for the target network
digit2 = Digit2().to(DEVICE)  # source network (architecture the saved weights belong to)
digit3 = Digit3().to(DEVICE)  # target network that receives the shared weights
optimizer = optim.Adam(digit3.parameters())  # only the target network is trained further

# Load the pretrained Digit2 parameters.
# FIX: map_location=DEVICE so weights saved on a GPU machine also load on CPU-only hosts.
digit2.load_state_dict(torch.load("params_w.pth", map_location=DEVICE))
pretrained_dict = digit2.state_dict()
model_dict = digit3.state_dict()
# Keep only the entries whose keys also exist in Digit3
# (conv1/conv2/fc1 match; Digit2's fc2 has no counterpart and is dropped).
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# Overwrite the matching entries, then load the merged state dict.
model_dict.update(pretrained_dict)
digit3.load_state_dict(model_dict)

# 11 Save the merged weights and verify they round-trip through a reload.
torch.save(digit3.state_dict(), "params_new.pth")
digit3.load_state_dict(torch.load("params_new.pth", map_location=DEVICE))
model_dict1 = digit3.state_dict()
print("transferred keys:", sorted(pretrained_dict.keys()))  # replaces leftover debug print(1)

猜你喜欢

转载自blog.csdn.net/weixin_41874898/article/details/121764809