torch.nn.Module.type(dst_type)

Reference: torch.nn.Module.type(dst_type)

The possible values of Data type, dtype, CPU tensor and GPU tensor are given by the type table in the PyTorch documentation; the floating-point rows, which are the ones relevant here, are:

Data type               dtype                          CPU tensor          GPU tensor
32-bit floating point   torch.float32 / torch.float    torch.FloatTensor   torch.cuda.FloatTensor
64-bit floating point   torch.float64 / torch.double   torch.DoubleTensor  torch.cuda.DoubleTensor
16-bit floating point   torch.float16 / torch.half     torch.HalfTensor    torch.cuda.HalfTensor

Original documentation:

type(dst_type)

    Casts all parameters and buffers to dst_type.

    Parameters
        dst_type (type or string) – the desired type
    Returns
        self
    Return type
        Module
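
Because type() returns self, the cast can be chained directly onto module construction, just like .float() or .cuda(). A minimal sketch (this throw-away Sequential model is only an illustration and is unrelated to the experiment model below):

import torch
import torch.nn as nn

# Module.type() returns the module itself, so the call can be chained
model = nn.Sequential(nn.Conv2d(1, 64, kernel_size=3, padding=1), nn.ReLU()).type(torch.double)
print(next(model.parameters()).dtype)   # torch.float64
print(next(model.parameters()).type())  # torch.DoubleTensor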

Summary:

# Summary: if the dst_type passed to type(dst_type) is a string,
# it must be a string naming a CPU tensor or a GPU tensor type, i.e. one of:
# torch.FloatTensor  or torch.cuda.FloatTensor
# torch.DoubleTensor or torch.cuda.DoubleTensor
# torch.HalfTensor   or torch.cuda.HalfTensor

# Summary: if the dst_type passed to type(dst_type) is a type object,
# it may be a dtype, a CPU tensor type or a GPU tensor type.
# As a dtype it can be one of:
# torch.float32 (torch.float), torch.float64 (torch.double), torch.float16 (torch.half)
# A dtype only changes the floating-point width (16, 32 or 64 bit); it does not
# move the tensors between CPU and GPU.
# A CPU tensor or GPU tensor type, by contrast, changes the floating-point width
# and the device (CPU or GPU) at the same time.
# The available tensor types are:
# torch.FloatTensor   torch.cuda.FloatTensor
# torch.DoubleTensor  torch.cuda.DoubleTensor
# torch.HalfTensor    torch.cuda.HalfTensor
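
As a quick sketch of this rule before the full experiment (the nn.Linear layer here is only an illustration, and the last branch assumes a CUDA-capable GPU is available):

import torch
import torch.nn as nn

layer = nn.Linear(3, 2)                  # parameters start out as torch.float32 on the CPU

layer.type(torch.double)                 # dtype: only the precision changes (float64, still on the CPU)
print(layer.weight.type())               # torch.DoubleTensor

layer.type('torch.HalfTensor')           # string naming a CPU tensor type
print(layer.weight.type())               # torch.HalfTensor

if torch.cuda.is_available():            # GPU tensor type: changes precision and device together
    layer.type(torch.cuda.FloatTensor)
    print(layer.weight.type())           # torch.cuda.FloatTensor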

Code experiment:

import torch 
import torch.nn as nn
torch.manual_seed(seed=20200910)
class Model(torch.nn.Module):
    def __init__(self):
        super(Model,self).__init__()
        self.conv1=torch.nn.Sequential(  # input:  torch.Size([64, 1, 28, 28])
                torch.nn.Conv2d(1,64,kernel_size=3,stride=1,padding=1),
                torch.nn.ReLU(),  # output: torch.Size([64, 64, 28, 28])
        )
        # register a buffer as well, so we can check that buffers (not just parameters) are cast
        register_buffer_in_temp = torch.randn(4,6)
        self.register_buffer('register_buffer_in', register_buffer_in_temp)

    def forward(self,x):
        pass  # forward() is never called in this experiment; only the type casts matter

print('cuda(GPU)是否可用:',torch.cuda.is_available())
print('torch的版本:',torch.__version__)
model = Model() #.cuda()



print('0初始化之后模型修改之前'.center(100,"-"))
print('调用named_buffers()'.center(100,"-"))
for name, buf in model.named_buffers():
    print(name,'-->',buf.type(),'-->',buf.dtype,'-->',buf.shape)

print('调用named_parameters()'.center(100,"-"))
for name, param in model.named_parameters():
    print(name,'-->',param.type(),'-->',param.dtype,'-->',param.shape)


print('调用state_dict()'.center(100,"-"))
for k, v in model.state_dict().items():
    print(k, '-->', v.type(),'-->', v.dtype,'-->', v.shape)


print('\n\n')
model.type(dst_type='torch.DoubleTensor')
print('1模型修改之后'.center(100,"-"))
print('调用named_buffers()'.center(100,"-"))
for name, buf in model.named_buffers():
    print(name,'-->',buf.type(),'-->',buf.dtype,'-->',buf.shape)

print('调用named_parameters()'.center(100,"-"))
for name, param in model.named_parameters():
    print(name,'-->',param.type(),'-->',param.dtype,'-->',param.shape)


print('调用state_dict()'.center(100,"-"))
for k, v in model.state_dict().items():
    print(k, '-->', v.type(),'-->', v.dtype,'-->', v.shape)


print('\n\n')
model.type(dst_type='torch.HalfTensor')
print('2模型再次修改之后'.center(100,"-"))
print('调用named_buffers()'.center(100,"-"))
for name, buf in model.named_buffers():
    print(name,'-->',buf.type(),'-->',buf.dtype,'-->',buf.shape)

print('调用named_parameters()'.center(100,"-"))
for name, param in model.named_parameters():
    print(name,'-->',param.type(),'-->',param.dtype,'-->',param.shape)

print('调用state_dict()'.center(100,"-"))
for k, v in model.state_dict().items():
    print(k, '-->', v.type(),'-->', v.dtype,'-->', v.shape)





print('\n\n')
model.type(dst_type=torch.DoubleTensor)
print('3模型再次修改之后'.center(100,"-"))
print('调用named_buffers()'.center(100,"-"))
for name, buf in model.named_buffers():
    print(name,'-->',buf.type(),'-->',buf.dtype,'-->',buf.shape)

print('调用named_parameters()'.center(100,"-"))
for name, param in model.named_parameters():
    print(name,'-->',param.type(),'-->',param.dtype,'-->',param.shape)

print('调用state_dict()'.center(100,"-"))
for k, v in model.state_dict().items():
    print(k, '-->', v.type(),'-->', v.dtype,'-->', v.shape)



print('\n\n')
model.type(dst_type=torch.FloatTensor)
print('4模型再次修改之后'.center(100,"-"))
print('调用named_buffers()'.center(100,"-"))
for name, buf in model.named_buffers():
    print(name,'-->',buf.type(),'-->',buf.dtype,'-->',buf.shape)

print('调用named_parameters()'.center(100,"-"))
for name, param in model.named_parameters():
    print(name,'-->',param.type(),'-->',param.dtype,'-->',param.shape)

print('调用state_dict()'.center(100,"-"))
for k, v in model.state_dict().items():
    print(k, '-->', v.type(),'-->', v.dtype,'-->', v.shape)





print('\n\n')
model.type(dst_type=torch.cuda.DoubleTensor)
print('5模型再次修改之后'.center(100,"-"))
print('调用named_buffers()'.center(100,"-"))
for name, buf in model.named_buffers():
    print(name,'-->',buf.type(),'-->',buf.dtype,'-->',buf.shape)

print('调用named_parameters()'.center(100,"-"))
for name, param in model.named_parameters():
    print(name,'-->',param.type(),'-->',param.dtype,'-->',param.shape)

print('调用state_dict()'.center(100,"-"))
for k, v in model.state_dict().items():
    print(k, '-->', v.type(),'-->', v.dtype,'-->', v.shape)





print('\n\n')
model.type(dst_type='torch.DoubleTensor')
print('6模型再次修改之后'.center(100,"-"))
print('调用named_buffers()'.center(100,"-"))
for name, buf in model.named_buffers():
    print(name,'-->',buf.type(),'-->',buf.dtype,'-->',buf.shape)

print('调用named_parameters()'.center(100,"-"))
for name, param in model.named_parameters():
    print(name,'-->',param.type(),'-->',param.dtype,'-->',param.shape)

print('调用state_dict()'.center(100,"-"))
for k, v in model.state_dict().items():
    print(k, '-->', v.type(),'-->', v.dtype,'-->', v.shape)




print('\n\n')
model.type(dst_type=torch.half)
print('7模型再次修改之后'.center(100,"-"))
print('调用named_buffers()'.center(100,"-"))
for name, buf in model.named_buffers():
    print(name,'-->',buf.type(),'-->',buf.dtype,'-->',buf.shape)

print('调用named_parameters()'.center(100,"-"))
for name, param in model.named_parameters():
    print(name,'-->',param.type(),'-->',param.dtype,'-->',param.shape)

print('调用state_dict()'.center(100,"-"))
for k, v in model.state_dict().items():
    print(k, '-->', v.type(),'-->', v.dtype,'-->', v.shape)



print('\n\n')
model.type(dst_type=torch.cuda.FloatTensor)
print('8模型再次修改之后'.center(100,"-"))
print('调用named_buffers()'.center(100,"-"))
for name, buf in model.named_buffers():
    print(name,'-->',buf.type(),'-->',buf.dtype,'-->',buf.shape)

print('调用named_parameters()'.center(100,"-"))
for name, param in model.named_parameters():
    print(name,'-->',param.type(),'-->',param.dtype,'-->',param.shape)

print('调用state_dict()'.center(100,"-"))
for k, v in model.state_dict().items():
    print(k, '-->', v.type(),'-->', v.dtype,'-->', v.shape)




print('\n\n')
model.type(dst_type=torch.half)
print('9模型再次修改之后'.center(100,"-"))
print('调用named_buffers()'.center(100,"-"))
for name, buf in model.named_buffers():
    print(name,'-->',buf.type(),'-->',buf.dtype,'-->',buf.shape)

print('调用named_parameters()'.center(100,"-"))
for name, param in model.named_parameters():
    print(name,'-->',param.type(),'-->',param.dtype,'-->',param.shape)

print('调用state_dict()'.center(100,"-"))
for k, v in model.state_dict().items():
    print(k, '-->', v.type(),'-->', v.dtype,'-->', v.shape)


Console output:

(base) PS C:\Users\chenxuqi\Desktop\News4cxq\test4cxq> conda activate ssd4pytorch1_2_0
(ssd4pytorch1_2_0) PS C:\Users\chenxuqi\Desktop\News4cxq\test4cxq>  & 'D:\Anaconda3\envs\ssd4pytorch1_2_0\python.exe' 'c:\Users\chenxuqi\.vscode\extensions\ms-python.python-2020.12.424452561\pythonFiles\lib\python\debugpy\launcher' '65517' '--' 'c:\Users\chenxuqi\Desktop\News4cxq\test4cxq\test2.py'
cuda(GPU)是否可用: True
torch的版本: 1.2.0+cu92
--------------------------------------------0初始化之后模型修改之前--------------------------------------------
-----------------------------------------调用named_buffers()------------------------------------------
register_buffer_in --> torch.FloatTensor --> torch.float32 --> torch.Size([4, 6])
----------------------------------------调用named_parameters()----------------------------------------
conv1.0.weight --> torch.FloatTensor --> torch.float32 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.FloatTensor --> torch.float32 --> torch.Size([64])
-------------------------------------------调用state_dict()-------------------------------------------
register_buffer_in --> torch.FloatTensor --> torch.float32 --> torch.Size([4, 6])
conv1.0.weight --> torch.FloatTensor --> torch.float32 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.FloatTensor --> torch.float32 --> torch.Size([64])



----------------------------------------------1模型修改之后-----------------------------------------------
-----------------------------------------调用named_buffers()------------------------------------------
register_buffer_in --> torch.DoubleTensor --> torch.float64 --> torch.Size([4, 6])
----------------------------------------调用named_parameters()----------------------------------------
conv1.0.weight --> torch.DoubleTensor --> torch.float64 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.DoubleTensor --> torch.float64 --> torch.Size([64])
-------------------------------------------调用state_dict()-------------------------------------------
register_buffer_in --> torch.DoubleTensor --> torch.float64 --> torch.Size([4, 6])
conv1.0.weight --> torch.DoubleTensor --> torch.float64 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.DoubleTensor --> torch.float64 --> torch.Size([64])



---------------------------------------------2模型再次修改之后----------------------------------------------
-----------------------------------------调用named_buffers()------------------------------------------
register_buffer_in --> torch.HalfTensor --> torch.float16 --> torch.Size([4, 6])
----------------------------------------调用named_parameters()----------------------------------------
conv1.0.weight --> torch.HalfTensor --> torch.float16 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.HalfTensor --> torch.float16 --> torch.Size([64])
-------------------------------------------调用state_dict()-------------------------------------------
register_buffer_in --> torch.HalfTensor --> torch.float16 --> torch.Size([4, 6])
conv1.0.weight --> torch.HalfTensor --> torch.float16 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.HalfTensor --> torch.float16 --> torch.Size([64])



---------------------------------------------3模型再次修改之后----------------------------------------------
-----------------------------------------调用named_buffers()------------------------------------------
register_buffer_in --> torch.DoubleTensor --> torch.float64 --> torch.Size([4, 6])
----------------------------------------调用named_parameters()----------------------------------------
conv1.0.weight --> torch.DoubleTensor --> torch.float64 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.DoubleTensor --> torch.float64 --> torch.Size([64])
-------------------------------------------调用state_dict()-------------------------------------------
register_buffer_in --> torch.DoubleTensor --> torch.float64 --> torch.Size([4, 6])
conv1.0.weight --> torch.DoubleTensor --> torch.float64 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.DoubleTensor --> torch.float64 --> torch.Size([64])



---------------------------------------------4模型再次修改之后----------------------------------------------
-----------------------------------------调用named_buffers()------------------------------------------
register_buffer_in --> torch.FloatTensor --> torch.float32 --> torch.Size([4, 6])
----------------------------------------调用named_parameters()----------------------------------------
conv1.0.weight --> torch.FloatTensor --> torch.float32 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.FloatTensor --> torch.float32 --> torch.Size([64])
-------------------------------------------调用state_dict()-------------------------------------------
register_buffer_in --> torch.FloatTensor --> torch.float32 --> torch.Size([4, 6])
conv1.0.weight --> torch.FloatTensor --> torch.float32 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.FloatTensor --> torch.float32 --> torch.Size([64])



---------------------------------------------5模型再次修改之后----------------------------------------------
-----------------------------------------调用named_buffers()------------------------------------------
register_buffer_in --> torch.cuda.DoubleTensor --> torch.float64 --> torch.Size([4, 6])
----------------------------------------调用named_parameters()----------------------------------------
conv1.0.weight --> torch.cuda.DoubleTensor --> torch.float64 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.cuda.DoubleTensor --> torch.float64 --> torch.Size([64])
-------------------------------------------调用state_dict()-------------------------------------------
register_buffer_in --> torch.cuda.DoubleTensor --> torch.float64 --> torch.Size([4, 6])
conv1.0.weight --> torch.cuda.DoubleTensor --> torch.float64 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.cuda.DoubleTensor --> torch.float64 --> torch.Size([64])



---------------------------------------------6模型再次修改之后----------------------------------------------
-----------------------------------------调用named_buffers()------------------------------------------
register_buffer_in --> torch.DoubleTensor --> torch.float64 --> torch.Size([4, 6])
----------------------------------------调用named_parameters()----------------------------------------
conv1.0.weight --> torch.DoubleTensor --> torch.float64 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.DoubleTensor --> torch.float64 --> torch.Size([64])
-------------------------------------------调用state_dict()-------------------------------------------
register_buffer_in --> torch.DoubleTensor --> torch.float64 --> torch.Size([4, 6])
conv1.0.weight --> torch.DoubleTensor --> torch.float64 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.DoubleTensor --> torch.float64 --> torch.Size([64])



---------------------------------------------7模型再次修改之后----------------------------------------------
-----------------------------------------调用named_buffers()------------------------------------------
register_buffer_in --> torch.HalfTensor --> torch.float16 --> torch.Size([4, 6])
----------------------------------------调用named_parameters()----------------------------------------
conv1.0.weight --> torch.HalfTensor --> torch.float16 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.HalfTensor --> torch.float16 --> torch.Size([64])
-------------------------------------------调用state_dict()-------------------------------------------
register_buffer_in --> torch.HalfTensor --> torch.float16 --> torch.Size([4, 6])
conv1.0.weight --> torch.HalfTensor --> torch.float16 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.HalfTensor --> torch.float16 --> torch.Size([64])



---------------------------------------------8模型再次修改之后----------------------------------------------
-----------------------------------------调用named_buffers()------------------------------------------
register_buffer_in --> torch.cuda.FloatTensor --> torch.float32 --> torch.Size([4, 6])
----------------------------------------调用named_parameters()----------------------------------------
conv1.0.weight --> torch.cuda.FloatTensor --> torch.float32 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.cuda.FloatTensor --> torch.float32 --> torch.Size([64])
-------------------------------------------调用state_dict()-------------------------------------------
register_buffer_in --> torch.cuda.FloatTensor --> torch.float32 --> torch.Size([4, 6])
conv1.0.weight --> torch.cuda.FloatTensor --> torch.float32 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.cuda.FloatTensor --> torch.float32 --> torch.Size([64])



---------------------------------------------9模型再次修改之后----------------------------------------------
-----------------------------------------调用named_buffers()------------------------------------------
register_buffer_in --> torch.cuda.HalfTensor --> torch.float16 --> torch.Size([4, 6])
----------------------------------------调用named_parameters()----------------------------------------
conv1.0.weight --> torch.cuda.HalfTensor --> torch.float16 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.cuda.HalfTensor --> torch.float16 --> torch.Size([64])
-------------------------------------------调用state_dict()-------------------------------------------
register_buffer_in --> torch.cuda.HalfTensor --> torch.float16 --> torch.Size([4, 6])
conv1.0.weight --> torch.cuda.HalfTensor --> torch.float16 --> torch.Size([64, 1, 3, 3])
conv1.0.bias --> torch.cuda.HalfTensor --> torch.float16 --> torch.Size([64])
(ssd4pytorch1_2_0) PS C:\Users\chenxuqi\Desktop\News4cxq\test4cxq> 

Reposted from blog.csdn.net/m0_46653437/article/details/112775667