
深入浅出PyTorch: PyTorch Visualization


1. Visualizing the Network Structure

In a complex network, pinning down the input shape of every layer helps us finish debugging in a short amount of time.

1.1 Printing basic model information with print

We use the structure of ResNet18 for the demonstration.


  
    import torchvision.models as models
    model = models.resnet18()
    print(model)
    # Output (truncated; ResNet18 is built from BasicBlocks)
    ResNet(
      (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
      (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
      (layer1): Sequential(
        (0): BasicBlock(
          (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
          (relu): ReLU(inplace=True)
          (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
        ... ...
      )
      ... ...
      (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
      (fc): Linear(in_features=512, out_features=1000, bias=True)
    )
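
print(model) only reveals the module hierarchy, not the shapes flowing through it. As a quick complement (plain PyTorch, nothing beyond the code above assumed), you can also list the learnable parameters and their shapes:

    # List each learnable parameter's name and shape as a quick sanity check
    for name, param in model.named_parameters():
        print(name, tuple(param.shape))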

1.2 Visualizing the network structure with torchinfo

1.2.1 Installing torchinfo


  
    # Installation method 1
    pip install torchinfo
    # Installation method 2
    conda install -c conda-forge torchinfo

1.2.2 Using torchinfo

(1) Method: torchinfo.summary()

(2) Parameters (shown here as they appear in the function definition; see torchinfo's parameter documentation for details):


  
    def summary(
        model: nn.Module,
        input_size: Optional[INPUT_SIZE_TYPE] = None,
        input_data: Optional[INPUT_DATA_TYPE] = None,
        batch_dim: Optional[int] = None,
        cache_forward_pass: Optional[bool] = None,
        col_names: Optional[Iterable[str]] = None,
        col_width: int = 25,
        depth: int = 3,
        device: Optional[torch.device] = None,
        dtypes: Optional[List[torch.dtype]] = None,
        mode: str | None = None,
        row_settings: Optional[Iterable[str]] = None,
        verbose: int = 1,
        **kwargs: Any,
    ) -> ModelStatistics

(3) Example, again using ResNet18:


  
    import torchvision.models as models
    from torchinfo import summary
    resnet18 = models.resnet18()  # instantiate the model
    summary(resnet18, (1, 3, 224, 224))  # 1: batch_size, 3: number of channels, 224: image height/width
    # Output
    =========================================================================================
    Layer (type:depth-idx)                   Output Shape              Param #
    =========================================================================================
    ResNet                                   --                        --
    ├─Conv2d: 1-1                            [1, 64, 112, 112]         9,408
    ├─BatchNorm2d: 1-2                       [1, 64, 112, 112]         128
    ├─ReLU: 1-3                              [1, 64, 112, 112]         --
    ├─MaxPool2d: 1-4                         [1, 64, 56, 56]           --
    ├─Sequential: 1-5                        [1, 64, 56, 56]           --
    │    └─BasicBlock: 2-1                   [1, 64, 56, 56]           --
    │    │    └─Conv2d: 3-1                  [1, 64, 56, 56]           36,864
    │    │    └─BatchNorm2d: 3-2             [1, 64, 56, 56]           128
    │    │    └─ReLU: 3-3                    [1, 64, 56, 56]           --
    │    │    └─Conv2d: 3-4                  [1, 64, 56, 56]           36,864
    │    │    └─BatchNorm2d: 3-5             [1, 64, 56, 56]           128
    │    │    └─ReLU: 3-6                    [1, 64, 56, 56]           --
    │    └─BasicBlock: 2-2                   [1, 64, 56, 56]           --
    │    │    └─Conv2d: 3-7                  [1, 64, 56, 56]           36,864
    │    │    └─BatchNorm2d: 3-8             [1, 64, 56, 56]           128
    │    │    └─ReLU: 3-9                    [1, 64, 56, 56]           --
    │    │    └─Conv2d: 3-10                 [1, 64, 56, 56]           36,864
    │    │    └─BatchNorm2d: 3-11            [1, 64, 56, 56]           128
    │    │    └─ReLU: 3-12                   [1, 64, 56, 56]           --
    ├─Sequential: 1-6                        [1, 128, 28, 28]          --
    │    └─BasicBlock: 2-3                   [1, 128, 28, 28]          --
    │    │    └─Conv2d: 3-13                 [1, 128, 28, 28]          73,728
    │    │    └─BatchNorm2d: 3-14            [1, 128, 28, 28]          256
    │    │    └─ReLU: 3-15                   [1, 128, 28, 28]          --
    │    │    └─Conv2d: 3-16                 [1, 128, 28, 28]          147,456
    │    │    └─BatchNorm2d: 3-17            [1, 128, 28, 28]          256
    │    │    └─Sequential: 3-18             [1, 128, 28, 28]          8,448
    │    │    └─ReLU: 3-19                   [1, 128, 28, 28]          --
    │    └─BasicBlock: 2-4                   [1, 128, 28, 28]          --
    │    │    └─Conv2d: 3-20                 [1, 128, 28, 28]          147,456
    │    │    └─BatchNorm2d: 3-21            [1, 128, 28, 28]          256
    │    │    └─ReLU: 3-22                   [1, 128, 28, 28]          --
    │    │    └─Conv2d: 3-23                 [1, 128, 28, 28]          147,456
    │    │    └─BatchNorm2d: 3-24            [1, 128, 28, 28]          256
    │    │    └─ReLU: 3-25                   [1, 128, 28, 28]          --
    ├─Sequential: 1-7                        [1, 256, 14, 14]          --
    │    └─BasicBlock: 2-5                   [1, 256, 14, 14]          --
    │    │    └─Conv2d: 3-26                 [1, 256, 14, 14]          294,912
    │    │    └─BatchNorm2d: 3-27            [1, 256, 14, 14]          512
    │    │    └─ReLU: 3-28                   [1, 256, 14, 14]          --
    │    │    └─Conv2d: 3-29                 [1, 256, 14, 14]          589,824
    │    │    └─BatchNorm2d: 3-30            [1, 256, 14, 14]          512
    │    │    └─Sequential: 3-31             [1, 256, 14, 14]          33,280
    │    │    └─ReLU: 3-32                   [1, 256, 14, 14]          --
    │    └─BasicBlock: 2-6                   [1, 256, 14, 14]          --
    │    │    └─Conv2d: 3-33                 [1, 256, 14, 14]          589,824
    │    │    └─BatchNorm2d: 3-34            [1, 256, 14, 14]          512
    │    │    └─ReLU: 3-35                   [1, 256, 14, 14]          --
    │    │    └─Conv2d: 3-36                 [1, 256, 14, 14]          589,824
    │    │    └─BatchNorm2d: 3-37            [1, 256, 14, 14]          512
    │    │    └─ReLU: 3-38                   [1, 256, 14, 14]          --
    ├─Sequential: 1-8                        [1, 512, 7, 7]            --
    │    └─BasicBlock: 2-7                   [1, 512, 7, 7]            --
    │    │    └─Conv2d: 3-39                 [1, 512, 7, 7]            1,179,648
    │    │    └─BatchNorm2d: 3-40            [1, 512, 7, 7]            1,024
    │    │    └─ReLU: 3-41                   [1, 512, 7, 7]            --
    │    │    └─Conv2d: 3-42                 [1, 512, 7, 7]            2,359,296
    │    │    └─BatchNorm2d: 3-43            [1, 512, 7, 7]            1,024
    │    │    └─Sequential: 3-44             [1, 512, 7, 7]            132,096
    │    │    └─ReLU: 3-45                   [1, 512, 7, 7]            --
    │    └─BasicBlock: 2-8                   [1, 512, 7, 7]            --
    │    │    └─Conv2d: 3-46                 [1, 512, 7, 7]            2,359,296
    │    │    └─BatchNorm2d: 3-47            [1, 512, 7, 7]            1,024
    │    │    └─ReLU: 3-48                   [1, 512, 7, 7]            --
    │    │    └─Conv2d: 3-49                 [1, 512, 7, 7]            2,359,296
    │    │    └─BatchNorm2d: 3-50            [1, 512, 7, 7]            1,024
    │    │    └─ReLU: 3-51                   [1, 512, 7, 7]            --
    ├─AdaptiveAvgPool2d: 1-9                 [1, 512, 1, 1]            --
    ├─Linear: 1-10                           [1, 1000]                 513,000
    =========================================================================================
    Total params: 11,689,512
    Trainable params: 11,689,512
    Non-trainable params: 0
    Total mult-adds (G): 1.81
    =========================================================================================
    Input size (MB): 0.60
    Forward/backward pass size (MB): 39.75
    Params size (MB): 46.76
    Estimated Total Size (MB): 87.11
    =========================================================================================
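
The columns shown above are torchinfo's defaults. If you also want, say, kernel sizes or input shapes per layer, summary accepts a col_names argument; a small sketch (the column names follow torchinfo's documentation):

    # Optional: request extra columns and limit the nesting depth of the table
    summary(
        resnet18,
        (1, 3, 224, 224),
        col_names=["input_size", "output_size", "num_params", "kernel_size"],
        depth=2,
    )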

2. CNN Visualization

CNN is short for Convolutional Neural Network.

2.1 Visualizing CNN convolution kernels

We take the VGG11 model that ships with torchvision as the example, and first list its convolutional modules:


  
    import torch
    from torchvision.models import vgg11

    model = vgg11(pretrained=True)
    print(dict(model.features.named_children()))
    # Output
    {'0': Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
     '1': ReLU(inplace=True),
     '2': MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),
     '3': Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
     '4': ReLU(inplace=True),
     '5': MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),
     '6': Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
     '7': ReLU(inplace=True),
     '8': Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
     '9': ReLU(inplace=True),
     '10': MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),
     '11': Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
     '12': ReLU(inplace=True),
     '13': Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
     '14': ReLU(inplace=True),
     '15': MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),
     '16': Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
     '17': ReLU(inplace=True),
     '18': Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
     '19': ReLU(inplace=True),
     '20': MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)}
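
Listing the modules only tells us where the convolution layers sit. To actually look at the kernels, we can read a convolution layer's weight tensor and plot each 3x3 kernel as a small image. Below is a minimal sketch; the choice of layer '3' (64 -> 128 channels) and the 10x10 grid size are illustrative, not fixed by the tutorial:

    import matplotlib.pyplot as plt
    from torchvision.models import vgg11

    model = vgg11(pretrained=True)
    # Layer '3' maps 64 channels to 128, so its weight has shape [128, 64, 3, 3]
    kernels = model.features[3].weight.detach().cpu()
    # Flatten the (out_channel, in_channel) pairs and plot the first 100 kernels
    flat = kernels.reshape(-1, *kernels.shape[2:])
    plt.figure(figsize=(20, 17))
    for idx in range(min(100, flat.shape[0])):
        plt.subplot(10, 10, idx + 1)
        plt.axis('off')
        plt.imshow(flat[idx])
    plt.show()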

2.2 Visualizing CNN feature maps

PyTorch provides a dedicated interface that lets the network expose its feature maps during the forward pass. The interface has a very fitting name: the hook.

We first implement a Hook class. Then, inside the plot_feature function, we register a Hook object on the layer of the network we want to visualize. When the model runs its forward pass, it calls the hook's __call__ method, and that is where we store the current layer's input and output.


  
    import matplotlib.pyplot as plt

    class Hook(object):
        def __init__(self):
            self.module_name = []
            self.features_in_hook = []
            self.features_out_hook = []

        def __call__(self, module, fea_in, fea_out):
            print("hook working", self)
            self.module_name.append(module.__class__)
            self.features_in_hook.append(fea_in)
            self.features_out_hook.append(fea_out)
            return None


    def plot_feature(model, idx, inputs):
        hh = Hook()
        model.features[idx].register_forward_hook(hh)
        model.eval()
        _ = model(inputs)
        print(hh.module_name)
        print(hh.features_in_hook[0][0].shape)
        print(hh.features_out_hook[0].shape)
        out1 = hh.features_out_hook[0]
        total_ft = out1.shape[1]            # number of channels in the feature map
        first_item = out1[0].cpu().clone()  # feature maps of the first image in the batch
        plt.figure(figsize=(20, 17))
        for ftidx in range(total_ft):
            if ftidx > 99:                  # show at most 100 channels
                break
            ft = first_item[ftidx]
            plt.subplot(10, 10, ftidx + 1)
            plt.axis('off')
            plt.imshow(ft[:, :].detach())
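
A quick way to exercise plot_feature as defined above (the random input and layer index 3 are illustrative; a real image tensor works the same way):

    import torch
    from torchvision.models import vgg11

    model = vgg11(pretrained=True)
    inputs = torch.randn(1, 3, 224, 224)  # dummy batch containing a single image
    plot_feature(model, idx=3, inputs=inputs)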

2.3 Visualizing CNN class activation maps (CAM)

2.3.1 Implementation

The CAM family of methods can be implemented with the open-source toolkit pytorch-grad-cam.

2.3.2 Installation

pip install grad-cam

2.3.3 Example


  
    import torch
    from torchvision.models import vgg11
    import matplotlib.pyplot as plt
    from PIL import Image
    import numpy as np

    model = vgg11(pretrained=True)
    img_path = './dog.png'
    # Resize so the image matches the input size the network was trained with
    img = Image.open(img_path).convert('RGB').resize((224, 224))
    # The original image must be converted to np.float32 and scaled to [0, 1]
    rgb_img = np.float32(img) / 255
    plt.imshow(img)
    ##########################################################################
    from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, FullGrad
    from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
    from pytorch_grad_cam.utils.image import show_cam_on_image

    # Turn the [0, 1] HWC image into a 1x3x224x224 float tensor for the model
    # (for brevity, no ImageNet normalization is applied here)
    img_tensor = torch.from_numpy(rgb_img).permute(2, 0, 1).unsqueeze(0)
    target_layers = [model.features[-1]]
    # Pick a suitable CAM variant; note that ScoreCAM and AblationCAM also need a batch_size
    cam = GradCAM(model=model, target_layers=target_layers)
    # preds is the target class index; ImageNet has 1000 classes, so it can be set to e.g. 200
    preds = 200
    targets = [ClassifierOutputTarget(preds)]
    grayscale_cam = cam(input_tensor=img_tensor, targets=targets)
    grayscale_cam = grayscale_cam[0, :]
    cam_img = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)
    print(type(cam_img))
    Image.fromarray(cam_img)

2.4 Quick CNN visualization with FlashTorch

2.4.1 Installation

pip install flashtorch

2.4.2 Visualizing gradients


  
    # Download example images
    # !mkdir -p images
    # !wget -nv \
    #    https://github.com/MisaOgura/flashtorch/raw/master/examples/images/great_grey_owl.jpg \
    #    https://github.com/MisaOgura/flashtorch/raw/master/examples/images/peacock.jpg \
    #    https://github.com/MisaOgura/flashtorch/raw/master/examples/images/toucan.jpg \
    #    -P /content/images
    import matplotlib.pyplot as plt
    import torchvision.models as models
    from flashtorch.utils import apply_transforms, load_image
    from flashtorch.saliency import Backprop

    model = models.alexnet(pretrained=True)
    backprop = Backprop(model)
    image = load_image('/content/images/great_grey_owl.jpg')
    owl = apply_transforms(image)
    target_class = 24  # ImageNet class index of the great grey owl
    backprop.visualize(owl, target_class, guided=True, use_gpu=True)

2.4.3 Visualizing convolution kernels


  
    import torchvision.models as models
    from flashtorch.activmax import GradientAscent

    model = models.vgg16(pretrained=True)
    g_ascent = GradientAscent(model.features)
    # Specify the layer and filter info
    conv5_1 = model.features[24]
    conv5_1_filters = [45, 271, 363, 489]
    g_ascent.visualize(conv5_1, conv5_1_filters, title="VGG16: conv5_1")

3. Visualizing the Training Process with TensorBoard

3.1 Installing TensorBoard

pip install tensorboardX

3.2 The basic logic of TensorBoard visualization

(1) TensorBoard is a recorder;

(2) it records the data we specify, including each layer's feature maps, weights, the training loss, and so on;

(3) the records are saved into a specified folder;

(4) as the program keeps running, TensorBoard keeps recording;

(5) the recorded data can then be visualized in a web page.

3.3 Configuring and launching TensorBoard


  
    # Method 1: via tensorboardX
    from tensorboardX import SummaryWriter
    writer = SummaryWriter('./runs')
    # Method 2: via PyTorch's built-in interface (requires the tensorboard package)
    from torch.utils.tensorboard import SummaryWriter
3.4 Visualizing model structure with TensorBoard


  
    import torch
    import torch.nn as nn

    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3)
            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
            self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5)
            self.adaptive_pool = nn.AdaptiveMaxPool2d((1, 1))
            self.flatten = nn.Flatten()
            self.linear1 = nn.Linear(64, 32)
            self.relu = nn.ReLU()
            self.linear2 = nn.Linear(32, 1)
            self.sigmoid = nn.Sigmoid()

        def forward(self, x):
            x = self.conv1(x)
            x = self.pool(x)
            x = self.conv2(x)
            x = self.pool(x)
            x = self.adaptive_pool(x)
            x = self.flatten(x)
            x = self.linear1(x)
            x = self.relu(x)
            x = self.linear2(x)
            y = self.sigmoid(x)
            return y

    model = Net()
    # Save the model graph (writer is the SummaryWriter created in 3.3)
    writer.add_graph(model, input_to_model=torch.rand(1, 3, 224, 224))
    writer.close()

3.5 Visualizing images with TensorBoard

(1) use add_image to display a single image;

(2) use add_images to display multiple images;

(3) sometimes it is useful to stitch several images into one with torchvision.utils.make_grid and then display the grid with writer.add_image.

We use torchvision's CIFAR10 dataset as the example:


  
    import torchvision
    from torchvision import datasets, transforms
    from torch.utils.data import DataLoader
    from tensorboardX import SummaryWriter

    transform_train = transforms.Compose([transforms.ToTensor()])
    transform_test = transforms.Compose([transforms.ToTensor()])
    train_data = datasets.CIFAR10(".", train=True, download=True, transform=transform_train)
    test_data = datasets.CIFAR10(".", train=False, download=True, transform=transform_test)
    train_loader = DataLoader(train_data, batch_size=64, shuffle=True)
    test_loader = DataLoader(test_data, batch_size=64)
    images, labels = next(iter(train_loader))

    # Display a single image only
    writer = SummaryWriter('./pytorch_tb')
    writer.add_image('images[0]', images[0])
    writer.close()

    # Stitch multiple images into one grid, separated by black borders
    writer = SummaryWriter('./pytorch_tb')
    img_grid = torchvision.utils.make_grid(images)
    writer.add_image('image_grid', img_grid)
    writer.close()

    # Write multiple images directly
    writer = SummaryWriter('./pytorch_tb')
    writer.add_images("images", images, global_step=0)
    writer.close()

3.6 Visualizing continuous variables with TensorBoard

This is implemented with add_scalar:


  
    writer = SummaryWriter('./pytorch_tb')
    for i in range(500):
        x = i
        y = x ** 2
        writer.add_scalar("x", x, i)  # log the value of x at step i
        writer.add_scalar("y", y, i)  # log the value of y at step i
    writer.close()
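
If you would rather see x and y in a single chart instead of two separate panels, SummaryWriter also offers add_scalars, which takes a main tag plus a dict of sub-tag values; a small sketch:

    writer = SummaryWriter('./pytorch_tb')
    for i in range(500):
        # one chart tagged "curves" holding both series
        writer.add_scalars("curves", {"x": i, "y": i ** 2}, i)
    writer.close()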

3.7 Visualizing parameter distributions with TensorBoard

This is implemented with add_histogram:


  
    import torch

    # Create normally distributed tensors to simulate a parameter matrix
    def norm(mean, std):
        t = std * torch.randn((100, 20)) + mean
        return t

    writer = SummaryWriter('./pytorch_tb/')
    for step, mean in enumerate(range(-10, 10, 1)):
        w = norm(mean, 1)
        writer.add_histogram("w", w, step)
        writer.flush()
    writer.close()

Reference: "PyTorch Visualization" (from the 深入浅出PyTorch tutorial)


Reposted from: https://blog.csdn.net/qq_51167531/article/details/128043195