
PyTorch Coffee Bean Recognition


I. Preparation

1. Set Up the GPU


  
    import torch
    from torch import nn
    import torchvision
    from torchvision import transforms, datasets, models
    import matplotlib.pyplot as plt
    import os, PIL, pathlib

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    device
device(type='cuda')

2. Import the Data


  
    data_dir = './49-data/'
    data_dir = pathlib.Path(data_dir)
    data_paths = list(data_dir.glob('*'))
    classNames = [str(path).split('\\')[1] for path in data_paths]  # extract the class folder names (assumes Windows-style path separators)
    classNames
['Dark', 'Green', 'Light', 'Medium']

  
    train_transforms = transforms.Compose([
        transforms.Resize([224, 224]),    # resize the input image to 224x224
        transforms.ToTensor(),            # convert a PIL Image or numpy.ndarray to a tensor
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])    # standard ImageNet normalization statistics
    ])
    test_transforms = transforms.Compose([
        transforms.Resize([224, 224]),    # resize the input image to 224x224
        transforms.ToTensor(),            # convert a PIL Image or numpy.ndarray to a tensor
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])    # standard ImageNet normalization statistics
    ])
    total_data = datasets.ImageFolder(data_dir, transform=train_transforms)
    total_data
Dataset ImageFolder
    Number of datapoints: 1200
    Root location: 49-data
    StandardTransform
Transform: Compose(
               Resize(size=[224, 224], interpolation=PIL.Image.BILINEAR)
               ToTensor()
               Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
           )
    total_data.class_to_idx
{'Dark': 0, 'Green': 1, 'Light': 2, 'Medium': 3}
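As an aside, the mean/std values passed to Normalize above are the standard ImageNet statistics, which is the usual choice when fine-tuning an ImageNet-pretrained backbone. If you preferred statistics computed from this coffee-bean dataset itself, a minimal sketch (not part of the original post, reusing the imports and data_dir defined above) could look like this:

    # Estimate per-channel mean/std over the whole dataset (unnormalized tensors in [0, 1])
    stat_transform = transforms.Compose([
        transforms.Resize([224, 224]),
        transforms.ToTensor()
    ])
    stat_data = datasets.ImageFolder(data_dir, transform=stat_transform)
    stat_dl = torch.utils.data.DataLoader(stat_data, batch_size=64, shuffle=False)

    channel_sum = torch.zeros(3)
    channel_sq_sum = torch.zeros(3)
    n_pixels = 0
    for batch_imgs, _ in stat_dl:
        channel_sum += batch_imgs.sum(dim=[0, 2, 3])
        channel_sq_sum += (batch_imgs ** 2).sum(dim=[0, 2, 3])
        n_pixels += batch_imgs.shape[0] * batch_imgs.shape[2] * batch_imgs.shape[3]

    mean = channel_sum / n_pixels
    std = (channel_sq_sum / n_pixels - mean ** 2).sqrt()
    print(mean, std)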

 

3. Split the Dataset


  
    train_size = int(0.8 * len(total_data))    # 80% of the data for training
    test_size = len(total_data) - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])
    train_size, test_size
(960, 240)
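One caveat (not addressed in the original post): random_split draws a different split on every run, so the reported accuracies are not exactly reproducible. Passing an explicitly seeded generator fixes the split; the seed value below is arbitrary:

    # Reproducible train/test split
    split_generator = torch.Generator().manual_seed(42)
    train_dataset, test_dataset = torch.utils.data.random_split(
        total_data, [train_size, test_size], generator=split_generator)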

  
    batch_size = 32
    train_dl = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=1)
    test_dl = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          num_workers=1)

  
    imgs, labels = next(iter(train_dl))
    imgs.shape
torch.Size([32, 3, 224, 224])

  
    import numpy as np

    # Create a 20in x 5in figure (units are inches)
    plt.figure(figsize=(20, 5))
    for i, img in enumerate(imgs[:20]):
        # undo the normalization so the image displays correctly
        npimg = img.numpy().transpose((1, 2, 0))
        npimg = npimg * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406))
        npimg = npimg.clip(0, 1)
        # split the figure into 2 rows x 10 columns and draw the (i+1)-th subplot
        plt.subplot(2, 10, i + 1)
        plt.imshow(npimg)
        plt.axis('off')

 


  
    for X, y in test_dl:
        print('Shape of X [N, C, H, W]:', X.shape)
        print('Shape of y:', y.shape)
        break
Shape of X [N, C, H, W]: torch.Size([32, 3, 224, 224])
Shape of y: torch.Size([32])

II. Building the Network (VGG-16)

1. Build the Model


  
    import torch.nn.functional as F

    # A hand-written VGG-16 (left commented out; the pretrained torchvision model is used instead below):
    # class vgg16(nn.Module):
    #     def __init__(self):
    #         super(vgg16, self).__init__()
    #         self.block1 = nn.Sequential(
    #             nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
    #             nn.ReLU(),
    #             nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
    #             nn.ReLU(),
    #             nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
    #         )
    #         self.block2 = nn.Sequential(
    #             nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
    #             nn.ReLU(),
    #             nn.Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
    #             nn.ReLU(),
    #             nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
    #         )
    #         self.block3 = nn.Sequential(
    #             nn.Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
    #             nn.ReLU(),
    #             nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
    #             nn.ReLU(),
    #             nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
    #             nn.ReLU(),
    #             nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
    #         )
    #         self.block4 = nn.Sequential(
    #             nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
    #             nn.ReLU(),
    #             nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
    #             nn.ReLU(),
    #             nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
    #             nn.ReLU(),
    #             nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
    #         )
    #         self.block5 = nn.Sequential(
    #             nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
    #             nn.ReLU(),
    #             nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
    #             nn.ReLU(),
    #             nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
    #             nn.ReLU(),
    #             nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
    #         )
    #         self.classifier = nn.Sequential(
    #             nn.Linear(in_features=512*7*7, out_features=4096),
    #             nn.ReLU(),
    #             nn.Linear(in_features=4096, out_features=4096),
    #             nn.ReLU(),
    #             nn.Linear(in_features=4096, out_features=4)
    #         )
    #
    #     def forward(self, x):
    #         x = self.block1(x)
    #         x = self.block2(x)
    #         x = self.block3(x)
    #         x = self.block4(x)
    #         x = self.block5(x)
    #         x = torch.flatten(x, start_dim=1)
    #         x = self.classifier(x)
    #         return x
    #
    # model = vgg16().to(device)
    # model

  
    from torchvision.models import vgg16

    model = vgg16(pretrained=True).to(device)
    for param in model.parameters():      # freeze the pretrained weights; only the new output layer will be trained
        param.requires_grad = False

    model.classifier._modules['6'] = nn.Linear(4096, len(classNames))   # replace the last fully connected layer
    model.to(device)
    model
VGG(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace=True)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace=True)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace=True)
    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace=True)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace=True)
    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace=True)
    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): ReLU(inplace=True)
    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace=True)
    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace=True)
    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): ReLU(inplace=True)
    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (27): ReLU(inplace=True)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace=True)
    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace=True)
    (2): Dropout(p=0.5, inplace=False)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace=True)
    (5): Dropout(p=0.5, inplace=False)
    (6): Linear(in_features=4096, out_features=4, bias=True)
  )
)
 

2. View the Model Details


  
    import torchsummary as summary
    summary.summary(model, (3, 224, 224))
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1         [-1, 64, 224, 224]           1,792
              ReLU-2         [-1, 64, 224, 224]               0
            Conv2d-3         [-1, 64, 224, 224]          36,928
              ReLU-4         [-1, 64, 224, 224]               0
         MaxPool2d-5         [-1, 64, 112, 112]               0
            Conv2d-6        [-1, 128, 112, 112]          73,856
              ReLU-7        [-1, 128, 112, 112]               0
            Conv2d-8        [-1, 128, 112, 112]         147,584
              ReLU-9        [-1, 128, 112, 112]               0
        MaxPool2d-10          [-1, 128, 56, 56]               0
           Conv2d-11          [-1, 256, 56, 56]         295,168
             ReLU-12          [-1, 256, 56, 56]               0
           Conv2d-13          [-1, 256, 56, 56]         590,080
             ReLU-14          [-1, 256, 56, 56]               0
           Conv2d-15          [-1, 256, 56, 56]         590,080
             ReLU-16          [-1, 256, 56, 56]               0
        MaxPool2d-17          [-1, 256, 28, 28]               0
           Conv2d-18          [-1, 512, 28, 28]       1,180,160
             ReLU-19          [-1, 512, 28, 28]               0
           Conv2d-20          [-1, 512, 28, 28]       2,359,808
             ReLU-21          [-1, 512, 28, 28]               0
           Conv2d-22          [-1, 512, 28, 28]       2,359,808
             ReLU-23          [-1, 512, 28, 28]               0
        MaxPool2d-24          [-1, 512, 14, 14]               0
           Conv2d-25          [-1, 512, 14, 14]       2,359,808
             ReLU-26          [-1, 512, 14, 14]               0
           Conv2d-27          [-1, 512, 14, 14]       2,359,808
             ReLU-28          [-1, 512, 14, 14]               0
           Conv2d-29          [-1, 512, 14, 14]       2,359,808
             ReLU-30          [-1, 512, 14, 14]               0
        MaxPool2d-31            [-1, 512, 7, 7]               0
AdaptiveAvgPool2d-32            [-1, 512, 7, 7]               0
           Linear-33                 [-1, 4096]     102,764,544
             ReLU-34                 [-1, 4096]               0
          Dropout-35                 [-1, 4096]               0
           Linear-36                 [-1, 4096]      16,781,312
             ReLU-37                 [-1, 4096]               0
          Dropout-38                 [-1, 4096]               0
           Linear-39                    [-1, 4]          16,388
================================================================
Total params: 134,276,932
Trainable params: 16,388
Non-trainable params: 134,260,544
----------------------------------------------------------------
Input size (MB): 0.57
Forward/backward pass size (MB): 218.77
Params size (MB): 512.23
Estimated Total Size (MB): 731.57
----------------------------------------------------------------
 

III. Training the Model


  
    # Set up the optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    # Decay the learning rate by a factor of 0.92 every 5 epochs
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.92)
    loss_fn = nn.CrossEntropyLoss()
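Since every pretrained parameter was frozen earlier, only the weight and bias of the replaced Linear layer still require gradients. A quick check, plus an optional variant that hands the optimizer just those parameters (a sketch, not part of the original post; passing all parameters as above also works, because the optimizer skips parameters that never receive a gradient):

    # Collect the parameters that will actually be updated
    trainable_params = [p for p in model.parameters() if p.requires_grad]
    print(len(trainable_params))   # expected: 2 (weight and bias of the new classifier[6])

    # Equivalent optimizer built only from the trainable parameters,
    # which keeps Adam's internal state smaller:
    # optimizer = torch.optim.Adam(trainable_params, lr=1e-4)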

1. Write the Training Function


  
    # Training loop
    def train(dataloader, model, loss_fn, optimizer):
        size = len(dataloader.dataset)   # size of the training set (960 images here)
        num_batches = len(dataloader)    # number of batches, 30 (960 / 32)

        train_loss, train_acc = 0, 0     # initialize training loss and accuracy

        for X, y in dataloader:          # fetch images and their labels
            X, y = X.to(device), y.to(device)

            # Compute the prediction error
            pred = model(X)              # network output
            loss = loss_fn(pred, y)      # loss between the network output and the true labels

            # Backpropagation
            optimizer.zero_grad()        # zero the gradients
            loss.backward()              # backpropagate
            optimizer.step()             # update the parameters

            # Accumulate accuracy and loss
            train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
            train_loss += loss.item()

        train_acc /= size
        train_loss /= num_batches

        return train_acc, train_loss

2. Write the Test Function


  
    def test(dataloader, model, loss_fn):
        size = len(dataloader.dataset)   # size of the test set (240 images here)
        num_batches = len(dataloader)    # number of batches, 8 (240 / 32 = 7.5, rounded up)
        test_loss, test_acc = 0, 0

        # Disable gradient tracking during evaluation to save memory and compute
        with torch.no_grad():
            for imgs, target in dataloader:
                imgs, target = imgs.to(device), target.to(device)

                # Compute the loss
                target_pred = model(imgs)
                loss = loss_fn(target_pred, target)

                test_loss += loss.item()
                test_acc += (target_pred.argmax(1) == target).type(torch.float).sum().item()

        test_acc /= size
        test_loss /= num_batches

        return test_acc, test_loss

3. Run the Training


  
    epochs = 20

    train_loss = []
    train_acc = []
    test_loss = []
    test_acc = []
    best_acc = 0

    for epoch in range(epochs):
        model.train()
        epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, optimizer)
        scheduler.step()                 # learning-rate decay

        model.eval()
        epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

        # Keep the best checkpoint, judged by test accuracy
        if epoch_test_acc > best_acc:
            best_acc = epoch_test_acc
            state = {
                'state_dict': model.state_dict(),   # keys are the layer names, values are the trained weights
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            }

        train_acc.append(epoch_train_acc)
        train_loss.append(epoch_train_loss)
        test_acc.append(epoch_test_acc)
        test_loss.append(epoch_test_loss)

        template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%,Test_loss:{:.3f}')
        print(template.format(epoch + 1, epoch_train_acc * 100, epoch_train_loss, epoch_test_acc * 100, epoch_test_loss))

    print('Done')
    print('best_acc:', best_acc)
Epoch:18, Train_acc:93.5%, Train_loss:0.270, Test_acc:95.4%,Test_loss:0.223
Epoch:19, Train_acc:94.5%, Train_loss:0.241, Test_acc:95.8%,Test_loss:0.223
Epoch:20, Train_acc:94.4%, Train_loss:0.243, Test_acc:96.2%,Test_loss:0.207
Done
best_acc: 0.94375
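The best checkpoint is only kept in memory as `state` above and is never written to disk. A minimal sketch (not in the original post; the file name is arbitrary) for saving and later restoring it:

    # Persist the best checkpoint collected during training
    torch.save(state, 'best_vgg16_coffee.pth')

    # Later: restore the weights and optimizer state
    checkpoint = torch.load('best_vgg16_coffee.pth', map_location=device)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    print('restored best_acc:', checkpoint['best_acc'])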

IV. Visualizing the Results

1. Loss and Accuracy Plots


  
    import matplotlib.pyplot as plt
    import warnings

    warnings.filterwarnings("ignore")                 # suppress warnings
    plt.rcParams['font.sans-serif'] = ['SimHei']      # render CJK labels correctly
    plt.rcParams['axes.unicode_minus'] = False        # render the minus sign correctly
    plt.rcParams['figure.dpi'] = 100                  # figure resolution

    epochs_range = range(epochs)

    plt.figure(figsize=(12, 3))

    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, train_acc, label='Training Accuracy')
    plt.plot(epochs_range, test_acc, label='Test Accuracy')
    plt.legend(loc='lower right')
    plt.title('Training and Validation Accuracy')

    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, train_loss, label='Training Loss')
    plt.plot(epochs_range, test_loss, label='Test Loss')
    plt.legend(loc='upper right')
    plt.title('Training and Validation Loss')

    plt.show()

 

2. Predict a Specified Image


  
    from PIL import Image

    classes = list(total_data.class_to_idx)

    def predict_one_img(image_path, model, transform, classes):
        test_img = Image.open(image_path).convert('RGB')
        plt.imshow(test_img)

        test_img = transform(test_img)
        img = test_img.to(device).unsqueeze(0)

        model.eval()
        output = model(img)

        _, pred = torch.max(output, 1)
        pred_class = classes[pred]
        print(f'Predicted class: {pred_class}')

    predict_one_img('./49-data/Dark/dark (1).png', model, train_transforms, classNames)
Predicted class: Dark
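Beyond single images, it can be useful to see how the model does on each roast level separately. A short sketch (not in the original post) that reuses test_dl and the classes list above to print per-class accuracy:

    # Per-class accuracy on the test set
    correct = [0] * len(classes)
    total = [0] * len(classes)

    model.eval()
    with torch.no_grad():
        for imgs, target in test_dl:
            imgs, target = imgs.to(device), target.to(device)
            pred = model(imgs).argmax(1)
            for t, p in zip(target.cpu().tolist(), pred.cpu().tolist()):
                total[t] += 1
                correct[t] += int(t == p)

    for name, c, n in zip(classes, correct, total):
        print(f'{name}: {c}/{n} = {c / max(n, 1):.3f}')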


Reposted from: https://blog.csdn.net/suic009/article/details/128499622