import time

import torch
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# CIFAR10: 50000 training images and 10000 test images, 10 classes
train_data = torchvision.datasets.CIFAR10("dataset", train=True, download=True,
                                           transform=torchvision.transforms.ToTensor())
test_data = torchvision.datasets.CIFAR10("dataset", train=False, download=True,
                                          transform=torchvision.transforms.ToTensor())

train_data_size = len(train_data)
test_data_size = len(test_data)

train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
# CNN for CIFAR10: three conv/pool blocks followed by two fully connected layers
class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, 1, 2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, 1, 2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, 1, 2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x
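# Optional shape check (added here, not part of the original script): every
# Conv2d above uses kernel 5, stride 1, padding 2, which preserves the spatial
# size, and each MaxPool2d(2) halves it (32 -> 16 -> 8 -> 4), so Flatten
# produces 64 * 4 * 4 = 1024 features -- the input size of the first Linear layer.
_dummy_batch = torch.zeros(64, 3, 32, 32)   # fake batch of CIFAR10-sized images
assert Model()(_dummy_batch).shape == (64, 10)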
model = Model()
if torch.cuda.is_available():
    model = model.cuda()

loss_fn = nn.CrossEntropyLoss()
if torch.cuda.is_available():
    loss_fn = loss_fn.cuda()
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

total_train_step = 0   # number of training batches seen so far
total_test_step = 0    # number of completed evaluation rounds
epoch = 30             # number of training epochs

writer = SummaryWriter("logs_train")
start_time = time.time()
for i in range(epoch):
    print("------ Epoch {} starts ------".format(i + 1))

    # Training phase
    model.train()
    for data in train_dataloader:
        imgs, targets = data
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            targets = targets.cuda()
        outputs = model(imgs)
        loss = loss_fn(outputs, targets)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step += 1
        if total_train_step % 100 == 0:
            end_time = time.time()
            print("Elapsed time: {:.2f} s".format(end_time - start_time))
            print("Training step: {}, loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)
    # Evaluation phase: no gradients needed
    model.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            if torch.cuda.is_available():
                imgs = imgs.cuda()
                targets = targets.cuda()
            outputs = model(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss += loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy += accuracy

    print("Loss on the whole test set: {}".format(total_test_loss))
    print("Accuracy on the whole test set: {}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy / test_data_size, total_test_step)
    total_test_step += 1

    # Save a checkpoint of the whole model after every epoch
    torch.save(model, "model_{}.pth".format(i))
    print("Model saved")
writer.close()
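The curves logged above can be inspected by running "tensorboard --logdir=logs_train" on the command line. Because torch.save is given the whole model object, each model_{}.pth file is a pickled Model instance; a minimal reload sketch follows (the checkpoint name model_29.pth is just the last file this script would produce, and the Model class must be defined or importable when loading).

# Sketch (assumption: the training run above has produced model_29.pth in the working directory).
# On newer PyTorch versions, torch.load may need weights_only=False to unpickle a full model object.
loaded_model = torch.load("model_29.pth", map_location="cpu")
loaded_model.eval()
with torch.no_grad():
    sample_img, sample_target = test_data[0]                # one CIFAR10 test image and its label
    pred = loaded_model(sample_img.unsqueeze(0)).argmax(1)
    print("predicted class: {}, true class: {}".format(pred.item(), sample_target))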