%matplotlib inline from matplotlib import pyplot as plt import numpy as np import torch torch.device('cuda' if torch.cuda.is_available() else 'cpu') torch.set_printoptions(edgeitems=2,linewidth=75) torch.manual_seed(123) from torchvision import datasets, transforms data_path="data/8/cifae-10/" #这里用train=True加定下载训练集,下面一行所train=False定下载验证装 cifar10=datasets.CIFAR10(data_path, train=True, download=True) cifar10_val=datasets.CIFAR10(data_path, train=False, download=True)
Files already downloaded and verified Files already downloaded and verified
In [2]:
class_namess=["airplane", "automobile", "bird", "cat", "deer","dog", "frog", "horse", "ship", "truck"] fig=plt.figure(figsize=(8,3)) num_classes=10 for i in range(num_classes): ax=fig.add_subplot(1,num_classes,i+1,xticks=[],yticks=[]) ax.set_title(class_namess[i]) img=next(img for img,label in cifar10 if label==i) plt.imshow(img) plt.show()

In [3]:
type(cifar10).__mro__
Out[3]:
(torchvision.datasets.cifar.CIFAR10, torchvision.datasets.vision.VisionDataset, torch.utils.data.dataset.Dataset, typing.Generic, object)
In [4]:
#Dataset有两个函数对象需要我们实现,__Len__(),getitem__() #__len__()返回的是数据的大小,getitem可以按序号返回具体的数据item
In [5]:
len(cifar10)
Out[5]:
50000
In [6]:
len(cifar10_val)
Out[6]:
10000
In [7]:
img,label=cifar10[99]
In [8]:
img,label,class_namess[label]
Out[8]:
(<PIL.Image.Image image mode=RGB size=32x32>, 1, 'automobile')
In [9]:
plt.imshow(img) plt.show()

准备训练集和验证集
In [10]:
dir(transforms)
Out[10]:
['AugMix', 'AutoAugment', 'AutoAugmentPolicy', 'CenterCrop', 'ColorJitter', 'Compose', 'ConvertImageDtype', 'ElasticTransform', 'FiveCrop', 'GaussianBlur', 'Grayscale', 'InterpolationMode', 'Lambda', 'LinearTransformation', 'Normalize', 'PILToTensor', 'Pad', 'RandAugment', 'RandomAdjustSharpness', 'RandomAffine', 'RandomApply', 'RandomAutocontrast', 'RandomChoice', 'RandomCrop', 'RandomEqualize', 'RandomErasing', 'RandomGrayscale', 'RandomHorizontalFlip', 'RandomInvert', 'RandomOrder', 'RandomPerspective', 'RandomPosterize', 'RandomResizedCrop', 'RandomRotation', 'RandomSolarize', 'RandomVerticalFlip', 'Resize', 'TenCrop', 'ToPILImage', 'ToTensor', 'TrivialAugmentWide', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__path__', '__spec__', '_functional_pil', '_functional_tensor', '_presets', 'autoaugment', 'functional', 'transforms']
In [11]:
to_tensor=transforms.ToTensor() img_t=to_tensor(img) img_t.shape
Out[11]:
torch.Size([3, 32, 32])
In [12]:
tensor_cifar10=datasets.CIFAR10(data_path, train=True, download=False,transform=transforms.ToTensor()) img_t,_=tensor_cifar10[99] type(img_t)
Out[12]:
torch.Tensor
In [13]:
img_t.min(),img_t.max()
Out[13]:
(tensor(0.), tensor(1.))
In [14]:
imgs=torch.stack([img_t for img_t,__ in tensor_cifar10],dim=3) imgs.view(3,-1).mean(dim=1) imgs.view(3,-1).std(dim=1) transforms.Normalize((0.4915,0.4823,0.4468),(0.2470,0.2435,0.2616))
Out[14]:
Normalize(mean=(0.4915, 0.4823, 0.4468), std=(0.247, 0.2435, 0.2616))
In [15]:
transformed_cifar10=datasets.CIFAR10(data_path, train=True, download=False,transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.4915,0.4823,0.4468),(0.2470,0.2435,0.2616))])) transformed_cifar10_val=datasets.CIFAR10(data_path,train=False,download=False,transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.4915,0.4823,0.4468),(0.2470,0.2435,0.2616))]))
In [16]:
img_t,_=transformed_cifar10[99] plt.imshow(img_t.permute(1,2,0)) plt.show()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).

In [17]:
label_map={0:0,2:1} class_map={"airplane","bird"} cifar2=[(img,label_map[label]) for img,label in transformed_cifar10 if label in [0,2]] cifar2_val=[(img,label_map[label]) for img,label in transformed_cifar10_val if label in [0,2]]
In [18]:
sum_x=3*32*32 sum_x
Out[18]:
3072
In [19]:
import torch.nn as nn n_out=2 model=nn.Sequential( nn.Linear(3072,512), nn.Tanh(), nn.Linear(512, n_out,) )
In [20]:
def softmax(x): return torch.exp(x) / torch.exp(x).sum() x=torch.tensor([1.0,2.0,3.0]) softmax(x)
Out[20]:
tensor([0.0900, 0.2447, 0.6652])
In [21]:
softmax=nn.Softmax(dim=1) x=torch.tensor([[1.0,2.0,3.0],[1.0,2.0,3.0]]) softmax(x)
Out[21]:
tensor([[0.0900, 0.2447, 0.6652], [0.0900, 0.2447, 0.6652]])
In [22]:
softmax=nn.Softmax(dim=0) softmax(x)
Out[22]:
tensor([[0.5000, 0.5000, 0.5000], [0.5000, 0.5000, 0.5000]])
In [23]:
softmax=nn.Softmax(dim=1)
In [24]:
model=nn.Sequential( nn.Linear(3072,512), nn.Tanh(), nn.Linear(512, n_out), nn.Softmax(dim=1) )
In [25]:
img,_=cifar2[0] plt.imshow(img.permute(1,2,0)) plt.show()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).

In [26]:
img_batch=img.view(-1).unsqueeze(0) out=model(img_batch)
In [27]:
out
Out[27]:
tensor([[0.4784, 0.5216]], grad_fn=<SoftmaxBackward0>)
In [28]:
model=nn.Sequential( nn.Linear(3072,512), nn.Tanh(), nn.Linear(512, n_out), nn.LogSoftmax(dim=1) ) loss=nn.NLLLoss()
In [29]:
import torch import torch.nn as nn import torch.optim as optim model=nn.Sequential( nn.Linear(3072,512), nn.Tanh(), nn.Linear(512, n_out), nn.LogSoftmax(dim=1) ) learning_rate=1e-2 optimizer=optim.SGD(model.parameters(),lr=learning_rate) loss_fn=nn.NLLLoss() n_epochs=100
In [30]:
for epoch in range(n_epochs): for img,label in cifar2: out=model(img.view(-1).unsqueeze(0)) loss=loss_fn(out,torch.tensor([label])) optimizer.zero_grad() loss.backward() optimizer.step() print("Eopch: %d,loss: %f" % (epoch,float(loss)))
Eopch: 0,loss: 5.485894 Eopch: 1,loss: 4.617170 Eopch: 2,loss: 0.469880 Eopch: 3,loss: 2.285285 Eopch: 4,loss: 1.285325 Eopch: 5,loss: 13.470414 Eopch: 6,loss: 10.837532 Eopch: 7,loss: 7.425426 Eopch: 8,loss: 7.737788 Eopch: 9,loss: 5.619729 Eopch: 10,loss: 12.788360 Eopch: 11,loss: 10.572822 Eopch: 12,loss: 6.495363 Eopch: 13,loss: 9.600823 Eopch: 14,loss: 11.365033 Eopch: 15,loss: 13.338305 Eopch: 16,loss: 12.778342 Eopch: 17,loss: 7.627055 Eopch: 18,loss: 11.416717 Eopch: 19,loss: 9.715016 Eopch: 20,loss: 7.139337 Eopch: 21,loss: 4.925175 Eopch: 22,loss: 6.588778 Eopch: 23,loss: 8.716374 Eopch: 24,loss: 0.035832 Eopch: 25,loss: 0.728348 Eopch: 26,loss: 0.351484 Eopch: 27,loss: 5.819133 Eopch: 28,loss: 2.520909 Eopch: 29,loss: 0.169534 Eopch: 30,loss: 0.089472 Eopch: 31,loss: 1.650686 Eopch: 32,loss: 1.102441 Eopch: 33,loss: 15.099025 Eopch: 34,loss: 10.415537 Eopch: 35,loss: 13.349001 Eopch: 36,loss: 9.674664 Eopch: 37,loss: 1.381636 Eopch: 38,loss: 10.357496 Eopch: 39,loss: 9.785381 Eopch: 40,loss: 6.544083 Eopch: 41,loss: 7.540296 Eopch: 42,loss: 10.565806 Eopch: 43,loss: 3.333077 Eopch: 44,loss: 11.563530 Eopch: 45,loss: 6.808948 Eopch: 46,loss: 0.001592 Eopch: 47,loss: 6.894621 Eopch: 48,loss: 5.026095 Eopch: 49,loss: 6.603358 Eopch: 50,loss: 3.706079 Eopch: 51,loss: 9.049153 Eopch: 52,loss: 8.612333 Eopch: 53,loss: 7.409239 Eopch: 54,loss: 5.024203 Eopch: 55,loss: 8.568005 Eopch: 56,loss: 2.590775 Eopch: 57,loss: 1.053147 Eopch: 58,loss: 4.692816 Eopch: 59,loss: 8.793502 Eopch: 60,loss: 3.361957 Eopch: 61,loss: 3.225009 Eopch: 62,loss: 5.255093 Eopch: 63,loss: 0.015619 Eopch: 64,loss: 0.806943 Eopch: 65,loss: 0.140692 Eopch: 66,loss: 7.048239 Eopch: 67,loss: 16.357237 Eopch: 68,loss: 5.745358 Eopch: 69,loss: 0.858181 Eopch: 70,loss: 0.000422 Eopch: 71,loss: 0.396246 Eopch: 72,loss: 11.306356 Eopch: 73,loss: 5.439380 Eopch: 74,loss: 10.215464 Eopch: 75,loss: 12.111738 Eopch: 76,loss: 9.012519 Eopch: 77,loss: 8.129960 Eopch: 78,loss: 5.571000 Eopch: 79,loss: 9.431704 Eopch: 80,loss: 2.959796 Eopch: 81,loss: 1.451346 Eopch: 82,loss: 0.961933 Eopch: 83,loss: 5.248207 Eopch: 84,loss: 8.078607 Eopch: 85,loss: 2.254828 Eopch: 86,loss: 6.599775 Eopch: 87,loss: 3.688656 Eopch: 88,loss: 6.021101 Eopch: 89,loss: 13.318411 Eopch: 90,loss: 0.450303 Eopch: 91,loss: 0.461362 Eopch: 92,loss: 13.483785 Eopch: 93,loss: 4.519544 Eopch: 94,loss: 0.016307 Eopch: 95,loss: 11.214909 Eopch: 96,loss: 15.330046 Eopch: 97,loss: 10.443957 Eopch: 98,loss: 10.420175 Eopch: 99,loss: 11.156905
In [31]:
train_loader=torch.utils.data.DataLoader(cifar2,batch_size=64,shuffle=True) model=nn.Sequential( nn.Linear(3072,512), nn.Tanh(), nn.Linear(512, n_out), nn.LogSoftmax(dim=1) ) learning_rate=1e-2 optimizer=optim.SGD(model.parameters(),lr=learning_rate) loss_fn=nn.NLLLoss() n_epoochs=100
In [32]:
for epoch in range(n_epochs): for img,labels in train_loader: batch_size=img.shape[0] out=model(img.view(batch_size,-1)) loss=loss_fn(out,labels) optimizer.zero_grad() loss.backward() optimizer.step() print("Eopch: %d,loss: %f" % (epoch,float(loss)))
Eopch: 0,loss: 0.318367 Eopch: 1,loss: 0.389473 Eopch: 2,loss: 0.427979 Eopch: 3,loss: 0.435714 Eopch: 4,loss: 0.504344 Eopch: 5,loss: 0.483689 Eopch: 6,loss: 0.308372 Eopch: 7,loss: 0.115488 Eopch: 8,loss: 0.513510 Eopch: 9,loss: 0.386493 Eopch: 10,loss: 0.274975 Eopch: 11,loss: 0.252231 Eopch: 12,loss: 0.213218 Eopch: 13,loss: 0.256708 Eopch: 14,loss: 0.316772 Eopch: 15,loss: 0.631465 Eopch: 16,loss: 0.423488 Eopch: 17,loss: 0.402729 Eopch: 18,loss: 0.307367 Eopch: 19,loss: 0.179422 Eopch: 20,loss: 0.379566 Eopch: 21,loss: 0.290748 Eopch: 22,loss: 0.270363 Eopch: 23,loss: 0.186533 Eopch: 24,loss: 0.285739 Eopch: 25,loss: 0.455766 Eopch: 26,loss: 0.263250 Eopch: 27,loss: 0.257488 Eopch: 28,loss: 0.253201 Eopch: 29,loss: 0.088151 Eopch: 30,loss: 0.119370 Eopch: 31,loss: 0.101625 Eopch: 32,loss: 0.247492 Eopch: 33,loss: 0.090906 Eopch: 34,loss: 0.172537 Eopch: 35,loss: 0.462267 Eopch: 36,loss: 0.273000 Eopch: 37,loss: 0.254223 Eopch: 38,loss: 0.348747 Eopch: 39,loss: 0.300148 Eopch: 40,loss: 0.063681 Eopch: 41,loss: 0.079802 Eopch: 42,loss: 0.135370 Eopch: 43,loss: 0.100524 Eopch: 44,loss: 0.055207 Eopch: 45,loss: 0.177615 Eopch: 46,loss: 0.146831 Eopch: 47,loss: 0.030244 Eopch: 48,loss: 0.113541 Eopch: 49,loss: 0.054586 Eopch: 50,loss: 0.088400 Eopch: 51,loss: 0.037736 Eopch: 52,loss: 0.044950 Eopch: 53,loss: 0.045306 Eopch: 54,loss: 0.122482 Eopch: 55,loss: 0.094259 Eopch: 56,loss: 0.113845 Eopch: 57,loss: 0.020960 Eopch: 58,loss: 0.047257 Eopch: 59,loss: 0.054770 Eopch: 60,loss: 0.057607 Eopch: 61,loss: 0.045996 Eopch: 62,loss: 0.019060 Eopch: 63,loss: 0.017909 Eopch: 64,loss: 0.062859 Eopch: 65,loss: 0.024708 Eopch: 66,loss: 0.028053 Eopch: 67,loss: 0.061581 Eopch: 68,loss: 0.025352 Eopch: 69,loss: 0.021476 Eopch: 70,loss: 0.015957 Eopch: 71,loss: 0.024176 Eopch: 72,loss: 0.139968 Eopch: 73,loss: 0.025018 Eopch: 74,loss: 0.017756 Eopch: 75,loss: 0.012510 Eopch: 76,loss: 0.062693 Eopch: 77,loss: 0.019162 Eopch: 78,loss: 0.026831 Eopch: 79,loss: 0.022641 Eopch: 80,loss: 0.006038 Eopch: 81,loss: 0.012924 Eopch: 82,loss: 0.010721 Eopch: 83,loss: 0.002646 Eopch: 84,loss: 0.017216 Eopch: 85,loss: 0.010292 Eopch: 86,loss: 0.009473 Eopch: 87,loss: 0.034917 Eopch: 88,loss: 0.013506 Eopch: 89,loss: 0.009394 Eopch: 90,loss: 0.010806 Eopch: 91,loss: 0.015955 Eopch: 92,loss: 0.011492 Eopch: 93,loss: 0.019395 Eopch: 94,loss: 0.011841 Eopch: 95,loss: 0.025058 Eopch: 96,loss: 0.015301 Eopch: 97,loss: 0.008132 Eopch: 98,loss: 0.011373 Eopch: 99,loss: 0.013405
In [33]:
val_loader=torch.utils.data.DataLoader(cifar2_val,batch_size=64,shuffle=False) correct=0 total=0 with torch.no_grad(): for imgs,labels in val_loader: batch_size=imgs.shape[0] outputs=model(imgs.view(batch_size,-1)) _,predicted=torch.max(outputs,dim=1) total+=labels.shape[0] correct+=int((predicted==labels).sum()) print("Accuracy:",correct/total)
Accuracy: 0.8105
In [34]:
model=nn.Sequential( nn.Linear(3072,1024), nn.Tanh(), nn.Linear(1024,512), nn.Tanh(), nn.Linear(512, 128), nn.Tanh(), nn.Linear(128, n_out), nn.LogSoftmax(dim=1) )
In [35]:
for epoch in range(n_epochs): for imgs,labels in val_loader: batch_size=imgs.shape[0] out=model(imgs.view(batch_size,-1)) loss=loss_fn(out,labels) optimizer.zero_grad() loss.backward() optimizer.step() print("Eopch: %d,loss: %f" % (epoch,float(loss)))
Eopch: 0,loss: 0.687660 Eopch: 1,loss: 0.687660 Eopch: 2,loss: 0.687660 Eopch: 3,loss: 0.687660 Eopch: 4,loss: 0.687660 Eopch: 5,loss: 0.687660 Eopch: 6,loss: 0.687660 Eopch: 7,loss: 0.687660 Eopch: 8,loss: 0.687660 Eopch: 9,loss: 0.687660 Eopch: 10,loss: 0.687660 Eopch: 11,loss: 0.687660 Eopch: 12,loss: 0.687660 Eopch: 13,loss: 0.687660 Eopch: 14,loss: 0.687660 Eopch: 15,loss: 0.687660 Eopch: 16,loss: 0.687660 Eopch: 17,loss: 0.687660 Eopch: 18,loss: 0.687660 Eopch: 19,loss: 0.687660 Eopch: 20,loss: 0.687660 Eopch: 21,loss: 0.687660 Eopch: 22,loss: 0.687660 Eopch: 23,loss: 0.687660 Eopch: 24,loss: 0.687660 Eopch: 25,loss: 0.687660 Eopch: 26,loss: 0.687660 Eopch: 27,loss: 0.687660 Eopch: 28,loss: 0.687660 Eopch: 29,loss: 0.687660 Eopch: 30,loss: 0.687660 Eopch: 31,loss: 0.687660 Eopch: 32,loss: 0.687660 Eopch: 33,loss: 0.687660 Eopch: 34,loss: 0.687660 Eopch: 35,loss: 0.687660 Eopch: 36,loss: 0.687660 Eopch: 37,loss: 0.687660 Eopch: 38,loss: 0.687660 Eopch: 39,loss: 0.687660 Eopch: 40,loss: 0.687660 Eopch: 41,loss: 0.687660 Eopch: 42,loss: 0.687660 Eopch: 43,loss: 0.687660 Eopch: 44,loss: 0.687660 Eopch: 45,loss: 0.687660 Eopch: 46,loss: 0.687660 Eopch: 47,loss: 0.687660 Eopch: 48,loss: 0.687660 Eopch: 49,loss: 0.687660 Eopch: 50,loss: 0.687660 Eopch: 51,loss: 0.687660 Eopch: 52,loss: 0.687660 Eopch: 53,loss: 0.687660 Eopch: 54,loss: 0.687660 Eopch: 55,loss: 0.687660 Eopch: 56,loss: 0.687660 Eopch: 57,loss: 0.687660 Eopch: 58,loss: 0.687660 Eopch: 59,loss: 0.687660 Eopch: 60,loss: 0.687660 Eopch: 61,loss: 0.687660 Eopch: 62,loss: 0.687660 Eopch: 63,loss: 0.687660 Eopch: 64,loss: 0.687660 Eopch: 65,loss: 0.687660 Eopch: 66,loss: 0.687660 Eopch: 67,loss: 0.687660 Eopch: 68,loss: 0.687660 Eopch: 69,loss: 0.687660 Eopch: 70,loss: 0.687660 Eopch: 71,loss: 0.687660 Eopch: 72,loss: 0.687660 Eopch: 73,loss: 0.687660 Eopch: 74,loss: 0.687660 Eopch: 75,loss: 0.687660 Eopch: 76,loss: 0.687660 Eopch: 77,loss: 0.687660 Eopch: 78,loss: 0.687660 Eopch: 79,loss: 0.687660 Eopch: 80,loss: 0.687660 Eopch: 81,loss: 0.687660 Eopch: 82,loss: 0.687660 Eopch: 83,loss: 0.687660 Eopch: 84,loss: 0.687660 Eopch: 85,loss: 0.687660 Eopch: 86,loss: 0.687660 Eopch: 87,loss: 0.687660 Eopch: 88,loss: 0.687660 Eopch: 89,loss: 0.687660 Eopch: 90,loss: 0.687660 Eopch: 91,loss: 0.687660 Eopch: 92,loss: 0.687660 Eopch: 93,loss: 0.687660 Eopch: 94,loss: 0.687660 Eopch: 95,loss: 0.687660 Eopch: 96,loss: 0.687660 Eopch: 97,loss: 0.687660 Eopch: 98,loss: 0.687660 Eopch: 99,loss: 0.687660
In [36]:
import torch import torch.nn as nn torch.device('cuda' if torch.cuda.is_available() else 'cpu') import torch.optim as optim train_loader=torch.utils.data.DataLoader(cifar2,batch_size=64,shuffle=True) model=nn.Sequential( nn.Linear(3072,1024), nn.Tanh(), nn.Linear(1024, 512), nn.Tanh(), nn.Linear(512, 128), nn.Tanh(), nn.Linear(128, 2), ) learning_rate=1e-2 optimizer=optim.SGD(model.parameters(),lr=learning_rate) loss_fn=nn.CrossEntropyLoss() n_epochs=100 for epoch in range(n_epochs): for imgs,labels in train_loader: out=model(imgs.view(imgs.shape[0],-1)) loss=loss_fn(out,labels) optimizer.zero_grad() loss.backward() optimizer.step() print("Eopch: %d,loss: %f" % (epoch,float(loss)))
Eopch: 0,loss: 0.519947 Eopch: 1,loss: 0.465496 Eopch: 2,loss: 0.409001 Eopch: 3,loss: 0.266336 Eopch: 4,loss: 0.469560 Eopch: 5,loss: 0.474294 Eopch: 6,loss: 0.504630 Eopch: 7,loss: 0.281101 Eopch: 8,loss: 0.532771 Eopch: 9,loss: 0.398839 Eopch: 10,loss: 0.567806 Eopch: 11,loss: 0.151879 Eopch: 12,loss: 0.311535 Eopch: 13,loss: 0.140536 Eopch: 14,loss: 0.214214 Eopch: 15,loss: 0.560523 Eopch: 16,loss: 0.345593 Eopch: 17,loss: 0.216553 Eopch: 18,loss: 0.512839 Eopch: 19,loss: 0.327105 Eopch: 20,loss: 0.506541 Eopch: 21,loss: 0.471869 Eopch: 22,loss: 0.269147 Eopch: 23,loss: 0.313149 Eopch: 24,loss: 0.333555 Eopch: 25,loss: 0.542198 Eopch: 26,loss: 0.112522 Eopch: 27,loss: 0.212196 Eopch: 28,loss: 0.159089 Eopch: 29,loss: 0.397768 Eopch: 30,loss: 0.182997 Eopch: 31,loss: 0.025174 Eopch: 32,loss: 0.024426 Eopch: 33,loss: 0.132378 Eopch: 34,loss: 0.075593 Eopch: 35,loss: 0.122619 Eopch: 36,loss: 0.105373 Eopch: 37,loss: 0.353998 Eopch: 38,loss: 0.134418 Eopch: 39,loss: 0.059301 Eopch: 40,loss: 0.036647 Eopch: 41,loss: 0.228398 Eopch: 42,loss: 0.017553 Eopch: 43,loss: 0.136787 Eopch: 44,loss: 0.181020 Eopch: 45,loss: 0.052730 Eopch: 46,loss: 0.140553 Eopch: 47,loss: 0.106907 Eopch: 48,loss: 0.084316 Eopch: 49,loss: 0.009374 Eopch: 50,loss: 0.106933 Eopch: 51,loss: 0.162647 Eopch: 52,loss: 0.009722 Eopch: 53,loss: 0.079601 Eopch: 54,loss: 0.047722 Eopch: 55,loss: 0.207536 Eopch: 56,loss: 0.058132 Eopch: 57,loss: 0.016000 Eopch: 58,loss: 0.071940 Eopch: 59,loss: 0.007567 Eopch: 60,loss: 0.005136 Eopch: 61,loss: 0.021234 Eopch: 62,loss: 0.007793 Eopch: 63,loss: 0.018341 Eopch: 64,loss: 0.087390 Eopch: 65,loss: 0.023519 Eopch: 66,loss: 0.005784 Eopch: 67,loss: 0.013550 Eopch: 68,loss: 0.066955 Eopch: 69,loss: 0.014660 Eopch: 70,loss: 0.007594 Eopch: 71,loss: 0.004473 Eopch: 72,loss: 0.000949 Eopch: 73,loss: 0.001681 Eopch: 74,loss: 0.136722 Eopch: 75,loss: 0.006067 Eopch: 76,loss: 0.009538 Eopch: 77,loss: 0.003024 Eopch: 78,loss: 0.009064 Eopch: 79,loss: 0.001668 Eopch: 80,loss: 0.003446 Eopch: 81,loss: 0.001881 Eopch: 82,loss: 0.003283 Eopch: 83,loss: 0.002586 Eopch: 84,loss: 0.002454 Eopch: 85,loss: 0.001090 Eopch: 86,loss: 0.002784 Eopch: 87,loss: 0.000422 Eopch: 88,loss: 0.003119 Eopch: 89,loss: 0.001662 Eopch: 90,loss: 0.001331 Eopch: 91,loss: 0.000746 Eopch: 92,loss: 0.004222 Eopch: 93,loss: 0.000532 Eopch: 94,loss: 0.000623 Eopch: 95,loss: 0.000203 Eopch: 96,loss: 0.000525 Eopch: 97,loss: 0.000918 Eopch: 98,loss: 0.000449 Eopch: 99,loss: 0.001100
In [37]:
val_loader=torch.utils.data.DataLoader(cifar2_val,batch_size=64,shuffle=False) correct=0 total=0 with torch.no_grad(): for imgs,labels in val_loader: batch_size=imgs.shape[0] outputs=model(imgs.view(batch_size,-1)) _,predicted=torch.max(outputs,dim=1) total+=labels.shape[0] correct+=int((predicted==labels).sum()) print("Accuracy:",correct/total)
Accuracy: 0.802
发表回复