原作者1:https://www.bilibili.com/video/BV1Y7411d7Ys?p=1
原作者2:PyTorch 深度学习实践_错错莫的博客-CSDN博客

2. 线性模型

import numpy as np
import matplotlib.pyplot as plt

# Training data for the linear model y = x * w + b.
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]


def forward(x):
    """Linear model prediction; reads the module-level w and b."""
    return x * w + b


def loss(x, y):
    """Squared error of the prediction for a single sample."""
    y_pred = forward(x)
    return (y - y_pred) ** 2


w_list = []
b_list = []
mse_list = []
w = np.arange(0.0, 4.1, 0.1)
b = np.arange(0.0, 4.1, 0.1)
# NOTE: zip(w, b) walks the diagonal w == b of the parameter grid
# (not the full cartesian product); the loop rebinds w and b each step,
# which is what forward()/loss() read.
for w, b in zip(w, b):
    print("w=", w, "b=", b)
    l_sum = 0
    for x_val, y_val in zip(x_data, y_data):
        y_pred_val = forward(x_val)
        loss_val = loss(x_val, y_val)
        # accumulate the loss over all samples
        l_sum += loss_val
        print('\t', x_val, y_val, y_pred_val, loss_val)
    print("MSE=", l_sum / 3)
    w_list.append(w)
    b_list.append(b)
    mse_list.append(l_sum / 3)

# 2D plot of MSE along the sampled (w, b) diagonal.
plt.plot(w_list, mse_list)
plt.ylabel('Loss')
plt.xlabel('w')
plt.show()
#https://blog.csdn.net/weixin_38052918/article/details/108032973
##3D绘制
# from mpl_toolkits.mplot3d import Axes3D
#
# x_train = np.array([1.0,2.0,3.0])
# y_train = np.array([2.0,4.0,6.0])
#
# dense = 400
# w,b = np.meshgrid(np.linspace(0,4.1, dense),np.linspace(0,4.1, dense))
#
# # y = wx+b
# def get_loss_value(w,b):
#     return np.square(w*x_train+b - y_train).sum()/len(x_train)
#
# def loss_point():
#     loss_list = []
#     for i in range(dense):
#         loss_list2=[]
#         for j in range(dense):
#             loss = get_loss_value(w[i][j], b[i][j])
#             loss_list2.append(loss)
#         loss_list.append(loss_list2)
#     return loss_list
#
# fig = plt.figure()
# ax = Axes3D(fig)
# loss = np.array(loss_point())
#
# # 添加坐标轴(顺序是Z, Y, X)
# ax.set_xlabel('w')
# ax.set_ylabel('b')
# ax.set_zlabel('L')
# ax.plot_surface(w, b, loss, rstride=30,cstride=30, cmap='jet')
# plt.show()
##等高线绘制
# x_train = np.array([1.0,2.0,3.0])
# y_train = np.array([2.0,4.0,6.0])
#
# dense = 400
#
# # y = wx+b
# def get_loss_value(w,b):
#     return np.square(w*x_train+b - y_train).sum()/len(x_train)
#
#
# w = np.linspace(0,4.1,dense)
# b = np.linspace(0,4.1,dense)
#
# def draw_contour_line(dense,isoheight): #dense表示取值的密度,isoheight表示等高线的值
#     list_w = []
#     list_b = []
#     list_loss = []
#     for i in range(dense):
#         for j in range(dense):
#             loss = get_loss_value(w[i],b[j])
#             if 1.05*isoheight>loss>0.95*isoheight:
#                 list_w.append(w[i])
#                 list_b.append(b[j])
#             else:
#                 pass
#     plt.scatter(list_w,list_b,s=1) #s=0.25比较合适
#
# draw_contour_line(dense,1)
# draw_contour_line(dense,4)
# draw_contour_line(dense,7)
# draw_contour_line(dense,10)
# draw_contour_line(dense,20)
# draw_contour_line(dense,30)
# draw_contour_line(dense,50)
# draw_contour_line(dense,100)
# draw_contour_line(dense,200)
# plt.title('Loss Func Contour Line')
# plt.xlabel('w')
# plt.ylabel('b')
# #y:0-4
# plt.axis([0,4,0,4])
# plt.show()

3. 梯度下降

import matplotlib.pyplot as plt

# Training data for y = x * w; the true weight is 2.
x_data = [1, 2, 3]
y_data = [2, 4, 6]
w = 1


def forward(x):
    """Linear model prediction; reads the module-level w."""
    return x * w


def cost(xs, ys):
    """Mean squared error over the whole dataset."""
    cost = 0
    for x, y in zip(xs, ys):
        y_pred = forward(x)
        cost += (y_pred - y) ** 2
    return cost / len(xs)   # MSE


# Method 1: batch gradient descent - each update sweeps the full dataset.
# def gradient(xs,ys):
#     grad=0
#     for x,y in zip(xs,ys):
#         grad+=2*x*(x*w-y)  # dLoss/dw
#     return grad/len(xs)
#
# print("Predict(before traing)",4,forward(4))
#
# epoch_list=[]
# cost_list=[]
# for epoch in range(100):
#     cost_val=cost(x_data,y_data)
#     grad_val=gradient(x_data,y_data)
#     w-=0.01*grad_val
#     epoch_list.append(epoch)
#     cost_list.append(cost_val)
#     print("epoch",epoch,'w',w,'loss',cost_val)
#
# print('Predict(after traing)',4,forward(4))
#
# plt.plot(epoch_list,cost_list)
# plt.show()

# Method 2: stochastic gradient descent - update after every single sample.
def loss(x, y):
    """Squared error for one sample."""
    y_pred = forward(x)
    return (y_pred - y) ** 2


def gradient(x, y):
    """Analytic dLoss/dw for one sample of y = x * w."""
    return 2 * x * (x * w - y)


epoch_list = []
l_list = []
print("Predict(before traing)", 4, forward(4))
for epoch in range(100):
    for x, y in zip(x_data, y_data):
        grad = gradient(x, y)
        w -= 0.01 * grad
        print('\tgrad:', x, y, grad)
        l = loss(x, y)
    # record the last-sample loss once per epoch
    epoch_list.append(epoch)
    l_list.append(l)
    print('progress:', epoch, 'w=', w, 'loss=', l)
print('Predict(after traing)', 4, forward(4))
plt.plot(epoch_list, l_list)
plt.show()

4.反向传递

import torch

# Training data; NOTE(review): y_data is [4,5,6] here (not 2*x), so the
# single-weight model y = x*w cannot fit it exactly - presumably intentional
# in the original notes, kept as-is.
x_data = [1, 2, 3]
y_data = [4, 5, 6]
w = torch.Tensor([1.0])
w.requires_grad = True  # key1: track gradients on the weight tensor


def forward(x):
    """Model prediction y = x * w (returns a tensor because w is a tensor)."""
    return x * w


def loss(x, y):
    """Squared error for one sample; builds the computation graph."""
    y_pred = forward(x)
    return (y_pred - y) ** 2


print("Predict(before traing)", 4, forward(4))
for epoch in range(100):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)          # forward pass
        l.backward()            # key2: backprop fills w.grad
        print('\tgrad', x, y, w.grad.item())
        # update via .data so the step itself is not tracked
        w.data = w.data - 0.01 * w.grad.data
        w.grad.data.zero_()     # key3: prevent gradient accumulation
    print('progress', epoch, l.item())
print('Predict(after traing)', 4, forward(4))

5.线性回归

import torch

x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[2.0], [4.0], [6.0]])


# Our model class should inherit from nn.Module,
# which is the base class for all neural network modules.
class LinearModel(torch.nn.Module):
    def __init__(self):
        # required: initialise the nn.Module machinery
        super(LinearModel, self).__init__()
        # (input dim, output dim, bias=True by default); holds weight and bias tensors
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        # nn.Linear implements __call__, so the layer is used like a function
        y_pred = self.linear(x)
        return y_pred


model = LinearModel()

# size_average=False is deprecated; reduction='sum' is the modern equivalent.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
for epoch in range(100):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(epoch, loss)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# output trained weight and bias
print('w=', model.linear.weight.item())
print('b=', model.linear.bias.item())
# test model
x_test = torch.Tensor([4])
y_test = model(x_test)
print('y_pred=', y_test.data)

6.逻辑回归

import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt

x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[0], [0], [1]])


class LogisticRegressionModel(torch.nn.Module):
    def __init__(self):
        super(LogisticRegressionModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        # F.sigmoid is deprecated; torch.sigmoid is the supported spelling
        y_pred = torch.sigmoid(self.linear(x))
        return y_pred


model = LogisticRegressionModel()

# construct loss and optimizer
# By default BCELoss averages over elements; reduction='sum' accumulates
# instead (modern replacement for the deprecated size_average=False).
criterion = torch.nn.BCELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
for epoch in range(1000):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# visualise the trained model over 0..10 study hours
x = np.linspace(0, 10, 200)
x_t = torch.Tensor(x).view((200, 1))
y_t = model(x_t)
y = y_t.data.numpy()
plt.plot(x, y)
plt.plot([0, 10], [0.5, 0.5], c='r')
plt.xlabel('Hours')
plt.ylabel('Probability of Pass')
plt.grid()
plt.show()

7.多维输入

import numpy as np
import torch
import matplotlib.pyplot as plt

# prepare dataset
xy = np.loadtxt('diabetes.csv', delimiter=',', dtype=np.float32)
# first ':' reads all rows; ':-1' takes every column except the last (features)
x_data = torch.from_numpy(xy[:, :-1])
print("input data.shape", x_data.shape)
# '[-1]' keeps the last column as a 2-D matrix (labels)
y_data = torch.from_numpy(xy[:, [-1]])


# design model using class
class Model(torch.nn.Module):
    """8 -> 6 -> 4 -> 2 -> 1 fully-connected net with sigmoid activations."""

    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 2)
        self.linear4 = torch.nn.Linear(2, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        x = self.sigmoid(self.linear4(x))  # y hat
        return x


model = Model()

# construct loss and optimizer
# size_average=True is deprecated; reduction='mean' is the modern equivalent.
criterion = torch.nn.BCELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# training cycle: forward, backward, update
for epoch in range(100):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # accuracy check on the final epoch
    if epoch % 100 == 99:
        y_pred_label = torch.where(y_pred >= 0.5, torch.tensor([1.0]), torch.tensor([0.0]))
        acc = torch.eq(y_pred_label, y_data).sum().item() / y_data.size(0)
        print("loss = ", loss.item(), "acc = ", acc)

8.数据库与数据下载器

# Variant 1: split with sklearn, wrap the training split in a Dataset.
import torch
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split

# Read the raw data and split into train / test sets.
raw_data = np.loadtxt('D:\install by myself\BaiduNetdiskDownload\新建文件夹(桌面)\douma-algo-master\diabetes.csv', delimiter=',', dtype=np.float32)
X = raw_data[:, :-1]
y = raw_data[:, [-1]]
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, y, test_size=0.3)
Xtest = torch.from_numpy(Xtest)
Ytest = torch.from_numpy(Ytest)


# prepare dataset: mini-batch access over the training split
class DiabetesDataset(Dataset):
    def __init__(self, data, label):
        # shape is (rows, cols); rows == number of samples
        self.len = data.shape[0]
        self.x_data = torch.from_numpy(data)
        self.y_data = torch.from_numpy(label)

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len


train_dataset = DiabetesDataset(Xtrain, Ytrain)
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=32,
                          shuffle=True,
                          num_workers=4)  # num_workers: worker processes


# design model using class
class Model(torch.nn.Module):
    """8 -> 6 -> 4 -> 2 -> 1 fully-connected net with sigmoid activations."""

    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 2)
        self.linear4 = torch.nn.Linear(2, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        x = self.sigmoid(self.linear4(x))
        return x


model = Model()

# construct loss and optimizer
criterion = torch.nn.BCELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)


# training cycle: forward, backward, update
def train(epoch):
    train_loss = 0.0
    count = 0
    for i, data in enumerate(train_loader, 0):
        x, y = data
        y_pred = model(x)
        loss = criterion(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        # i is 0-based, so the number of batches seen is i + 1
        # (the original used count = i, which under-counts by one and
        # would divide by zero with a single batch)
        count = i + 1  # ~17 batches: 759*0.7/32
    if epoch % 10 == 9:
        print("train loss:", count, train_loss / count, end=',')


def test():
    with torch.no_grad():
        y_pred = model(Xtest)
        y_pred_label = torch.where(y_pred >= 0.5, torch.tensor([1.0]), torch.tensor([0.0]))
        acc = torch.eq(y_pred_label, Ytest).sum().item() / Ytest.size(0)
        print("test acc:", acc)


if __name__ == '__main__':
    for epoch in range(50):
        train(epoch)
        if epoch % 10 == 9:
            test()
# Variant 2: the Dataset itself loads the CSV file.
import torch
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data import DataLoader


# prepare dataset
class DiabetesDataset(Dataset):
    def __init__(self, filepath):
        xy = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = xy.shape[0]  # shape is (rows, cols)
        self.x_data = torch.from_numpy(xy[:, :-1])
        self.y_data = torch.from_numpy(xy[:, [-1]])

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len


dataset = DiabetesDataset('diabetes.csv')
train_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True, num_workers=2)  # num_workers: worker processes


# design model using class
class Model(torch.nn.Module):
    """8 -> 6 -> 4 -> 1 fully-connected net with sigmoid activations."""

    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        return x


model = Model()

# construct loss and optimizer
criterion = torch.nn.BCELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# training cycle: forward, backward, update
if __name__ == '__main__':
    for epoch in range(10):
        for i, data in enumerate(train_loader, 0):  # loader shuffles then mini-batches
            x, y = data
            y_pred = model(x)
            loss = criterion(y_pred, y)
            # print(epoch, i, loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # # acc1: per-batch accuracy (756/32=24 batches)
            # if epoch % 10 == 9:
            #     y_pred_label = torch.where(y_pred >= 0.5, torch.tensor([1.0]), torch.tensor([0.0]))
            #     acc = torch.eq(y_pred_label, y).sum().item() / y.size(0)
            #     print("loss = ", loss.item(), "acc = ", acc)
    # acc2: accuracy over the full dataset after training
    xy = np.loadtxt('diabetes.csv', delimiter=',', dtype=np.float32)
    x_data = torch.from_numpy(xy[:, :-1])
    y_data = torch.from_numpy(xy[:, [-1]])
    y_pred = model(x_data)
    y_pred_label = torch.where(y_pred >= 0.5, torch.tensor([1.0]), torch.tensor([0.0]))
    acc = torch.eq(y_pred_label, y_data).sum().item() / y_data.size(0)
    print("acc = ", acc)

9.softmax 分类器

# Cross entropy: the core of the softmax classifier, done by hand in numpy.
import numpy as np

z = np.array([0.2, 0.1, -0.1])   # raw logits
y = np.array([1, 0, 0])          # one-hot target
# softmax: exponentiate and normalise to a probability distribution
y_pred = np.exp(z) / np.exp(z).sum()
# cross-entropy loss: -sum(y * log(y_hat))
loss = (-y * np.log(y_pred)).sum()
print(loss)
import torch
import torch.nn.functional as F
import torch.optim as optim
# data preprocessing utilities
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader

batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),  # convert the PIL Image to Tensor (pillow)
    transforms.Normalize((0.1307,), (0.3081,))
])

train_dataset = datasets.MNIST(root='D:\install by myself\BaiduNetdiskDownload\新建文件夹(桌面)\douma-algo-master/dataset/mnist/', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='D:\install by myself\BaiduNetdiskDownload\新建文件夹(桌面)\douma-algo-master/dataset/mnist/', train=False, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)


class Net(torch.nn.Module):
    """Fully-connected classifier 784 -> 512 -> 256 -> 128 -> 64 -> 10."""

    def __init__(self):
        super(Net, self).__init__()
        self.l1 = torch.nn.Linear(784, 512)
        self.l2 = torch.nn.Linear(512, 256)
        self.l3 = torch.nn.Linear(256, 128)
        self.l4 = torch.nn.Linear(128, 64)
        self.l5 = torch.nn.Linear(64, 10)

    def forward(self, x):
        x = x.view(-1, 784)  # (N,1,28,28) -> (N,784)
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        x = F.relu(self.l4(x))
        # no softmax here: CrossEntropyLoss applies log-softmax internally
        return self.l5(x)


model = Net()
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.5)


def train(epoch):
    running_loss = 0
    for batch_idx, (x, y) in enumerate(train_loader, 0):
        y_pred = model(x)
        loss = criterion(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        # print the loss every 300 batches (each batch holds 64 images/labels)
        if batch_idx % 300 == 299:
            print('[%d,%5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0


def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for (x, y) in test_loader:
            # (64,784) -> (64,10)
            y_pred = model(x)
            # dim=1: take the argmax across the 10 class columns
            _, predicted = torch.max(y_pred.data, dim=1)
            total += y.size(0)
            correct += (predicted == y).sum().item()
    print('Acc:%d %%' % (100 * correct / total))


if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        if epoch % 10 == 9:
            test()

10.基本CNN

# Warm-up 1: convolution with no padding.
import torch

in_channels, out_channels = 5, 10
width, height = 100, 100
kernel_size = 3
batch_size = 1

# input shape (1,5,100,100)
input = torch.randn(batch_size, in_channels, width, height)
conv_layer = torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size)
# output side = n + 2p - f + 1 = 100 + 0 - 3 + 1 = 98
output = conv_layer(input)
print(input.shape)
print(output.shape)
# weight shape (out_channels, in_channels, kH, kW) = (10,5,3,3)
print(conv_layer.weight.shape)
# Warm-up 2: convolution with padding=1 keeps the spatial size.
import torch

input = [3, 4, 6, 5, 7,
         2, 4, 6, 8, 2,
         1, 6, 7, 8, 4,
         9, 7, 4, 6, 2,
         3, 7, 5, 4, 1]
input = torch.Tensor(input).view(1, 1, 5, 5)
conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False)
# set the kernel weights explicitly
kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)
conv_layer.weight.data = kernel.data
output = conv_layer(input)
print(output)
# one (1,5,5) image + one (1,3,3) filter -> (1, 5+2*1-3+1, 5+2*1-3+1) = (1,5,5)
print(output.shape)
# Warm-up 3: convolution with stride=2.
import torch

input = [3, 4, 6, 5, 7,
         2, 4, 6, 8, 2,
         1, 6, 7, 8, 4,
         9, 7, 4, 6, 2,
         3, 7, 5, 4, 1]
input = torch.Tensor(input).view(1, 1, 5, 5)
conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, stride=2, bias=False)
# set the kernel weights explicitly
kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)
conv_layer.weight.data = kernel.data
# output side = floor((n + 2p - f) / s) + 1 = ((5 + 0 - 3) / 2) + 1 = 2
output = conv_layer(input)
print(output)
# one (1,5,5) image + one (1,3,3) filter at stride 2 -> (1,2,2)
print(output.shape)
# Warm-up 4: 2x2 max pooling halves each spatial dimension.
import torch

input = [3, 4, 6, 5,
         2, 4, 6, 8,
         1, 6, 7, 8,
         9, 7, 4, 6, ]
input = torch.Tensor(input).view(1, 1, 4, 4)
maxpooling_layer = torch.nn.MaxPool2d(kernel_size=2)
output = maxpooling_layer(input)
print(output)
# torch.Size([1, 1, 2, 2])
print(output.shape)
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt

# prepare dataset
batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),  # convert the PIL Image to Tensor (pillow)
    transforms.Normalize((0.1307,), (0.3081,))
])

train_dataset = datasets.MNIST(root='D:\install by myself\BaiduNetdiskDownload\新建文件夹(桌面)\douma-algo-master/dataset/mnist/', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='D:\install by myself\BaiduNetdiskDownload\新建文件夹(桌面)\douma-algo-master/dataset/mnist/', train=False, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)


# design model using class
class Net(torch.nn.Module):
    """Two conv+pool stages followed by one fully-connected layer."""

    def __init__(self):
        super(Net, self).__init__()
        # (1,28,28) -> (10,24,24)
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        # (10,24,24) -> (10,12,12)
        self.pooling1 = torch.nn.MaxPool2d(2)
        # (10,12,12) -> (20,8,8)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        # (20,8,8) -> (20,4,4)
        self.pooling2 = torch.nn.MaxPool2d(2)
        # (20,4,4) -> flatten 320 -> 10 classes
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        batch_size = x.size(0)  # x is (batch,1,28,28)
        x = F.relu(self.pooling1(self.conv1(x)))
        x = F.relu(self.pooling2(self.conv2(x)))
        # flatten from (n,20,4,4) to (n,320); -1 infers 320
        x = x.view(batch_size, -1)
        # (n,320) -> (n,10)
        x = self.fc(x)
        return x


model = Net()
# CUDA
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

# construct loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)


# training cycle: forward, backward, update
def train(epoch):
    running_loss = 0.0
    for batch_idx, (x, y) in enumerate(train_loader, 0):
        # CUDA
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        outputs = model(x)
        loss = criterion(outputs, y)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        # print the loss every 300 batches (each batch holds 64 images/labels)
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for (x, y) in test_loader:
            # CUDA
            x, y = x.to(device), y.to(device)
            outputs = model(x)
            # dim=1: argmax across the 10 class columns
            _, predicted = torch.max(outputs.data, dim=1)
            total += y.size(0)
            correct += (predicted == y).sum().item()
    print('accuracy on test set: %d %% ' % (100 * correct / total))
    return correct / total


if __name__ == '__main__':
    epoch_list = []
    acc_list = []
    for epoch in range(10):
        train(epoch)
        acc = test()
        epoch_list.append(epoch)
        acc_list.append(acc)
    plt.plot(epoch_list, acc_list)
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.show()

11.高级CNN

# Method 1: Inception
###############################################
import torch
import torch.nn as nn
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt

# prepare dataset
batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),  # convert the PIL Image to Tensor (pillow)
    transforms.Normalize((0.1307,), (0.3081,))
])

train_dataset = datasets.MNIST(root='D:\install by myself\BaiduNetdiskDownload\新建文件夹(桌面)\douma-algo-master/dataset/mnist/', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='D:\install by myself\BaiduNetdiskDownload\新建文件夹(桌面)\douma-algo-master/dataset/mnist/', train=False, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)


# design model using class
class InceptionA(nn.Module):
    """Four parallel branches whose outputs are concatenated on the
    channel axis: pool+1x1(24), 1x1(16), 1x1->5x5(24), 1x1->3x3->3x3(24),
    giving 16 + 24*3 = 88 output channels."""

    def __init__(self, in_channels):
        super(InceptionA, self).__init__()
        self.branch1x1_1 = nn.Conv2d(in_channels, 24, kernel_size=1)
        self.branch1x1_2 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch1x1_3 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5 = nn.Conv2d(16, 24, kernel_size=5, padding=2)
        self.branch1x1_4 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_1 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_2 = nn.Conv2d(24, 24, kernel_size=3, padding=1)

    def forward(self, x):
        # branch A: average pool then 1x1 conv
        branch1x1_1_1 = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch1x1_1_2 = self.branch1x1_1(branch1x1_1_1)
        # branch B: plain 1x1 conv
        branch1x1_2 = self.branch1x1_2(x)
        # branch C: 1x1 then 5x5 conv
        branch1x1_3_1 = self.branch1x1_3(x)
        branch1x1_3_2 = self.branch5x5(branch1x1_3_1)
        # branch D: 1x1 then two 3x3 convs
        branch1x1_4_1 = self.branch1x1_4(x)
        branch1x1_4_2 = self.branch3x3_1(branch1x1_4_1)
        branch1x1_4_3 = self.branch3x3_2(branch1x1_4_2)
        outputs = [branch1x1_1_2, branch1x1_2, branch1x1_3_2, branch1x1_4_3]
        return torch.cat(outputs, dim=1)  # (b,c,w,h): channels are dim=1


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # (1,28,28) -> (10,24,24)
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        # (10,24,24) -> (10,12,12)
        self.pooling1 = nn.MaxPool2d(2)
        # (10,12,12) -> (88,12,12); 88 = 16 + 24*3
        self.incep1 = InceptionA(10)
        # (88,12,12) -> (20,8,8)
        self.conv2 = nn.Conv2d(88, 20, kernel_size=5)
        # (20,8,8) -> (20,4,4)
        self.pooling2 = torch.nn.MaxPool2d(2)
        # (20,4,4) -> (88,4,4)
        self.incep2 = InceptionA(20)
        # (88,4,4) -> flatten 1408 -> 10 classes
        self.fc = torch.nn.Linear(1408, 10)

    def forward(self, x):
        batch_size = x.size(0)  # x is (batch,1,28,28)
        x = F.relu(self.pooling1(self.conv1(x)))
        x = self.incep1(x)
        x = F.relu(self.pooling2(self.conv2(x)))
        x = self.incep2(x)
        # flatten from (n,88,4,4) to (n,1408); -1 infers 1408
        x = x.view(batch_size, -1)
        # (n,1408) -> (n,10)
        x = self.fc(x)
        return x


model = Net()
# CUDA
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

# construct loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)


# training cycle: forward, backward, update
def train(epoch):
    running_loss = 0.0
    for batch_idx, (x, y) in enumerate(train_loader, 0):
        # CUDA
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        outputs = model(x)
        loss = criterion(outputs, y)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        # print the loss every 300 batches (each batch holds 64 images/labels)
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for (x, y) in test_loader:
            # CUDA
            x, y = x.to(device), y.to(device)
            outputs = model(x)
            # dim=1: argmax across the 10 class columns
            _, predicted = torch.max(outputs.data, dim=1)
            total += y.size(0)
            correct += (predicted == y).sum().item()
    print('accuracy on test set: %d %% ' % (100 * correct / total))
    return correct / total


if __name__ == '__main__':
    epoch_list = []
    acc_list = []
    for epoch in range(2):
        train(epoch)
        acc = test()
        epoch_list.append(epoch)
        acc_list.append(acc)
    plt.plot(epoch_list, acc_list)
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.show()
# Method 2: Residual
###############################################
import torch
import torch.nn as nn
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt

# prepare dataset
batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),  # convert the PIL Image to Tensor (pillow)
    transforms.Normalize((0.1307,), (0.3081,))
])

train_dataset = datasets.MNIST(root='D:\install by myself\BaiduNetdiskDownload\新建文件夹(桌面)\douma-algo-master/dataset/mnist/', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='D:\install by myself\BaiduNetdiskDownload\新建文件夹(桌面)\douma-algo-master/dataset/mnist/', train=False, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)


# design model using class
class ResidualBlock(nn.Module):
    """Two 3x3 same-padding convs with a skip connection: relu(x + F(x))."""

    def __init__(self, in_channels):
        super(ResidualBlock, self).__init__()
        self.in_channels = in_channels
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        return F.relu(x + y)


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # (1,28,28) -> (16,24,24)
        self.conv1 = nn.Conv2d(1, 16, kernel_size=5)
        # (16,24,24) -> (16,12,12)
        self.pooling1 = nn.MaxPool2d(2)
        # (16,12,12) -> (16,12,12)
        self.rblock1 = ResidualBlock(16)
        # (16,12,12) -> (32,8,8)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5)
        # (32,8,8) -> (32,4,4)
        self.pooling2 = nn.MaxPool2d(2)
        # (32,4,4) -> (32,4,4)
        self.rblock2 = ResidualBlock(32)
        # (32,4,4) -> flatten 512 -> 10 classes
        self.fc = nn.Linear(512, 10)

    def forward(self, x):
        batch_size = x.size(0)  # x is (batch,1,28,28)
        x = F.relu(self.pooling1(self.conv1(x)))
        x = self.rblock1(x)
        x = F.relu(self.pooling2(self.conv2(x)))
        x = self.rblock2(x)
        # flatten from (n,32,4,4) to (n,512); -1 infers 512
        x = x.view(batch_size, -1)
        # (n,512) -> (n,10)
        x = self.fc(x)
        return x


model = Net()
# CUDA
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

# construct loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)


# training cycle: forward, backward, update
def train(epoch):
    running_loss = 0.0
    for batch_idx, (x, y) in enumerate(train_loader, 0):
        # CUDA
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        outputs = model(x)
        loss = criterion(outputs, y)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        # print the loss every 300 batches (each batch holds 64 images/labels)
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for (x, y) in test_loader:
            # CUDA
            x, y = x.to(device), y.to(device)
            outputs = model(x)
            # dim=1: argmax across the 10 class columns
            _, predicted = torch.max(outputs.data, dim=1)
            total += y.size(0)
            correct += (predicted == y).sum().item()
    print('accuracy on test set: %d %% ' % (100 * correct / total))
    return correct / total


if __name__ == '__main__':
    epoch_list = []
    acc_list = []
    for epoch in range(2):
        train(epoch)
        acc = test()
        epoch_list.append(epoch)
        acc_list.append(acc)
    plt.plot(epoch_list, acc_list)
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.show()

原作者3:MNIST-pytorch: Pytorch 实现全连接神经网络/卷积神经网络训练MNIST数据集,并将训练好的模型在自己的手写图片数据集上测试 (gitee.com)

实战练习:CNN实现MNIST识别(pytorch)

项目导读







思路(train.py):

1.参数设置(超参数、数据集路径等)

2.数据集通道构建(dataset加载数据以及数据预处理)

3.构建模型类(init中添加需要的层,forward函数中构建网络,return返回网络输出)

4.构建损失函数和优化器(torch.nn.CrossEntropyLoss(),torch.optim.Adam())

5.启动训练、启动测试(保存模型,保存loss&acc数据)

目录导航

1.1 model.py

import torch


# Network architecture: conv5 -> pool -> relu, twice, then a linear head.
class CNN(torch.nn.Module):
    """MNIST classifier: (N,1,28,28) -> logits (N,10)."""

    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(320, 10)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        batch_size = x.size(0)
        # (N,1,28,28) -> (N,10,24,24) -> (N,10,12,12)
        x = self.conv1(x)
        x = self.pooling(x)
        x = self.relu(x)
        # (N,10,12,12) -> (N,20,8,8) -> (N,20,4,4)
        x = self.conv2(x)
        x = self.pooling(x)
        x = self.relu(x)
        # flatten to (N,320) and project to 10 class logits
        x = x.view(batch_size, -1)
        x = self.fc(x)
        return x

1.2 train.py

from model import CNN
import torch
from torch.optim import lr_scheduler
# data handling
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
# command-line argument parsing
import argparse
# progress bars for train/test loops
from tqdm import tqdm
# data handling
import numpy as np
import pandas as pd
###############################################################
# argparse is part of the standard library; it parses command-line
# options and prints usage/errors automatically.
def parse_opt():
    """Parse the training hyper-parameters from the command line."""
    parser = argparse.ArgumentParser(description='CNNet-MNIST')
    parser.add_argument('--epochs', type=int, default=1, help='input total epoch')
    parser.add_argument('--batch_size', type=int, default=64, help='dataloader batch size')
    parser.add_argument('--lr', type=float, default=0.001, help='optimizer learning rate')
    parser.add_argument('--weight_decay', type=float, default=0.00001, help='optimizer weight_decay')
    args = parser.parse_args()
    return args
#################################################################
args = parse_opt()
# Tensors are the data objects of the network, so transform raw data to tensors.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
# training set; torchvision downloads it automatically
train_dataset = datasets.MNIST(root='D:\CNN实现Mnist识别\Dataset(for testing)', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=args.batch_size)
# test set
test_dataset = datasets.MNIST(root='D:\CNN实现Mnist识别\Dataset(for testing)', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=args.batch_size)
##################################################################
# CUDA
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('using {} device'.format(device))
# instantiate the network
model = CNN()
model.to(device)
####################################################################
# loss function and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# learning-rate schedule: exponential decay per epoch
scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
###################################################################
train_loss = []
train_acc = []


def train(epoch):
    """Run one training epoch; appends epoch loss/accuracy to the lists."""
    total_loss = 0
    total_correct = 0
    total_x_num = 0
    global iteration
    model.train()
    train_bar = tqdm(train_loader)
    for (x, y) in train_bar:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        y_pred = model(x)
        # accumulate correct count and loss
        _, predicted = torch.max(y_pred.data, dim=1)
        total_correct += torch.eq(predicted, y).sum().item()
        loss = criterion(y_pred, y)
        total_loss += loss.item()
        # update
        loss.backward()
        optimizer.step()
        total_x_num += x.size(0)
        iteration += 1
        # progress-bar description
        train_bar.desc = 'train epoch[{}/{}] loss:{:.3f} iteration":{}'.format(epoch + 1, args.epochs, loss, iteration)
    # learning-rate update
    scheduler.step()
    print(optimizer.state_dict()['param_groups'][0]['lr'])
    loss = total_loss / len(train_loader)
    acc = 100 * total_correct / total_x_num
    train_loss.append(loss)
    train_acc.append(acc)
    print('accuracy on train set:%d %%' % acc)


test_loss = []
test_acc = []


def test(epoch):
    """Evaluate on the test set; appends epoch loss/accuracy to the lists."""
    total_loss = 0
    total_correct = 0
    total_x_num = 0
    with torch.no_grad():
        test_bar = tqdm(test_loader)
        for (x, y) in test_bar:
            x, y = x.to(device), y.to(device)
            y_pred = model(x)
            # accumulate correct count and loss
            _, predicted = torch.max(y_pred.data, dim=1)
            total_correct += torch.eq(predicted, y).sum().item()
            loss = criterion(y_pred, y)
            total_loss += loss.item()
            total_x_num += x.size(0)
            # progress-bar description
            test_bar.desc = 'test epoch[{}/{}] loss:{:.3f} iteration":{}'.format(epoch + 1, args.epochs, loss, iteration)
    loss = total_loss / len(test_loader)
    acc = 100 * total_correct / total_x_num
    test_loss.append(loss)
    test_acc.append(acc)
    # fixed copy-paste bug: this is the test-set accuracy, not the train set
    print('accuracy on test set:%d %%' % acc)


if __name__ == "__main__":
    iteration = 1
    for i in range(args.epochs):
        train(i)
        test(i)
    # save the model weights (.pth)
    device_type = 'GPU' if torch.cuda.is_available() else 'CPU'
    torch.save(model.state_dict(), 'D:\CNN实现Mnist识别\{}\CNN.pth'.format(device_type))
    # save the loss & accuracy curves (.csv)
    epoch = np.arange(1, args.epochs + 1)
    dataframe = pd.DataFrame({'epoch': epoch, 'train loss': train_loss, 'train accuracy': train_acc, 'test loss': test_loss, 'test accuracy': test_acc})
    dataframe.to_csv(r'D:\CNN实现Mnist识别\{}\loss&acc.csv'.format(device_type))

1.3 plot.py

import pandas as pd
import matplotlib.pyplot as plt

device_type = input('CPU or GPU\n')
csv_path = 'D:\CNN实现Mnist识别\{}\loss&acc.csv'.format(device_type)
# bug workaround: pass an open file handle (alternatively use engine='python')
data = pd.read_csv(open(csv_path))

# pull out the columns written by train.py
epoch = data['epoch']
train_loss = data['train loss']
train_acc = data['train accuracy']
test_loss = data['test loss']
test_acc = data['test accuracy']

# NOTE(review): `font` is defined but never applied to the plots.
font = {'family': 'Times New Roman', 'weight': 'normal', 'size': 15, }

fig = plt.figure()
ax1 = fig.add_subplot(121)
ax1.plot(epoch, train_loss, color='r', label='training loss')
ax1.plot(epoch, test_loss, color='b', label='test loss')
ax1.legend()
ax1.set_title('Training and Test loss')

ax2 = fig.add_subplot(122)
ax2.plot(epoch, train_acc, color='r', label='training accuracy')
ax2.plot(epoch, test_acc, color='b', label='test accuracy')
ax2.legend()
ax2.set_title('Training and Test accuracy')
plt.show()

2.1 make_ours_dataset.py

# make_ours_dataset.py -- flatten a per-class folder of hand-written digit
# images into a single images/ folder plus a labels/labels.txt index file.
import os
# Image processing: invert colours so the digits match MNIST (white on black).
import PIL.ImageOps
from PIL import Image

classifyFolderPath = 'D:\CNN实现Mnist识别\my_mnist_dateset(for verification)/classify'
imageFolderPath = 'D:\CNN实现Mnist识别\my_mnist_dateset(for verification)/images'
labelFolderPath = 'D:\CNN实现Mnist识别\my_mnist_dateset(for verification)/labels/'
labelFilePath = os.path.join(labelFolderPath, 'labels.txt')

# The source folder must already exist; the output folders are created below.
# (Also fixes the "dose not exist" typo in the original message.)
assert os.path.exists(classifyFolderPath), "file:'{}'does not exist.".format(classifyFolderPath)
if not os.path.exists(imageFolderPath):
    os.mkdir(imageFolderPath)
if not os.path.exists(labelFolderPath):
    os.mkdir(labelFolderPath)

# BUG FIX: the original used a set literal, whose iteration order depends on
# string hash randomization and therefore varies between runs; a tuple keeps
# the output numbering deterministic.
classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')

index = 0
# `with` guarantees labels.txt is closed even if an image fails to load.
with open(labelFilePath, 'w') as file:
    for className in classes:
        # Folder holding all source images of this class.
        classPath = os.path.join(classifyFolderPath, className)
        for imageName in os.listdir(classPath):
            # Load the image of this class and invert its colours.
            imagePath = os.path.join(classPath, imageName)
            image = Image.open(imagePath)
            inverted_image = PIL.ImageOps.invert(image)
            # Save under a sequential numeric name in images/.
            newName = '{}.jpg'.format(index)
            inverted_image.save(os.path.join(imageFolderPath, newName))
            index += 1
            # One "<filename> <label>" line per image in labels.txt.
            file.write(newName + ' ' + className)
            file.write('\n')

2.2 my_dataset.py

import torch
import os
from PIL import Image
from torch.utils.data import Dataset


class MyMnistDataset(Dataset):
    """Dataset of hand-written digit images indexed by labels/labels.txt.

    Args:
        root: dataset root containing ``images/`` and ``labels/labels.txt``.
        transform: torchvision transform applied to each grayscale image.
    """

    def __init__(self, root, transform):
        self.myMnistPath = root
        self.trans = transform
        # BUG FIX: must be a dict (filename -> label); the original
        # initialised it as a list, so labelsDict[name] = ... raised TypeError.
        self.labelsDict = {}
        self.imagesData = []
        self.labelsData = []
        # BUG FIX: the original called self.loadLabelsDate(), a typo for the
        # method actually defined below (AttributeError at construction time).
        self.loadLabelsData()
        self.loadImageData()

    def loadLabelsData(self):
        """Read labels.txt ("<filename> <label>" per line) into labelsDict."""
        labelsPath = os.path.join(self.myMnistPath, 'labels', 'labels.txt')
        f = open(labelsPath)
        lines = f.readlines()
        for line in lines:
            name = line.split(' ')[0]
            label = line.split(' ')[1]
            # BUG FIX: int[label] (subscripting the int type) -> int(label);
            # int() tolerates the trailing newline.
            self.labelsDict[name] = int(label)
        # BUG FIX: the original leaked the file handle.
        f.close()

    def loadImageData(self):
        """Load every image in images/, pairing it with its label via labelsDict."""
        imagesFolderPath = os.path.join(self.myMnistPath, 'images')
        imageFiles = os.listdir(imagesFolderPath)
        for imageName in imageFiles:
            imagePath = os.path.join(imagesFolderPath, imageName)
            image = Image.open(imagePath)
            # Convert to single-channel grayscale to match MNIST.
            grayImage = image.convert('L')
            imageTensor = self.trans(grayImage)
            # Store the tensor image...
            self.imagesData.append(imageTensor)
            # ...and look up its label through the dict.
            self.labelsData.append(self.labelsDict[imageName])
        # Labels end up as a float tensor (callers cast with y.long()).
        self.labelsData = torch.Tensor(self.labelsData)

    def __getitem__(self, index):
        return self.imagesData[index], self.labelsData[index]

    def __len__(self):
        return len(self.labelsData)

2.3 trained_model_test.py

# trained_model_test.py -- run the trained CNN on our own hand-written digit
# dataset and report per-sample predictions and the overall accuracy.
from my_dataset import MyMnistDataset
import torch
# Data preprocessing
from torchvision import transforms
from torch.utils.data import DataLoader
from model import CNN

# Same preprocessing as during training (MNIST mean/std normalisation).
transform = transforms.Compose([
    transforms.Resize([28, 28]),
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

# Load our own dataset; batch size 1, original file order.
dataset = MyMnistDataset(root='D:\CNN实现Mnist识别\my_mnist_dateset(for verification)', transform=transform)
test_loader = DataLoader(dataset=dataset, shuffle=False)

# Load the trained weights saved by the training script.
model = CNN()
device_type = 'GPU' if torch.cuda.is_available() else 'CPU'
print('loading {} trained model...'.format(device_type))
model.load_state_dict(torch.load('D:\CNN实现Mnist识别\{}\CNN.pth'.format(device_type)))
# BUG FIX: switch to inference mode so dropout/batch-norm layers (if any)
# behave deterministically during evaluation.
model.eval()


def test():
    """Print label vs. prediction for every sample and the overall accuracy."""
    total_correct = 0
    total_num = 0
    print('label predicted')
    with torch.no_grad():
        for (x, y) in test_loader:
            y_pred = model(x)
            _, predicted = torch.max(y_pred, dim=1)
            print('{}  {}'.format(int(y.item()), predicted.data.item()))
            total_num += x.size(0)
            # y is a float tensor (see my_dataset), so cast before comparing.
            total_correct += (predicted == y.long()).sum().item()
    print(total_num)
    print(total_correct)
    print('CNN trained model: accuracy on my_mnist_dataset set:%d %%' % (100 * total_correct / total_num))


if __name__ == '__main__':
    test()

【基础系列】赏析刘洪普《PyTorch深度学习实践》与《实战:基于CNN的MNIST手写数字识别》(Python版)相关推荐

  1. 深度学习案例之基于 CNN 的 MNIST 手写数字识别

    一.模型结构 本文只涉及利用Tensorflow实现CNN的手写数字识别,CNN的内容请参考:卷积神经网络(CNN) MNIST数据集的格式与数据预处理代码input_data.py的讲解请参考 :T ...

  2. 深度学习(32)随机梯度下降十: 手写数字识别问题(层)

    深度学习(32)随机梯度下降十: 手写数字识别问题(层) 1. 数据集 2. 网络层 3. 网络模型 4. 网络训练 本节将利用前面介绍的多层全连接网络的梯度推导结果,直接利用Python循环计算每一 ...

  3. 【深度学习】实验1答案:Softmax实现手写数字识别

    DL_class 学堂在线<深度学习>实验课代码+报告(其中实验1和实验6有配套PPT),授课老师为胡晓林老师.课程链接:https://www.xuetangx.com/training ...

  4. 深度学习入门实例——基于keras的mnist手写数字识别

    本文介绍了利用keras做mnist数据集的手写数字识别. 参考网址 http://www.cnblogs.com/lc1217/p/7132364.html mnist数据集中的图片为28*28的单 ...

  5. 深度学习练手项目(一)-----利用PyTorch实现MNIST手写数字识别

    一.前言 MNIST手写数字识别程序就不过多赘述了,这个程序在深度学习中的地位跟C语言中的Hello World地位并驾齐驱,虽然很基础,但很重要,是深度学习入门必备的程序之一. 二.MNIST数据集 ...

  6. MOOC网深度学习应用开发1——Tensorflow基础、多元线性回归:波士顿房价预测问题Tensorflow实战、MNIST手写数字识别:分类应用入门、泰坦尼克生存预测

    Tensorflow基础 tensor基础 当数据类型不同时,程序做相加等运算会报错,可以通过隐式转换的方式避免此类报错. 单变量线性回归 监督式机器学习的基本术语 线性回归的Tensorflow实战 ...

  7. 用MXnet实战深度学习之一:安装GPU版mxnet并跑一个MNIST手写数字识别 (zz)

    用MXnet实战深度学习之一:安装GPU版mxnet并跑一个MNIST手写数字识别 我想写一系列深度学习的简单实战教程,用mxnet做实现平台的实例代码简单讲解深度学习常用的一些技术方向和实战样例.这 ...

  8. 基于TensorFlow深度学习框架,运用python搭建LeNet-5卷积神经网络模型和mnist手写数字识别数据集,设计一个手写数字识别软件。

    本软件是基于TensorFlow深度学习框架,运用LeNet-5卷积神经网络模型和mnist手写数字识别数据集所设计的手写数字识别软件. 具体实现如下: 1.读入数据:运用TensorFlow深度学习 ...

  9. 深度学习21天——卷积神经网络(CNN):实现mnist手写数字识别(第1天)

    目录 一.前期准备 1.1 环境配置 1.2 CPU和GPU 1.2.1 CPU 1.2.2 GPU 1.2.3 CPU和GPU的区别 第一步:设置GPU 1.3 MNIST 手写数字数据集 第二步: ...

  10. 《深度学习之TensorFlow》reading notes(3)—— MNIST手写数字识别之二

    文章目录 模型保存 模型读取 测试模型 搭建测试模型 使用模型 模型可视化 本文是在上一篇文章 <深度学习之TensorFlow>reading notes(2)-- MNIST手写数字识 ...

最新文章

  1. binutils-2.22编译心得
  2. Servlet技术简介与编写、编译Servlet程序
  3. 2013腾讯编程马拉松初赛(3月20日)
  4. Federated learning论文修改2021-11-14(X-Y Liang)
  5. js中call()方法和apply方法的使用
  6. 语言阿克曼函数_函数式的动态规划
  7. jroo,已开源,一个java web快速开发工具
  8. 如何 珍惜自己和珍重别人。珍惜一切
  9. 基于MATLAB OCR的发票识别系统
  10. php bmp图片下载,[gd]生成bmp格式的图片(imagebmp)_php技巧
  11. 实验3:视频播放小程序
  12. Qt播放视频0x8007000e报错 DirectShowPlayerService::doPlay: Unresolved error code 0x8007000e
  13. 计算机824难不难,南京理工大学
  14. Apple Watch更懂女人心
  15. 继美团重申严禁诱导强迫骑手注册成个体工商户后,饿了么也回应了
  16. 三跨复旦计算机 初复试经验贴
  17. 计算机中 8位无符号数,8位无符号数乘法运算HDL设计实例 - 全文
  18. AcWing-算法提高课【合集】
  19. 2020.9.9华为笔试记忆:KMP+记忆化搜索+字典树
  20. linux centos安装远程软件向日葵

热门文章

  1. 网站攻击常见的几种方式
  2. torch代码解析 为什么要使用optimizer.zero_grad()
  3. 雨夜赶长路,房企必经的三场“价值战事”
  4. VARCHART XGantt v5.2用户手册:甘特图如何Drag Drop
  5. ANSI/ISO C++ Professional Programmer's Handbook 7
  6. Android UserManager.isUserAGoat() 的正确用例?
  7. 程序员必备的七个电脑软件
  8. 在现有Fabric 2.2.0 网络上设置和运行Caliper性能测试 实战
  9. marshmallow文档
  10. informix数据库大全(持续更新)