Contents

  • 1. Single-layer neural network implementation (CH3.7)
  • 2. Optimizer usage example (CH4.3)
  • 3. Deep neural network implementation
  • 4. Convolutional neural network on the MNIST dataset (CH5.3)
  • 5. Autoencoder implementation (CH6.2)
  • 6. Image denoising with an autoencoder
    • 1. Trained for 30 epochs
    • 2. Trained for 100 epochs
    • 3. Trained for 200 epochs
    • 4. Trained for 1000 epochs
  • 7. PCA in PyTorch (CH6.1)

1. Single-layer neural network implementation (CH3.7)

'''
Classification of the iris dataset in PyTorch
'''
import torch
import matplotlib.pyplot as plt
import torch.nn.functional as F
from sklearn.datasets import load_iris
from torch.autograd import Variable
from torch.optim import SGD

# Check whether a GPU is available
use_cuda = torch.cuda.is_available()
print('use_cuda: ', use_cuda)
use_cpu = torch.fbgemm_is_cpu_supported()
print('use_cpu: ', use_cpu)

# Load the dataset
iris = load_iris()
print(iris.keys())
# dict_keys(['target_names', 'data', 'feature_names', 'DESCR', 'target'])

# Preprocess the data
x = iris['data']    # feature matrix
y = iris['target']  # class labels
print(x.shape)  # (150, 4)
print(y.shape)  # (150,)
print(y)
x = torch.FloatTensor(x)
y = torch.LongTensor(y)
x, y = Variable(x), Variable(y)

# Define the network; it must subclass nn.Module
class Net(torch.nn.Module):
    # The constructor takes the input, hidden and output feature dimensions
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)    # one linear hidden layer
        self.predict = torch.nn.Linear(n_hidden, n_output)    # linear output layer

    # Forward pass
    def forward(self, x):
        x = torch.tanh(self.hidden(x))  # tanh does much worse than sigmoid here; reason unknown
        x = self.predict(x)
        out = F.log_softmax(x, dim=1)
        return out

# Instantiate the network and inspect its structure
# The iris input is 4-dimensional; the hidden and output sizes are up to you
net = Net(n_feature=4, n_hidden=5, n_output=3)
print(net)

# Put the tensors and the model on the appropriate processor; here everything stays on the CPU
x = x.cpu()
y = y.cpu()
net = net.cpu()

# Learning rate 0.5
optimizer = SGD(net.parameters(), lr=0.5)

px, py = [], []   # data for the loss plot
for i in range(1000):
    # Forward pass over the whole dataset
    prediction = net(x)
    # Compute the loss
    loss = F.nll_loss(prediction, y)
    # Clear accumulated gradients
    optimizer.zero_grad()
    # Backpropagate the loss
    loss.backward()
    # Update the parameters
    optimizer.step()
    # Print the loss at every iteration
    print(i, 'loss: ', loss.item())
    px.append(i)
    py.append(loss.item())
    # Redraw the loss curve every ten iterations
    if i % 10 == 0:
        plt.cla()
        plt.plot(px, py, 'r-', lw=1)
        plt.text(0, 0, 'Loss=%.4f' % loss.item(), fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)
plt.savefig(r'E:\Pycharm\project\project1\tanh训练结果.png')
plt.show()
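
After the loop finishes, it is worth checking how well the network separates the three classes. The following is a minimal sketch, not part of the book's listing, that reuses net, x and y from above to compute the training accuracy:

# Minimal accuracy check on the training data (a sketch; reuses net, x, y from above)
prediction = net(x)                       # log-probabilities, shape (150, 3)
_, predicted = torch.max(prediction, 1)   # index of the largest log-probability per row
accuracy = (predicted == y).sum().item() / y.size(0)
print('training accuracy: %.4f' % accuracy)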

Training result:

2. Optimizer usage example (CH4.3)

import torch
import torch.utils.data as Data
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
import numpy as np

torch.manual_seed(1)    # for reproducibility
LR = 0.01
BATCH_SIZE = 20
EPOCH = 10

# Generate the data
x = torch.unsqueeze(torch.linspace(-1, 1, 1500), dim=1)
y = x.pow(3) + 0.1 * torch.normal(torch.zeros(*x.size()))

# Plot the data
plt.scatter(x.numpy(), y.numpy())
plt.show()

# Wrap the data in a torch dataset
torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True,
                         num_workers=0)   # 0 means a single process; multiprocessing must be launched under a main guard

# Define the model
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)
        self.predict = torch.nn.Linear(20, 1)

    def forward(self, x):
        x = torch.relu(self.hidden(x))  # hidden-layer activation
        x = self.predict(x)             # linear output layer
        return x

# One network per optimizer
net_SGD = Net()
net_Momentum = Net()
net_RMSprop = Net()
net_AdaGrad = Net()
net_Adam = Net()
nets = [net_SGD, net_Momentum, net_AdaGrad, net_RMSprop, net_Adam]

# The optimizers to compare
opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
opt_AdaGrad = torch.optim.Adagrad(net_AdaGrad.parameters(), lr=LR)
opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
optimizers = [opt_SGD, opt_Momentum, opt_AdaGrad, opt_RMSprop, opt_Adam]

loss_func = torch.nn.MSELoss()
losser_his = [[], [], [], [], []]   # loss history, one list per optimizer

# Train the models
for epoch in range(EPOCH):
    print('Epoch: ', epoch)
    for step, (batch_x, batch_y) in enumerate(loader):
        b_x = Variable(batch_x)
        b_y = Variable(batch_y)
        for net, opt, l_his in zip(nets, optimizers, losser_his):
            output = net(b_x)
            loss = loss_func(output, b_y)
            opt.zero_grad()
            loss.backward()
            opt.step()
            l_his.append(loss.item())

labels = ['SGD', 'Momentum', 'Adagrad', 'RMSprop', 'Adam']
for i, l_his in enumerate(losser_his):
    plt.plot(l_his, label=labels[i])
plt.legend(loc='best')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.ylim((0, 0.2))
plt.show()
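
As a rough sketch of what momentum=0.8 changes: plain SGD moves each parameter by -lr * grad, while momentum SGD keeps a running velocity. The lines below, assuming some parameter tensor w with w.grad already populated, mimic the update PyTorch's SGD performs (ignoring dampening and weight decay):

# A hand-rolled SGD-with-momentum step (a sketch; w is any parameter tensor with w.grad populated)
def momentum_step(w, v, lr=0.01, momentum=0.8):
    v.mul_(momentum).add_(w.grad)   # v = momentum * v + grad
    w.data.add_(-lr * v)            # w = w - lr * v

# v starts as torch.zeros_like(w) and is carried across steps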

The training result is as follows:

3. Deep neural network implementation

import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transform
from torch.autograd import Variable

# Configuration
torch.manual_seed(1)    # for reproducibility
input_size = 784
hidden_size = 500
num_classes = 10
num_epochs = 5        # number of training epochs
batch_size = 100      # mini-batch size
learning_rate = 0.001

# Load the MNIST data
train_dataset = dsets.MNIST(root='./data',   # where the data is stored
                            train=True,      # training set
                            transform=transform.ToTensor(),  # converts a PIL.Image with values 0-255 to a Tensor with values 0-1
                            download=True)
test_dataset = dsets.MNIST(root='./data',
                           train=False,      # test set
                           transform=transform.ToTensor())

# Batch the data
# Each batch holds batch_size samples
# On the training set shuffle must be True so the order is random
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

# Define the DNN model
class Net(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

net = Net(input_size, hidden_size, num_classes)
print(net)

# Training: define the loss and the optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)

# Start training
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):    # batch processing
        images = Variable(images.view(-1, 28*28))
        labels = Variable(labels)
        # forward + backward + optimize
        optimizer.zero_grad()   # clear gradients so they do not leak across batches
        outputs = net(images)   # forward pass
        loss = criterion(outputs, labels)
        loss.backward()         # backward pass: compute gradients
        optimizer.step()
        if (i+1) % 100 == 0:
            print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'
                  % (epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.item()))

# Evaluate the model on the test set
correct = 0
total = 0
for images, labels in test_loader:   # batch over the test set
    images = Variable(images.view(-1, 28*28))
    outputs = net(images)
    _, predicted = torch.max(outputs.data, 1)        # predicted classes
    total += labels.size(0)                          # number of samples
    correct += (predicted == labels).sum().item()    # number of correct predictions
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
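
Once training is done, the learned weights can be persisted so the network does not have to be retrained each run. A minimal sketch using state_dict; the file name dnn_mnist.pth is arbitrary:

# Save only the learned parameters (the file name is arbitrary)
torch.save(net.state_dict(), 'dnn_mnist.pth')
# Later: rebuild the same architecture and load the weights back
net2 = Net(input_size, hidden_size, num_classes)
net2.load_state_dict(torch.load('dnn_mnist.pth'))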

4. Convolutional neural network on the MNIST dataset (CH5.3)

# Imports
import torch
from torch import nn, optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets

# Configuration
torch.manual_seed(1)
batch_size = 128       # mini-batch size
learning_rate = 1e-2
num_epocher = 10       # number of training epochs

# Load the MNIST data
train_dataset = datasets.MNIST(root='./data', train=True,
                               transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(root='./data', train=False,    # test set
                              transform=transforms.ToTensor())
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# Define the CNN model
class Cnn(nn.Module):
    def __init__(self, in_dim, n_class):
        super(Cnn, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_dim, 6, 3, stride=1, padding=1),   # 28x28
            nn.ReLU(True),
            nn.MaxPool2d(2, 2),                             # 14x14; pooling shrinks the feature map
            nn.Conv2d(6, 16, 5, stride=1, padding=0),       # 10x10x16
            nn.ReLU(True),
            nn.MaxPool2d(2, 2)                              # 5x5x16
        )
        self.fc = nn.Sequential(
            nn.Linear(400, 120),   # 400 = 5*5*16
            nn.Linear(120, 84),
            nn.Linear(84, n_class)
        )

    def forward(self, x):
        out = self.conv(x)
        out = out.view(out.size(0), 400)
        out = self.fc(out)
        return out

model = Cnn(1, 10)   # 28x28 single-channel images, 10 classes
# Print the model
print(model)

# Training: define the loss and the optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)

# Start training
for epoch in range(num_epocher):
    running_loss = 0.0
    running_acc = 0.0
    for i, data in enumerate(train_loader, 1):
        img, label = data
        img = Variable(img)
        label = Variable(label)
        # Forward pass
        out = model(img)
        loss = criterion(out, label)
        running_loss += loss.item() * label.size(0)   # the loss is a batch mean, so multiply the batch size back
        _, pred = torch.max(out, 1)                   # predicted classes
        num_correct = (pred == label).sum()           # correct predictions in this batch
        running_acc += num_correct.item()             # running count of correct predictions
        # Backward pass
        optimizer.zero_grad()    # clear gradients
        loss.backward()          # compute gradients
        optimizer.step()         # update the weights and biases
    # After each epoch, print the loss and accuracy on the training set
    print('Train {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(
        epoch+1, running_loss / len(train_dataset), running_acc / len(train_dataset)))

# Evaluate the recognition rate on the test set
model.eval()    # switch the model to evaluation mode
eval_loss = 0
eval_acc = 0
for data in test_loader:   # batch over the test set
    img, label = data
    with torch.no_grad():   # Variable(img, volatile=True) was removed in newer versions; no_grad disables backward
        out = model(img)    # forward pass
        loss = criterion(out, label)
    eval_loss += loss.item() * label.size(0)   # total loss
    _, pred = torch.max(out, 1)                # predicted classes
    num_correct = (pred == label).sum()        # correct predictions
    eval_acc += num_correct.item()             # total correct predictions
print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
    eval_loss / len(test_dataset), eval_acc * 1.0 / len(test_dataset)))
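
The 400 in the first fully connected layer comes from the 5x5x16 shape noted in the conv comments. One quick way to double-check that kind of dimension arithmetic is to push a dummy batch through the convolutional part alone, as in this sketch:

# Verify the flattened feature size with a dummy input (a sketch)
dummy = torch.randn(1, 1, 28, 28)   # one fake 28x28 grayscale image
feat = model.conv(dummy)
print(feat.shape)                   # expected: torch.Size([1, 16, 5, 5]), i.e. 16*5*5 = 400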

5. Autoencoder implementation (CH6.2)

# Imports
import os
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
from torchvision.utils import save_image
from torchvision import datasets
import matplotlib.pyplot as plt

# Configuration
torch.manual_seed(1)
batch_size = 128
learning_rate = 1e-2
num_epochs = 10

# Download the data
train_dataset = datasets.MNIST(root='./data', train=True,
                               transform=transforms.ToTensor(), download=False)
test_dataset = datasets.MNIST(root='./data', train=False,
                              transform=transforms.ToTensor())
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=10000, shuffle=False)

class autoencoder(nn.Module):
    def __init__(self):
        super(autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(28*28, 1000),
            nn.ReLU(True),
            nn.Linear(1000, 500),
            nn.ReLU(True),
            nn.Linear(500, 250),
            nn.ReLU(True),
            nn.Linear(250, 2)
        )
        self.decoder = nn.Sequential(
            nn.Linear(2, 250),
            nn.ReLU(True),
            nn.Linear(250, 500),
            nn.ReLU(True),
            nn.Linear(500, 1000),
            nn.ReLU(True),
            nn.Linear(1000, 28*28),
            nn.Tanh()
        )

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x

model = autoencoder()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)

# Train the model
for epoch in range(num_epochs):
    for data in train_loader:
        img, _ = data
        img = img.view(img.size(0), -1)
        img = Variable(img)
        # Forward pass
        output = model(img)
        loss = criterion(output, img)
        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print('epoch [{}/{}], loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))

# Test the model: plot the 2-D codes of the test set
model.eval()
for data in test_loader:   # one big batch of 10000 images
    img, label = data
    img = img.view(img.size(0), -1)
    with torch.no_grad():   # volatile=True was removed in newer versions
        code = model.encoder(img)   # the 2-D scatter needs the encoder codes, not the full reconstruction
    y = label.numpy()
    plt.scatter(code[:, 0].numpy(), code[:, 1].numpy(), c=y)
    plt.colorbar()
    plt.title('autoencoder of MNIST test dataset')
    plt.show()
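
Because the code layer is only 2-dimensional, the decoder can also be driven directly to see what each region of the latent plane generates. A small sketch; the grid range [-3, 3] is a guess, not a value from the book:

# Decode a grid of 2-D codes to visualize the learned manifold (a sketch)
import numpy as np
with torch.no_grad():
    grid = torch.FloatTensor([[gx, gy] for gx in np.linspace(-3, 3, 8)
                                       for gy in np.linspace(-3, 3, 8)])
    digits = model.decoder(grid).view(-1, 28, 28).numpy()
plt.figure(figsize=(8, 8))
for idx in range(64):
    plt.subplot(8, 8, idx + 1)
    plt.imshow(digits[idx], cmap='gray')
    plt.axis('off')
plt.show()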

The training result is as follows:

6. Image denoising with an autoencoder

# Denoising autoencoder
import torch
import torch.nn as nn
import torch.utils as utils
from torch.autograd import Variable
import torchvision.datasets as dset
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt

# Configuration
torch.manual_seed(1)
n_epoch = 1000
batch_size = 100
learning_rate = 0.0002

# Download the training images
mnist_train = dset.MNIST(root='./data', train=True, transform=transforms.ToTensor(),
                         target_transform=None, download=False)
train_loader = torch.utils.data.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True)

# Encoder model
class Encoder(nn.Module):
    def __init__(self):
        super(Encoder, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 32, 3, padding=1),    # batch x 32 x 28 x 28
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.Conv2d(32, 32, 3, padding=1),   # batch x 32 x 28 x 28
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.Conv2d(32, 64, 3, padding=1),   # batch x 64 x 28 x 28
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 64, 3, padding=1),   # batch x 64 x 28 x 28
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.MaxPool2d(2, 2)                 # batch x 64 x 14 x 14
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(64, 128, 3, padding=1),  # batch x 128 x 14 x 14
            nn.ReLU(),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 128, 3, padding=1), # batch x 128 x 14 x 14
            nn.ReLU(),
            nn.BatchNorm2d(128),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(128, 256, 3, padding=1), # batch x 256 x 7 x 7
            nn.ReLU()
        )

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(batch_size, -1)
        return out

encoder = Encoder()

# Decoder model
class Decoder(nn.Module):
    def __init__(self):
        super(Decoder, self).__init__()
        self.layer1 = nn.Sequential(
            nn.ConvTranspose2d(256, 128, 3, 2, 1, 1),  # batch x 128 x 14 x 14
            nn.ReLU(),
            nn.BatchNorm2d(128),
            nn.ConvTranspose2d(128, 128, 3, 1, 1),     # batch x 128 x 14 x 14
            nn.ReLU(),
            nn.BatchNorm2d(128),
            nn.ConvTranspose2d(128, 64, 3, 1, 1),      # batch x 64 x 14 x 14
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.ConvTranspose2d(64, 64, 3, 1, 1),       # batch x 64 x 14 x 14
            nn.ReLU(),
            nn.BatchNorm2d(64)
        )
        self.layer2 = nn.Sequential(
            nn.ConvTranspose2d(64, 32, 3, 1, 1),       # batch x 32 x 14 x 14
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.ConvTranspose2d(32, 32, 3, 1, 1),       # batch x 32 x 14 x 14
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.ConvTranspose2d(32, 1, 3, 2, 1, 1),     # batch x 1 x 28 x 28
            nn.ReLU()
        )

    def forward(self, x):
        out = x.view(batch_size, 256, 7, 7)
        out = self.layer1(out)
        out = self.layer2(out)
        return out

decoder = Decoder()

# Loss function and optimizer: train the encoder and decoder jointly
parameters = list(encoder.parameters()) + list(decoder.parameters())
loss_func = nn.MSELoss()
optimizer = torch.optim.Adam(parameters, lr=learning_rate)

# Add noise to the inputs and train
noise = torch.rand(batch_size, 1, 28, 28)
for i in range(n_epoch):
    for image, label in train_loader:
        image_n = torch.mul(image + 0.25, 0.1 * noise)
        image = Variable(image)
        image_n = Variable(image_n)
        optimizer.zero_grad()
        output = encoder(image_n)
        output = decoder(output)
        loss = loss_func(output, image)
        loss.backward()
        optimizer.step()
        break   # train on a single batch per epoch
    print('epoch [{}/{}], loss: {:.4f}'.format(i+1, n_epoch, loss.item()))

# Show the original, the noisy input and the denoised output side by side
img = image[0]
input_img = image_n[0]
output_img = output[0]
origin = img.data.numpy()
inp = input_img.data.numpy()
out = output_img.data.numpy()
plt.figure('denoising autoencoder')
plt.subplot(131)
plt.imshow(origin[0], cmap='gray')
plt.subplot(132)
plt.imshow(inp[0], cmap='gray')
plt.subplot(133)
plt.imshow(out[0], cmap='gray')
plt.show()
print(label[0])   # loss = 0.0164
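
The corruption above multiplies the shifted image by scaled uniform noise. A more conventional alternative, if you want to experiment, is additive Gaussian noise clamped back to the valid pixel range; a one-line sketch (the 0.3 noise scale is arbitrary):

# Additive Gaussian corruption as an alternative (a sketch; 0.3 is an arbitrary noise scale)
image_n = torch.clamp(image + 0.3 * torch.randn_like(image), 0., 1.)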

The training results are as follows:

1. Trained for 30 epochs

2. Trained for 100 epochs

3. Trained for 200 epochs

4. Trained for 1000 epochs

7. PCA in PyTorch (CH6.1)

PCA extracts a small number of principal component variables from a large body of data, reducing the dimensionality while preserving as much of the original information as possible; this shrinks the data volume and makes it easier to process. The following is a PyTorch implementation of PCA:
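
In symbols, the listing below computes: center the n x d data matrix X by subtracting the column means; take the SVD of the transposed, centered matrix, X^T = U S V^T, so that the columns of U are the principal directions; then project onto the first k of them, X_reduced = X U[:, :k]. No explicit eigendecomposition of the covariance matrix is needed, since the SVD yields the same directions.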

from sklearn import datasets
import torch
import numpy as np
import matplotlib.pyplot as plt

def PCA(data, k=2):
    # Preprocess the data: center each feature
    X = torch.from_numpy(data)
    X_mean = torch.mean(X, 0)
    X = X - X_mean.expand_as(X)
    # SVD of the transposed, centered data matrix
    U, S, V = torch.svd(torch.t(X))
    # Project onto the first k principal directions
    return torch.mm(X, U[:, :k])

iris = datasets.load_iris()
X = iris.data
y = iris.target
X_PCA = PCA(X)
pca = X_PCA.numpy()

plt.figure()
color = ['red', 'green', 'blue']
for i, target_name in enumerate(iris.target_names):
    plt.scatter(pca[y == i, 0], pca[y == i, 1], label=target_name, color=color[i])
plt.legend()
plt.title('PCA of IRIS dataset')
plt.show()
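
As a sanity check, the projection can be compared with scikit-learn's own PCA. The two results may differ column-by-column in sign, which is expected because singular vectors are only determined up to sign. A sketch:

# Compare against sklearn's PCA (a sketch; columns may differ only by sign)
from sklearn.decomposition import PCA as SkPCA
sk = SkPCA(n_components=2).fit_transform(X)
print(np.allclose(np.abs(pca), np.abs(sk), atol=1e-4))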
