RNN / LSTM / GRU Hands-On: A Simple Text Generation Task

import torch
if torch.cuda.is_available():
    # Tell PyTorch to use the GPU.
    device = torch.device("cuda")
    print('There are %d GPU(s) available.' % torch.cuda.device_count())
    print('We will use the GPU:', torch.cuda.get_device_name(0))
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")
There are 1 GPU(s) available.
We will use the GPU: GeForce GTX 1070
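As a small aside (my addition, not from the original post), the device object chosen above can be used to move tensors and models between CPU and GPU; the rest of the code calls .cuda() directly, which is equivalent whenever a GPU is present.

# Minimal sketch, assuming `device` was set by the snippet above.
x = torch.randn(3, 4)   # created on the CPU by default
x = x.to(device)        # moved to the GPU when one is available
print(x.device)         # e.g. cuda:0, or cpu on a machine without a GPU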

Set the hyperparameters

import torchtext
from torchtext.vocab import Vectors
import numpy as np
import random
# To make experiments reproducible, we usually fix all random seeds to one value.
random.seed(53113)
np.random.seed(53113)
torch.manual_seed(53113)

BATCH_SIZE = 32         # number of sequences per batch
EMBEDDING_SIZE = 650    # dimensionality of each word embedding
MAX_VOCAB_SIZE = 50000  # maximum number of words in the vocabulary

Load the dataset

TEXT = torchtext.data.Field(lower=True)
# https://s0pytorch0org.icopy.site/text/data.html?highlight=torchtext%20data%20field#torchtext.data.Field
# torchtext provides the LanguageModelingDataset class to handle language-modeling datasets.
train, val, test = torchtext.datasets.LanguageModelingDataset.splits(
    path=".", train="train.txt", validation="dev.txt", test="test.txt", text_field=TEXT)
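A quick, hedged look at what LanguageModelingDataset builds (my addition, not from the original post): each split is stored as a single long token stream in one Example, which is what later allows the BPTT iterator to cut out contiguous chunks of text.

# Sketch: inspect the loaded training corpus, assuming the splits above succeeded.
print(len(train.examples))          # 1: the whole file becomes one long example
print(train.examples[0].text[:10])  # the first few tokens of train.txt
print(len(train.examples[0].text))  # total number of tokens in the training corpus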

Build the vocabulary

TEXT.build_vocab(train, max_size=MAX_VOCAB_SIZE)
# build_vocab builds a vocabulary of the most frequent words in the training set; max_size caps the total number of words.
print("vocabulary size: {}".format(len(TEXT.vocab)))
vocabulary size: 50002
print(TEXT.vocab.itos[0:50])
# Two special tokens are added automatically: <unk> for unknown words and <pad> for padding.
['<unk>', '<pad>', 'the', 'of', 'and', 'one', 'in', 'a', 'to', 'zero', 'nine', 'two', 'is', 'as', 'eight', 'for', 's', 'five', 'three', 'was', 'by', 'that', 'four', 'six', 'seven', 'with', 'on', 'are', 'it', 'from', 'or', 'his', 'an', 'be', 'this', 'he', 'at', 'which', 'not', 'also', 'have', 'were', 'has', 'but', 'other', 'their', 'its', 'first', 'they', 'had']
print(list(TEXT.vocab.stoi.items())[0:50])
[('<unk>', 0), ('<pad>', 1), ('the', 2), ('of', 3), ('and', 4), ('one', 5), ('in', 6), ('a', 7), ('to', 8), ('zero', 9), ('nine', 10), ('two', 11), ('is', 12), ('as', 13), ('eight', 14), ('for', 15), ('s', 16), ('five', 17), ('three', 18), ('was', 19), ('by', 20), ('that', 21), ('four', 22), ('six', 23), ('seven', 24), ('with', 25), ('on', 26), ('are', 27), ('it', 28), ('from', 29), ('or', 30), ('his', 31), ('an', 32), ('be', 33), ('this', 34), ('he', 35), ('at', 36), ('which', 37), ('not', 38), ('also', 39), ('have', 40), ('were', 41), ('has', 42), ('but', 43), ('other', 44), ('their', 45), ('its', 46), ('first', 47), ('they', 48), ('had', 49)]
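One detail worth knowing (my addition, hedged because it relies on this legacy torchtext API): TEXT.vocab.stoi behaves like a default dictionary, so any word outside the 50,000 kept words maps to index 0, i.e. <unk>.

# Sketch, assuming the legacy torchtext Vocab built above.
print(TEXT.vocab.stoi["the"])          # 2: a frequent in-vocabulary word
print(TEXT.vocab.stoi["qwertyuiop"])   # 0: unseen words fall back to <unk>
print(TEXT.vocab.itos[0])              # '<unk>'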

Create the iterators

# BPTTIterator: an iterator based on BPTT (back-propagation through time), typically used for language models.
# It yields contiguous, coherent chunks of text, so consecutive batches continue where the previous one ended.
train_iter, val_iter, test_iter = torchtext.data.BPTTIterator.splits(
    (train, val, test),
    batch_size=BATCH_SIZE,
    bptt_len=50,  # how far back-propagation reaches in time, i.e. how many words of one sample go into the model
    repeat=False,
    shuffle=True)
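To make the iterator's behaviour concrete, here is a short inspection sketch (my addition, not part of the original code): each batch exposes text of shape [bptt_len, batch_size] and target, the same text shifted one step forward, so the model is always asked to predict the next word.

# Sketch: peek at the first training batch; shapes assume bptt_len=50 and BATCH_SIZE=32.
batch = next(iter(train_iter))
print(batch.text.shape)    # torch.Size([50, 32])
print(batch.target.shape)  # torch.Size([50, 32]), the input shifted by one word
print(" ".join(TEXT.vocab.itos[i.item()] for i in batch.text[:5, 0]))
print(" ".join(TEXT.vocab.itos[i.item()] for i in batch.target[:5, 0]))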

Build the language model

import torch
import torch.nn as nn

class My_Model(nn.Module):
    def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5):
        super(My_Model, self).__init__()
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(ntoken, ninp)
        self.rnn_type = rnn_type
        self.nhid = nhid
        self.nlayers = nlayers
        self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
        print(getattr(nn, rnn_type))
        self.decoder = nn.Linear(nhid, ntoken)
        # Initialize the weights uniformly in [-0.1, 0.1].
        initrange = 0.1
        self.encoder.weight.data.uniform_(-initrange, initrange)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, input, hidden):
        # input: [50, 32] -> emb: [50, 32, 650]
        emb = self.encoder(input)
        emb = self.drop(emb)
        # nn.RNN(input_size, hidden_size, num_layers=1, nonlinearity=tanh, bias=True,
        #        batch_first=False, dropout=0, bidirectional=False)
        # emb: [50, 32, 650] -> rnn_layer: [50, 32, 1000], hidden: [2, 32, 1000]
        rnn_layer, hidden = self.rnn(emb, hidden)
        rnn_layer = self.drop(rnn_layer)
        output = self.decoder(rnn_layer.view(rnn_layer.size(0) * rnn_layer.size(1), rnn_layer.size(2)))
        output = output.view(rnn_layer.size(0), rnn_layer.size(1), output.size(1))
        return output, hidden

    def init_hidden(self, bsz, requires_grad=True):
        # Initialize the hidden state(s).
        weight = next(self.parameters())
        if self.rnn_type == 'LSTM':
            # Returns a tuple of two tensors of shape (nlayers, bsz, nhid), e.g. (2, 32, 1000) each.
            # weight.new_zeros creates zero tensors with the same dtype and device as the model parameters.
            # Note that hidden is not a model parameter and is never updated by the optimizer,
            # just like the input data x.
            return (weight.new_zeros((self.nlayers, bsz, self.nhid), requires_grad=requires_grad),
                    weight.new_zeros((self.nlayers, bsz, self.nhid), requires_grad=requires_grad))
        else:
            # GRU (and plain RNN) have no separate cell state c, so a single hidden tensor is enough.
            return weight.new_zeros((self.nlayers, bsz, self.nhid), requires_grad=requires_grad)
nhid_size = 1000
VOCAB_SIZE = len(TEXT.vocab)  # 50002, including <unk> and <pad>
model = My_Model("LSTM", VOCAB_SIZE, EMBEDDING_SIZE, nhid_size, 2, dropout=0.5)
model = model.cuda()

loss_fn = nn.CrossEntropyLoss()  # cross-entropy loss
learning_rate = 0.001
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.5)
# Each call to scheduler.step() multiplies the learning rate by 0.5, i.e. halves it.
<class 'torch.nn.modules.rnn.LSTM'>
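The shape comments in forward() can be confirmed with a quick sanity check (my addition, not from the original post): push one random batch of word indices through the freshly built model and print the resulting shapes.

# Sketch: verify the tensor shapes documented in forward(); assumes `model` from above.
with torch.no_grad():
    dummy_input = torch.randint(VOCAB_SIZE, (50, 32), dtype=torch.long).cuda()
    dummy_hidden = model.init_hidden(32)
    out, h = model(dummy_input, dummy_hidden)
print(out.shape)   # torch.Size([50, 32, 50002]): one score per vocabulary word at every position
print(h[0].shape)  # torch.Size([2, 32, 1000]): (num_layers, batch_size, nhid) for the LSTM h state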

Truncate the computation graph to reduce memory pressure

# If the hidden state kept its full history, the computation graph would keep growing from batch
# to batch and back-propagation would eventually exhaust memory. So after every batch we truncate
# the graph and keep only the values of the hidden states.
def repackage_hidden(h):
    """Wraps hidden states in new Tensors, to detach them from their history."""
    if isinstance(h, torch.Tensor):
        # GRU (and plain RNN) case: a single hidden tensor.
        # detach() cuts the computation graph; h keeps its values but becomes the start of a new graph.
        return h.detach()
    else:
        # LSTM case: the hidden state is a tuple (h, c), so detach each element recursively.
        return tuple(repackage_hidden(v) for v in h)
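A tiny demonstration of what the detaching does (my addition): the repackaged tensors keep their values but carry no gradient history, so back-propagation stops at the batch boundary.

# Sketch: repackage_hidden keeps values but drops autograd history; assumes `model` from above.
h = model.init_hidden(BATCH_SIZE)        # tuple of (h, c) for the LSTM, requires_grad=True
h = repackage_hidden(h)
print(h[0].requires_grad, h[0].grad_fn)  # False None: the new tensors start a fresh graph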

Model evaluation

# Read the training loop below first, then come back to evaluate.
def evaluate(model, data):
    model.eval()  # evaluation mode
    total_loss = 0.
    total_count = 0.
    it = iter(data)
    with torch.no_grad():  # no gradients are needed during validation
        hidden = model.init_hidden(BATCH_SIZE, requires_grad=False)
        # In both training and evaluation the hidden state starts from zeros; it is not a model
        # parameter. model.parameters() already holds the trained weights at this point.
        for i, batch in enumerate(it):
            data, target = batch.text, batch.target
            # Inputs and targets of the validation set, i.e. features and labels.
            data, target = data.cuda(), target.cuda()
            hidden = repackage_hidden(hidden)  # truncate the computation graph
            output, hidden = model(data, hidden)
            # One forward pass through the model.
            loss = loss_fn(output.view(-1, VOCAB_SIZE), target.view(-1))
            # Cross-entropy loss, averaged over the words of this batch.
            total_count += np.multiply(*data.size())
            # total_count accumulates the number of words: each sample has 50 words and each
            # batch holds 32 samples, so np.multiply(*data.size()) = 50 * 32 = 1600.
            total_loss += loss.item() * np.multiply(*data.size())
            # Average batch loss times the number of words in the batch = total loss of the batch.
    loss = total_loss / total_count  # total validation loss divided by the total word count
    model.train()  # back to training mode
    return loss

Train the model

import copy

GRAD_CLIP = 1.
NUM_EPOCHS = 2

val_losses = []
for epoch in range(NUM_EPOCHS):
    model.train()  # training mode
    # iter() builds an iterator; train_iter is already iterable, so this is optional.
    it = iter(train_iter)
    # Get the freshly initialized hidden state.
    hidden = model.init_hidden(BATCH_SIZE)
    for i, batch in enumerate(it):
        data, target = batch.text, batch.target
        # Inputs and targets of the training set, i.e. features and labels.
        data, target = data.cuda(), target.cuda()
        hidden = repackage_hidden(hidden)
        # In a language model the hidden state of one batch is carried over as the input hidden
        # state of the next batch. With many batches, keeping the full history would make the
        # computation graph enormous and back-propagation would run out of memory, so after each
        # batch the graph is truncated and only the hidden-state values are kept. Only language
        # models need this; translation models, for example, do not.
        # repackage_hidden is the helper defined above that does the truncation.
        model.zero_grad()  # reset gradients, otherwise they accumulate across iterations
        output, hidden = model(data, hidden)
        # output: (50, 32, 50002)
        loss = loss_fn(output.view(-1, VOCAB_SIZE), target.view(-1))
        # output.view(-1, VOCAB_SIZE): (1600, 50002); target.view(-1): (1600,).
        # For PyTorch's cross-entropy formula see:
        # https://blog.csdn.net/geter_CS/article/details/84857220
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), GRAD_CLIP)
        # Guard against exploding gradients: if the global gradient norm exceeds GRAD_CLIP,
        # all gradients are rescaled so that the norm equals GRAD_CLIP.
        optimizer.step()
        if i % 1000 == 0:
            print("epoch", epoch, "iter", i, "loss", loss.item())
        if i % 10000 == 0:
            val_loss = evaluate(model, val_iter)
            if len(val_losses) == 0 or val_loss < min(val_losses):
                # The validation loss improved, so save the model.
                print("best model, val loss: ", val_loss)
                torch.save(model, "best_model.pkl")
            else:
                # The validation loss did not improve, so decay the learning rate.
                scheduler.step()  # halves the optimizer's learning rate in place
            val_losses.append(val_loss)  # record the validation loss every 10000 iterations
epoch 0 iter 0 loss 10.730563163757324
best model, val loss:  10.478901235690277
D:\Anaconda\envs\jianbo\lib\site-packages\torch\serialization.py:402: UserWarning: Couldn't retrieve source code for container of type My_Model. It won't be checked for correctness upon loading.
  "type " + obj.__name__ + ". It won't be checked "
epoch 0 iter 1000 loss 6.0242919921875
epoch 0 iter 2000 loss 6.029582500457764
epoch 0 iter 3000 loss 5.8461594581604
epoch 0 iter 4000 loss 5.5147223472595215
epoch 0 iter 5000 loss 5.937921047210693
epoch 0 iter 6000 loss 5.6236090660095215
epoch 0 iter 7000 loss 5.482613563537598
epoch 0 iter 8000 loss 5.344069004058838
epoch 0 iter 9000 loss 5.418025970458984
epoch 1 iter 0 loss 5.486691474914551
best model, val loss:  5.002634433592716
epoch 1 iter 1000 loss 5.0923237800598145
epoch 1 iter 2000 loss 5.381066799163818
epoch 1 iter 3000 loss 5.237982273101807
epoch 1 iter 4000 loss 4.973425388336182
epoch 1 iter 5000 loss 5.4851861000061035
epoch 1 iter 6000 loss 5.201869010925293
epoch 1 iter 7000 loss 5.1173810958862305
epoch 1 iter 8000 loss 5.007303237915039
epoch 1 iter 9000 loss 5.120178699493408
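The numbers above are average cross-entropy per word. A common way to report language-model quality (my addition, not computed in the original post) is perplexity, the exponential of that loss.

# Sketch: turn the best validation loss into perplexity.
import math
best_val_loss = min(val_losses)
print("best val loss: %.4f, perplexity: %.2f" % (best_val_loss, math.exp(best_val_loss)))
# e.g. a loss of about 5.0 corresponds to a perplexity of roughly 149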

Save the trained model

torch.save(model, "final_model.pkl")

Load the best model

# Reload the saved model.
nhid_size = 1000
best_model = My_Model("LSTM", VOCAB_SIZE, EMBEDDING_SIZE, nhid_size, 2, dropout=0.5)
best_model = best_model.cuda()

PATH = './best_model.pkl'
# torch.load restores the entire pickled model object, replacing the freshly built best_model.
best_model = torch.load(PATH)
<class 'torch.nn.modules.rnn.LSTM'>
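The UserWarning seen during training comes from pickling the whole model object with torch.save(model, ...). A more robust pattern (my suggestion, not what the post does; the file name below is hypothetical) is to save and load only the state_dict:

# Sketch: the state_dict-based save/load pattern.
torch.save(model.state_dict(), "best_model_state.pt")

best_model = My_Model("LSTM", VOCAB_SIZE, EMBEDDING_SIZE, nhid_size, 2, dropout=0.5)
best_model.load_state_dict(torch.load("best_model_state.pt"))
best_model = best_model.cuda()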

Generate some sample text

hidden = best_model.init_hidden(1) # batch_size = 1
input = torch.randint(VOCAB_SIZE, (1, 1), dtype=torch.long).to(device)
# (1, 1) gives a 2-D tensor with one row and one column; the random index is drawn from [0, VOCAB_SIZE), i.e. below 50002.
# So input is effectively a single randomly chosen word.
words = []
for i in range(100):
    output, hidden = best_model(input, hidden)
    # output.shape = 1 * 1 * 50002
    # hidden = (2 * 1 * 1000, 2 * 1 * 1000)
    word_weights = output.squeeze().exp().cpu()
    # .exp() does two things: it exaggerates the differences so likelier words dominate even more,
    # and it turns negative scores into positive weights, which torch.multinomial requires.
    word_idx = torch.multinomial(word_weights, 1)[0]
    # Sample an index according to word_weights: the higher the weight, the likelier it is drawn.
    # For torch.multinomial see: https://blog.csdn.net/monchin/article/details/79787621
    # Always picking the single most likely word would make the model repeat the same sentences.
    input.fill_(word_idx)  # the predicted index word_idx becomes the input of the next step
    word = TEXT.vocab.itos[word_idx]  # look up the word corresponding to word_idx
    words.append(word)
print(" ".join(words))
who become born epicurus and looking for them as a <unk> is to print using hypocrisy that married his corresponding a buffer of his bicycle and put her came that <unk> into the drink the abuse of manganese s into the liver and prayers the second it is his own nowhere of the earth recognizes his origin but has primarily been used by arthur gardner largely written for this reason he differs from his eight sequel to the catherine copula which appears to be that of it encoding beethoven s demonstration the last ship desires to invent wittenberg was an
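Sampling from output.exp() is equivalent to sampling from the softmax over the next word. A common refinement (my addition; the temperature knob is not in the original) is temperature sampling: dividing the scores by a temperature below 1 makes the generated text more conservative, while values above 1 make it more random.

# Sketch: one generation step with temperature sampling; assumes best_model, input, hidden from above.
temperature = 0.8
output, hidden = best_model(input, hidden)
word_weights = (output.squeeze() / temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
print(TEXT.vocab.itos[word_idx])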


