• run
import time
import torch
import numpy as np
from train_eval import train, init_network
from importlib import import_module
import argparse  # command-line parsing; useful when parameters change often, since it keeps parameters separate from the code
from tensorboardX import SummaryWriter
#--model=TextRNN
parser = argparse.ArgumentParser(description='Chinese Text Classification')
parser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN, TextRNN, FastText, TextRCNN, TextRNN_Att, DPCNN, Transformer')
parser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained')
parser.add_argument('--word', default=False, type=bool, help='True for word, False for char')
# parser.add_argument() arguments: the first is the argument name, type is the argument type,
# default is the default value, and help is the help message
args = parser.parse_args()  # parse the arguments; the value of each one can then be read from args


if __name__ == '__main__':  # run the code below only when this file is executed directly; it is skipped when run.py is imported elsewhere
    dataset = 'THUCNews'  # dataset

    # Sogou News: embedding_SougouNews.npz, Tencent: embedding_Tencent.npz, random init: random
    embedding = 'embedding_SougouNews.npz'  # pre-trained vectors that map each character to a vector, so no embedding training is needed (this is just a classification task)
    if args.embedding == 'random':
        embedding = 'random'
    model_name = args.model  # which model to use (TextCNN, TextRNN, ...), chosen via the command-line argument

    if model_name == 'FastText':
        from utils_fasttext import build_dataset, build_iterator, get_time_dif
        embedding = 'random'
    else:
        from utils import build_dataset, build_iterator, get_time_dif

    x = import_module('models.' + model_name)  # import the chosen model module
    config = x.Config(dataset, embedding)  # configuration
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # make every random initialization produce the same result
    # While tuning network structures or hyperparameters, only identical initialization lets you see the effect of a specific change.

    start_time = time.time()
    print("Loading data...")
    # The next few lines load the data; every character has already been mapped to its index.
    vocab, train_data, dev_data, test_data = build_dataset(config, args.word)
    train_iter = build_iterator(train_data, config)
    dev_iter = build_iterator(dev_data, config)
    test_iter = build_iterator(test_data, config)  # these three lines build the batch iterators
    time_dif = get_time_dif(start_time)  # time spent on loading data
    print("Time usage:", time_dif)

    # train
    config.n_vocab = len(vocab)
    model = x.Model(config).to(config.device)
    writer = SummaryWriter(log_dir=config.log_path + '/' + time.strftime('%m-%d_%H.%M', time.localtime()))
    if model_name != 'Transformer':
        init_network(model)  # weight initialization
    print(model.parameters)  # print the model structure
    train(config, model, train_iter, dev_iter, test_iter, writer)  # training
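For reference, with the directory layout assumed above (a THUCNews folder next to run.py), the script is typically launched like this; the model names come from the help string of --model:

    python run.py --model TextRNN
    python run.py --model TextRNN --embedding random

One caveat: because --word is declared with type=bool, argparse treats any non-empty string (even "False") as True, so char-level splitting is only guaranteed when the flag is left at its default.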
  • train_eval
# coding: UTF-8
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn import metrics
import time
from utils import get_time_dif
from tensorboardX import SummaryWriter


# weight initialization, xavier by default
def init_network(model, method='xavier', exclude='embedding', seed=123):
    for name, w in model.named_parameters():
        if exclude not in name:
            if 'weight' in name:
                if method == 'xavier':
                    nn.init.xavier_normal_(w)
                elif method == 'kaiming':
                    nn.init.kaiming_normal_(w)
                else:
                    nn.init.normal_(w)
            elif 'bias' in name:
                nn.init.constant_(w, 0)
            else:
                pass


def train(config, model, train_iter, dev_iter, test_iter, writer):
    start_time = time.time()
    model.train()
    # model.train() enables BatchNormalization and Dropout;
    # model.eval() disables them accordingly
    optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)  # choose the optimizer

    # exponential learning-rate decay: every epoch, lr = gamma * lr
    # scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
    total_batch = 0  # number of batches processed so far
    dev_best_loss = float('inf')  # best validation loss so far; start at infinity and keep the minimum
    last_improve = 0  # batch count at the last validation-loss improvement
    flag = False  # whether training has gone a long time without improvement
    # writer = SummaryWriter(log_dir=config.log_path + '/' + time.strftime('%m-%d_%H.%M', time.localtime()))  # for visualization
    for epoch in range(config.num_epochs):
        print('Epoch [{}/{}]'.format(epoch + 1, config.num_epochs))
        # scheduler.step()  # learning-rate decay
        for i, (trains, labels) in enumerate(train_iter):
            # print(trains[0].shape)
            outputs = model(trains)  # forward pass
            model.zero_grad()  # clear gradients
            loss = F.cross_entropy(outputs, labels)  # compute the loss
            loss.backward()  # backward pass
            optimizer.step()
            if total_batch % 100 == 0:
                # every 100 batches, report the performance on the training and validation sets
                true = labels.data.cpu()
                predic = torch.max(outputs.data, 1)[1].cpu()
                train_acc = metrics.accuracy_score(true, predic)  # training accuracy
                dev_acc, dev_loss = evaluate(config, model, dev_iter)  # validation accuracy and loss
                if dev_loss < dev_best_loss:
                    dev_best_loss = dev_loss
                    torch.save(model.state_dict(), config.save_path)  # save whenever the model improves
                    improve = '*'
                    last_improve = total_batch
                else:
                    improve = ''
                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6},  Train Loss: {1:>5.2},  Train Acc: {2:>6.2%},  Val Loss: {3:>5.2},  Val Acc: {4:>6.2%},  Time: {5} {6}'
                print(msg.format(total_batch, loss.item(), train_acc, dev_loss, dev_acc, time_dif, improve))
                writer.add_scalar("loss/train", loss.item(), total_batch)
                writer.add_scalar("loss/dev", dev_loss, total_batch)
                writer.add_scalar("acc/train", train_acc, total_batch)
                writer.add_scalar("acc/dev", dev_acc, total_batch)
                # log the training/validation loss and accuracy to the specified directory
                model.train()
            total_batch += 1
            if total_batch - last_improve > config.require_improvement:
                # stop training if the validation loss has not dropped for more than 1000 batches
                print("No optimization for a long time, auto-stopping...")
                flag = True
                break
        if flag:
            break
    writer.close()
    test(config, model, test_iter)


def test(config, model, test_iter):
    # test
    model.load_state_dict(torch.load(config.save_path))
    model.eval()
    start_time = time.time()
    test_acc, test_loss, test_report, test_confusion = evaluate(config, model, test_iter, test=True)
    msg = 'Test Loss: {0:>5.2},  Test Acc: {1:>6.2%}'
    print(msg.format(test_loss, test_acc))
    print("Precision, Recall and F1-Score...")
    print(test_report)
    print("Confusion Matrix...")
    print(test_confusion)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)


def evaluate(config, model, data_iter, test=False):
    model.eval()
    loss_total = 0
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    with torch.no_grad():  # no parameter updates on the validation set
        for texts, labels in data_iter:
            outputs = model(texts)  # predictions
            loss = F.cross_entropy(outputs, labels)  # loss
            loss_total += loss
            labels = labels.data.cpu().numpy()
            predic = torch.max(outputs.data, 1)[1].cpu().numpy()
            labels_all = np.append(labels_all, labels)
            predict_all = np.append(predict_all, predic)
    acc = metrics.accuracy_score(labels_all, predict_all)  # collect every batch's results, then compute accuracy once at the end
    if test:
        report = metrics.classification_report(labels_all, predict_all, target_names=config.class_list, digits=4)
        confusion = metrics.confusion_matrix(labels_all, predict_all)
        return acc, loss_total / len(data_iter), report, confusion
    return acc, loss_total / len(data_iter)
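For intuition, the way predictions and accuracy are obtained in train() and evaluate() above can be reproduced in isolation. This is a minimal sketch with made-up logits, not part of the original code:

    import torch
    from sklearn import metrics

    outputs = torch.tensor([[0.1, 2.0, -1.0],   # hypothetical logits: 2 samples, 3 classes
                            [1.5, 0.2, 0.3]])
    labels = torch.tensor([1, 0])
    predic = torch.max(outputs, 1)[1]           # index of the largest logit per row = predicted class
    print(metrics.accuracy_score(labels.numpy(), predic.numpy()))  # 1.0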
  • utils
# coding: UTF-8
import os
import torch
import numpy as np
import pickle as pkl
from tqdm import tqdm
import time
from datetime import timedelta

MAX_VOCAB_SIZE = 10000  # vocabulary size limit
UNK, PAD = '<UNK>', '<PAD>'  # unknown token and padding token


def build_vocab(file_path, tokenizer, max_size, min_freq):
    vocab_dic = {}
    with open(file_path, 'r', encoding='UTF-8') as f:
        for line in tqdm(f):
            lin = line.strip()
            if not lin:
                continue
            content = lin.split('\t')[0]
            for word in tokenizer(content):
                vocab_dic[word] = vocab_dic.get(word, 0) + 1
        vocab_list = sorted([_ for _ in vocab_dic.items() if _[1] >= min_freq], key=lambda x: x[1], reverse=True)[:max_size]
        vocab_dic = {word_count[0]: idx for idx, word_count in enumerate(vocab_list)}
        vocab_dic.update({UNK: len(vocab_dic), PAD: len(vocab_dic) + 1})
    return vocab_dic


def build_dataset(config, ues_word):
    if ues_word:  # choose between word-level and char-level splitting
        tokenizer = lambda x: x.split(' ')  # words separated by spaces, word-level
    else:
        tokenizer = lambda x: [y for y in x]  # char-level
    if os.path.exists(config.vocab_path):
        vocab = pkl.load(open(config.vocab_path, 'rb'))
    else:
        vocab = build_vocab(config.train_path, tokenizer=tokenizer, max_size=MAX_VOCAB_SIZE, min_freq=1)
        pkl.dump(vocab, open(config.vocab_path, 'wb'))
    print(f"Vocab size: {len(vocab)}")

    def load_dataset(path, pad_size=32):
        contents = []
        with open(path, 'r', encoding='UTF-8') as f:
            for line in tqdm(f):
                lin = line.strip()
                if not lin:
                    continue
                content, label = lin.split('\t')  # split each line: the text before the tab is the sample, the number after it is the label
                words_line = []
                token = tokenizer(content)  # the tokenizer splits the sentence into individual characters
                seq_len = len(token)  # record the length, then truncate or pad below
                if pad_size:
                    if len(token) < pad_size:
                        token.extend([vocab.get(PAD)] * (pad_size - len(token)))  # pad short sequences with the index of the special PAD token
                    else:
                        token = token[:pad_size]
                        seq_len = pad_size
                # word to id
                for word in token:
                    words_line.append(vocab.get(word, vocab.get(UNK)))  # map every token to its index in the vocabulary
                contents.append((words_line, int(label), seq_len))  # at this point everything is index-encoded
        return contents  # [([...], 0), ([...], 1), ...]

    train = load_dataset(config.train_path, config.pad_size)  # load the training set
    dev = load_dataset(config.dev_path, config.pad_size)  # load the validation set
    test = load_dataset(config.test_path, config.pad_size)  # load the test set
    return vocab, train, dev, test


class DatasetIterater(object):
    def __init__(self, batches, batch_size, device):
        self.batch_size = batch_size
        self.batches = batches
        self.n_batches = len(batches) // batch_size
        self.residue = False  # whether there is a leftover partial batch
        if len(batches) % self.n_batches != 0:
            self.residue = True
        self.index = 0
        self.device = device

    def _to_tensor(self, datas):
        x = torch.LongTensor([_[0] for _ in datas]).to(self.device)
        y = torch.LongTensor([_[1] for _ in datas]).to(self.device)

        # length before padding (capped at pad_size for longer sequences)
        seq_len = torch.LongTensor([_[2] for _ in datas]).to(self.device)
        return (x, seq_len), y

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            batches = self.batches[self.index * self.batch_size: len(self.batches)]
            self.index += 1
            batches = self._to_tensor(batches)
            return batches
        elif self.index > self.n_batches:
            self.index = 0
            raise StopIteration
        else:
            batches = self.batches[self.index * self.batch_size: (self.index + 1) * self.batch_size]
            self.index += 1
            batches = self._to_tensor(batches)
            return batches

    def __iter__(self):
        return self

    def __len__(self):
        if self.residue:
            return self.n_batches + 1
        else:
            return self.n_batches


def build_iterator(dataset, config):
    iter = DatasetIterater(dataset, config.batch_size, config.device)
    return iter


def get_time_dif(start_time):
    """Return the elapsed time since start_time."""
    end_time = time.time()
    time_dif = end_time - start_time
    return timedelta(seconds=int(round(time_dif)))


if __name__ == "__main__":
    '''Extract the pre-trained word vectors.'''
    # Adjust the directories and file names below as needed.
    train_dir = "./THUCNews/data/train.txt"
    vocab_dir = "./THUCNews/data/vocab.pkl"
    pretrain_dir = "./THUCNews/data/sgns.sogou.char"
    emb_dim = 300
    filename_trimmed_dir = "./THUCNews/data/embedding_SougouNews"
    if os.path.exists(vocab_dir):
        word_to_id = pkl.load(open(vocab_dir, 'rb'))
    else:
        # tokenizer = lambda x: x.split(' ')  # word-level vocabulary (words separated by spaces in the dataset)
        tokenizer = lambda x: [y for y in x]  # char-level vocabulary
        word_to_id = build_vocab(train_dir, tokenizer=tokenizer, max_size=MAX_VOCAB_SIZE, min_freq=1)
        pkl.dump(word_to_id, open(vocab_dir, 'wb'))

    embeddings = np.random.rand(len(word_to_id), emb_dim)
    f = open(pretrain_dir, "r", encoding='UTF-8')
    for i, line in enumerate(f.readlines()):
        # if i == 0:  # skip the first line if it is a header
        #     continue
        lin = line.strip().split(" ")
        if lin[0] in word_to_id:
            idx = word_to_id[lin[0]]
            emb = [float(x) for x in lin[1:301]]
            embeddings[idx] = np.asarray(emb, dtype='float32')
    f.close()
    np.savez_compressed(filename_trimmed_dir, embeddings=embeddings)
  • utils_fasttext
# coding: UTF-8
import os
import torch
import numpy as np
import pickle as pkl
from tqdm import tqdm
import time
from datetime import timedelta

MAX_VOCAB_SIZE = 10000
UNK, PAD = '<UNK>', '<PAD>'


def build_vocab(file_path, tokenizer, max_size, min_freq):
    vocab_dic = {}
    with open(file_path, 'r', encoding='UTF-8') as f:
        for line in tqdm(f):
            lin = line.strip()
            if not lin:
                continue
            content = lin.split('\t')[0]
            for word in tokenizer(content):
                vocab_dic[word] = vocab_dic.get(word, 0) + 1
        vocab_list = sorted([_ for _ in vocab_dic.items() if _[1] >= min_freq], key=lambda x: x[1], reverse=True)[:max_size]
        vocab_dic = {word_count[0]: idx for idx, word_count in enumerate(vocab_list)}
        vocab_dic.update({UNK: len(vocab_dic), PAD: len(vocab_dic) + 1})
    return vocab_dic


def build_dataset(config, ues_word):
    if ues_word:
        tokenizer = lambda x: x.split(' ')  # space-separated, word-level
    else:
        tokenizer = lambda x: [y for y in x]  # char-level
    if os.path.exists(config.vocab_path):
        vocab = pkl.load(open(config.vocab_path, 'rb'))
    else:
        vocab = build_vocab(config.train_path, tokenizer=tokenizer, max_size=MAX_VOCAB_SIZE, min_freq=1)
        pkl.dump(vocab, open(config.vocab_path, 'wb'))
    print(f"Vocab size: {len(vocab)}")

    def biGramHash(sequence, t, buckets):
        t1 = sequence[t - 1] if t - 1 >= 0 else 0
        return (t1 * 14918087) % buckets

    def triGramHash(sequence, t, buckets):
        t1 = sequence[t - 1] if t - 1 >= 0 else 0
        t2 = sequence[t - 2] if t - 2 >= 0 else 0
        return (t2 * 14918087 * 18408749 + t1 * 14918087) % buckets

    def load_dataset(path, pad_size=32):
        contents = []
        with open(path, 'r', encoding='UTF-8') as f:
            for line in tqdm(f):
                lin = line.strip()
                if not lin:
                    continue
                content, label = lin.split('\t')
                words_line = []
                token = tokenizer(content)
                seq_len = len(token)
                if pad_size:
                    if len(token) < pad_size:
                        token.extend([vocab.get(PAD)] * (pad_size - len(token)))
                    else:
                        token = token[:pad_size]
                        seq_len = pad_size
                # word to id
                for word in token:
                    words_line.append(vocab.get(word, vocab.get(UNK)))

                # fasttext ngram
                buckets = config.n_gram_vocab
                bigram = []
                trigram = []
                # ------ngram------
                for i in range(pad_size):
                    bigram.append(biGramHash(words_line, i, buckets))
                    trigram.append(triGramHash(words_line, i, buckets))
                # -----------------
                contents.append((words_line, int(label), seq_len, bigram, trigram))
        return contents  # [([...], 0), ([...], 1), ...]

    train = load_dataset(config.train_path, config.pad_size)
    dev = load_dataset(config.dev_path, config.pad_size)
    test = load_dataset(config.test_path, config.pad_size)
    return vocab, train, dev, test


class DatasetIterater(object):
    def __init__(self, batches, batch_size, device):
        self.batch_size = batch_size
        self.batches = batches
        self.n_batches = len(batches) // batch_size
        self.residue = False  # whether there is a leftover partial batch
        if len(batches) % self.n_batches != 0:
            self.residue = True
        self.index = 0
        self.device = device

    def _to_tensor(self, datas):
        # xx = [xxx[2] for xxx in datas]
        # indexx = np.argsort(xx)[::-1]
        # datas = np.array(datas)[indexx]
        x = torch.LongTensor([_[0] for _ in datas]).to(self.device)
        y = torch.LongTensor([_[1] for _ in datas]).to(self.device)
        bigram = torch.LongTensor([_[3] for _ in datas]).to(self.device)
        trigram = torch.LongTensor([_[4] for _ in datas]).to(self.device)

        # length before padding (capped at pad_size for longer sequences)
        seq_len = torch.LongTensor([_[2] for _ in datas]).to(self.device)
        return (x, seq_len, bigram, trigram), y

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            batches = self.batches[self.index * self.batch_size: len(self.batches)]
            self.index += 1
            batches = self._to_tensor(batches)
            return batches
        elif self.index > self.n_batches:
            self.index = 0
            raise StopIteration
        else:
            batches = self.batches[self.index * self.batch_size: (self.index + 1) * self.batch_size]
            self.index += 1
            batches = self._to_tensor(batches)
            return batches

    def __iter__(self):
        return self

    def __len__(self):
        if self.residue:
            return self.n_batches + 1
        else:
            return self.n_batches


def build_iterator(dataset, config):
    iter = DatasetIterater(dataset, config.batch_size, config.device)
    return iter


def get_time_dif(start_time):
    """Return the elapsed time since start_time."""
    end_time = time.time()
    time_dif = end_time - start_time
    return timedelta(seconds=int(round(time_dif)))


if __name__ == "__main__":
    '''Extract the pre-trained word vectors.'''
    vocab_dir = "./THUCNews/data/vocab.pkl"
    pretrain_dir = "./THUCNews/data/sgns.sogou.char"
    emb_dim = 300
    filename_trimmed_dir = "./THUCNews/data/vocab.embedding.sougou"

    word_to_id = pkl.load(open(vocab_dir, 'rb'))
    embeddings = np.random.rand(len(word_to_id), emb_dim)
    f = open(pretrain_dir, "r", encoding='UTF-8')
    for i, line in enumerate(f.readlines()):
        # if i == 0:  # skip the first line if it is a header
        #     continue
        lin = line.strip().split(" ")
        if lin[0] in word_to_id:
            idx = word_to_id[lin[0]]
            emb = [float(x) for x in lin[1:301]]
            embeddings[idx] = np.asarray(emb, dtype='float32')
    f.close()
    np.savez_compressed(filename_trimmed_dir, embeddings=embeddings)
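The n-gram hash functions above map each (previous token, current position) pair into a fixed number of buckets, so FastText can use bigram/trigram features without an explicit n-gram vocabulary. A self-contained toy run, with the same hash code but invented token indices and an invented bucket count standing in for config.n_gram_vocab:

    def biGramHash(sequence, t, buckets):
        t1 = sequence[t - 1] if t - 1 >= 0 else 0
        return (t1 * 14918087) % buckets

    def triGramHash(sequence, t, buckets):
        t1 = sequence[t - 1] if t - 1 >= 0 else 0
        t2 = sequence[t - 2] if t - 2 >= 0 else 0
        return (t2 * 14918087 * 18408749 + t1 * 14918087) % buckets

    seq = [5, 42, 7, 0]      # made-up token indices after padding
    buckets = 250499         # made-up value for config.n_gram_vocab
    print([biGramHash(seq, i, buckets) for i in range(len(seq))])   # one bucket id per position
    print([triGramHash(seq, i, buckets) for i in range(len(seq))])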
  • TextRNN
# coding: UTF-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Config(object):

    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextRNN'
        self.train_path = dataset + '/data/train.txt'  # training set path
        self.dev_path = dataset + '/data/dev.txt'      # validation set
        self.test_path = dataset + '/data/test.txt'    # test set
        self.class_list = [x.strip() for x in open(dataset + '/data/class.txt').readlines()]  # the 10 class names
        self.vocab_path = dataset + '/data/vocab.pkl'  # vocabulary: each token and its index; embedding lookup uses this index to find each token's vector
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # where the trained model is saved
        self.log_path = dataset + '/log/' + self.model_name  # intermediate values such as losses are logged here for visualization
        self.embedding_pretrained = torch.tensor(  # load the provided pre-trained word vectors and convert them to a tensor
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None                                       # pre-trained word vectors
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')   # device; falls back to CPU when no GPU is available

        self.dropout = 0.5                                   # dropout, against overfitting
        self.require_improvement = 1000                      # stop early if there is no improvement for more than 1000 batches
        self.num_classes = len(self.class_list)              # number of classes (10)
        self.n_vocab = 0                                     # vocabulary size, assigned at runtime after the vocabulary is loaded
        self.num_epochs = 10                                 # number of epochs
        self.batch_size = 128                                # mini-batch size
        self.pad_size = 32                                   # every sentence is padded/truncated to this length
        self.learning_rate = 1e-3                            # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # embedding dimension; 300 when using the provided pre-trained vectors
        self.hidden_size = 128                               # number of LSTM hidden units
        self.num_layers = 2                                  # number of LSTM layers


'''Recurrent Neural Network for Text Classification with Multi-Task Learning'''


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            # nn.Embedding maps every token to its word vector (via the token's index)
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                            bidirectional=True, batch_first=True, dropout=config.dropout)
        # nn.LSTM arguments: config.embed is the input feature size (300);
        # config.hidden_size is the number of hidden units (128), i.e. the size of each intermediate state h_t;
        # config.num_layers is the number of stacked LSTM layers (usually the last output of the last layer is used);
        # bidirectional=True makes the LSTM bidirectional (works better); batch_first=True puts the batch in the first dimension;
        # dropout=config.dropout applies dropout between layers
        self.fc = nn.Linear(config.hidden_size * 2, config.num_classes)
        # the final fully connected layer; its input size is doubled because the LSTM is bidirectional,
        # and it maps the last output of the last LSTM layer to the 10 classes

    def forward(self, x):
        x, _ = x
        out = self.embedding(x)  # embedding layer first: [batch_size, seq_len, embedding] = [128, 32, 300]
        out, _ = self.lstm(out)  # then the LSTM layers
        out = self.fc(out[:, -1, :])  # feed the last time step of the last layer into the fully connected layer
        # out[:, -1, :] is the LSTM's final hidden feature (256-dim) used for classification
        return out
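To make the shapes in the forward pass concrete, here is a minimal sketch with random data and the same layer sizes as the Config above (the vocabulary size 5000 and batch size 2 are made up for illustration):

    import torch
    import torch.nn as nn

    emb = nn.Embedding(5000, 300)                      # made-up vocab size, embed = 300
    lstm = nn.LSTM(300, 128, 2, bidirectional=True, batch_first=True, dropout=0.5)
    fc = nn.Linear(128 * 2, 10)

    x = torch.randint(0, 5000, (2, 32))                # [batch_size=2, seq_len=32]
    out = emb(x)                                       # [2, 32, 300]
    out, _ = lstm(out)                                 # [2, 32, 256]  (bidirectional doubles hidden_size)
    out = fc(out[:, -1, :])                            # [2, 10]  last time step -> class logits
    print(out.shape)                                   # torch.Size([2, 10])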
  • TextCNN
# coding: UTF-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Config(object):

    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextCNN'
        self.train_path = dataset + '/data/train.txt'                                # training set
        self.dev_path = dataset + '/data/dev.txt'                                    # validation set
        self.test_path = dataset + '/data/test.txt'                                  # test set
        self.class_list = [x.strip() for x in open(dataset + '/data/class.txt').readlines()]  # class names
        self.vocab_path = dataset + '/data/vocab.pkl'                                # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'        # trained model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None                                       # pre-trained word vectors
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')   # device

        self.dropout = 0.5                                              # dropout
        self.require_improvement = 1000                                 # stop early if there is no improvement for more than 1000 batches
        self.num_classes = len(self.class_list)                         # number of classes
        self.n_vocab = 0                                                # vocabulary size, assigned at runtime
        self.num_epochs = 20                                            # number of epochs
        self.batch_size = 128                                           # mini-batch size
        self.pad_size = 32                                              # every sentence is padded/truncated to this length
        self.learning_rate = 1e-3                                       # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300           # embedding dimension
        self.filter_sizes = (2, 3, 4)                                   # convolution kernel sizes
        self.num_filters = 256                                          # number of kernels (channels)


'''Convolutional Neural Networks for Sentence Classification'''


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.convs = nn.ModuleList(  # three kernel sizes follow, so the convolutions must be wrapped in a ModuleList
            [nn.Conv2d(1, config.num_filters, (k, config.embed)) for k in config.filter_sizes])
        # the kernel sizes differ, hence the loop: (k, config.embed) is the kernel shape, with k = 2, 3, 4 and config.embed = 300
        self.dropout = nn.Dropout(config.dropout)
        self.fc = nn.Linear(config.num_filters * len(config.filter_sizes), config.num_classes)
        # config.num_classes is 10; config.num_filters means each kernel size yields 256 feature maps,
        # so there are 256 * 3 feature maps in total, and after max pooling 256 * 3 features feed the classifier

    def conv_and_pool(self, x, conv):
        x = F.relu(conv(x)).squeeze(3)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)
        return x

    def forward(self, x):
        # print(x[0].shape)
        out = self.embedding(x[0])
        out = out.unsqueeze(1)  # add a channel dimension of size 1
        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)
        out = self.dropout(out)
        out = self.fc(out)
        return out
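A shape sketch for conv_and_pool above, using random data and one of the three kernels (k = 3); batch size 2 is made up for illustration:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    conv = nn.Conv2d(1, 256, (3, 300))                 # one kernel size from filter_sizes
    out = torch.randn(2, 1, 32, 300)                   # [batch, channel=1, seq_len, embed]
    x = F.relu(conv(out)).squeeze(3)                   # [2, 256, 30]  (32 - 3 + 1 positions)
    x = F.max_pool1d(x, x.size(2)).squeeze(2)          # [2, 256]  max over positions
    print(x.shape)                                     # torch.Size([2, 256])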
