# All explanatory comments are written inline in the code; read them carefully.
# Main program: sequence_gan.py
import numpy as np
import tensorflow as tf
import random
from dataloader import Gen_Data_loader, Dis_dataloader
from generator import Generator
from discriminator import Discriminator
from rollout import ROLLOUT
from target_lstm import TARGET_LSTM
from dataloader import StrToBytes
import pickle
import pdb

#########################################################################################
#  Generator  Hyper-parameters
######################################################################################
EMB_DIM = 32 # embedding dimension
HIDDEN_DIM = 32 # hidden state dimension of lstm cell
SEQ_LENGTH = 20 # sequence length
START_TOKEN = 0
PRE_EPOCH_NUM = 10 # supervised (maximum likelihood estimation) pre-training epochs; 120 in the original repo
SEED = 88
BATCH_SIZE = 64

#########################################################################################
#  Discriminator  Hyper-parameters
#########################################################################################
dis_embedding_dim = 64
dis_filter_sizes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20]
dis_num_filters = [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160]
dis_dropout_keep_prob = 0.75
dis_l2_reg_lambda = 0.2
dis_batch_size = 64

#########################################################################################
#  Basic Training Parameters
#########################################################################################
TOTAL_BATCH = 10   # number of adversarial training rounds; 200 in the original repo
positive_file = 'save/real_data.txt'   # "real" data generated by the oracle model is saved in this file
negative_file = 'save/generator_sample.txt'  # fake data produced by the generator is saved in this file
eval_file = 'save/eval_file.txt'   # evaluation file
generated_num = 1000  # 10000 in the original repo; number of samples to generate


# Use trainable_model to generate int(generated_num / batch_size) * batch_size samples and save them to output_file
def generate_samples(sess, trainable_model, batch_size, generated_num, output_file):
    # Generate Samples
    generated_samples = []
    for _ in range(int(generated_num / batch_size)):
        generated_samples.extend(trainable_model.generate(sess))

    with open(output_file, 'w') as fout:
        for poem in generated_samples:
            buffer = ' '.join([str(x) for x in poem]) + '\n'
            fout.write(buffer)


def target_loss(sess, target_lstm, data_loader):
    # target_loss means the oracle negative log-likelihood tested with the oracle model "target_lstm"
    # For more details, please see Section 4 in https://arxiv.org/abs/1609.05473
    nll = []
    data_loader.reset_pointer()
    for it in range(data_loader.num_batch):
        batch = data_loader.next_batch()
        g_loss = sess.run(target_lstm.pretrain_loss, {target_lstm.x: batch})
        nll.append(g_loss)
    return np.mean(nll)


def pre_train_epoch(sess, trainable_model, data_loader):
    # Pre-train the generator using MLE for one epoch
    supervised_g_losses = []
    data_loader.reset_pointer()
    for it in range(data_loader.num_batch):
        batch = data_loader.next_batch()
        _, g_loss = trainable_model.pretrain_step(sess, batch)
        supervised_g_losses.append(g_loss)
    return np.mean(supervised_g_losses)


def main():
    # Make the random numbers reproducible: with the same seed, the same random numbers are generated later on.
    random.seed(SEED)
    np.random.seed(SEED)
    # There is no data at the beginning, so generation starts from state 0 and runs to state 19,
    # producing 20 tokens per sample.
    assert START_TOKEN == 0

    # Create the data loader objects first
    gen_data_loader = Gen_Data_loader(BATCH_SIZE)
    likelihood_data_loader = Gen_Data_loader(BATCH_SIZE)  # For testing
    vocab_size = 5000
    dis_data_loader = Dis_dataloader(BATCH_SIZE)

    generator = Generator(vocab_size, BATCH_SIZE, EMB_DIM, HIDDEN_DIM, SEQ_LENGTH, START_TOKEN)
    # target_params = pickle.load(open(StrToBytes('save/target_params.pkl')))
    target_params = pickle.load(open('save/target_params.pkl', 'rb'), encoding='iso-8859-1')
    target_lstm = TARGET_LSTM(vocab_size, BATCH_SIZE, EMB_DIM, HIDDEN_DIM, SEQ_LENGTH, START_TOKEN, target_params)  # The oracle model

    discriminator = Discriminator(sequence_length=20, num_classes=2, vocab_size=vocab_size,
                                  embedding_size=dis_embedding_dim, filter_sizes=dis_filter_sizes,
                                  num_filters=dis_num_filters, l2_reg_lambda=dis_l2_reg_lambda)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())

    # -------------------------------- mycode ------------------------------- #
    merged = tf.summary.merge_all()  # merge the graph, training statistics, etc. into one summary op
    writer = tf.summary.FileWriter('logs', sess.graph)  # write the training log to the 'logs' directory

    # First, use the oracle model to provide the positive examples, which are sampled from the oracle data distribution
    generate_samples(sess, target_lstm, BATCH_SIZE, generated_num, positive_file)
    #pdb.set_trace()
    gen_data_loader.create_batches(positive_file)

    log = open('save/experiment-log.txt', 'w')
    # --------- pre-train the generator -------------------- #
    print("Start pre-training...")
    log.write('pre-training...\n')
    for epoch in range(PRE_EPOCH_NUM):  # 120 in the original repo
        loss = pre_train_epoch(sess, generator, gen_data_loader)
        if epoch % 5 == 0:
            generate_samples(sess, generator, BATCH_SIZE, generated_num, eval_file)
            likelihood_data_loader.create_batches(eval_file)
            test_loss = target_loss(sess, target_lstm, likelihood_data_loader)
            print("pre-train epoch ", epoch, "test_loss ", test_loss)
            buffer = 'epoch:\t' + str(epoch) + '\tnll:\t' + str(test_loss) + '\n'
            log.write(buffer)

    print("Start pre-training discriminator...")
    # Train 3 epochs on the generated data and repeat this 10 times (50 in the original repo)
    for _ in range(10):
        generate_samples(sess, generator, BATCH_SIZE, generated_num, negative_file)
        #pdb.set_trace()
        dis_data_loader.load_train_data(positive_file, negative_file)
        for _ in range(3):
            dis_data_loader.reset_pointer()
            for it in range(dis_data_loader.num_batch):
                x_batch, y_batch = dis_data_loader.next_batch()
                feed = {
                    discriminator.input_x: x_batch,
                    discriminator.input_y: y_batch,
                    discriminator.dropout_keep_prob: dis_dropout_keep_prob
                }
                _ = sess.run(discriminator.train_op, feed)
    #pdb.set_trace()
    rollout = ROLLOUT(generator, 0.8)

    print("#########################################################################")
    print("Start Adversarial Training...")
    log.write('adversarial training...\n')
    for total_batch in range(TOTAL_BATCH):
        # Train the generator for one step
        for it in range(1):
            samples = generator.generate(sess)
            rewards = rollout.get_reward(sess, samples, 16, discriminator)
            feed = {generator.x: samples, generator.rewards: rewards}
            _ = sess.run(generator.g_updates, feed_dict=feed)

        # Test
        if total_batch % 5 == 0 or total_batch == TOTAL_BATCH - 1:
            generate_samples(sess, generator, BATCH_SIZE, generated_num, eval_file)
            likelihood_data_loader.create_batches(eval_file)
            test_loss = target_loss(sess, target_lstm, likelihood_data_loader)
            buffer = 'epoch:\t' + str(total_batch) + '\tnll:\t' + str(test_loss) + '\n'
            print("total_batch: ", total_batch, "test_loss: ", test_loss)
            log.write(buffer)

        # Update roll-out parameters
        rollout.update_params()

        # Train the discriminator
        for _ in range(5):
            generate_samples(sess, generator, BATCH_SIZE, generated_num, negative_file)
            dis_data_loader.load_train_data(positive_file, negative_file)
            for _ in range(3):
                dis_data_loader.reset_pointer()
                for it in range(dis_data_loader.num_batch):
                    x_batch, y_batch = dis_data_loader.next_batch()
                    feed = {
                        discriminator.input_x: x_batch,
                        discriminator.input_y: y_batch,
                        discriminator.dropout_keep_prob: dis_dropout_keep_prob
                    }
                    _ = sess.run(discriminator.train_op, feed)

    log.close()


if __name__ == '__main__':
    main()
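A reading aid, not part of the original script: the test_loss printed above is the oracle negative log-likelihood from Section 4 of the SeqGAN paper (https://arxiv.org/abs/1609.05473). In my notation it is roughly

\text{NLL}_{\text{oracle}} = -\,\mathbb{E}_{Y_{1:T} \sim G_\theta}\Big[\sum_{t=1}^{T} \log G_{\text{oracle}}(y_t \mid Y_{1:t-1})\Big]

target_loss estimates this expectation (up to a per-token average) by running target_lstm.pretrain_loss over batches of sequences that the current generator wrote to eval_file, so a lower value means the generator's samples look more probable under the oracle.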
# dataloader.py
import numpy as np
import pdb


class StrToBytes:
    def __init__(self, fileobj):
        self.fileobj = fileobj

    def read(self, size):
        return self.fileobj.read(size).encode()

    def readline(self, size=-1):
        return self.fileobj.readline(size).encode()


class Gen_Data_loader():
    def __init__(self, batch_size):
        self.batch_size = batch_size
        self.token_stream = []

    def create_batches(self, data_file):
        self.token_stream = []
        with open(data_file, 'r') as f:
            for line in f:
                line = line.strip()  # strip leading/trailing whitespace
                line = line.split()  # split on whitespace (spaces, '\n', '\t', ...)
                parse_line = [int(x) for x in line]
                if len(parse_line) == 20:
                    self.token_stream.append(parse_line)

        self.num_batch = int(len(self.token_stream) / self.batch_size)
        self.token_stream = self.token_stream[:self.num_batch * self.batch_size]
        # self.sequence_batch holds one batch per index
        self.sequence_batch = np.split(np.array(self.token_stream), self.num_batch, 0)
        # self.pointer indexes into self.sequence_batch, i.e. it points at the next batch to return
        self.pointer = 0

    def next_batch(self):
        ret = self.sequence_batch[self.pointer]
        self.pointer = (self.pointer + 1) % self.num_batch
        return ret

    def reset_pointer(self):
        self.pointer = 0


# Data loader for the discriminator
class Dis_dataloader():
    def __init__(self, batch_size):
        self.batch_size = batch_size
        self.sentences = np.array([])
        self.labels = np.array([])

    def load_train_data(self, positive_file, negative_file):
        # Load data
        positive_examples = []
        negative_examples = []
        with open(positive_file) as fin:
            for line in fin:
                line = line.strip()
                line = line.split()
                parse_line = [int(x) for x in line]
                positive_examples.append(parse_line)
        with open(negative_file) as fin:
            for line in fin:
                line = line.strip()
                line = line.split()
                parse_line = [int(x) for x in line]
                if len(parse_line) == 20:
                    negative_examples.append(parse_line)
        # self.sentences stacks the positive examples first, then the negative ones;
        # e.g. 960x20 positives plus 960x20 negatives give a 1920x20 array
        self.sentences = np.array(positive_examples + negative_examples)

        # Generate labels: positives are [0, 1], negatives are [1, 0], i.e. 2-dimensional one-hot labels
        positive_labels = [[0, 1] for _ in positive_examples]
        negative_labels = [[1, 0] for _ in negative_examples]
        self.labels = np.concatenate([positive_labels, negative_labels], 0)

        # Shuffle the data
        shuffle_indices = np.random.permutation(np.arange(len(self.labels)))
        self.sentences = self.sentences[shuffle_indices]
        self.labels = self.labels[shuffle_indices]

        # Split batches
        self.num_batch = int(len(self.labels) / self.batch_size)
        self.sentences = self.sentences[:self.num_batch * self.batch_size]
        self.labels = self.labels[:self.num_batch * self.batch_size]
        self.sentences_batches = np.split(self.sentences, self.num_batch, 0)
        self.labels_batches = np.split(self.labels, self.num_batch, 0)

        self.pointer = 0

    def next_batch(self):
        ret = self.sentences_batches[self.pointer], self.labels_batches[self.pointer]
        self.pointer = (self.pointer + 1) % self.num_batch
        return ret

    def reset_pointer(self):
        self.pointer = 0
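For orientation, a minimal usage sketch (not in the original code) of how Gen_Data_loader is driven; it mirrors the pre_train_epoch loop in sequence_gan.py and assumes save/real_data.txt has already been produced by generate_samples:

    # hypothetical driver, mirroring pre_train_epoch
    loader = Gen_Data_loader(batch_size=64)
    loader.create_batches('save/real_data.txt')   # keeps only lines with exactly 20 integer tokens
    loader.reset_pointer()
    for _ in range(loader.num_batch):
        batch = loader.next_batch()               # numpy array of shape (64, 20)
        # feed `batch` to generator.pretrain_step(sess, batch)

Dis_dataloader follows the same pointer/next_batch pattern, except that load_train_data mixes positive and negative files and next_batch returns a (sentences, labels) pair.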
#generator.py
# An LSTM cell is used as the generator
import tensorflow as tf
from tensorflow.python.ops import tensor_array_ops, control_flow_ops
import pdb


class Generator(object):
    # num_emb is the vocabulary size; emb_dim is the dimension of each token embedding
    def __init__(self, num_emb, batch_size, emb_dim, hidden_dim,
                 sequence_length, start_token,
                 learning_rate=0.01, reward_gamma=0.95):
        self.num_emb = num_emb                    # 5000
        self.batch_size = batch_size              # 64
        self.emb_dim = emb_dim                    # 32
        self.hidden_dim = hidden_dim              # 32
        self.sequence_length = sequence_length    # 20
        self.start_token = tf.constant([start_token] * self.batch_size, dtype=tf.int32)
        self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
        self.reward_gamma = reward_gamma
        self.g_params = []
        self.d_params = []
        self.temperature = 1.0
        self.grad_clip = 5.0

        self.expected_reward = tf.Variable(tf.zeros([self.sequence_length]))

        with tf.variable_scope('generator'):
            #pdb.set_trace()
            self.g_embeddings = tf.Variable(self.init_matrix([self.num_emb, self.emb_dim]))  # 5000x32
            self.g_params.append(self.g_embeddings)  # collect all input-to-hidden and hidden-to-output parameters in g_params
            self.g_recurrent_unit = self.create_recurrent_unit(self.g_params)  # maps h_tm1 to h_t for generator
            self.g_output_unit = self.create_output_unit(self.g_params)  # maps h_t to o_t (output token logits)

        # placeholder definition
        self.x = tf.placeholder(tf.int32, shape=[self.batch_size, self.sequence_length])  # sequence of tokens generated by generator
        self.rewards = tf.placeholder(tf.float32, shape=[self.batch_size, self.sequence_length])  # obtained from rollout policy and discriminator

        # processed for batch
        with tf.device("/cpu:0"):
            self.processed_x = tf.transpose(tf.nn.embedding_lookup(self.g_embeddings, self.x), perm=[1, 0, 2])  # seq_length x batch_size x emb_dim

        # Initial states: holds both the LSTM hidden state and the cell state C_t
        self.h0 = tf.zeros([self.batch_size, self.hidden_dim])
        self.h0 = tf.stack([self.h0, self.h0])  # 2x64x32

        # use the LSTM cell to sample, for each of the 64 sequences, the next token and its probability
        #pdb.set_trace()
        gen_o = tensor_array_ops.TensorArray(dtype=tf.float32, size=self.sequence_length,
                                             dynamic_size=False, infer_shape=True)
        gen_x = tensor_array_ops.TensorArray(dtype=tf.int32, size=self.sequence_length,
                                             dynamic_size=False, infer_shape=True)

        def _g_recurrence(i, x_t, h_tm1, gen_o, gen_x):
            #pdb.set_trace()
            h_t = self.g_recurrent_unit(x_t, h_tm1)  # hidden_memory_tuple
            o_t = self.g_output_unit(h_t)  # batch x vocab , logits not prob
            log_prob = tf.log(tf.nn.softmax(o_t))
            next_token = tf.cast(tf.reshape(tf.multinomial(log_prob, 1), [self.batch_size]), tf.int32)
            x_tp1 = tf.nn.embedding_lookup(self.g_embeddings, next_token)  # batch x emb_dim
            gen_o = gen_o.write(i, tf.reduce_sum(tf.multiply(tf.one_hot(next_token, self.num_emb, 1.0, 0.0),
                                                             tf.nn.softmax(o_t)), 1))  # [batch_size] , prob
            gen_x = gen_x.write(i, next_token)  # indices, batch_size
            return i + 1, x_tp1, h_t, gen_o, gen_x

        # While the generated prefix is shorter than the target sequence length, the LSTM predicts the next token,
        # so gen_o and gen_x each receive a batch of 64 values per step, until a sequence of the required length is produced
        _, _, _, self.gen_o, self.gen_x = control_flow_ops.while_loop(
            cond=lambda i, _1, _2, _3, _4: i < self.sequence_length,
            body=_g_recurrence,
            loop_vars=(tf.constant(0, dtype=tf.int32),
                       tf.nn.embedding_lookup(self.g_embeddings, self.start_token), self.h0, gen_o, gen_x))

        self.gen_x = self.gen_x.stack()  # seq_length x batch_size
        self.gen_x = tf.transpose(self.gen_x, perm=[1, 0])  # batch_size x seq_length

        # supervised pretraining for generator
        g_predictions = tensor_array_ops.TensorArray(dtype=tf.float32, size=self.sequence_length,
                                                     dynamic_size=False, infer_shape=True)

        ta_emb_x = tensor_array_ops.TensorArray(dtype=tf.float32, size=self.sequence_length)
        ta_emb_x = ta_emb_x.unstack(self.processed_x)

        def _pretrain_recurrence(i, x_t, h_tm1, g_predictions):
            h_t = self.g_recurrent_unit(x_t, h_tm1)
            o_t = self.g_output_unit(h_t)
            g_predictions = g_predictions.write(i, tf.nn.softmax(o_t))  # batch x vocab_size
            x_tp1 = ta_emb_x.read(i)
            return i + 1, x_tp1, h_t, g_predictions

        _, _, _, self.g_predictions = control_flow_ops.while_loop(
            cond=lambda i, _1, _2, _3: i < self.sequence_length,
            body=_pretrain_recurrence,
            loop_vars=(tf.constant(0, dtype=tf.int32),
                       tf.nn.embedding_lookup(self.g_embeddings, self.start_token),
                       self.h0, g_predictions))

        self.g_predictions = tf.transpose(self.g_predictions.stack(), perm=[1, 0, 2])  # batch_size x seq_length x vocab_size

        # pretraining loss
        self.pretrain_loss = -tf.reduce_sum(
            tf.one_hot(tf.to_int32(tf.reshape(self.x, [-1])), self.num_emb, 1.0, 0.0) * tf.log(
                tf.clip_by_value(tf.reshape(self.g_predictions, [-1, self.num_emb]), 1e-20, 1.0)
            )
        ) / (self.sequence_length * self.batch_size)

        # training updates
        pretrain_opt = self.g_optimizer(self.learning_rate)

        self.pretrain_grad, _ = tf.clip_by_global_norm(tf.gradients(self.pretrain_loss, self.g_params), self.grad_clip)
        self.pretrain_updates = pretrain_opt.apply_gradients(zip(self.pretrain_grad, self.g_params))

        #######################################################################################################
        #  Unsupervised Training
        #  Multiply the log-probability of each predicted token by its reward: the one-hot mask (a sum over
        #  every word y in the vocabulary) keeps only the sampled token, and summing over all 20 time steps
        #  of every generated sequence gives the final loss.
        #######################################################################################################
        self.g_loss = -tf.reduce_sum(
            tf.reduce_sum(
                tf.one_hot(tf.to_int32(tf.reshape(self.x, [-1])), self.num_emb, 1.0, 0.0) * tf.log(
                    tf.clip_by_value(tf.reshape(self.g_predictions, [-1, self.num_emb]), 1e-20, 1.0)
                ), 1) * tf.reshape(self.rewards, [-1])
        )

        g_opt = self.g_optimizer(self.learning_rate)

        self.g_grad, _ = tf.clip_by_global_norm(tf.gradients(self.g_loss, self.g_params), self.grad_clip)
        self.g_updates = g_opt.apply_gradients(zip(self.g_grad, self.g_params))

    def generate(self, sess):
        outputs = sess.run(self.gen_x)
        return outputs

    def pretrain_step(self, sess, x):
        outputs = sess.run([self.pretrain_updates, self.pretrain_loss], feed_dict={self.x: x})
        return outputs

    def init_matrix(self, shape):
        return tf.random_normal(shape, stddev=0.1)

    def init_vector(self, shape):
        return tf.zeros(shape)

    # ----------------------------- a complete LSTM -------------------------------------- #
    def create_recurrent_unit(self, params):
        # Weights and Bias for input and hidden tensor
        self.Wi = tf.Variable(self.init_matrix([self.emb_dim, self.hidden_dim]))  # 32x32
        self.Ui = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.bi = tf.Variable(self.init_matrix([self.hidden_dim]))

        self.Wf = tf.Variable(self.init_matrix([self.emb_dim, self.hidden_dim]))
        self.Uf = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.bf = tf.Variable(self.init_matrix([self.hidden_dim]))

        self.Wog = tf.Variable(self.init_matrix([self.emb_dim, self.hidden_dim]))
        self.Uog = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.bog = tf.Variable(self.init_matrix([self.hidden_dim]))

        self.Wc = tf.Variable(self.init_matrix([self.emb_dim, self.hidden_dim]))
        self.Uc = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.bc = tf.Variable(self.init_matrix([self.hidden_dim]))
        params.extend([
            self.Wi, self.Ui, self.bi,
            self.Wf, self.Uf, self.bf,
            self.Wog, self.Uog, self.bog,
            self.Wc, self.Uc, self.bc])

        # ---------------------- one step of the LSTM cell ----------------------------- #
        def unit(x, hidden_memory_tm1):
            #pdb.set_trace()
            previous_hidden_state, c_prev = tf.unstack(hidden_memory_tm1)  # split the stacked [h, c] along the first axis

            # Input Gate
            i = tf.sigmoid(
                tf.matmul(x, self.Wi) +
                tf.matmul(previous_hidden_state, self.Ui) + self.bi
            )

            # Forget Gate
            f = tf.sigmoid(
                tf.matmul(x, self.Wf) +
                tf.matmul(previous_hidden_state, self.Uf) + self.bf
            )

            # Output Gate
            o = tf.sigmoid(
                tf.matmul(x, self.Wog) +
                tf.matmul(previous_hidden_state, self.Uog) + self.bog
            )

            # New Memory Cell
            c_ = tf.nn.tanh(
                tf.matmul(x, self.Wc) +
                tf.matmul(previous_hidden_state, self.Uc) + self.bc
            )

            # Final Memory cell
            c = f * c_prev + i * c_

            # Current Hidden state
            current_hidden_state = o * tf.nn.tanh(c)

            return tf.stack([current_hidden_state, c])

        return unit

    def create_output_unit(self, params):
        self.Wo = tf.Variable(self.init_matrix([self.hidden_dim, self.num_emb]))
        self.bo = tf.Variable(self.init_matrix([self.num_emb]))
        params.extend([self.Wo, self.bo])

        def unit(hidden_memory_tuple):
            hidden_state, c_prev = tf.unstack(hidden_memory_tuple)
            # hidden_state : batch x hidden_dim
            logits = tf.matmul(hidden_state, self.Wo) + self.bo
            # output = tf.nn.softmax(logits)
            return logits

        return unit

    def g_optimizer(self, *args, **kwargs):
        return tf.train.AdamOptimizer(*args, **kwargs)
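To summarize the two objectives defined above in my own notation (following the SeqGAN paper), with B the batch size and T the sequence length: pretrain_loss is the per-token maximum-likelihood loss on sequences fed through self.x,

\text{pretrain\_loss} = -\frac{1}{T \cdot B}\sum_{b=1}^{B}\sum_{t=1}^{T} \log p_\theta\big(x_{b,t} \mid x_{b,<t}\big)

while g_loss is the REINFORCE-style policy-gradient surrogate,

\text{g\_loss} = -\sum_{b=1}^{B}\sum_{t=1}^{T} r_{b,t} \, \log p_\theta\big(x_{b,t} \mid x_{b,<t}\big)

The one-hot mask times log(g_predictions), summed over the vocabulary, selects exactly the log-probability of the token in self.x; during adversarial training self.x is fed with the generator's own samples and r_{b,t} comes from rollout.get_reward, so minimizing g_loss increases the probability of tokens that received high rewards.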
# discriminator.py
import tensorflow as tf
import numpy as np
import pdb

# An alternative to the tf.nn.rnn_cell._linear function, which was removed in TensorFlow 1.0.1
# The highway layer is borrowed from https://github.com/mkroutikov/tf-lstm-char-cnn
def linear(input_, output_size, scope=None):
    '''
    Linear map: output[k] = sum_i(Matrix[k, i] * input_[i] ) + Bias[k]
    Args:
        input_: a tensor or a list of 2D, batch x n, Tensors.
        output_size: int, second dimension of W[i].
        scope: VariableScope for the created subgraph; defaults to "Linear".
    Returns:
        A 2D Tensor with shape [batch x output_size] equal to
        sum_i(input_[i] * W[i]), where W[i]s are newly created matrices.
    Raises:
        ValueError: if some of the arguments has unspecified or wrong shape.
    '''

    shape = input_.get_shape().as_list()
    if len(shape) != 2:
        raise ValueError("Linear is expecting 2D arguments: %s" % str(shape))
    if not shape[1]:
        raise ValueError("Linear expects shape[1] of arguments: %s" % str(shape))
    input_size = shape[1]

    # Now the computation.
    with tf.variable_scope(scope or "SimpleLinear"):
        matrix = tf.get_variable("Matrix", [output_size, input_size], dtype=input_.dtype)
        bias_term = tf.get_variable("Bias", [output_size], dtype=input_.dtype)

    return tf.matmul(input_, tf.transpose(matrix)) + bias_term


def highway(input_, size, num_layers=1, bias=-2.0, f=tf.nn.relu, scope='Highway'):
    """Highway Network (cf. http://arxiv.org/abs/1505.00387).
    t = sigmoid(Wy + b)
    z = t * g(Wy + b) + (1 - t) * y
    where g is nonlinearity, t is transform gate, and (1 - t) is carry gate.
    """

    with tf.variable_scope(scope):
        for idx in range(num_layers):
            g = f(linear(input_, size, scope='highway_lin_%d' % idx))
            t = tf.sigmoid(linear(input_, size, scope='highway_gate_%d' % idx) + bias)
            output = t * g + (1. - t) * input_
            input_ = output

    return output


class Discriminator(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
    """

    def __init__(self, sequence_length, num_classes, vocab_size,
                 embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        with tf.variable_scope('discriminator'):
            # Embedding layer
            with tf.device('/cpu:0'), tf.name_scope("embedding"):
                self.W = tf.Variable(
                    tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                    name="W")
                self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
                self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)

            # Create a convolution + maxpool layer for each filter size
            pooled_outputs = []
            for filter_size, num_filter in zip(filter_sizes, num_filters):
                with tf.name_scope("conv-maxpool-%s" % filter_size):
                    # Convolution Layer
                    filter_shape = [filter_size, embedding_size, 1, num_filter]
                    W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                    b = tf.Variable(tf.constant(0.1, shape=[num_filter]), name="b")
                    conv = tf.nn.conv2d(
                        self.embedded_chars_expanded,
                        W,
                        strides=[1, 1, 1, 1],
                        padding="VALID",
                        name="conv")
                    # Apply nonlinearity
                    h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                    # Maxpooling over the outputs
                    pooled = tf.nn.max_pool(
                        h,
                        ksize=[1, sequence_length - filter_size + 1, 1, 1],
                        strides=[1, 1, 1, 1],
                        padding='VALID',
                        name="pool")
                    pooled_outputs.append(pooled)
            #pdb.set_trace()
            # Combine all the pooled features
            num_filters_total = sum(num_filters)
            self.h_pool = tf.concat(pooled_outputs, 3)
            self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

            # Add highway
            with tf.name_scope("highway"):
                self.h_highway = highway(self.h_pool_flat, self.h_pool_flat.get_shape()[1], 1, 0)

            # Add dropout
            with tf.name_scope("dropout"):
                self.h_drop = tf.nn.dropout(self.h_highway, self.dropout_keep_prob)

            # Final (unnormalized) scores and predictions
            with tf.name_scope("output"):
                W = tf.Variable(tf.truncated_normal([num_filters_total, num_classes], stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
                l2_loss += tf.nn.l2_loss(W)
                l2_loss += tf.nn.l2_loss(b)
                self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
                self.ypred_for_auc = tf.nn.softmax(self.scores)
                self.predictions = tf.argmax(self.scores, 1, name="predictions")

            # Calculate mean cross-entropy loss
            with tf.name_scope("loss"):
                losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
                self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
                tf.summary.scalar('loss', self.loss)

        self.params = [param for param in tf.trainable_variables() if 'discriminator' in param.name]
        d_optimizer = tf.train.AdamOptimizer(1e-4)
        grads_and_vars = d_optimizer.compute_gradients(self.loss, self.params, aggregation_method=2)
        self.train_op = d_optimizer.apply_gradients(grads_and_vars)
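In other words (a sketch in my notation, not text from the original post): because real sequences carry the label [0, 1] and generated sequences carry [1, 0], the mean cross-entropy minimized by train_op is the standard SeqGAN discriminator objective plus L2 regularization,

\min_\phi \; -\,\mathbb{E}_{Y \sim p_{\text{data}}}\big[\log D_\phi(Y)\big] \;-\; \mathbb{E}_{Y \sim G_\theta}\big[\log\big(1 - D_\phi(Y)\big)\big]

where D_phi(Y) corresponds to ypred_for_auc[:, 1], the softmax probability that a sequence is real; this same column is what the rollout module below reads off as the reward.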
# rollout.py
import tensorflow as tf
from tensorflow.python.ops import tensor_array_ops, control_flow_ops
import numpy as np


class ROLLOUT(object):
    def __init__(self, lstm, update_rate):
        self.lstm = lstm
        self.update_rate = update_rate

        self.num_emb = self.lstm.num_emb
        self.batch_size = self.lstm.batch_size
        self.emb_dim = self.lstm.emb_dim
        self.hidden_dim = self.lstm.hidden_dim
        self.sequence_length = self.lstm.sequence_length
        self.start_token = tf.identity(self.lstm.start_token)
        self.learning_rate = self.lstm.learning_rate

        self.g_embeddings = tf.identity(self.lstm.g_embeddings)
        self.g_recurrent_unit = self.create_recurrent_unit()  # maps h_tm1 to h_t for generator
        self.g_output_unit = self.create_output_unit()  # maps h_t to o_t (output token logits)

        #####################################################################################################
        # placeholder definition
        self.x = tf.placeholder(tf.int32, shape=[self.batch_size, self.sequence_length])  # sequence of tokens generated by generator
        self.given_num = tf.placeholder(tf.int32)

        # processed for batch
        with tf.device("/cpu:0"):
            self.processed_x = tf.transpose(tf.nn.embedding_lookup(self.g_embeddings, self.x), perm=[1, 0, 2])  # seq_length x batch_size x emb_dim

        ta_emb_x = tensor_array_ops.TensorArray(dtype=tf.float32, size=self.sequence_length)
        ta_emb_x = ta_emb_x.unstack(self.processed_x)

        ta_x = tensor_array_ops.TensorArray(dtype=tf.int32, size=self.sequence_length)
        ta_x = ta_x.unstack(tf.transpose(self.x, perm=[1, 0]))
        #####################################################################################################

        self.h0 = tf.zeros([self.batch_size, self.hidden_dim])
        self.h0 = tf.stack([self.h0, self.h0])

        gen_x = tensor_array_ops.TensorArray(dtype=tf.int32, size=self.sequence_length,
                                             dynamic_size=False, infer_shape=True)

        # When current index i < given_num, use the provided tokens as the input at each time step
        def _g_recurrence_1(i, x_t, h_tm1, given_num, gen_x):
            h_t = self.g_recurrent_unit(x_t, h_tm1)  # hidden_memory_tuple
            x_tp1 = ta_emb_x.read(i)
            gen_x = gen_x.write(i, ta_x.read(i))
            return i + 1, x_tp1, h_t, given_num, gen_x

        # When current index i >= given_num, start the roll-out: use the output at time step t as the input at time step t+1
        def _g_recurrence_2(i, x_t, h_tm1, given_num, gen_x):
            h_t = self.g_recurrent_unit(x_t, h_tm1)  # hidden_memory_tuple
            o_t = self.g_output_unit(h_t)  # batch x vocab , logits not prob
            log_prob = tf.log(tf.nn.softmax(o_t))
            next_token = tf.cast(tf.reshape(tf.multinomial(log_prob, 1), [self.batch_size]), tf.int32)
            x_tp1 = tf.nn.embedding_lookup(self.g_embeddings, next_token)  # batch x emb_dim
            gen_x = gen_x.write(i, next_token)  # indices, batch_size
            return i + 1, x_tp1, h_t, given_num, gen_x

        i, x_t, h_tm1, given_num, self.gen_x = control_flow_ops.while_loop(
            cond=lambda i, _1, _2, given_num, _4: i < given_num,
            body=_g_recurrence_1,
            loop_vars=(tf.constant(0, dtype=tf.int32),
                       tf.nn.embedding_lookup(self.g_embeddings, self.start_token), self.h0, self.given_num, gen_x))

        _, _, _, _, self.gen_x = control_flow_ops.while_loop(
            cond=lambda i, _1, _2, _3, _4: i < self.sequence_length,
            body=_g_recurrence_2,
            loop_vars=(i, x_t, h_tm1, given_num, self.gen_x))

        self.gen_x = self.gen_x.stack()  # seq_length x batch_size
        self.gen_x = tf.transpose(self.gen_x, perm=[1, 0])  # batch_size x seq_length

    def get_reward(self, sess, input_x, rollout_num, discriminator):
        # input_x is the batch of sequences to be scored
        # rollout_num: how many Monte Carlo rollouts to run, i.e. over how many rewards we average
        rewards = []
        for i in range(rollout_num):
            # given_num ranges from 1 to sequence_length - 1 for a partially completed sentence
            for given_num in range(1, self.sequence_length):
                # generate a sample: the first given_num tokens come from input_x,
                # the tokens after given_num are completed by the generator
                feed = {self.x: input_x, self.given_num: given_num}
                samples = sess.run(self.gen_x, feed)
                feed = {discriminator.input_x: samples, discriminator.dropout_keep_prob: 1.0}
                ypred_for_auc = sess.run(discriminator.ypred_for_auc, feed)
                # the discriminator scores each sentence; the score is used as the reward
                ypred = np.array([item[1] for item in ypred_for_auc])
                if i == 0:
                    rewards.append(ypred)
                else:
                    rewards[given_num - 1] += ypred

            # the last token reward: once the whole sequence is given, the sample fed to the discriminator is exactly input_x
            feed = {discriminator.input_x: input_x, discriminator.dropout_keep_prob: 1.0}
            # ypred_for_auc is a two-class output: the first entry is the probability that the sample is fake,
            # the second entry is the probability that it is real
            ypred_for_auc = sess.run(discriminator.ypred_for_auc, feed)
            ypred = np.array([item[1] for item in ypred_for_auc])
            if i == 0:
                rewards.append(ypred)
            else:
                # completed sentence reward
                rewards[self.sequence_length - 1] += ypred

        rewards = np.transpose(np.array(rewards)) / (1.0 * rollout_num)  # batch_size x seq_length
        return rewards

    def create_recurrent_unit(self):
        # Weights and Bias for input and hidden tensor
        self.Wi = tf.identity(self.lstm.Wi)
        self.Ui = tf.identity(self.lstm.Ui)
        self.bi = tf.identity(self.lstm.bi)

        self.Wf = tf.identity(self.lstm.Wf)
        self.Uf = tf.identity(self.lstm.Uf)
        self.bf = tf.identity(self.lstm.bf)

        self.Wog = tf.identity(self.lstm.Wog)
        self.Uog = tf.identity(self.lstm.Uog)
        self.bog = tf.identity(self.lstm.bog)

        self.Wc = tf.identity(self.lstm.Wc)
        self.Uc = tf.identity(self.lstm.Uc)
        self.bc = tf.identity(self.lstm.bc)

        def unit(x, hidden_memory_tm1):
            previous_hidden_state, c_prev = tf.unstack(hidden_memory_tm1)

            # Input Gate
            i = tf.sigmoid(
                tf.matmul(x, self.Wi) +
                tf.matmul(previous_hidden_state, self.Ui) + self.bi
            )

            # Forget Gate
            f = tf.sigmoid(
                tf.matmul(x, self.Wf) +
                tf.matmul(previous_hidden_state, self.Uf) + self.bf
            )

            # Output Gate
            o = tf.sigmoid(
                tf.matmul(x, self.Wog) +
                tf.matmul(previous_hidden_state, self.Uog) + self.bog
            )

            # New Memory Cell
            c_ = tf.nn.tanh(
                tf.matmul(x, self.Wc) +
                tf.matmul(previous_hidden_state, self.Uc) + self.bc
            )

            # Final Memory cell
            c = f * c_prev + i * c_

            # Current Hidden state
            current_hidden_state = o * tf.nn.tanh(c)

            return tf.stack([current_hidden_state, c])

        return unit

    def update_recurrent_unit(self):
        # Weights and Bias for input and hidden tensor
        self.Wi = self.update_rate * self.Wi + (1 - self.update_rate) * tf.identity(self.lstm.Wi)
        self.Ui = self.update_rate * self.Ui + (1 - self.update_rate) * tf.identity(self.lstm.Ui)
        self.bi = self.update_rate * self.bi + (1 - self.update_rate) * tf.identity(self.lstm.bi)

        self.Wf = self.update_rate * self.Wf + (1 - self.update_rate) * tf.identity(self.lstm.Wf)
        self.Uf = self.update_rate * self.Uf + (1 - self.update_rate) * tf.identity(self.lstm.Uf)
        self.bf = self.update_rate * self.bf + (1 - self.update_rate) * tf.identity(self.lstm.bf)

        self.Wog = self.update_rate * self.Wog + (1 - self.update_rate) * tf.identity(self.lstm.Wog)
        self.Uog = self.update_rate * self.Uog + (1 - self.update_rate) * tf.identity(self.lstm.Uog)
        self.bog = self.update_rate * self.bog + (1 - self.update_rate) * tf.identity(self.lstm.bog)

        self.Wc = self.update_rate * self.Wc + (1 - self.update_rate) * tf.identity(self.lstm.Wc)
        self.Uc = self.update_rate * self.Uc + (1 - self.update_rate) * tf.identity(self.lstm.Uc)
        self.bc = self.update_rate * self.bc + (1 - self.update_rate) * tf.identity(self.lstm.bc)

        def unit(x, hidden_memory_tm1):
            previous_hidden_state, c_prev = tf.unstack(hidden_memory_tm1)

            # Input Gate
            i = tf.sigmoid(
                tf.matmul(x, self.Wi) +
                tf.matmul(previous_hidden_state, self.Ui) + self.bi
            )

            # Forget Gate
            f = tf.sigmoid(
                tf.matmul(x, self.Wf) +
                tf.matmul(previous_hidden_state, self.Uf) + self.bf
            )

            # Output Gate
            o = tf.sigmoid(
                tf.matmul(x, self.Wog) +
                tf.matmul(previous_hidden_state, self.Uog) + self.bog
            )

            # New Memory Cell
            c_ = tf.nn.tanh(
                tf.matmul(x, self.Wc) +
                tf.matmul(previous_hidden_state, self.Uc) + self.bc
            )

            # Final Memory cell
            c = f * c_prev + i * c_

            # Current Hidden state
            current_hidden_state = o * tf.nn.tanh(c)

            return tf.stack([current_hidden_state, c])

        return unit

    def create_output_unit(self):
        self.Wo = tf.identity(self.lstm.Wo)
        self.bo = tf.identity(self.lstm.bo)

        def unit(hidden_memory_tuple):
            hidden_state, c_prev = tf.unstack(hidden_memory_tuple)
            # hidden_state : batch x hidden_dim
            logits = tf.matmul(hidden_state, self.Wo) + self.bo
            # output = tf.nn.softmax(logits)
            return logits

        return unit

    def update_output_unit(self):
        self.Wo = self.update_rate * self.Wo + (1 - self.update_rate) * tf.identity(self.lstm.Wo)
        self.bo = self.update_rate * self.bo + (1 - self.update_rate) * tf.identity(self.lstm.bo)

        def unit(hidden_memory_tuple):
            hidden_state, c_prev = tf.unstack(hidden_memory_tuple)
            # hidden_state : batch x hidden_dim
            logits = tf.matmul(hidden_state, self.Wo) + self.bo
            # output = tf.nn.softmax(logits)
            return logits

        return unit

    def update_params(self):
        self.g_embeddings = tf.identity(self.lstm.g_embeddings)
        self.g_recurrent_unit = self.update_recurrent_unit()
        self.g_output_unit = self.update_output_unit()
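Restated as a reading aid: get_reward computes the Monte Carlo action-value estimate from the SeqGAN paper,

Q_{D_\phi}^{G_\theta}(Y_{1:t-1}, y_t) =
\begin{cases}
\dfrac{1}{N}\sum_{n=1}^{N} D_\phi(Y_{1:T}^{n}), & Y_{1:T}^{n} \in \mathrm{MC}^{G_\beta}(Y_{1:t}; N), & t < T \\
D_\phi(Y_{1:T}), & t = T
\end{cases}

Here N = rollout_num (16 in sequence_gan.py), D_phi is ypred_for_auc[:, 1], and G_beta is the roll-out copy of the generator whose weights are soft-updated by update_params with update_rate = 0.8 (keep 80% of the old roll-out weights, take 20% from the current generator), so the returned rewards array of shape batch_size x seq_length is exactly the per-token reward fed into generator.g_loss.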
