11-1 Week 10 Assignment: Captcha Recognition (unfinished)

#!/usr/bin/env python
# coding: utf-8

import os
import tensorflow as tf
from PIL import Image
from nets2 import nets_factory
import numpy as np

# Number of distinct characters
CHAR_SET_LEN = 10
# Image height
IMAGE_HEIGHT = 60
# Image width
IMAGE_WIDTH = 160
# Batch size
BATCH_SIZE = 25
# Path of the tfrecord file
TFRECORD_FILE = "D:/python_data/captcha_logs/train.tfrecords"

# placeholders
x = tf.placeholder(tf.float32, [None, 224, 224])
y0 = tf.placeholder(tf.float32, [None])
y1 = tf.placeholder(tf.float32, [None])
y2 = tf.placeholder(tf.float32, [None])
y3 = tf.placeholder(tf.float32, [None])

# Learning rate
lr = tf.Variable(0.003, dtype=tf.float32)

# Read data from the tfrecord file
def read_and_decode(filename):
    # Create a filename queue
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    # Return the file name and the file
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'image' : tf.FixedLenFeature([], tf.string),
                                           'label0': tf.FixedLenFeature([], tf.int64),
                                           'label1': tf.FixedLenFeature([], tf.int64),
                                           'label2': tf.FixedLenFeature([], tf.int64),
                                           'label3': tf.FixedLenFeature([], tf.int64),
                                       })
    # Get the image data
    image = tf.decode_raw(features['image'], tf.uint8)
    # tf.train.shuffle_batch needs a fixed shape
    image = tf.reshape(image, [224, 224])
    # Preprocess the image
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    # Get the labels
    label0 = tf.cast(features['label0'], tf.int32)
    label1 = tf.cast(features['label1'], tf.int32)
    label2 = tf.cast(features['label2'], tf.int32)
    label3 = tf.cast(features['label3'], tf.int32)
    return image, label0, label1, label2, label3

# Get image data and labels
image, label0, label1, label2, label3 = read_and_decode(TFRECORD_FILE)

# shuffle_batch randomly shuffles the examples
image_batch, label_batch0, label_batch1, label_batch2, label_batch3 = tf.train.shuffle_batch(
    [image, label0, label1, label2, label3], batch_size=BATCH_SIZE,
    capacity=50000, min_after_dequeue=10000, num_threads=1)

# Define the network structure
train_network_fn = nets_factory.get_network_fn(
    'alexnet_v2',
    num_classes=CHAR_SET_LEN * 4,
    weight_decay=0.0005,
    is_training=True)

with tf.Session() as sess:
    # inputs: a tensor of size [batch_size, height, width, channels]
    X = tf.reshape(x, [BATCH_SIZE, 224, 224, 1])
    # Feed the data into the network and get the output
    logits, end_points = train_network_fn(X)

    # Convert the labels to one-hot form
    one_hot_labels0 = tf.one_hot(indices=tf.cast(y0, tf.int32), depth=CHAR_SET_LEN)
    one_hot_labels1 = tf.one_hot(indices=tf.cast(y1, tf.int32), depth=CHAR_SET_LEN)
    one_hot_labels2 = tf.one_hot(indices=tf.cast(y2, tf.int32), depth=CHAR_SET_LEN)
    one_hot_labels3 = tf.one_hot(indices=tf.cast(y3, tf.int32), depth=CHAR_SET_LEN)
    # Concatenate the labels into a vector of length 40
    label_40 = tf.concat([one_hot_labels0, one_hot_labels1, one_hot_labels2, one_hot_labels3], 1)
    # Compute the loss
    # Note: TensorFlow has no tf.nn.sigmoid_cross_entropy_with_logits_v2; for this
    # multi-label target the plain sigmoid cross-entropy is used instead.
    loss_40 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=label_40))
    # Optimize the loss
    optimizer_40 = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss_40)
    # Compute the accuracy
    correct_prediction_40 = tf.equal(tf.argmax(label_40, 1), tf.argmax(logits, 1))
    accuracy_40 = tf.reduce_mean(tf.cast(correct_prediction_40, tf.float32))

    # Saver for the model
    saver = tf.train.Saver()
    # Initialize
    sess.run(tf.global_variables_initializer())

    # Create a coordinator to manage the threads
    coord = tf.train.Coordinator()
    # Start the QueueRunners; the filename queue is now filled
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    for i in range(10001):
        # Get one batch of data and labels
        b_image, b_label0, b_label1, b_label2, b_label3 = sess.run(
            [image_batch, label_batch0, label_batch1, label_batch2, label_batch3])
        # Train the model
        sess.run(optimizer_40, feed_dict={x: b_image, y0: b_label0, y1: b_label1, y2: b_label2, y3: b_label3})

        # Every 20 iterations compute the loss and accuracy
        if i % 20 == 0:
            # Every 3000 iterations lower the learning rate
            if i % 3000 == 0:
                sess.run(tf.assign(lr, lr / 3))
            acc, loss_ = sess.run([accuracy_40, loss_40],
                                  feed_dict={x: b_image, y0: b_label0, y1: b_label1, y2: b_label2, y3: b_label3})
            learning_rate = sess.run(lr)
            print("Iter:%d  Loss:%.3f  Accuracy:%.2f  Learning_rate:%.4f" % (i, loss_, acc, learning_rate))
            # Commented-out variant for the multi-head network with four separate accuracies:
            # acc0, acc1, acc2, acc3, loss_ = sess.run([accuracy0, accuracy1, accuracy2, accuracy3, total_loss],
            #                                          feed_dict={x: b_image, y0: b_label0, y1: b_label1, y2: b_label2, y3: b_label3})
            # learning_rate = sess.run(lr)
            # print("Iter:%d  Loss:%.3f  Accuracy:%.2f,%.2f,%.2f,%.2f  Learning_rate:%.4f" % (i, loss_, acc0, acc1, acc2, acc3, learning_rate))

            # Save the model
            if i == 10000:
                saver.save(sess, "D:/python_data/captcha_logs/models/crack_captcha.model", global_step=i)
                break

    # Ask the other threads to stop
    coord.request_stop()
    # This call returns only after all other threads have stopped
    coord.join(threads)

The error is as follows:
File "C:/Users/SSC/PycharmProjects/test20191011/11-1第十周作业-验证码识别.py", line 93, in <module>
    logits, end_points = train_network_fn(X)
ValueError: too many values to unpack (expected 2)
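The message means that train_network_fn(X) returned more than the two values the unpacking expects. In the stock slim nets_factory, network_fn(images) returns a (logits, end_points) pair, so a plausible cause, though only an assumption since the nets2 package itself is not shown here, is that this nets2 build of alexnet_v2 was already modified for the multi-task captcha version and returns one logits tensor per character plus end_points. A minimal diagnostic sketch to confirm what is actually returned:

outputs = train_network_fn(X)
# Diagnostic only: see how many values come back before trying to unpack them.
print(type(outputs), len(outputs) if isinstance(outputs, (tuple, list)) else outputs)
# If this reports a tuple of length 5, nets2's alexnet_v2 is the multi-head variant
# (logits0..logits3 plus end_points) and the single 40-way logits code above does not match it.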

11-2 Week 10 Assignment: Captcha Testing (unfinished)

#!/usr/bin/env python
# coding: utf-8

import os
import tensorflow as tf
from PIL import Image
from nets2 import nets_factory
import numpy as np
import matplotlib.pyplot as plt

# Number of distinct characters
CHAR_SET_LEN = 10
# Image height
IMAGE_HEIGHT = 60
# Image width
IMAGE_WIDTH = 160
# Batch size
BATCH_SIZE = 1
# Path of the tfrecord file
TFRECORD_FILE = "D:/python_data/captcha_logs/test.tfrecords"

# placeholder
x = tf.placeholder(tf.float32, [None, 224, 224])

# Read data from the tfrecord file
def read_and_decode(filename):
    # Create a filename queue
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    # Return the file name and the file
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'image' : tf.FixedLenFeature([], tf.string),
                                           'label0': tf.FixedLenFeature([], tf.int64),
                                           'label1': tf.FixedLenFeature([], tf.int64),
                                           'label2': tf.FixedLenFeature([], tf.int64),
                                           'label3': tf.FixedLenFeature([], tf.int64),
                                       })
    # Get the image data
    image = tf.decode_raw(features['image'], tf.uint8)
    # Grayscale image without preprocessing, kept for display
    image_raw = tf.reshape(image, [224, 224])
    # tf.train.shuffle_batch needs a fixed shape
    image = tf.reshape(image, [224, 224])
    # Preprocess the image
    image = tf.cast(image, tf.float32) / 255.0
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    # Get the labels
    label0 = tf.cast(features['label0'], tf.int32)
    label1 = tf.cast(features['label1'], tf.int32)
    label2 = tf.cast(features['label2'], tf.int32)
    label3 = tf.cast(features['label3'], tf.int32)
    return image, image_raw, label0, label1, label2, label3

# Get image data and labels
image, image_raw, label0, label1, label2, label3 = read_and_decode(TFRECORD_FILE)

# shuffle_batch randomly shuffles the examples
image_batch, image_raw_batch, label_batch0, label_batch1, label_batch2, label_batch3 = tf.train.shuffle_batch(
    [image, image_raw, label0, label1, label2, label3], batch_size=BATCH_SIZE,
    capacity=50000, min_after_dequeue=10000, num_threads=1)

# Define the network structure
train_network_fn = nets_factory.get_network_fn(
    'alexnet_v2',
    num_classes=CHAR_SET_LEN * 4,
    weight_decay=0.0005,
    is_training=False)

with tf.Session() as sess:
    # inputs: a tensor of size [batch_size, height, width, channels]
    X = tf.reshape(x, [BATCH_SIZE, 224, 224, 1])
    # Feed the data into the network and get the output
    logits, end_points = train_network_fn(X)

    # Predictions: slice the 40-way output into four 10-way outputs
    logits0 = tf.slice(logits, [0, 0],  [-1, 10])
    logits1 = tf.slice(logits, [0, 10], [-1, 10])
    logits2 = tf.slice(logits, [0, 20], [-1, 10])
    logits3 = tf.slice(logits, [0, 30], [-1, 10])
    predict0 = tf.argmax(logits0, 1)
    predict1 = tf.argmax(logits1, 1)
    predict2 = tf.argmax(logits2, 1)
    predict3 = tf.argmax(logits3, 1)

    # Initialize
    sess.run(tf.global_variables_initializer())
    # Load the trained model
    saver = tf.train.Saver()
    saver.restore(sess, 'D:/python_data/captcha_logs/models/crack_captcha.model-10000')

    # Create a coordinator to manage the threads
    coord = tf.train.Coordinator()
    # Start the QueueRunners; the filename queue is now filled
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    for i in range(10):
        # Get one batch of data and labels
        b_image, b_image_raw, b_label0, b_label1, b_label2, b_label3 = sess.run(
            [image_batch, image_raw_batch, label_batch0, label_batch1, label_batch2, label_batch3])
        # Show the image
        img = Image.fromarray(b_image_raw[0], 'L')
        plt.imshow(img)
        plt.axis('off')
        plt.show()
        # Print the labels
        print('label:', b_label0, b_label1, b_label2, b_label3)
        # Predict
        label0, label1, label2, label3 = sess.run([predict0, predict1, predict2, predict3], feed_dict={x: b_image})
        # Print the predictions
        print('predict:', label0, label1, label2, label3)

    # Ask the other threads to stop
    coord.request_stop()
    # This call returns only after all other threads have stopped
    coord.join(threads)

The error is the same as in the previous program:
File "C:/Users/SSC/PycharmProjects/test20191011/11-1第十周作业-验证码识别.py", line 93, in <module>
    logits, end_points = train_network_fn(X)
ValueError: too many values to unpack (expected 2)

11-3 simple_word2vec

# encoding=utf8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import math
import os
import random
import zipfile

import numpy as np
from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'

# Download the dataset
def maybe_download(filename, expected_bytes):
    """Download a file if not present, and make sure it's the right size."""
    if not os.path.exists(filename):
        filename, _ = urllib.request.urlretrieve(url + filename, filename)
    # Get the file attributes
    statinfo = os.stat(filename)
    # Check that the file size is correct
    if statinfo.st_size == expected_bytes:
        print('Found and verified', filename)
    else:
        print(statinfo.st_size)
        raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?')
    return filename

filename = maybe_download('text8.zip', 31344016)

# Read the data into a list of strings.
def read_data(filename):
    """Extract the first file enclosed in a zip file as a list of words"""
    with zipfile.ZipFile(filename) as f:
        data = tf.compat.as_str(f.read(f.namelist()[0])).split()
    return data

# Word list
words = read_data(filename)
# Data size
print('Data size', len(words))

# Step 2: Build the dictionary and replace rare words with UNK token.
# Keep only 50000 words; every other word becomes UNK
vocabulary_size = 50000

def build_dataset(words, vocabulary_size):
    count = [['UNK', -1]]
    # extend appends a list
    # Counter counts how often each word occurs
    # most_common returns a Top-N list; keep 50000 words including UNK, e.g.
    #   c = Counter('abracadabra')
    #   c.most_common()   -> [('a', 5), ('r', 2), ('b', 2), ('c', 1), ('d', 1)]
    #   c.most_common(3)  -> [('a', 5), ('r', 2), ('b', 2)]
    # The 49999 most frequent words
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
    # Build the dictionary mapping word -> id (0-49999);
    # the more frequent the word, the smaller the id
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    # data holds the whole dataset encoded as ids
    data = list()
    unk_count = 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    # Record the number of UNK words
    count[0][1] = unk_count
    # Dictionary mapping id -> word
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reverse_dictionary

# data               the dataset, encoded as ids
# count              the 50000 most frequent words and their counts
# dictionary         word -> id
# reverse_dictionary id -> word
data, count, dictionary, reverse_dictionary = build_dataset(words, vocabulary_size)
del words  # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])

data_index = 0

# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    buffer = collections.deque(maxlen=span)
    # Fill the buffer: loop span (= 3) times
    for _ in range(span):
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    # Build the batch and labels
    for i in range(batch_size // num_skips):
        target = skip_window  # target label at the center of the buffer
        targets_to_avoid = [skip_window]
        # Loop num_skips (= 2) times: one target word gets two context words
        for j in range(num_skips):
            while target in targets_to_avoid:
                # May pick the word before or after the target first
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[target]
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    # Backtrack a little bit to avoid skipping words in the end of a batch.
    # Go back span words, because after a batch data_index has moved span positions too far to the right
    data_index = (data_index + len(data) - span) % len(data)
    return batch, labels

# Print sample data
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
    print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0], reverse_dictionary[labels[i, 0]])

# Step 4: Build and train a skip-gram model.
batch_size = 128
# Word vector dimension
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 1       # How many words to consider left and right.
num_skips = 2         # How many times to reuse an input to generate a label.

# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16     # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
# Sample 16 integers from 0-99 without replacement
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
# Number of negative samples
num_sampled = 64    # Number of negative examples to sample.

graph = tf.Graph()

with graph.as_default():
    # Input data.
    train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # Ops and variables pinned to the CPU because of missing GPU implementation
    # with tf.device('/cpu:0'):
    # Word embeddings
    # Look up embeddings for inputs.
    embeddings = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    # embedding_lookup(params, ids) returns the rows of params selected by ids,
    # e.g. ids=[1,7,4] returns a tensor made of rows 1, 7 and 4 of params.
    # Extract the embeddings of the words to be trained
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)

    # Construct the variables for the noise-contrastive estimation (NCE) loss
    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size],
                            stddev=1.0 / math.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    loss = tf.reduce_mean(
        tf.nn.nce_loss(weights=nce_weights,
                       biases=nce_biases,
                       labels=train_labels,
                       inputs=embed,
                       num_sampled=num_sampled,
                       num_classes=vocabulary_size))

    # Construct the SGD optimizer using a learning rate of 1.0.
    optimizer = tf.train.GradientDescentOptimizer(1).minimize(loss)

    # Compute the cosine similarity between minibatch examples and all embeddings.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
    normalized_embeddings = embeddings / norm
    # Pick some common words to test cosine similarity on
    valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
    # valid_size == 16
    # [16,128] * [128,50000] = [16,50000]
    similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)

    # Add variable initializer.
    init = tf.global_variables_initializer()

# Step 5: Begin training.
num_steps = 100001
final_embeddings = []

with tf.Session(graph=graph) as session:
    # We must initialize all variables before we use them.
    init.run()
    print("Initialized")

    average_loss = 0
    for step in xrange(num_steps):
        # Get a batch of targets and the corresponding labels, both as ids
        batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)
        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}

        # We perform one update step by evaluating the optimizer op (including it
        # in the list of returned values for session.run())
        _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += loss_val

        # Average loss over the last 2000 steps
        if step % 2000 == 0:
            if step > 0:
                average_loss /= 2000
            # The average loss is an estimate of the loss over the last 2000 batches.
            print("Average loss at step ", step, ": ", average_loss)
            average_loss = 0

        # Note that this is expensive (~20% slowdown if computed every 500 steps)
        if step % 20000 == 0:
            sim = similarity.eval()
            # For each validation word, find the words with the highest cosine similarity
            for i in xrange(valid_size):
                # Look up the word by its id
                valid_word = reverse_dictionary[valid_examples[i]]
                top_k = 8  # number of nearest neighbors
                # Sort from large to small, skip the word itself, take the top_k values
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                log_str = "Nearest to %s:" % valid_word
                for k in xrange(top_k):
                    close_word = reverse_dictionary[nearest[k]]
                    log_str = "%s %s," % (log_str, close_word)
                print(log_str)
    # The word vectors obtained at the end of training
    final_embeddings = normalized_embeddings.eval()

# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
    assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
    # Set the figure size
    plt.figure(figsize=(15, 15))  # in inches
    for i, label in enumerate(labels):
        x, y = low_dim_embs[i, :]
        plt.scatter(x, y)
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.savefig(filename)

try:
    from sklearn.manifold import TSNE
    import matplotlib.pyplot as plt

    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
    # mac: method='exact'
    # Plot 500 points
    plot_only = 500
    low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
    labels = [reverse_dictionary[i] for i in xrange(plot_only)]
    plot_with_labels(low_dim_embs, labels)

except ImportError:
    print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")

Found and verified text8.zip
Data size 17005207
Most common words (+UNK) [['UNK', 418391], ('the', 1061396), ('of', 593677), ('and', 416629), ('one', 411764)]
Sample data [5234, 3081, 12, 6, 195, 2, 3134, 46, 59, 156] ['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first', 'used', 'against']
3081 originated -> 12 as
3081 originated -> 5234 anarchism
12 as -> 6 a
12 as -> 3081 originated
6 a -> 195 term
6 a -> 12 as
195 term -> 2 of
195 term -> 6 a
WARNING:tensorflow:From /home/ssc/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/nn_impl.py:180: add_dispatch_support..wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
WARNING:tensorflow:From :202: calling reduce_sum_v1 (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.
Instructions for updating:
keep_dims is deprecated, use keepdims instead
Initialized
Average loss at step 0 : 288.97705078125
Nearest to there: truth, croesus, sudetenland, discipline, consented, scarlet, kyd, infrastructure,
Nearest to while: dealt, muon, cardiff, fasttrack, lendl, bite, gyroscopic, adour,
Nearest to been: pressure, fatalities, canaan, emperors, maiden, sla, outsold, cargo,
Nearest to history: adaptable, contracting, degli, evoked, hanyu, aged, infinitum, opera,
Nearest to only: elbow, cellulose, snoopy, agra, revolt, theodore, field, caracalla,
Nearest to used: watering, clear, ethereal, realised, realistically, burt, jacksonville, insults,
Nearest to this: ultrasonic, diels, ott, rosa, commandants, pilate, bolton, repent,
Nearest to three: worry, wavelet, media, moderators, lamar, tigris, tint, indigenous,
Nearest to zero: cabaret, posteriori, mcmahon, kleist, oceans, haller, mari, ek,
Nearest to state: midwest, diabetic, braided, horrors, attainment, individualistic, illyrian, cadbury,
Nearest to people: belfast, mammoth, hom, yum, unprecedented, beaulieu, pizan, battista,
Nearest to has: cleaved, seamstress, virginals, shorthand, arbor, upbringing, lensman, bars,
Nearest to which: salvador, janssen, julius, mousetrap, yankovic, incas, dukakis, alum,
Nearest to one: hunted, tangent, braking, these, mehr, vestiges, quoting, bahama,
Nearest to new: coexistence, hausdorff, reformer, rashid, mentored, voids, britain, resorting,
Nearest to up: mutate, permian, inherits, succumb, quit, fuck, compassionate, homeowners,
Average loss at step 2000 : 113.35879691886902
Average loss at step 4000 : 52.4763582508564
Average loss at step 6000 : 33.34202474546432
Average loss at step 8000 : 23.839270351290704
Average loss at step 10000 : 18.041483691334726
Average loss at step 12000 : 14.34144814658165
Average loss at step 14000 : 11.680477630853654
Average loss at step 16000 : 9.830855200529099
Average loss at step 18000 : 8.372542578160763
Average loss at step 20000 : 8.028284571290015
Nearest to there: it, croesus, truth, operatorname, sudetenland, who, scarlet, roper,
Nearest to while: and, with, cardiff, sergeant, dealt, exception, occur, taylor,
Nearest to been: pressure, maiden, are, scientists, introduction, by, cargo, thumbs,
Nearest to history: dasyprocta, aged, hbox, circ, contracting, relatively, adaptable, trench,
Nearest to only: astoria, operatorname, even, theodore, tragedians, argon, elbow, applets,
Nearest to used: clear, regarded, realised, christians, operatorname, slump, realistically, nephew,
Nearest to this: the, it, a, which, pilate, he, suspense, one,
Nearest to three: five, nine, zero, two, six, eight, one, operatorname,
Nearest to zero: nine, eight, five, six, three, four, seven, two,
Nearest to state: height, midwest, dasyprocta, archie, circ, acacia, operatorname, agitation,
Nearest to people: operatorname, mya, yum, null, agouti, died, dasyprocta, solve,
Nearest to has: had, is, was, agouti, operatorname, have, millennium, individualist,
Nearest to which: and, this, that, incas, some, salvador, tissue, one,
Nearest to one: two, three, operatorname, four, six, archie, nine, seven,
Nearest to new: losing, same, asteroid, operatorname, reformer, britain, alongside, napster,
Nearest to up: aquila, chronicle, concepts, quit, ist, map, unix, imran,
Average loss at step 22000 : 7.026395204424858
Average loss at step 24000 : 6.913136872887612
Average loss at step 26000 : 6.730628359913826
Average loss at step 28000 : 6.406256815433502
Average loss at step 30000 : 5.970158792257309
Average loss at step 32000 : 5.985419781506062
Average loss at step 34000 : 5.675910597324371
Average loss at step 36000 : 5.785085339903832
Average loss at step 38000 : 5.493747946858406
Average loss at step 40000 : 5.265156390070915
Nearest to there: it, they, who, croesus, he, albury, angelina, but,
Nearest to while: and, with, although, cardiff, or, when, exception, sergeant,
Nearest to been: be, pressure, maiden, are, by, were, was, scientists,
Nearest to history: dasyprocta, adaptable, aged, hbox, circ, contracting, agouti, standardisation,
Nearest to only: even, astoria, operatorname, birkenau, argon, helix, elbow, tragedians,
Nearest to used: regarded, found, recitative, presbyters, operatorname, christians, somoza, clear,
Nearest to this: which, it, the, that, one, amalthea, a, vdc,
Nearest to three: five, six, four, eight, seven, two, zero, one,
Nearest to zero: five, eight, six, seven, four, three, circ, operatorname,
Nearest to state: diabetic, height, dasyprocta, midwest, circ, archie, agitation, sociale,
Nearest to people: operatorname, yum, mya, recitative, null, imf, airshow, agouti,
Nearest to has: had, is, was, have, agouti, operatorname, individualist, in,
Nearest to which: that, this, it, amalthea, also, and, one, operatorname,
Nearest to one: two, four, three, eight, seven, six, archie, amalthea,
Nearest to new: losing, operatorname, asteroid, reformer, dasyprocta, organizations, same, dearborn,
Nearest to up: aquila, ist, chronicle, permian, succumb, fuck, quit, concepts,
Average loss at step 42000 : 5.3733094739913945
Average loss at step 44000 : 5.23788167154789
Average loss at step 46000 : 5.227378981828689
Average loss at step 48000 : 5.194902821063995
Average loss at step 50000 : 5.009050263285637
Average loss at step 52000 : 5.038351456522942
Average loss at step 54000 : 5.188210473895073
Average loss at step 56000 : 5.05611066699028
Average loss at step 58000 : 5.054555775165558
Average loss at step 60000 : 4.937457258939743
Nearest to there: it, they, but, he, croesus, who, operatorname, albury,
Nearest to while: although, when, with, and, cardiff, or, lendl, michelob,
Nearest to been: be, were, was, pressure, by, maiden, are, scientists,
Nearest to history: dasyprocta, adaptable, circ, aged, agouti, hbox, kapoor, bore,
Nearest to only: even, birkenau, astoria, operatorname, but, argon, kapoor, ursus,
Nearest to used: regarded, found, presbyters, recitative, ethereal, clear, known, agouti,
Nearest to this: which, it, that, the, ursus, amalthea, a, one,
Nearest to three: four, five, six, two, seven, eight, operatorname, one,
Nearest to zero: eight, six, seven, five, four, nine, three, operatorname,
Nearest to state: diabetic, height, dasyprocta, midwest, michelob, sociale, circ, agitation,
Nearest to people: operatorname, ursus, yum, recitative, mya, airshow, null, zero,
Nearest to has: had, was, have, is, ursus, agouti, operatorname, abv,
Nearest to which: this, that, also, it, amalthea, ursus, and, but,
Nearest to one: two, four, six, five, three, eight, seven, archie,
Nearest to new: losing, asteroid, michelob, operatorname, dasyprocta, liao, dearborn, ursus,
Nearest to up: ist, aquila, succumb, fuck, chronicle, permian, bilabial, mukherjee,
Average loss at step 62000 : 5.0088949539661405
Average loss at step 64000 : 4.8211859655380245
Average loss at step 66000 : 4.610192925810814
Average loss at step 68000 : 5.0035795356035235
Average loss at step 70000 : 4.896569334745407
Average loss at step 72000 : 4.7616591372489925
Average loss at step 74000 : 4.803243573069572
Average loss at step 76000 : 4.725066065311432
Average loss at step 78000 : 4.805730449318886
Average loss at step 80000 : 4.795071109175682
Nearest to there: it, they, he, croesus, who, but, operatorname, noir,
Nearest to while: although, and, when, with, or, birkenau, though, lendl,
Nearest to been: be, were, was, become, pressure, by, maiden, gaels,
Nearest to history: dasyprocta, adaptable, bore, circ, conjunction, agouti, relatively, pseudocode,
Nearest to only: even, birkenau, mitral, but, operatorname, microbats, astoria, argon,
Nearest to used: found, known, regarded, presbyters, agouti, recitative, thaler, operatorname,
Nearest to this: which, it, that, the, ursus, amalthea, any, romanticism,
Nearest to three: four, six, five, two, seven, eight, one, zero,
Nearest to zero: five, four, six, seven, eight, nine, three, operatorname,
Nearest to state: height, diabetic, thaler, oxford, dasyprocta, agitation, midwest, michelob,
Nearest to people: operatorname, words, iit, yum, recitative, microcebus, imf, ursus,
Nearest to has: had, have, was, is, ursus, agouti, abv, enters,
Nearest to which: that, this, also, it, amalthea, these, who, ursus,
Nearest to one: seven, two, six, four, three, five, kapoor, archie,
Nearest to new: asteroid, losing, michelob, operatorname, dasyprocta, callithrix, liao, ursus,
Nearest to up: ist, fuck, succumb, permian, aquila, him, chronicle, them,
Average loss at step 82000 : 4.76658174431324
Average loss at step 84000 : 4.75544013440609
Average loss at step 86000 : 4.763636798620224
Average loss at step 88000 : 4.7539977058172225
Average loss at step 90000 : 4.730407988667488
Average loss at step 92000 : 4.661033563613891
Average loss at step 94000 : 4.709836422920227
Average loss at step 96000 : 4.690336490869522
Average loss at step 98000 : 4.596337952613831
Average loss at step 100000 : 4.688487784385681
Nearest to there: they, it, he, often, but, now, operatorname, dinar,
Nearest to while: although, when, and, but, though, or, with, birkenau,
Nearest to been: be, become, was, were, by, pressure, previously, relinquish,
Nearest to history: dasyprocta, adaptable, circ, agouti, bore, kapoor, conjunction, operatorname,
Nearest to only: even, birkenau, mitral, around, operatorname, microbats, but, kapoor,
Nearest to used: known, found, presbyters, regarded, agouti, recitative, referred, thaler,
Nearest to this: which, it, the, that, aquilegia, some, ursus, any,
Nearest to three: five, four, six, seven, two, eight, zero, nine,
Nearest to zero: five, eight, four, seven, nine, six, three, operatorname,
Nearest to state: height, diabetic, oxford, dasyprocta, thaler, sociale, michelob, midwest,
Nearest to people: operatorname, words, recitative, microcebus, iit, ursus, yum, imf,
Nearest to has: had, have, was, is, enters, agouti, ursus, abv,
Nearest to which: that, this, aquilegia, also, it, but, these, amalthea,
Nearest to one: two, seven, four, six, five, eight, kapoor, archie,
Nearest to new: losing, asteroid, michelob, callithrix, certain, operatorname, dasyprocta, liao,
Nearest to up: ist, fuck, him, succumb, them, out, permian, aquila,
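As a small follow-up to the run above, here is a sketch of my own (an addition, assuming it is executed after training so that final_embeddings, dictionary and reverse_dictionary are still in scope) that looks up the nearest neighbours of an arbitrary word; since final_embeddings is already L2-normalized, a plain dot product gives the cosine similarity directly:

def nearest_words(word, top_k=8):
    # Map the word to its id (id 0 is 'UNK' for out-of-vocabulary words)
    idx = dictionary.get(word, 0)
    # Cosine similarity to every word; rows of final_embeddings have unit norm
    sims = np.dot(final_embeddings, final_embeddings[idx])
    # Sort descending and skip the word itself
    nearest = (-sims).argsort()[1:top_k + 1]
    return [reverse_dictionary[i] for i in nearest]

print(nearest_words('three'))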

text_cnn.py (unfinished)

#coding:utf-8
import tensorflow as tf
import numpy as np
import pickle


class TextCNN(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
    """
    # sequence_length - maximum number of words per sample
    # num_classes     - number of classes
    # vocab_size      - total vocabulary size
    # embedding_size  - length of the word vectors
    # filter_sizes    - convolution kernel sizes (3, 4, 5)
    # num_filters     - number of convolution kernels
    # l2_reg_lambda   - L2 regularization coefficient
    def __init__(self, sequence_length, num_classes, vocab_size,
                 embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):

        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W")
            # [batch_size, sequence_length, embedding_size]
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
            # Add one dimension: [batch_size, sequence_length, embedding_size, 1]
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        # Flatten the pooled output into a 1-D vector
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            # Caution: softmax_cross_entropy_with_logits below expects unnormalized logits,
            # so wrapping the scores in tf.nn.softmax here effectively applies softmax twice.
            self.scores = tf.nn.softmax(tf.nn.xw_plus_b(self.h_drop, W, b, name="scores"))
            self.predictions = tf.argmax(self.scores, 1, name="predictions")

        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
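For reference, a minimal usage sketch showing how the class above is typically instantiated; the hyperparameter values here are hypothetical placeholders, not taken from the original train.py:

cnn = TextCNN(
    sequence_length=56,     # hypothetical: max sentence length after padding
    num_classes=2,          # e.g. positive / negative
    vocab_size=20000,       # hypothetical vocabulary size
    embedding_size=128,
    filter_sizes=[3, 4, 5],
    num_filters=128,
    l2_reg_lambda=0.0)
# The graph is then trained by feeding cnn.input_x, cnn.input_y and cnn.dropout_keep_prob
# and minimizing cnn.loss with an optimizer.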

The error is as follows:
Original stack trace for 'conv-maxpool-5/conv':
File "E:/学习资料/深度学习/深度学习框架Tensorflow学习与应用/资料/第十一周/程序/cnn-text-classification-tf-master/train.py", line 90, in <module>
    l2_reg_lambda=FLAGS.l2_reg_lambda)
File "E:\学习资料\深度学习\深度学习框架Tensorflow学习与应用\资料\第十一周\程序\cnn-text-classification-tf-master\text_cnn.py", line 55, in __init__
    name="conv")
File "D:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\ops\nn_ops.py", line 1953, in conv2d
    name=name)
File "D:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_nn_ops.py", line 1161, in conv2d
    data_format=data_format, dilations=dilations, name=name)
File "D:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 788, in _apply_op_helper
    op_def=op_def)
File "D:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\util\deprecation.py", line 507, in new_func
    return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 3616, in create_op
    op_def=op_def)
File "D:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 2005, in __init__
    self._traceback = tf_stack.extract_stack()

Process finished with exit code 1
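The actual error text above the "Original stack trace" block is not included in this log, so the root cause cannot be pinned down from it alone; the trace only shows where the failing 'conv-maxpool-5/conv' op was created. A minimal debugging sketch (to be placed inside TextCNN.__init__, just before the conv2d call) to inspect the shapes feeding that op:

print("embedded_chars_expanded:", self.embedded_chars_expanded.shape)  # expect (?, sequence_length, embedding_size, 1)
print("filter_shape:", filter_shape)                                   # [filter_size, embedding_size, 1, num_filters]
# With padding="VALID", the conv output height is sequence_length - filter_size + 1,
# so sequence_length must be at least as large as the largest filter size (5 here).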
