Code:

# encoding=utf8
# Import packages
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import math
import os
import random
import zipfile

import numpy as np
from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

Code:

# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'

# Download the dataset
def maybe_download(filename, expected_bytes):
    """Download a file if not present, and make sure it's the right size."""
    if not os.path.exists(filename):
        filename, _ = urllib.request.urlretrieve(url + filename, filename)
    # Get the file's attributes
    statinfo = os.stat(filename)
    # Check that the file size is correct
    if statinfo.st_size == expected_bytes:
        print('Found and verified', filename)
    else:
        print(statinfo.st_size)
        raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?')
    return filename

filename = maybe_download('text8.zip', 31344016)

# Read the data into a list of strings.
def read_data(filename):
    """Extract the first file enclosed in a zip file as a list of words"""
    with zipfile.ZipFile(filename) as f:
        data = tf.compat.as_str(f.read(f.namelist()[0])).split()
    return data

# Word list
words = read_data(filename)
# Data size
print('Data size', len(words))

Output:

Found and verified text8.zip
Data size 17005207
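To make concrete what read_data returns, here is a minimal sketch using a small in-memory zip archive (a toy string, not text8.zip): the first file in the archive is read and split into a list of word strings, just as above.

# A minimal sketch: a toy in-memory zip read the same way read_data reads text8.zip
import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as zf:
    zf.writestr('toy.txt', 'anarchism originated as a term of abuse')

with zipfile.ZipFile(buf) as f:
    toy_words = f.read(f.namelist()[0]).decode('utf-8').split()

print(len(toy_words), toy_words[:4])  # 7 ['anarchism', 'originated', 'as', 'a']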

Code:

# Step 2: Build the dictionary and replace rare words with UNK token.
# Build the dictionary
# Keep only the 50000 most frequent words; every other word is mapped to UNK
vocabulary_size = 50000

def build_dataset(words, vocabulary_size):
    count = [['UNK', -1]]
    # extend appends the elements of a list
    # Counter counts how many times each word occurs
    # most_common returns a top-N list; we keep 50000 words including UNK
    #   c = Counter('abracadabra')
    #   c.most_common()
    #   [('a', 5), ('r', 2), ('b', 2), ('c', 1), ('d', 1)]
    #   c.most_common(3)
    #   [('a', 5), ('r', 2), ('b', 2)]
    # The 49999 most frequent words
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
    # Build the dictionary: word -> id (0-49999)
    # The higher the frequency, the smaller the id
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    # data: the whole corpus encoded as ids
    data = list()
    unk_count = 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    # Record how many words were mapped to UNK
    count[0][1] = unk_count
    # Reverse dictionary: id -> word
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reverse_dictionary

# data: the corpus as a list of word ids
# count: the most frequent words and their counts
# dictionary: word -> id
# reverse_dictionary: id -> word
data, count, dictionary, reverse_dictionary = build_dataset(words, vocabulary_size)
del words  # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])

data_index = 0

Output:

Most common words (+UNK) [['UNK', 418391], ('the', 1061396), ('of', 593677), ('and', 416629), ('one', 411764)]
Sample data [5234, 3081, 12, 6, 195, 2, 3134, 46, 59, 156] ['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first', 'used', 'against']
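The word-to-id mapping is easier to see on a toy corpus. The sketch below (hypothetical words, vocabulary limited to 3 entries) mirrors the logic of build_dataset: Counter.most_common keeps the most frequent words, frequent words get small ids, and everything else is mapped to id 0 (UNK).

# A minimal sketch of build_dataset on a toy corpus (not the text8 data)
import collections

toy_words = ['the', 'quick', 'the', 'fox', 'the', 'quick', 'dog']
vocab_limit = 3  # keep 2 real words plus UNK

count = [['UNK', -1]]
count.extend(collections.Counter(toy_words).most_common(vocab_limit - 1))
dictionary = {word: i for i, (word, _) in enumerate(count)}
data = [dictionary.get(w, 0) for w in toy_words]
count[0][1] = data.count(0)

print(count)       # [['UNK', 2], ('the', 3), ('quick', 2)]
print(dictionary)  # {'UNK': 0, 'the': 1, 'quick': 2}
print(data)        # [1, 2, 1, 0, 1, 2, 0]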

Code:

# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ] = 3
    # Double-ended queue holding the current window
    buffer = collections.deque(maxlen=span)
    # The window [ skip_window target skip_window ] slides over the data:
    #   [0 1 2 3 4 5 6 7 8 9 ...]
    #        t   i
    # Fill the window (span = 3 iterations)
    for _ in range(span):
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    # Generate batch and labels
    for i in range(batch_size // num_skips):
        target = skip_window  # target label at the center of the buffer
        targets_to_avoid = [skip_window]
        # num_skips = 2 iterations: one target word paired with two context words
        for j in range(num_skips):
            while target in targets_to_avoid:
                # May pick the word before or after the target first
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[target]
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    # Backtrack a little bit to avoid skipping words at the end of a batch
    # (after one batch, data_index has moved span positions too far to the right)
    data_index = (data_index + len(data) - span) % len(data)
    return batch, labels

# Print sample data
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
    print(batch[i], reverse_dictionary[batch[i]],
          '->', labels[i, 0], reverse_dictionary[labels[i, 0]])

Output:

3081 originated -> 5234 anarchism
3081 originated -> 12 as
12 as -> 3081 originated
12 as -> 6 a
6 a -> 195 term
6 a -> 12 as
195 term -> 2 of
195 term -> 6 a
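The output above can be reproduced without the deque bookkeeping. The sketch below (fixed left-then-right order for clarity) lists which (center, context) pairs a window with skip_window=1 yields; generate_batch produces the same pairs, only with the two context words of each center in random order.

# A minimal sketch of the pairs a skip_window=1, num_skips=2 batch contains
toy_words = ['anarchism', 'originated', 'as', 'a', 'term', 'of']
skip_window = 1

pairs = []
for i in range(skip_window, len(toy_words) - skip_window):
    for j in range(-skip_window, skip_window + 1):
        if j != 0:
            pairs.append((toy_words[i], toy_words[i + j]))

print(pairs)
# [('originated', 'anarchism'), ('originated', 'as'), ('as', 'originated'), ('as', 'a'), ...]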

Code:

# Step 4: Build and train a skip-gram model.
batch_size = 128
# Dimension of the word vectors
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 1       # How many words to consider left and right.
num_skips = 2         # How many times to reuse an input to generate a label.

# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16     # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
# Sample 16 integers from 0-99 without replacement
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
# Number of negative samples
num_sampled = 64    # Number of negative examples to sample.

graph = tf.Graph()
with graph.as_default():
    # Input data.
    train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # Ops and variables pinned to the CPU because of missing GPU implementation
    # with tf.device('/cpu:0'):
    # Word embeddings
    # Look up embeddings for inputs.
    embeddings = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    # embedding_lookup(params, ids) returns the rows of params selected by ids,
    # e.g. ids=[1, 7, 4] returns a tensor made of rows 1, 7 and 4 of params.
    # Extract the embeddings of the words in this batch
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)

    # Construct the variables for the noise-contrastive estimation (NCE) loss
    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size],
                            stddev=1.0 / math.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    loss = tf.reduce_mean(
        tf.nn.nce_loss(weights=nce_weights,
                       biases=nce_biases,
                       labels=train_labels,
                       inputs=embed,
                       num_sampled=num_sampled,
                       num_classes=vocabulary_size))

    # Construct the SGD optimizer using a learning rate of 1.0.
    optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

    # Compute the cosine similarity between minibatch examples and all embeddings.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
    normalized_embeddings = embeddings / norm
    # Pick some common words to test cosine similarity
    valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
    # valid_size == 16
    # [16, 128] x [128, 50000] = [16, 50000]
    similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)

    # Add variable initializer.
    init = tf.global_variables_initializer()

Code:

# Step 5: Begin training.
num_steps = 100001

final_embeddings = []
with tf.Session(graph=graph) as session:
    # We must initialize all variables before we use them.
    init.run()
    print("Initialized")

    average_loss = 0
    for step in xrange(num_steps):
        # Get one batch of targets and the corresponding labels, both as word ids
        batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)
        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}

        # We perform one update step by evaluating the optimizer op (including it
        # in the list of returned values for session.run())
        _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += loss_val

        # Report the average loss over the last 2000 steps
        if step % 2000 == 0:
            if step > 0:
                average_loss /= 2000
            # The average loss is an estimate of the loss over the last 2000 batches.
            print("Average loss at step ", step, ": ", average_loss)
            average_loss = 0

        # Note that this is expensive (~20% slowdown if computed every 500 steps)
        if step % 20000 == 0:
            sim = similarity.eval()
            # For each validation word, find the words with the highest cosine similarity
            for i in xrange(valid_size):
                # Look up the word by its id
                valid_word = reverse_dictionary[valid_examples[i]]
                top_k = 8  # number of nearest neighbors
                # Sort in descending order, skip the word itself, take the top_k entries
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                log_str = "Nearest to %s:" % valid_word
                for k in xrange(top_k):
                    close_word = reverse_dictionary[nearest[k]]
                    log_str = "%s %s," % (log_str, close_word)
                print(log_str)

    # The word vectors obtained at the end of training
    final_embeddings = normalized_embeddings.eval()

Output:

Initialized
Average loss at step  0 :  302.383636475
Nearest to one: mitochondrial, succumbed, heligoland, ump, slowed, forges, exquisite, hardly,
Nearest to of: conformal, holbach, binitarianism, woodland, globe, coeur, artery, legacy,
Nearest to three: rooks, notorious, geelong, macrinus, airstrip, macy, fled, route,
Nearest to war: kadyrov, burbank, groin, geoffrey, machines, announcers, vascular, accidents,
Nearest to in: omari, scourge, esr, ethica, osman, sex, typhoid, panacea,
Nearest to are: ilium, ordain, reproductive, progesterone, calais, alphabetically, direct, porsche,
Nearest to were: moreau, biochemistry, innsbruck, slum, potencies, uncertainty, capacitive, autumn,
Nearest to the: singularities, differentials, hurriedly, kiribati, neverwinter, kabbalists, owe, jonathan,
Nearest to new: tamar, synthesized, midrashim, klux, tian, veronica, cervix, thermopylae,
Nearest to american: quarterly, psychopathic, motala, nudity, indentured, cooking, melancholia, groningen,
Nearest to they: dct, frye, theirs, athenaeus, obscura, andromache, rial, austere,
Nearest to its: shuffled, socratic, masterminded, post, among, painter, rifles, petty,
Nearest to seven: cautions, expounded, dm, doings, containing, afro, isle, cadre,
Nearest to which: risen, inevitable, toe, fis, geniuses, jim, dictatorship, insufficient,
Nearest to up: colliding, crushes, pritchard, mirza, communicate, soundhole, heir, jail,
Nearest to has: brenner, bulges, scandalous, doorman, hermeticism, escap, decrees, salvator,
Average loss at step  2000 :  113.390164988
Average loss at step  4000 :  52.6187440126
Average loss at step  6000 :  33.286626014
Average loss at step  8000 :  23.7020297694
Average loss at step  10000 :  17.7729295442
Average loss at step  12000 :  14.1117788121
Average loss at step  14000 :  11.771555624
Average loss at step  16000 :  9.96325901306
Average loss at step  18000 :  8.48758016729
Average loss at step  20000 :  8.13723055959
Nearest to one: two, operatorname, eight, six, nine, three, four, five,
Nearest to of: and, in, for, dasyprocta, with, between, nine, s,
Nearest to three: eight, four, two, nine, zero, seven, six, operatorname,
Nearest to war: vocals, feast, hundreds, machines, aoc, voluntarily, geoffrey, coimbra,
Nearest to in: and, of, on, for, at, from, with, by,
Nearest to are: were, is, was, ilium, zero, tiny, by, would,
Nearest to were: are, was, is, capable, and, transportation, arkham, modestly,
Nearest to the: a, dasyprocta, one, his, operatorname, this, their, circ,
Nearest to new: tamar, random, synthesized, veronica, tian, of, mathbf, readable,
Nearest to american: quarterly, s, bckgr, feminist, and, d, subsistence, helps,
Nearest to they: there, frye, he, anglicans, theirs, creating, elite, often,
Nearest to its: the, his, dasyprocta, eichmann, circ, a, en, ancestors,
Nearest to seven: nine, eight, four, zero, six, three, two, five,
Nearest to which: that, and, this, agincourt, tissue, the, dictatorship, toe,
Nearest to up: module, his, blacks, partners, alien, agouti, adding, austin,
Nearest to has: had, is, was, have, scandalous, decrees, kubitzki, marlon,
Average loss at step  22000 :  7.06678244722
Average loss at step  24000 :  6.85123083913
Average loss at step  26000 :  6.81250965095
Average loss at step  28000 :  6.34419031024
Average loss at step  30000 :  5.9245999701
Average loss at step  32000 :  5.93673320675
Average loss at step  34000 :  5.70171094501
Average loss at step  36000 :  5.74694340336
Average loss at step  38000 :  5.50405748427
Average loss at step  40000 :  5.25322429836
Nearest to one: two, eight, four, six, three, seven, zero, operatorname,
Nearest to of: zero, and, in, dasyprocta, for, agouti, recitative, eight,
Nearest to three: four, six, eight, five, seven, two, zero, one,
Nearest to war: feast, brass, machines, vocals, automobile, voluntarily, vascular, aoc,
Nearest to in: zero, and, at, on, dasyprocta, from, during, of,
Nearest to are: were, is, zero, was, progesterone, abet, have, calais,
Nearest to were: are, was, is, be, zero, have, had, by,
Nearest to the: its, dasyprocta, his, their, zero, agouti, operatorname, circ,
Nearest to new: tamar, veronica, synthesized, random, midrashim, ancestor, dasyprocta, elephant,
Nearest to american: and, quarterly, abakan, zero, indentured, bckgr, feminist, vma,
Nearest to they: there, he, it, we, not, deport, i, discard,
Nearest to its: the, their, his, dasyprocta, a, circ, some, zero,
Nearest to seven: six, eight, four, five, nine, zero, three, two,
Nearest to which: that, this, also, it, and, tissue, agincourt, one,
Nearest to up: module, mirza, recitative, enabling, partners, abandoning, cyanobacteria, mg,
Nearest to has: had, was, is, have, scandalous, amalthea, decrees, aba,
Average loss at step  42000 :  5.36403241181
Average loss at step  44000 :  5.27934718394
Average loss at step  46000 :  5.25050886309
Average loss at step  48000 :  5.24700605953
Average loss at step  50000 :  4.9966404134
Average loss at step  52000 :  5.03326895094
Average loss at step  54000 :  5.17822365785
Average loss at step  56000 :  5.04268380868
Average loss at step  58000 :  5.06483457124
Average loss at step  60000 :  4.93359541976
Nearest to one: two, four, three, six, five, eight, operatorname, seven,
Nearest to of: and, for, in, nine, dasyprocta, eight, including, six,
Nearest to three: four, five, two, six, eight, seven, operatorname, one,
Nearest to war: machines, boreal, feast, brass, automobile, rebellious, kadyrov, denigrating,
Nearest to in: from, during, at, dasyprocta, on, and, microsite, kapoor,
Nearest to are: were, is, have, was, zero, michelob, other, be,
Nearest to were: are, was, had, have, is, be, by, zero,
Nearest to the: its, dasyprocta, their, circ, recitative, his, a, operatorname,
Nearest to new: tamar, random, veronica, synthesized, member, xb, midrashim, tian,
Nearest to american: and, abakan, british, quarterly, abercrombie, indentured, feminist, bckgr,
Nearest to they: he, there, we, it, you, i, not, who,
Nearest to its: their, his, the, dasyprocta, bckgr, some, circ, dddddd,
Nearest to seven: eight, six, five, nine, four, three, zero, operatorname,
Nearest to which: this, that, also, it, but, ursus, one, wct,
Nearest to up: module, them, mirza, recitative, partners, enabling, cyanobacteria, abandoning,
Nearest to has: had, have, was, is, ursus, wct, decrees, amalthea,
Average loss at step  62000 :  4.99505268264
Average loss at step  64000 :  4.82697634709
Average loss at step  66000 :  4.59925288892
Average loss at step  68000 :  4.98079027224
Average loss at step  70000 :  4.89412822211
Average loss at step  72000 :  4.74675208092
Average loss at step  74000 :  4.80340922415
Average loss at step  76000 :  4.72690085912
Average loss at step  78000 :  4.79733606535
Average loss at step  80000 :  4.80540977299
Nearest to one: seven, six, two, five, four, operatorname, ursus, three,
Nearest to of: mico, in, dasyprocta, including, abet, kapoor, abakan, original,
Nearest to three: six, four, two, five, seven, eight, operatorname, lymphoma,
Nearest to war: machines, boreal, automobile, feast, kadyrov, brass, rebellious, geophysical,
Nearest to in: during, at, dasyprocta, from, ursus, and, of, under,
Nearest to are: were, is, have, be, was, michelob, britney, these,
Nearest to were: are, was, have, had, be, by, been, ursus,
Nearest to the: their, dasyprocta, a, kapoor, its, operatorname, iit, circ,
Nearest to new: tamar, random, member, veronica, synthesized, xb, dasyprocta, electrical,
Nearest to american: british, abakan, abercrombie, mico, indentured, bckgr, nunnery, indian,
Nearest to they: there, he, we, you, it, who, she, not,
Nearest to its: their, his, the, dasyprocta, dddddd, bckgr, her, iit,
Nearest to seven: six, eight, five, four, nine, three, one, two,
Nearest to which: that, this, also, but, it, ursus, what, and,
Nearest to up: filmfour, them, module, mirza, him, out, abandoning, recitative,
Nearest to has: had, have, was, is, ursus, decrees, wct, amalthea,
Average loss at step  82000 :  4.75804372787
Average loss at step  84000 :  4.75755859768
Average loss at step  86000 :  4.77840362
Average loss at step  88000 :  4.74728782678
Average loss at step  90000 :  4.73435067379
Average loss at step  92000 :  4.66841691899
Average loss at step  94000 :  4.72742706275
Average loss at step  96000 :  4.69911255908
Average loss at step  98000 :  4.60135727322
Average loss at step  100000 :  4.70121149051
Nearest to one: two, six, seven, five, four, operatorname, three, eight,
Nearest to of: mico, in, dasyprocta, including, cebus, globemaster, kapoor, same,
Nearest to three: four, five, six, seven, two, eight, operatorname, lymphoma,
Nearest to war: boreal, automobile, brass, feast, machines, kadyrov, hanna, cotswold,
Nearest to in: during, at, on, dasyprocta, microsite, from, under, within,
Nearest to are: were, is, have, these, be, michelob, britney, while,
Nearest to were: are, was, have, had, be, is, by, been,
Nearest to the: dasyprocta, their, a, its, agouti, kapoor, iit, this,
Nearest to new: tamar, random, veronica, member, synthesized, xb, mishnayot, dasyprocta,
Nearest to american: british, abakan, indian, abercrombie, mico, and, quarterly, bckgr,
Nearest to they: he, there, we, you, it, she, not, who,
Nearest to its: their, his, the, dasyprocta, her, elwes, bckgr, some,
Nearest to seven: eight, six, five, four, nine, three, zero, two,
Nearest to which: that, this, but, also, what, it, ursus, and,
Nearest to up: them, filmfour, out, him, module, abandoning, recitative, mirza,
Nearest to has: had, have, was, is, ursus, stationary, wct, globemaster,
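The training loop itself is just the standard TF 1.x placeholder / feed_dict pattern: build a graph with placeholders, then fetch several ops in one session.run call. A minimal, self-contained sketch of that pattern (toy graph and hypothetical tensor names, unrelated to the word2vec model):

# A minimal sketch of session.run([op_a, op_b], feed_dict=...) on a toy graph
import numpy as np
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    toy_inputs = tf.placeholder(tf.int32, shape=[4])  # plays the role of train_inputs
    toy_doubled = toy_inputs * 2                       # stand-ins for optimizer / loss
    toy_sum = tf.reduce_sum(toy_inputs)

with tf.Session(graph=g) as sess:
    feed = {toy_inputs: np.array([1, 2, 3, 4], dtype=np.int32)}
    doubled_val, sum_val = sess.run([toy_doubled, toy_sum], feed_dict=feed)
    print(doubled_val, sum_val)  # [2 4 6 8] 10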

Code:

# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
    assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
    # Set the figure size
    plt.figure(figsize=(15, 15))  # in inches
    for i, label in enumerate(labels):
        x, y = low_dim_embs[i, :]
        plt.scatter(x, y)
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.savefig(filename)

try:
    from sklearn.manifold import TSNE
    import matplotlib.pyplot as plt

    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
    # mac: method='exact'
    # Plot 500 points
    plot_only = 500
    low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
    labels = [reverse_dictionary[i] for i in xrange(plot_only)]
    plot_with_labels(low_dim_embs, labels)
except ImportError:
    print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")

Output: (a t-SNE scatter plot of the 500 most frequent word embeddings, saved to tsne.png; figure not reproduced here)
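As a sanity check on the visualization step, the sketch below (random data instead of final_embeddings, same TSNE arguments as above, which target the older scikit-learn API with n_iter) shows the shape that TSNE.fit_transform returns, which is exactly what plot_with_labels scatters:

# A minimal sketch of the TSNE output shape on a stand-in matrix
import numpy as np
from sklearn.manifold import TSNE

fake_embeddings = np.random.rand(50, 128).astype(np.float32)  # stand-in for final_embeddings[:plot_only]
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
low_dim = tsne.fit_transform(fake_embeddings)
print(low_dim.shape)  # (50, 2): one (x, y) point per word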

Code:

with tf.Session() as session:
    valid_word = "one"
    valid_examples = dictionary[valid_word]
    valid_dataset = tf.constant([valid_examples], dtype=tf.int32)
    valid_embeddings = tf.nn.embedding_lookup(final_embeddings, valid_dataset)
    similarity = tf.matmul(valid_embeddings, final_embeddings, transpose_b=True)
    sim = similarity.eval()
    top_k = 8  # number of nearest neighbors
    # Sort by descending similarity and skip the word itself
    nearest = (-sim[0]).argsort()[1:top_k + 1]
    log_str = "Nearest to %s:" % valid_word
    for k in xrange(top_k):
        close_word = reverse_dictionary[nearest[k]]
        log_str = "%s %s," % (log_str, close_word)
    print(log_str)

Output:

Nearest to one: two, six, seven, five, four, operatorname, three, eight,
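Since final_embeddings is already a plain NumPy array with unit-norm rows, the same query can be done without a TensorFlow session. A minimal sketch (nearest_words is a hypothetical helper name):

# A minimal NumPy-only version of the nearest-neighbour lookup above
import numpy as np

def nearest_words(word, embeddings, dictionary, reverse_dictionary, top_k=8):
    # embeddings: the L2-normalized final_embeddings array, shape (vocabulary_size, embedding_size)
    vec = embeddings[dictionary[word]]
    sim = embeddings.dot(vec)                # cosine similarity against every word
    nearest = (-sim).argsort()[1:top_k + 1]  # descending order, skip the word itself
    return [reverse_dictionary[i] for i in nearest]

# e.g. nearest_words('one', final_embeddings, dictionary, reverse_dictionary)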
