2016 Google machine translation (GNMT). The code follows https://gluon-nlp.mxnet.io/examples/machine_translation/gnmt.html
My understanding:
Encoder-decoder framework: the encoder starts with bidirectional RNN layers and stacks several unidirectional RNN layers on top; the hidden states of the corresponding layers are used to initialize the decoder. Note that for the bidirectional encoder layers, only the backward RNN's hidden state is passed to the decoder for initialization. The encoder and decoder interact through attention: the decoder input, after the embedding layer, is concatenated with the encoder-side context and used as the query, which interacts with the encoder hidden states h_t (serving as both key and value). A toy sketch of this attention step is given right below.
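To make the query/key/value interaction concrete, here is a minimal dot-product attention sketch in NumPy. It is only an illustration under my own simplifications (the helper name toy_attention is made up); the real attention cell is built inside nmt.gnmt.get_gnmt_encoder_decoder further down.

import numpy as np

def toy_attention(query, enc_states):
    # query: (hidden,) vector from the decoder side
    # enc_states: (src_len, hidden) encoder hidden states h_t, acting as both keys and values
    scores = enc_states.dot(query)            # similarity between the query and every key
    weights = np.exp(scores - scores.max())
    weights = weights / weights.sum()         # softmax over the source positions
    context = weights.dot(enc_states)         # weighted sum of the values
    return context, weights

context, weights = toy_attention(np.random.rand(4), np.random.rand(6, 4))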
Beam search: at each decoding step only the top beam-size best candidates are kept; a rough scoring sketch is given right below.
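As a rough sketch of how candidates are ranked (assuming the GNMT-style length penalty that the nlp.model.BeamSearchScorer(alpha=lp_alpha, K=lp_k) used below is meant to implement), each hypothesis is scored by its length-normalized sum of log-probabilities:

def length_penalty(length, alpha=1.0, K=5):
    # GNMT-style penalty: grows with hypothesis length so long outputs are not over-punished
    return ((K + length) / (K + 1.0)) ** alpha

def beam_score(log_prob_sum, length, alpha=1.0, K=5):
    # higher is better; dividing by the penalty normalizes for length
    return log_prob_sum / length_penalty(length, alpha, K)

# keep only the top beam_size hypotheses at every decoding step
candidates = [(-3.2, 6), (-2.9, 4), (-5.0, 9)]   # (sum of log-probs, hypothesis length), toy numbers
ranked = sorted(candidates, key=lambda c: beam_score(*c), reverse=True)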
BLEU is used to score the translation results; a tiny usage sketch follows. There is also some input preprocessing and a per-module analysis that I am skipping for now due to time constraints, and my understanding may contain errors.
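A tiny usage sketch of the BLEU scoring, mirroring the nmt.bleu.compute_bleu call made at the end of the training loop below; the toy token lists here are made up.

import nmt

references = [[['this', 'is', 'a', 'small', 'test']]]   # one set of tokenized reference sentences
hypotheses = [['this', 'is', 'a', 'small', 'test']]     # tokenized system outputs, in the same order
bleu, _, _, _, _ = nmt.bleu.compute_bleu(references, hypotheses)
print('BLEU = {:.2f}'.format(bleu * 100))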
import warnings
warnings.filterwarnings('ignore')
import argparse
import time
import random
import os
import io
import logging
import numpy as np
import mxnet as mx
from mxnet import gluon
import gluonnlp as nlp
import nmt
from gluonnlp import Vocab
from mxnet.gluon.data import ArrayDataset
from mxnet.gluon.data import SimpleDataset
from gluonnlp.data.dataset import TextLineDataset

nlp.utils.check_version('0.7.0')
os.environ["CUDA_VISIBLE_DEVICES"] = "3"

np.random.seed(100)
random.seed(100)
mx.random.seed(10000)
ctx = mx.gpu(0)

num_hidden = 512
num_layers = 3
num_bi_layers = 2
dropout = 0.2
src_max_len, tgt_max_len = 50, 100

# parameters for training
batch_size, test_batch_size = 128, 32
num_buckets = 5
epochs = 50
clip = 5
lr = 0.001
lr_update_factor = 0.5
log_interval = 10
save_dir = 'gnmt_en_cn'
beam_size = 10
lp_alpha = 1.0
lp_k = 5

nmt.utils.logging_config(save_dir)
CACHE_PATH = os.path.realpath(os.path.join(os.path.realpath(__file__), '..', 'en_cn_cached'))
tokenizer = nlp.data.BERTBasicTokenizer(lower=True)
def cache_dataset(dataset, prefix):
    """Cache the processed dataset into an npz file.

    Writes the (source, target) arrays held by the SimpleDataset to an .npz cache.

    Parameters
    ----------
    dataset : gluon.data.SimpleDataset
    prefix : str
    """
    if not os.path.exists(CACHE_PATH):
        os.makedirs(CACHE_PATH)
    # Flatten the source sequences e[0] and the target sequences e[1] into 1-D arrays
    src_data = np.concatenate([e[0] for e in dataset])
    tgt_data = np.concatenate([e[1] for e in dataset])
    # Record the end position of every sentence via cumulative sums, so sentences can be sliced back out
    src_cumlen = np.cumsum([0] + [len(e[0]) for e in dataset])
    tgt_cumlen = np.cumsum([0] + [len(e[1]) for e in dataset])
    np.savez(os.path.join(CACHE_PATH, prefix + '.npz'),
             src_data=src_data, tgt_data=tgt_data,
             src_cumlen=src_cumlen, tgt_cumlen=tgt_cumlen)

def load_cached_dataset(prefix):
    """Load the cached dataset.

    :param prefix:
    :return:
    """
    cached_file_path = os.path.join(CACHE_PATH, prefix + '.npz')
    if os.path.exists(cached_file_path):
        print('Load cached data from {}'.format(cached_file_path))
        npz_data = np.load(cached_file_path)
        src_data, tgt_data, src_cumlen, tgt_cumlen = [npz_data[n] for n in
                                                      ['src_data', 'tgt_data', 'src_cumlen', 'tgt_cumlen']]
        src_data = np.array([src_data[low:high] for low, high in zip(src_cumlen[:-1], src_cumlen[1:])])
        tgt_data = np.array([tgt_data[low:high] for low, high in zip(tgt_cumlen[:-1], tgt_cumlen[1:])])
        return gluon.data.ArrayDataset(np.array(src_data), np.array(tgt_data))
    else:
        return None

class TrainValDataTransform(object):
    """Transform the machine translation dataset.

    Clip the source and the target sentences to the maximum length. For the source sentence,
    append the EOS token. For the target sentence, prepend BOS and append EOS.

    Parameters
    ----------
    src_vocab : Vocab
    tgt_vocab : Vocab
    src_max_len : int
    tgt_max_len : int
    """
    def __init__(self, src_vocab, tgt_vocab, src_max_len, tgt_max_len):
        # On initialization of the class, we set the class variables
        self._src_vocab = src_vocab
        self._tgt_vocab = tgt_vocab
        self._src_max_len = src_max_len
        self._tgt_max_len = tgt_max_len

    def __call__(self, src, tgt):
        # On actual calling of the class, we perform the clipping then the appending of the EOS and BOS tokens.
        # Tokenization: the tokenizer splits the sentence (for Chinese, list(src) or another segmenter also works);
        # [:self._src_max_len] clips the sentence length. The vocab lookup maps tokens to word indices.
        if self._src_max_len > 0:
            src_sentence = self._src_vocab[tokenizer(src)[:self._src_max_len]]
        else:
            src_sentence = self._src_vocab[tokenizer(src)]
        if self._tgt_max_len > 0:
            tgt_sentence = self._tgt_vocab[tokenizer(tgt)[:self._tgt_max_len]]
        else:
            tgt_sentence = self._tgt_vocab[tokenizer(tgt)]
        # Add the special-token indices: EOS for the source, BOS/EOS around the target
        src_sentence.append(self._src_vocab[self._src_vocab.eos_token])
        tgt_sentence.insert(0, self._tgt_vocab[self._tgt_vocab.bos_token])
        tgt_sentence.append(self._tgt_vocab[self._tgt_vocab.eos_token])
        src_npy = np.array(src_sentence, dtype=np.int32)
        tgt_npy = np.array(tgt_sentence, dtype=np.int32)
        return src_npy, tgt_npy

def process_dataset(dataset, src_vocab, tgt_vocab, src_max_len=-1, tgt_max_len=-1):
    start = time.time()
    # lazy=False applies TrainValDataTransform to the whole dataset eagerly, in one pass
    dataset_processed = dataset.transform(TrainValDataTransform(src_vocab, tgt_vocab,
                                                                src_max_len,
                                                                tgt_max_len), lazy=False)
    end = time.time()
    print('Processing time spent: {}'.format(end - start))
    return dataset_processed

en, zh = [], []
with io.open(os.path.expanduser('~/training/news-commentary-v12.zh-en.en'), 'rb') as f:
    for line in f:
        en.append(line.strip().decode('utf8'))
with io.open(os.path.expanduser('~/training/news-commentary-v12.zh-en.zh'), 'rb') as f:
    for line in f:
        zh.append(line.strip().decode('utf8'))

from sklearn.model_selection import train_test_split
train_X, test_X, train_y, test_y = train_test_split(en, zh, test_size=0.1, random_state=0)
train_X, val_X, train_y, val_y = train_test_split(train_X, train_y, test_size=0.2, random_state=0)

data_dict = {'train': [train_X, train_y], 'val': [val_X, val_y], 'test': [test_X, test_y]}

def get_data(segment):
    return data_dict[segment]

class _TranslationDataset(ArrayDataset):
    def __init__(self, segment):
        segment = [segment]
        self.tokenizer = tokenizer
        src_corpus = []
        tgt_corpus = []
        for ele_segment in segment:
            [src_cor, tgt_cor] = get_data(ele_segment)
            src_corpus.extend(SimpleDataset(src_cor))
            tgt_corpus.extend(SimpleDataset(tgt_cor))
        # Filter 0-length src/tgt sentences
        src_lines = []
        tgt_lines = []
        for src_line, tgt_line in zip(list(src_corpus), list(tgt_corpus)):
            if len(src_line) > 0 and len(tgt_line) > 0:
                src_lines.append(src_line)
                tgt_lines.append(tgt_line)
        super(_TranslationDataset, self).__init__(src_lines, tgt_lines)

    @property
    def src_vocab(self):
        src_corpus = get_data('train')[0]
        data = [word for sentence in src_corpus for word in self.tokenizer(sentence)]
        counter = nlp.data.count_tokens(data)
        vocab = nlp.Vocab(counter)
        return vocab

    @property
    def tgt_vocab(self):
        tgt_corpus = get_data('train')[1]
        data = [word for sentence in tgt_corpus for word in self.tokenizer(sentence)]
        counter = nlp.data.count_tokens(data)
        vocab = nlp.Vocab(counter)
        return vocab

def load_translation_data(src_lang='en', tgt_lang='cn'):
    """Load the translation dataset (train / validation / test splits).

    Parameters
    ----------
    src_lang : str, default 'en'
    tgt_lang : str, default 'cn'

    Returns
    -------
    data_train_processed : Dataset
        The preprocessed training sentence pairs
    data_val_processed : Dataset
        The preprocessed validation sentence pairs
    data_test_processed : Dataset
        The preprocessed test sentence pairs
    val_tgt_sentences : list
        The target sentences in the validation set
    test_tgt_sentences : list
        The target sentences in the test set
    src_vocab : Vocab
        Vocabulary of the source language
    tgt_vocab : Vocab
        Vocabulary of the target language
    """
    common_prefix = 'nc_v12_{}_{}'.format(src_lang, tgt_lang)
    # Load the three datasets from files
    data_train = _TranslationDataset('train')
    data_val = _TranslationDataset('val')
    data_test = _TranslationDataset('test')
    # Build the source and target vocabularies while loading the data
    src_vocab, tgt_vocab = data_train.src_vocab, data_train.tgt_vocab
    data_train_processed = load_cached_dataset(common_prefix + '_train')
    # Check whether each split has already been processed; prefer cached data,
    # otherwise process the raw data and cache it.
    if not data_train_processed:
        data_train_processed = process_dataset(data_train, src_vocab, tgt_vocab,
                                               src_max_len, tgt_max_len)
        cache_dataset(data_train_processed, common_prefix + '_train')
    data_val_processed = load_cached_dataset(common_prefix + '_val')
    if not data_val_processed:
        data_val_processed = process_dataset(data_val, src_vocab, tgt_vocab)
        cache_dataset(data_val_processed, common_prefix + '_val')
    data_test_processed = load_cached_dataset(common_prefix + '_test')
    if not data_test_processed:
        data_test_processed = process_dataset(data_test, src_vocab, tgt_vocab)
        cache_dataset(data_test_processed, common_prefix + '_test')
    # Pull out the target sentences of the validation and test sets as token lists:
    # [[s0_w0, s0_w1, ...], [s1_w0, s1_w1, ...], ...]
    fetch_tgt_sentence = lambda src, tgt: tokenizer(tgt)
    val_tgt_sentences = list(data_val.transform(fetch_tgt_sentence))
    test_tgt_sentences = list(data_test.transform(fetch_tgt_sentence))
    # Return all of the necessary pieces we can extract from the data for training our model
    return data_train_processed, data_val_processed, data_test_processed, \
           val_tgt_sentences, test_tgt_sentences, src_vocab, tgt_vocab

def get_data_lengths(dataset):
    # The list() call iterates over the whole dataset, so the result is
    # [(sent0_src_length, sent0_tgt_length), (sent1_src_length, sent1_tgt_length), ...]
    return list(dataset.transform(lambda srg, tgt: (len(srg), len(tgt))))

data_train, data_val, data_test, val_tgt_sentences, test_tgt_sentences, src_vocab, tgt_vocab \
    = load_translation_data()
for i in data_train:
    print(i)
    print(src_vocab.to_tokens(i[0].tolist()))
    print(tgt_vocab.to_tokens(i[1].tolist()))
    break
print(train_X[0])
print(train_y[0])
data_train_lengths = get_data_lengths(data_train)
data_val_lengths = get_data_lengths(data_val)
data_test_lengths = get_data_lengths(data_test)

with io.open(os.path.join(save_dir, 'val_gt.txt'), 'w', encoding='utf-8') as of:
    for ele in val_tgt_sentences:
        of.write(' '.join(ele) + '\n')
with io.open(os.path.join(save_dir, 'test_gt.txt'), 'w', encoding='utf-8') as of:
    for ele in test_tgt_sentences:
        of.write(' '.join(ele) + '\n')

# For the training set, attach the source sentence, target sentence and their lengths
data_train = data_train.transform(lambda src, tgt: (src, tgt, len(src), len(tgt)), lazy=False)
# For the validation and test sets, also attach the lengths plus a sentence index i
data_val = gluon.data.SimpleDataset([(ele[0], ele[1], len(ele[0]), len(ele[1]), i)
                                     for i, ele in enumerate(data_val)])
data_test = gluon.data.SimpleDataset([(ele[0], ele[1], len(ele[0]), len(ele[1]), i)
                                      for i, ele in enumerate(data_test)])

train_batchify_fn = nlp.data.batchify.Tuple(nlp.data.batchify.Pad(pad_val=0),
                                            nlp.data.batchify.Pad(pad_val=0),
                                            nlp.data.batchify.Stack(dtype='float32'),
                                            nlp.data.batchify.Stack(dtype='float32'))
test_batchify_fn = nlp.data.batchify.Tuple(nlp.data.batchify.Pad(pad_val=0),
                                           nlp.data.batchify.Pad(pad_val=0),
                                           nlp.data.batchify.Stack(dtype='float32'),
                                           nlp.data.batchify.Stack(dtype='float32'),
                                           nlp.data.batchify.Stack())

bucket_scheme = nlp.data.ExpWidthBucket(bucket_len_step=1.2)
train_batch_sampler = nlp.data.FixedBucketSampler(lengths=data_train_lengths,
                                                  batch_size=batch_size,
                                                  num_buckets=num_buckets,
                                                  shuffle=True,
                                                  bucket_scheme=bucket_scheme)
logging.info('Train Batch Sampler:\n{}'.format(train_batch_sampler.stats()))
val_batch_sampler = nlp.data.FixedBucketSampler(lengths=data_val_lengths,
                                                batch_size=test_batch_size,
                                                num_buckets=num_buckets,
                                                shuffle=False)
logging.info('Valid Batch Sampler:\n{}'.format(val_batch_sampler.stats()))
test_batch_sampler = nlp.data.FixedBucketSampler(lengths=data_test_lengths,
                                                 batch_size=test_batch_size,
                                                 num_buckets=num_buckets,
                                                 shuffle=False)
logging.info('Test Batch Sampler:\n{}'.format(test_batch_sampler.stats()))

train_data_loader = gluon.data.DataLoader(data_train,
                                          batch_sampler=train_batch_sampler,
                                          batchify_fn=train_batchify_fn,
                                          num_workers=0)
val_data_loader = gluon.data.DataLoader(data_val,
                                        batch_sampler=val_batch_sampler,
                                        batchify_fn=test_batchify_fn,
                                        num_workers=0)
test_data_loader = gluon.data.DataLoader(data_test,
                                         batch_sampler=test_batch_sampler,
                                         batchify_fn=test_batchify_fn,
                                         num_workers=0)

####################################################
# Build the model
####################################################
encoder, decoder, one_step_ahead_decoder = nmt.gnmt.get_gnmt_encoder_decoder(
    hidden_size=num_hidden, dropout=dropout, num_layers=num_layers,
    num_bi_layers=num_bi_layers)
model = nlp.model.translation.NMTModel(src_vocab=src_vocab, tgt_vocab=tgt_vocab, encoder=encoder,
                                       decoder=decoder, one_step_ahead_decoder=one_step_ahead_decoder,
                                       embed_size=num_hidden, prefix='gnmt_')
model.initialize(init=mx.init.Uniform(0.1), ctx=ctx)
static_alloc = True
#model.hybridize(static_alloc=static_alloc)
logging.info(model)

# Due to the paddings, we need to mask out the losses corresponding to padding tokens.
loss_function = nlp.loss.MaskedSoftmaxCELoss()
#loss_function.hybridize(static_alloc=static_alloc)

translator = nmt.translation.BeamSearchTranslator(model=model, beam_size=beam_size,
                                                  scorer=nlp.model.BeamSearchScorer(alpha=lp_alpha,
                                                                                    K=lp_k),
                                                  max_length=tgt_max_len + 100)
logging.info('Use beam_size={}, alpha={}, K={}'.format(beam_size, lp_alpha, lp_k))

def evaluate(data_loader):
    """Evaluate given the data loader

    Parameters
    ----------
    data_loader : gluon.data.DataLoader

    Returns
    -------
    avg_loss : float
        Average loss
    real_translation_out : list of list of str
        The translation output
    """
    translation_out = []
    all_inst_ids = []
    avg_loss_denom = 0
    avg_loss = 0.0
    for _, (src_seq, tgt_seq, src_valid_length, tgt_valid_length, inst_ids) \
            in enumerate(data_loader):
        src_seq = src_seq.as_in_context(ctx)
        tgt_seq = tgt_seq.as_in_context(ctx)
        src_valid_length = src_valid_length.as_in_context(ctx)
        tgt_valid_length = tgt_valid_length.as_in_context(ctx)
        # Calculate Loss
        out, _ = model(src_seq, tgt_seq[:, :-1], src_valid_length, tgt_valid_length - 1)
        loss = loss_function(out, tgt_seq[:, 1:], tgt_valid_length - 1).mean().asscalar()  # average per-token loss
        all_inst_ids.extend(inst_ids.asnumpy().astype(np.int32).tolist())  # inst_ids: sentence ids in the validation or test set
        avg_loss += loss * (tgt_seq.shape[1] - 1)  # accumulate loss weighted by the padded target length
        avg_loss_denom += (tgt_seq.shape[1] - 1)   # accumulate the padded target lengths (denominator of the average)
        # Translate the sequences and score them
        samples, _, sample_valid_length = \
            translator.translate(src_seq=src_seq, src_valid_length=src_valid_length)
        max_score_sample = samples[:, 0, :].asnumpy()
        sample_valid_length = sample_valid_length[:, 0].asnumpy()
        # Iterate through the tokens and stitch the tokens together for the sentence
        for i in range(max_score_sample.shape[0]):
            translation_out.append([tgt_vocab.idx_to_token[ele] for ele in
                                    max_score_sample[i][1:(sample_valid_length[i] - 1)]])
    # Calculate the average loss and initialize a None-filled translation list
    avg_loss = avg_loss / avg_loss_denom
    real_translation_out = [None for _ in range(len(all_inst_ids))]
    # Combine all the words/tokens into a sentence for the final translation
    for ind, sentence in zip(all_inst_ids, translation_out):
        real_translation_out[ind] = sentence
    # Return the loss and the translation
    return avg_loss, real_translation_out

def write_sentences(sentences, file_path):
    with io.open(file_path, 'w', encoding='utf-8') as of:
        for sent in sentences:
            of.write(' '.join(sent) + '\n')

trainer = gluon.Trainer(model.collect_params(), 'adam', {'learning_rate': lr})
best_valid_bleu = 0.0

# Run through each epoch
for epoch_id in range(epochs):
    log_avg_loss = 0
    log_avg_gnorm = 0
    log_wc = 0
    log_start_time = time.time()
    # Iterate through each batch
    for batch_id, (src_seq, tgt_seq, src_valid_length, tgt_valid_length) \
            in enumerate(train_data_loader):
        src_seq = src_seq.as_in_context(ctx)
        tgt_seq = tgt_seq.as_in_context(ctx)
        src_valid_length = src_valid_length.as_in_context(ctx)
        tgt_valid_length = tgt_valid_length.as_in_context(ctx)
        # Compute gradients and losses
        with mx.autograd.record():
            out, _ = model(src_seq, tgt_seq[:, :-1], src_valid_length, tgt_valid_length - 1)
            loss = loss_function(out, tgt_seq[:, 1:], tgt_valid_length - 1).mean()
            loss = loss * (tgt_seq.shape[1] - 1) / (tgt_valid_length - 1).mean()
            loss.backward()
        grads = [p.grad(ctx) for p in model.collect_params().values()]
        gnorm = gluon.utils.clip_global_norm(grads, clip)
        trainer.step(1)
        src_wc = src_valid_length.sum().asscalar()
        tgt_wc = (tgt_valid_length - 1).sum().asscalar()
        step_loss = loss.asscalar()
        log_avg_loss += step_loss
        log_avg_gnorm += gnorm
        log_wc += src_wc + tgt_wc
        if (batch_id + 1) % log_interval == 0:
            wps = log_wc / (time.time() - log_start_time)
            logging.info('[Epoch {} Batch {}/{}] loss={:.4f}, ppl={:.4f}, gnorm={:.4f}, '
                         'throughput={:.2f}K wps, wc={:.2f}K'
                         .format(epoch_id, batch_id + 1, len(train_data_loader),
                                 log_avg_loss / log_interval,
                                 np.exp(log_avg_loss / log_interval),
                                 log_avg_gnorm / log_interval,
                                 wps / 1000, log_wc / 1000))
            log_start_time = time.time()
            log_avg_loss = 0
            log_avg_gnorm = 0
            log_wc = 0
    # Evaluate the losses on validation and test datasets and find the corresponding BLEU score and log it
    valid_loss, valid_translation_out = evaluate(val_data_loader)
    valid_bleu_score, _, _, _, _ = nmt.bleu.compute_bleu([val_tgt_sentences], valid_translation_out)
    logging.info('[Epoch {}] valid Loss={:.4f}, valid ppl={:.4f}, valid bleu={:.2f}'
                 .format(epoch_id, valid_loss, np.exp(valid_loss), valid_bleu_score * 100))
    test_loss, test_translation_out = evaluate(test_data_loader)
    test_bleu_score, _, _, _, _ = nmt.bleu.compute_bleu([test_tgt_sentences], test_translation_out)
    logging.info('[Epoch {}] test Loss={:.4f}, test ppl={:.4f}, test bleu={:.2f}'
                 .format(epoch_id, test_loss, np.exp(test_loss), test_bleu_score * 100))
    # Output the sentences we predicted on the validation and test datasets
    write_sentences(valid_translation_out,
                    os.path.join(save_dir, 'epoch{:d}_valid_out.txt').format(epoch_id))
    write_sentences(test_translation_out,
                    os.path.join(save_dir, 'epoch{:d}_test_out.txt').format(epoch_id))
    # Save the model if the BLEU score is better than the previous best
    if valid_bleu_score > best_valid_bleu:
        best_valid_bleu = valid_bleu_score
        save_path = os.path.join(save_dir, 'valid_best.params')
        logging.info('Save best parameters to {}'.format(save_path))
        model.save_parameters(save_path)
    # Update the learning rate based on the number of epochs that have passed
    if epoch_id + 1 >= (epochs * 2) // 3:
        new_lr = trainer.learning_rate * lr_update_factor
        logging.info('Learning rate change to {}'.format(new_lr))
        trainer.set_learning_rate(new_lr)
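After training, one way to try the model on a single sentence (a minimal sketch of my own, assuming the objects defined above are still in scope, that training has produced valid_best.params, and using a made-up example sentence):

model.load_parameters(os.path.join(save_dir, 'valid_best.params'), ctx=ctx)
sentence = 'I love machine translation .'
tokens = tokenizer(sentence)[:src_max_len]
token_ids = src_vocab[tokens] + [src_vocab[src_vocab.eos_token]]     # same preprocessing as training: tokenize, index, append EOS
src_seq = mx.nd.array([token_ids], ctx=ctx)
src_valid_length = mx.nd.array([len(token_ids)], ctx=ctx)
samples, _, sample_valid_length = translator.translate(src_seq=src_seq,
                                                       src_valid_length=src_valid_length)
best = samples[0, 0, :].asnumpy().astype(np.int32)                   # best beam of the first (only) sentence
valid_len = int(sample_valid_length[0, 0].asscalar())
print(' '.join(tgt_vocab.to_tokens(best[1:valid_len - 1].tolist())))  # strip BOS/EOS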

Results:
