Chinese-English Translation with an Attention Mechanism

[KEY: > input, = target, < output]

il est en train de peindre un tableau .
= he is painting a picture .
< he is painting a picture .

pourquoi ne pas essayer ce vin delicieux ?
= why not try that delicious wine ?
< why not try that delicious wine ?

elle n est pas poete mais romanciere .
= she is not a poet but a novelist .
< she not not a poet but a novelist .

Import the required modules and data

from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import jieba
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import matplotlib.font_manager as fm

myfont = fm.FontProperties(fname='/Users/maqi/opt/anaconda3/lib/python3.8/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSans.ttf')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
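
Note that DejaVuSans contains no CJK glyphs, so the attention plots at the end of this post emit "Glyph ... missing from current font" warnings for the Chinese tick labels. A minimal sketch of pointing matplotlib at a CJK-capable font instead (the path below is hypothetical; substitute whatever CJK font is installed locally):

# Hedged alternative: use a font that actually contains CJK glyphs.
# The path is a placeholder; e.g. SimHei on Windows, PingFang/STHeiti on macOS,
# or Noto Sans CJK on Linux.
cjk_font_path = '/System/Library/Fonts/STHeiti Light.ttc'  # hypothetical path
myfont = fm.FontProperties(fname=cjk_font_path)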

Preprocess the data

SOS_token = 0
EOS_token = 1

class Lang:
    def __init__(self, name):
        self.name = name
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2  # Count SOS and EOS

    def addSentence(self, sentence):
        for word in sentence.split(' '):
            self.addWord(word)

    def addSentence_cn(self, sentence):
        for word in list(jieba.cut(sentence)):
            self.addWord(word)

    def addWord(self, word):
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            self.word2count[word] += 1
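
A small illustrative check of the vocabulary builder (the exact indices depend on jieba's segmentation):

# Illustrative: build a tiny Chinese vocabulary with jieba segmentation.
demo_lang = Lang('cmn')
demo_lang.addSentence_cn('我很幸福。')
print(demo_lang.n_words)      # 2 special tokens + one entry per segmented word
print(demo_lang.index2word)   # e.g. {0: 'SOS', 1: 'EOS', 2: '我', 3: '很', 4: '幸福', 5: '。'}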
# Convert Unicode strings to plain ASCII to simplify processing
def unicodeToAscii(s):
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn'
    )

# Lowercase and trim the English text, and separate sentence-final punctuation
def normalizeString(s):
    s = unicodeToAscii(s.lower().strip())
    s = re.sub(r"([.!?])", r" \1", s)
    #s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
    return s
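
For instance (illustrative values), accents are folded to ASCII and punctuation is space-separated:

print(unicodeToAscii('délicieux'))        # delicieux
print(normalizeString('He is a Poet!'))   # he is a poet !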
def readLangs(lang1, lang2, reverse=False):
    print("Reading lines...")

    # Read the file and split it into lines
    lines = open('eng-cmn/%s-%s.txt' % (lang1, lang2), encoding='utf-8').\
        read().strip().split('\n')

    # Split every line into a pair and normalize it
    pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]

    # Optionally reverse the pair order, e.g. [English, Chinese] -> [Chinese, English]
    if reverse:
        pairs = [list(reversed(p)) for p in pairs]
        input_lang = Lang(lang2)
        output_lang = Lang(lang1)
    else:
        input_lang = Lang(lang1)
        output_lang = Lang(lang2)

    return input_lang, output_lang, pairs
# To keep training manageable, only a subset of the data is used
MAX_LENGTH = 20

eng_prefixes = (
    "i am ", "i m ",
    "he is", "he s ",
    "she is", "she s ",
    "you are", "you re ",
    "we are", "we re ",
    "they are", "they re "
)

def filterPair(p):
    return len(p[0].split(' ')) < MAX_LENGTH and \
        len(p[1].split(' ')) < MAX_LENGTH and \
        p[1].startswith(eng_prefixes)

def filterPairs(pairs):
    return [pair for pair in pairs if filterPair(pair)]
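
A quick illustrative check of the filter (the first pair appears in the data shown below; the second is a hypothetical pair whose English side matches none of the prefixes):

print(filterPair(['我冷。', 'i am cold .']))           # True: short and starts with "i am "
print(filterPair(['今天很冷。', 'it is cold today .']))  # False: no matching English prefix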
def prepareData(lang1, lang2, reverse=False):
    input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
    print("Read %s sentence pairs" % len(pairs))
    pairs = filterPairs(pairs)
    print("Trimmed to %s sentence pairs" % len(pairs))
    print("Counting words...")
    for pair in pairs:
        input_lang.addSentence_cn(pair[0])
        output_lang.addSentence(pair[1])
    print("Counted words:")
    print(input_lang.name, input_lang.n_words)
    print(output_lang.name, output_lang.n_words)
    return input_lang, output_lang, pairs
input_lang, output_lang, pairs = prepareData('eng', 'cmn', True)
print(random.choice(pairs))
Reading lines...
Building prefix dict from the default dictionary ...
Loading model from cache /var/folders/7t/wvjcfn5575g892qb2nqbd9kw0000gn/T/jieba.cache
Read 21007 sentence pairs
Trimmed to 640 sentence pairs
Counting words...
Loading model cost 0.571 seconds.
Prefix dict has been built succesfully.
Counted words:
cmn 1063
eng 808
['他很穷。', 'he is poor .']
pairs[:3]
[['我冷。', 'i am cold .'], ['我沒事。', 'i am okay .'], ['我生病了。', 'i am sick .']]

Build the model

class EncoderRNN(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)

    def forward(self, input, hidden):
        embedded = self.embedding(input).view(1, 1, -1)
        output = embedded
        output, hidden = self.gru(output, hidden)
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
class DecoderRNN(nn.Module):
    def __init__(self, hidden_size, output_size):
        super(DecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        output = self.embedding(input).view(1, 1, -1)
        output = F.relu(output)
        output, hidden = self.gru(output, hidden)
        output = self.softmax(self.out(output[0]))
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
class AttnDecoderRNN(nn.Module):
    def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dropout_p = dropout_p
        self.max_length = max_length

        self.embedding = nn.Embedding(self.output_size, self.hidden_size)
        self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_outputs):
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)

        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))

        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        output = F.relu(output)
        output, hidden = self.gru(output, hidden)

        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)
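
A brief, hedged shape check of one encoder step followed by one attention-decoder step (the hidden size and the input token index are arbitrary values chosen only for this sketch, not the training configuration used below):

# Illustrative shape check with a small hidden size (assumed values).
_hidden_size = 16
_enc = EncoderRNN(input_lang.n_words, _hidden_size).to(device)
_dec = AttnDecoderRNN(_hidden_size, output_lang.n_words, dropout_p=0.1).to(device)

_enc_hidden = _enc.initHidden()
_enc_outputs = torch.zeros(MAX_LENGTH, _hidden_size, device=device)
_word = torch.tensor([[2]], device=device)        # any valid input-vocab index
_enc_out, _enc_hidden = _enc(_word, _enc_hidden)
_enc_outputs[0] = _enc_out[0, 0]

_dec_in = torch.tensor([[SOS_token]], device=device)
_dec_out, _dec_hidden, _attn = _dec(_dec_in, _enc_hidden, _enc_outputs)
print(_dec_out.shape)   # (1, output_lang.n_words): log-probabilities over the English vocab
print(_attn.shape)      # (1, MAX_LENGTH): one attention weight per encoder slot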
def indexesFromSentence(lang, sentence):
    return [lang.word2index[word] for word in sentence.split(' ')]

def indexesFromSentence_cn(lang, sentence):
    return [lang.word2index[word] for word in list(jieba.cut(sentence))]

def tensorFromSentence(lang, sentence):
    indexes = indexesFromSentence(lang, sentence)
    indexes.append(EOS_token)
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)

def tensorFromSentence_cn(lang, sentence):
    indexes = indexesFromSentence_cn(lang, sentence)
    indexes.append(EOS_token)
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)

def tensorsFromPair(pair):
    input_tensor = tensorFromSentence_cn(input_lang, pair[0])
    target_tensor = tensorFromSentence(output_lang, pair[1])
    return (input_tensor, target_tensor)
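
A small illustrative check of the tensor conversion, using one of the pairs shown earlier (the printed shapes depend on jieba's segmentation):

# Illustrative: each sentence becomes a column vector of word indices ending in EOS.
_inp, _tgt = tensorsFromPair(['我冷。', 'i am cold .'])
print(_inp.shape, _tgt.shape)   # e.g. torch.Size([4, 1]) torch.Size([5, 1])
print(_tgt.squeeze().tolist())  # indices of 'i', 'am', 'cold', '.', EOS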

Train the model

teacher_forcing_ratio = 0.5

def train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
    encoder_hidden = encoder.initHidden()

    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()

    input_length = input_tensor.size(0)
    target_length = target_tensor.size(0)

    encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

    loss = 0

    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(
            input_tensor[ei], encoder_hidden)
        encoder_outputs[ei] = encoder_output[0, 0]

    decoder_input = torch.tensor([[SOS_token]], device=device)

    decoder_hidden = encoder_hidden

    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False

    if use_teacher_forcing:
        # Teacher forcing: feed the target as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            loss += criterion(decoder_output, target_tensor[di])
            decoder_input = target_tensor[di]  # Teacher forcing
    else:
        # Without teacher forcing: use the decoder's own predictions as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.topk(1)
            decoder_input = topi.squeeze().detach()  # detach from history as input

            loss += criterion(decoder_output, target_tensor[di])
            if decoder_input.item() == EOS_token:
                break

    loss.backward()

    encoder_optimizer.step()
    decoder_optimizer.step()

    return loss.item() / target_length
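
As a quick illustration, a single call to train on one random pair might look like the sketch below (the small hidden size is an arbitrary choice for this snippet; the actual run further down uses trainIters with hidden_size = 256):

# Illustrative single optimization step on one random pair (tiny hidden size).
demo_encoder = EncoderRNN(input_lang.n_words, 32).to(device)
demo_decoder = AttnDecoderRNN(32, output_lang.n_words).to(device)
demo_enc_opt = optim.SGD(demo_encoder.parameters(), lr=0.01)
demo_dec_opt = optim.SGD(demo_decoder.parameters(), lr=0.01)
demo_inp, demo_tgt = tensorsFromPair(random.choice(pairs))
print(train(demo_inp, demo_tgt, demo_encoder, demo_decoder,
            demo_enc_opt, demo_dec_opt, nn.NLLLoss()))  # average per-token loss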
import time
import math

def asMinutes(s):
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)

def timeSince(since, percent):
    now = time.time()
    s = now - since
    es = s / (percent)
    rs = es - s
    return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
def trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):
    start = time.time()
    plot_losses = []
    print_loss_total = 0
    plot_loss_total = 0

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    training_pairs = [tensorsFromPair(random.choice(pairs))
                      for i in range(n_iters)]
    criterion = nn.NLLLoss()

    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_tensor = training_pair[0]
        target_tensor = training_pair[1]

        loss = train(input_tensor, target_tensor, encoder,
                     decoder, encoder_optimizer, decoder_optimizer, criterion)
        print_loss_total += loss
        plot_loss_total += loss

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
                                         iter, iter / n_iters * 100, print_loss_avg))

        if iter % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    showPlot(plot_losses)
import matplotlib.pyplot as plt
%matplotlib inline
#plt.switch_backend('agg')
import matplotlib.ticker as ticker
import numpy as np

def showPlot(points):
    plt.figure()
    fig, ax = plt.subplots()
    # this locator puts ticks at regular intervals
    loc = ticker.MultipleLocator(base=0.2)
    ax.yaxis.set_major_locator(loc)
    plt.plot(points)
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
    with torch.no_grad():
        input_tensor = tensorFromSentence_cn(input_lang, sentence)
        input_length = input_tensor.size()[0]
        encoder_hidden = encoder.initHidden()

        encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei],
                                                     encoder_hidden)
            encoder_outputs[ei] += encoder_output[0, 0]

        decoder_input = torch.tensor([[SOS_token]], device=device)  # SOS

        decoder_hidden = encoder_hidden

        decoded_words = []
        decoder_attentions = torch.zeros(max_length, max_length)

        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            decoder_attentions[di] = decoder_attention.data
            topv, topi = decoder_output.data.topk(1)
            if topi.item() == EOS_token:
                decoded_words.append('<EOS>')
                break
            else:
                decoded_words.append(output_lang.index2word[topi.item()])

            decoder_input = topi.squeeze().detach()

        return decoded_words, decoder_attentions[:di + 1]
def evaluateRandomly(encoder, decoder, n=10):
    for i in range(n):
        pair = random.choice(pairs)
        print('>', pair[0])
        print('=', pair[1])
        output_words, attentions = evaluate(encoder, decoder, pair[0])
        output_sentence = ' '.join(output_words)
        print('<', output_sentence)
        print('')
hidden_size = 256
encoder1 = EncoderRNN(input_lang.n_words, hidden_size).to(device)
attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1).to(device)

trainIters(encoder1, attn_decoder1, 75000, print_every=5000)
1m 54s (- 26m 36s) (5000 6%) 2.6394
3m 43s (- 24m 10s) (10000 13%) 1.0916
5m 34s (- 22m 19s) (15000 20%) 0.2057
7m 29s (- 20m 36s) (20000 26%) 0.0445
9m 27s (- 18m 54s) (25000 33%) 0.0253
11m 25s (- 17m 7s) (30000 40%) 0.0202
13m 20s (- 15m 14s) (35000 46%) 0.0175
15m 17s (- 13m 23s) (40000 53%) 0.0167
17m 15s (- 11m 30s) (45000 60%) 0.0141
19m 13s (- 9m 36s) (50000 66%) 0.0137
21m 12s (- 7m 42s) (55000 73%) 0.0110
23m 12s (- 5m 48s) (60000 80%) 0.0116
25m 12s (- 3m 52s) (65000 86%) 0.0125
27m 11s (- 1m 56s) (70000 93%) 0.0091
29m 11s (- 0m 0s) (75000 100%) 0.0095
<Figure size 432x288 with 0 Axes>

Randomly sample pairs to test the model

evaluateRandomly(encoder1, attn_decoder1)
> 今天下午我會外出。
= i am going out this afternoon .
< i am going out this afternoon . <EOS>

> 我相信他是無辜的。
= i am convinced that he is innocent .
< i am convinced that he is innocent . <EOS>

> 他在自己房里玩。
= he is playing in his room .
< he is playing in his room . <EOS>

> 我來自四國。
= i am from shikoku .
< i am from shikoku . <EOS>

> 她戴著一頂帽子。
= she is wearing a hat .
< she is wearing a hat . <EOS>

> 您非常勇敢。
= you are very courageous .
< you are very brave . <EOS>

> 他有几分像学者。
= he is something of a scholar .
< he is something of a scholar . <EOS>

> 你真傻。
= you are so stupid .
< you are so stupid . <EOS>

> 他年紀夠大可以瞭解它。
= he is old enough to understand it .
< he is old enough to understand it . <EOS>

> 你別小看了他。
= you are selling him short .
< you are selling him short . <EOS>
def evaluate_randomly():
    pair = random.choice(pairs)
    # Evaluate with the trained encoder/decoder defined above
    output_words, decoder_attn = evaluate(encoder1, attn_decoder1, pair[0])
    output_sentence = ' '.join(output_words)
    print('>', pair[0])
    print('=', pair[1])
    print('<', output_sentence)
    print('')
def evaluateRandomly(encoder, decoder, n=20):
    for i in range(n):
        pair = random.choice(pairs)
        print('>', pair[0])
        print('=', pair[1])
        output_words, attentions = evaluate(encoder, decoder, pair[0])
        output_sentence = ' '.join(output_words)
        print('<', output_sentence)
        print('')

Visualize the attention

def showAttention(input_sentence, output_words, attentions):
    # Set up figure with colorbar
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(attentions.numpy(), cmap='bone')
    fig.colorbar(cax)

    # Set up axes
    ax.set_xticklabels([''] + list(jieba.cut(input_sentence)) +
                       ['<EOS>'], rotation=90, fontproperties=myfont)
    ax.set_yticklabels([''] + output_words)

    # Show label at every tick
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))

    plt.show()
def evaluateAndShowAttention(input_sentence):
    output_words, attentions = evaluate(encoder1, attn_decoder1, input_sentence)
    print('input =', input_sentence)
    print('output =', ' '.join(output_words))
    showAttention(input_sentence, output_words, attentions)

evaluateAndShowAttention("我很幸福。")
evaluateAndShowAttention("我们在严肃地谈论你的未来。")
evaluateAndShowAttention("我在家。")
evaluateAndShowAttention("我们在严肃地谈论你的未来。")
input = 我很幸福。
output = i am very happy . <EOS>

input = 我们在严肃地谈论你的未来。
output = we are having a serious talk about your future . <EOS>

input = 我在家。
output = i am at home . <EOS>

input = 我们在严肃地谈论你的未来。
output = we are having a serious talk about your future . <EOS>

(Each call also produced an attention heatmap. matplotlib additionally emitted "UserWarning: FixedFormatter should only be used together with FixedLocator" and repeated "RuntimeWarning: Glyph ... missing from current font" warnings, because the DejaVuSans font configured above has no CJK glyphs for the Chinese tick labels.)

