1. Reading the data

import os
data_dir = './data/atis/'
train_dir = os.path.join(data_dir,'train')
test_dir = os.path.join(data_dir,'test')
dev_dir = os.path.join(data_dir,'dev')
# Paths to the text (seq.in) and label (seq.out) files of each split
train_text_path = os.path.join(train_dir,'seq.in')
train_label_path = os.path.join(train_dir,'seq.out')
dev_text_path = os.path.join(dev_dir,'seq.in')
dev_label_path = os.path.join(dev_dir,'seq.out')
test_text_path = os.path.join(test_dir,'seq.in')
test_label_path = os.path.join(test_dir,'seq.out')
# Read the files: each line becomes a list of whitespace-separated tokens
def read_text(file_name):
    with open(file_name, 'r', encoding = 'utf-8') as f:
        lines = [line.strip().split() for line in f]
    return lines
train_text = read_text(train_text_path)
train_label = read_text(train_label_path)
dev_text = read_text(dev_text_path)
dev_label = read_text(dev_label_path)
test_text = read_text(test_text_path)
test_label = read_text(test_label_path)
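
Before building the fastNLP datasets it is worth confirming that each utterance in seq.in lines up with its tag sequence in seq.out. A minimal sanity check along these lines (not part of the original post, just a quick sketch using the variables defined above):

# Every token sequence must have a tag sequence of the same length
assert len(train_text) == len(train_label)
print(train_text[0])
print(train_label[0])
assert len(train_text[0]) == len(train_label[0])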

2. Building the datasets

from fastNLP import DataSet,Instance
from fastNLP.io import DataBundle
# Pack parallel token/tag sequences into a fastNLP DataSet
def from_array_to_dataset(texts, labels):
    ds = DataSet()
    for text, label in zip(texts, labels):
        assert len(text) == len(label)
        ins = Instance(text = text, target = label)
        ds.append(ins)
    ds.add_seq_len('text')
    return ds
train_ds = from_array_to_dataset(train_text,train_label)
test_ds = from_array_to_dataset(test_text,test_label)
dev_ds = from_array_to_dataset(dev_text,dev_label)
data_bundle = DataBundle()
data_bundle.set_dataset(name = 'train',dataset = train_ds)
data_bundle.set_dataset(name = 'test',dataset = test_ds)
data_bundle.set_dataset(name = 'dev',dataset = dev_ds)
print(data_bundle.datasets)

{'train': +--------------------------+--------------------------+---------+
| text                     | target                   | seq_len |
+--------------------------+--------------------------+---------+
| ['i', 'want', 'to', '... | ['O', 'O', 'O', 'O', ... | 10      |
| ['round', 'trip', 'fa... | ['B-round_trip', 'I-r... | 33      |
| ['show', 'me', 'the',... | ['O', 'O', 'O', 'O', ... | 10      |
| ['what', 'are', 'the'... | ['O', 'O', 'O', 'O', ... | 19      |
| ['which', 'airlines',... | ['O', 'O', 'O', 'O', ... | 11      |
| ...                      | ...                      | ...     |
+--------------------------+--------------------------+---------+, 'test': +--------------------------+--------------------------+---------+
| text                     | target                   | seq_len |
+--------------------------+--------------------------+---------+
| ['i', 'would', 'like'... | ['O', 'O', 'O', 'O', ... | 19      |
| ['on', 'april', 'firs... | ['O', 'B-depart_date.... | 16      |
| ['on', 'april', 'firs... | ['O', 'B-depart_date.... | 13      |
| ['i', 'would', 'like'... | ['O', 'O', 'O', 'O', ... | 16      |
| ['monday', 'morning',... | ['B-depart_date.day_n... | 11      |
| ...                      | ...                      | ...     |
+--------------------------+--------------------------+---------+, 'dev': +--------------------------+--------------------------+---------+
| text                     | target                   | seq_len |
+--------------------------+--------------------------+---------+
| ['i', 'want', 'to', '... | ['O', 'O', 'O', 'O', ... | 18      |
| ['show', 'me', 'all',... | ['O', 'O', 'O', 'B-ro... | 11      |
| ['i', 'would', 'like'... | ['O', 'O', 'O', 'O', ... | 16      |
| ['what', 'are', 'the'... | ['O', 'O', 'O', 'B-cl... | 16      |
| ["i'm", 'flying', 'fr... | ['O', 'O', 'O', 'B-fr... | 8       |
| ...                      | ...                      | ...     |
+--------------------------+--------------------------+---------+}
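
A single sample can also be inspected directly, since a fastNLP DataSet supports integer indexing. A quick sketch (not in the original run; the output simply mirrors one row of the table above):

# Look at one instance and the split sizes
print(train_ds[0])
print(len(train_ds), len(dev_ds), len(test_ds))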
3. Building the vocabularies

from fastNLP import Vocabulary
vocab = Vocabulary()
vocab.from_dataset(data_bundle.get_dataset('train'), field_name = 'text',
                   no_create_entry_dataset = [data_bundle.get_dataset('dev'), data_bundle.get_dataset('test')])
vocab.index_dataset(data_bundle.get_dataset('train'),data_bundle.get_dataset('test'),data_bundle.get_dataset('dev'),field_name = 'text')
target_vocab = Vocabulary(unknown=None)
target_vocab.from_dataset(data_bundle.get_dataset('train'),data_bundle.get_dataset('test'),data_bundle.get_dataset('dev'),field_name = 'target')
target_vocab.index_dataset(data_bundle.get_dataset('train'),data_bundle.get_dataset('dev'),data_bundle.get_dataset('test'),field_name = 'target')
data_bundle.set_vocab(field_name = 'text',vocab = vocab)
data_bundle.set_vocab(field_name = 'target',vocab = target_vocab)
data_bundle.set_target('target')
data_bundle.set_target('seq_len')
data_bundle.set_input('text')
data_bundle.set_input('seq_len')
data_bundle.set_input('target')
data_bundle.rename_field('text','words')

In total 3 datasets:
train has 4478 instances.
test has 893 instances.
dev has 500 instances.
In total 2 vocabs:
target has 128 entries.
words has 952 entries.
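
To double-check the indexing, both vocabularies can be queried directly. A small sketch (not from the original post; the exact ids depend on the run and fastNLP version):

# Vocabulary sizes and a couple of lookups
print(len(data_bundle.get_vocab('words')), len(data_bundle.get_vocab('target')))
print(data_bundle.get_vocab('words').to_index('flight'))   # id of a frequent ATIS token
print(data_bundle.get_vocab('target').to_word(0))          # tag mapped to index 0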
4. Training the model

from fastNLP import SpanFPreRecMetric, Trainer, LossInForward
from fastNLP.embeddings import BertEmbedding
from fastNLP.models import BiLSTMCRF   # used below but missing from the original imports
from torch.optim import Adam
import torch

# BERT embeddings over the word vocabulary, feeding a BiLSTM-CRF tagger
embed = BertEmbedding(vocab = data_bundle.get_vocab('words'), model_dir_or_name = 'en')
model = BiLSTMCRF(embed = embed, num_classes = len(data_bundle.get_vocab('target')), num_layers = 1,
                  hidden_size = 200, dropout = 0.5, target_vocab = data_bundle.get_vocab('target'))
metric = SpanFPreRecMetric(tag_vocab = data_bundle.get_vocab('target'))
optimizer = Adam(model.parameters(), lr = 2e-5)
loss = LossInForward()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
trainer = Trainer(data_bundle.get_dataset('train'), model, loss = loss, optimizer = optimizer, batch_size = 8,
                  dev_data = data_bundle.get_dataset('dev'), metrics = metric, device = device)
trainer.train()

loading vocabulary file C:\Users\xiaoh\.fastNLP\embedding\bert-base-cased\vocab.txt
Load pre-trained BERT parameters from file C:\Users\xiaoh\.fastNLP\embedding\bert-base-cased\pytorch_model.bin.
Bert Model will return 1 layers (layer-0 is embedding result): [-1]
input fields after batch(if batch size is 2):
target: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2, 33])
seq_len: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2])
words: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2, 33])
target fields after batch(if batch size is 2):
target: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2, 33])
seq_len: (1)type:torch.Tensor (2)dtype:torch.int64, (3)shape:torch.Size([2])

training epochs started 2021-12-28-15-27-52-544078
Evaluate data in 1.47 seconds!
Evaluation on dev at Epoch 1/10. Step:560/5600:
SpanFPreRecMetric: f=0.76697, pre=0.770366, rec=0.763604

Evaluate data in 1.7 seconds!
Evaluation on dev at Epoch 2/10. Step:1120/5600:
SpanFPreRecMetric: f=0.888173, pre=0.888694, rec=0.887654

Evaluate data in 1.4 seconds!
Evaluation on dev at Epoch 3/10. Step:1680/5600:
SpanFPreRecMetric: f=0.923122, pre=0.922313, rec=0.923932

Evaluate data in 1.52 seconds!
Evaluation on dev at Epoch 4/10. Step:2240/5600:
SpanFPreRecMetric: f=0.943231, pre=0.938586, rec=0.947923

Evaluate data in 1.55 seconds!
Evaluation on dev at Epoch 5/10. Step:2800/5600:
SpanFPreRecMetric: f=0.954241, pre=0.950639, rec=0.95787

Evaluate data in 1.63 seconds!
Evaluation on dev at Epoch 6/10. Step:3360/5600:
SpanFPreRecMetric: f=0.966384, pre=0.965537, rec=0.967232

Evaluate data in 1.59 seconds!
Evaluation on dev at Epoch 7/10. Step:3920/5600:
SpanFPreRecMetric: f=0.972499, pre=0.972499, rec=0.972499

Evaluate data in 1.5 seconds!
Evaluation on dev at Epoch 8/10. Step:4480/5600:
SpanFPreRecMetric: f=0.972579, pre=0.96975, rec=0.975424

Evaluate data in 1.52 seconds!
Evaluation on dev at Epoch 9/10. Step:5040/5600:
SpanFPreRecMetric: f=0.973415, pre=0.971995, rec=0.974839

Evaluate data in 1.51 seconds!
Evaluation on dev at Epoch 10/10. Step:5600/5600:
SpanFPreRecMetric: f=0.977531, pre=0.974971, rec=0.980105

Reloaded the best model.

In Epoch:10/Step:5600, got best dev performance:
SpanFPreRecMetric: f=0.977531, pre=0.974971, rec=0.980105
{'best_eval': {'SpanFPreRecMetric': {'f': 0.977531,
                                     'pre': 0.974971,
                                     'rec': 0.980105}},
 'best_epoch': 10,
 'best_step': 5600,
 'seconds': 505.73}
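
With training finished, the Trainer has already reloaded the best checkpoint, so the model can also be scored on the held-out test split. This step is not part of the run above; it is a small sketch using fastNLP's Tester with the same span metric:

from fastNLP import Tester

# Evaluate the reloaded best model on the test set
tester = Tester(data_bundle.get_dataset('test'), model, metrics = metric, batch_size = 8, device = device)
tester.test()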
