Commit f016f766 authored by zhoubaohang's avatar zhoubaohang
Browse files

upload the project

parent 63f6f87a
# E2EMERN
The source code for ACL 2021 paper
[Title] An End-to-End Progressive Multi-Task Learning Framework for Medical Named Entity Recognition and Normalization
[Authors] Baohang Zhou, Xiangrui Cai, Ying Zhang, Xiaojie Yuan
[ACL 2021 paper (Waiting for publication)]()
## Preparation
1. Clone the repo to your local.
3. Install Python version 3.6.5.
3. Download the pre-trained Bio-BERT models from this [link](https://github.com/dmis-lab/biobert). We use the *BioBERT-Large* in our experiments.
4. Open the shell or cmd in this repo folder. Run this command to install necessary packages.
```cmd
pip install -r requirements.txt
```
## Experiments
1. For Linux systems, we have shell scripts to run the training procedures. You can run the following command:
```cmd
./train.ncbi.sh or ./train.bc5cdr.sh
```
2. You can also input the following command to train the model. There are different choices for some parameters, shown in square brackets. The meanings of these parameters are shown in the following table.
| Parameters | Value | Description|
| ---- | ---- | ---- |
| epoch | int | Training times |
| LAMBDA | float | hyper-parameter in loss function |
| MU | float | hyper-parameter in loss function |
| bert_path | str | folder path of pre-trained BERT model |
| save_pred_result | bool | save the prediction result |
```cmd
python main.py \
--seed 11 \
--epoch 12 \
--LAMBDA 0.125 \
--MU 0.1 \
--dataset [ncbi, cdr] \
--bert_path ./biobert_large \
--save_pred_result \
```
3. After training the model, the test result is saved in the "results" folder. And the weights of the model are saved in the "weights" folder.
4. We also provide the weights of the model to reimplement the results in our
paper. You can download the [weights file](https://pan.baidu.com/s/15DLSb2fvgbOiiv0V0ADFNg) (the extraction code **1234**) and put them into the "weights" folder. Then run the following command:
```cmd
./eval.ncbi.sh or ./eval.bc5cdr.sh
```
\ No newline at end of file
from keras import backend as K
from keras.layers import InputSpec
from keras.engine.topology import Layer
from keras import initializers as initializers, regularizers, constraints
class AttentionWeightedAverage(Layer):
    """
    Keras layer computing a learned weighted average over timesteps, following:
    Zhou et al., "Attention-Based Bidirectional Long Short-Term Memory Networks
    for Relation Classification", ACL 2016.
    http://www.aclweb.org/anthology/P16-2034
    """

    def __init__(self, return_attention=False, **kwargs):
        # single attention projection vector, uniformly initialized
        self.init = initializers.get('uniform')
        self.supports_masking = True
        self.return_attention = return_attention
        super(AttentionWeightedAverage, self).__init__(**kwargs)

    def build(self, input_shape):
        # expects input of shape (batch, timesteps, features)
        self.input_spec = [InputSpec(ndim=3)]
        assert len(input_shape) == 3
        # one weight per feature: projects each timestep to a scalar logit
        self.w = self.add_weight(shape=(input_shape[2], 1),
                                 name='{}_w'.format(self.name),
                                 initializer=self.init)
        # self.trainable_weights = [self.w]
        super(AttentionWeightedAverage, self).build(input_shape)

    def call(self, h, mask=None):
        shape = K.shape(h)
        batch, steps = shape[0], shape[1]
        # scalar logit per timestep: w^T h
        logits = K.reshape(K.dot(h, self.w), (batch, steps))
        # numerically-stable softmax over the time axis
        alpha = K.exp(logits - K.max(logits, axis=-1, keepdims=True))
        if mask is not None:
            # masked timesteps contribute zero weight
            alpha = alpha * K.cast(mask, K.floatx())
        alpha = alpha / K.sum(alpha, axis=1, keepdims=True)
        # attention-weighted sum over timesteps, then tanh squash
        weighted = K.sum(h * K.expand_dims(alpha), axis=1)
        squashed = K.tanh(weighted)
        if self.return_attention:
            return [squashed, alpha]
        return squashed

    def get_output_shape_for(self, input_shape):
        # Keras 1 API name; delegate to the Keras 2 method
        return self.compute_output_shape(input_shape)

    def compute_output_shape(self, input_shape):
        feature_dim = input_shape[2]
        if self.return_attention:
            return [(input_shape[0], feature_dim),
                    (input_shape[0], input_shape[1])]
        return (input_shape[0], feature_dim)

    def compute_mask(self, input, input_mask=None):
        # the time axis is reduced away, so downstream layers get no mask
        return [None] * len(input_mask) if isinstance(input_mask, list) else None
\ No newline at end of file
"""
This script applies to IOB2 or IOBES tagging scheme.
If you are using a different scheme, please convert to IOB2 or IOBES.
IOB2:
- B = begin,
- I = inside but not the first,
- O = outside
e.g.
John lives in New York City .
B-PER O O B-LOC I-LOC I-LOC O
IOBES:
- B = begin,
- E = end,
- S = singleton,
- I = inside but not the first or the last,
- O = outside
e.g.
John lives in New York City .
S-PER O O B-LOC I-LOC E-LOC O
prefix: IOBES
chunk_type: PER, LOC, etc.
"""
from __future__ import division, print_function, unicode_literals
import sys
from collections import defaultdict
def split_tag(chunk_tag):
    """Split a chunk tag into its IOBES prefix and chunk type.

    e.g.
        "B-PER" -> ["B", "PER"]
        "O"     -> ("O", None)
    """
    return ("O", None) if chunk_tag == "O" else chunk_tag.split("-", maxsplit=1)
def is_chunk_end(prev_tag, tag):
    """Return True iff the previous chunk ended between the previous and
    current word.

    e.g.
        (B-PER, I-PER) -> False
        (B-LOC, O)     -> True
    Contradicting tags, e.g. (B-PER, I-LOC), are treated as (B-PER, B-LOC).
    """
    def _parse(chunk_tag):
        # inlined equivalent of split_tag, keeping this predicate self-contained
        return ("O", None) if chunk_tag == "O" else chunk_tag.split("-", maxsplit=1)

    prev_prefix, prev_type = _parse(prev_tag)
    cur_prefix, cur_type = _parse(tag)
    if prev_prefix == "O":
        return False  # no open chunk to end
    if cur_prefix == "O":
        return True  # open chunk runs into an outside tag
    if prev_type != cur_type:
        return True  # chunk type changed mid-stream
    return cur_prefix in ("B", "S") or prev_prefix in ("E", "S")
def is_chunk_start(prev_tag, tag):
    """Return True iff a new chunk started between the previous and current
    word (mirror image of is_chunk_end)."""
    def _parse(chunk_tag):
        # inlined equivalent of split_tag, keeping this predicate self-contained
        return ("O", None) if chunk_tag == "O" else chunk_tag.split("-", maxsplit=1)

    prev_prefix, prev_type = _parse(prev_tag)
    cur_prefix, cur_type = _parse(tag)
    if cur_prefix == "O":
        return False  # outside tags never open a chunk
    if prev_prefix == "O":
        return True  # any chunk tag after "O" opens a new chunk
    if prev_type != cur_type:
        return True  # type switch implies a fresh chunk
    return cur_prefix in ("B", "S") or prev_prefix in ("E", "S")
def calc_metrics(tp, p, t, percent=True):
    """Compute precision, recall and FB1 from raw counts (defaults are 0.0).

    tp: true positives; p: predicted positives; t: actual positives.
    When percent is True, values are scaled by 100.
    """
    precision = tp / p if p else 0
    recall = tp / t if t else 0
    denom = precision + recall
    fb1 = (2 * precision * recall / denom) if denom else 0
    scale = 100 if percent else 1
    return scale * precision, scale * recall, scale * fb1
def count_chunks(true_seqs, pred_seqs):
    """
    true_seqs: a list of true tags
    pred_seqs: a list of predicted tags
    return:
    correct_chunks: a dict (counter),
        key = chunk types,
        value = number of correctly identified chunks per type
    true_chunks: a dict, number of true chunks per type
    pred_chunks: a dict, number of identified chunks per type
    correct_counts, true_counts, pred_counts: similar to above, but for tags
    """
    correct_chunks = defaultdict(int)
    true_chunks = defaultdict(int)
    pred_chunks = defaultdict(int)
    correct_counts = defaultdict(int)
    true_counts = defaultdict(int)
    pred_counts = defaultdict(int)
    prev_true_tag, prev_pred_tag = "O", "O"
    # type of the chunk currently open and matching in BOTH sequences, else None
    correct_chunk = None
    for true_tag, pred_tag in zip(true_seqs, pred_seqs):
        # token-level (tag) counts
        if true_tag == pred_tag:
            correct_counts[true_tag] += 1
        true_counts[true_tag] += 1
        pred_counts[pred_tag] += 1
        _, true_type = split_tag(true_tag)
        _, pred_type = split_tag(pred_tag)
        if correct_chunk is not None:
            # a jointly-open chunk only stays correct if both sequences
            # close it at the same position
            true_end = is_chunk_end(prev_true_tag, true_tag)
            pred_end = is_chunk_end(prev_pred_tag, pred_tag)
            if pred_end and true_end:
                correct_chunks[correct_chunk] += 1
                correct_chunk = None
            elif pred_end != true_end or true_type != pred_type:
                # only one side closed, or the type diverged: match is broken
                correct_chunk = None
        true_start = is_chunk_start(prev_true_tag, true_tag)
        pred_start = is_chunk_start(prev_pred_tag, pred_tag)
        # a new candidate match opens when both sides start the same type here
        if true_start and pred_start and true_type == pred_type:
            correct_chunk = true_type
        if true_start:
            true_chunks[true_type] += 1
        if pred_start:
            pred_chunks[pred_type] += 1
        prev_true_tag, prev_pred_tag = true_tag, pred_tag
    # a chunk still open at the end of both sequences counts as correct
    if correct_chunk is not None:
        correct_chunks[correct_chunk] += 1
    return (
        correct_chunks,
        true_chunks,
        pred_chunks,
        correct_counts,
        true_counts,
        pred_counts,
    )
def get_result(
    correct_chunks,
    true_chunks,
    pred_chunks,
    correct_counts,
    true_counts,
    pred_counts,
    verbose=True,
):
    """
    Compute overall precision, recall and FB1 from the chunk counters.

    If verbose, print overall performance, as well as performance per chunk
    type; otherwise, simply return overall (prec, rec, f1) scores.
    """
    # sum counts
    sum_correct_chunks = sum(correct_chunks.values())
    sum_true_chunks = sum(true_chunks.values())
    sum_pred_chunks = sum(pred_chunks.values())
    sum_correct_counts = sum(correct_counts.values())
    sum_true_counts = sum(true_counts.values())
    # tag-level accuracy restricted to non-"O" (non-outside) tags
    nonO_correct_counts = sum(v for k, v in correct_counts.items() if k != "O")
    nonO_true_counts = sum(v for k, v in true_counts.items() if k != "O")
    chunk_types = sorted(set(true_chunks) | set(pred_chunks))
    # compute overall precision, recall and FB1 (default values are 0.0)
    prec, rec, f1 = calc_metrics(sum_correct_chunks, sum_pred_chunks, sum_true_chunks)
    res = (prec, rec, f1)
    if not verbose:
        return res
    # print overall performance, and performance per chunk type
    print(
        "processed %i tokens with %i phrases; " % (sum_true_counts, sum_true_chunks),
        end="",
    )
    print(
        "found: %i phrases; correct: %i.\n" % (sum_pred_chunks, sum_correct_chunks),
        end="",
    )
    # guard the divisions: empty input or all-"O" sequences would otherwise
    # raise ZeroDivisionError
    nonO_acc = 100 * nonO_correct_counts / nonO_true_counts if nonO_true_counts else 0.0
    overall_acc = 100 * sum_correct_counts / sum_true_counts if sum_true_counts else 0.0
    print("accuracy: %6.2f%%; (non-O)" % nonO_acc)
    print("accuracy: %6.2f%%; " % overall_acc, end="")
    print("precision: %6.2f%%; recall: %6.2f%%; FB1: %6.2f" % (prec, rec, f1))
    # for each chunk type, compute precision, recall and FB1 (default values are 0.0)
    for t in chunk_types:
        prec, rec, f1 = calc_metrics(correct_chunks[t], pred_chunks[t], true_chunks[t])
        print("%17s: " % t, end="")
        print(
            "precision: %6.2f%%; recall: %6.2f%%; FB1: %6.2f" % (prec, rec, f1), end=""
        )
        print(" %d" % pred_chunks[t])
    return res
# you can generate LaTeX output for tables like in
# http://cnts.uia.ac.be/conll2003/ner/example.tex
# but I'm not implementing this
def evaluate(true_seqs, pred_seqs, verbose=True):
    """Chunk-level evaluation of predicted vs. gold tag sequences.

    Returns the overall (precision, recall, FB1) tuple; when verbose is True,
    a detailed per-type report is also printed.
    """
    counters = count_chunks(true_seqs, pred_seqs)
    return get_result(*counters, verbose=verbose)
def evaluate_conll_file(fileIterator):
    """Read CoNLL-format lines (token ... gold pred) and evaluate the last
    two columns as gold/predicted tag sequences."""
    gold_tags, guess_tags = [], []
    for raw_line in fileIterator:
        columns = raw_line.strip().split()
        if not columns:
            # blank line = sentence boundary; "O" keeps chunks from spanning it
            gold_tags.append("O")
            guess_tags.append("O")
            continue
        # each non-empty line must contain >= 3 columns
        if len(columns) < 3:
            raise IOError("conlleval: too few columns in line %s\n" % raw_line)
        # extract tags from last 2 columns
        gold_tags.append(columns[-2])
        guess_tags.append(columns[-1])
    return evaluate(gold_tags, guess_tags)
import copy
import codecs
import numpy as np
import tensorflow as tf
from utils import BASEPATH
from typing import List, Dict
from conlleval import evaluate
from keras_bert import Tokenizer
from tensorflow.data import Dataset
from entitybase.entity_base_loader import EntityBase
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import precision_score, recall_score, f1_score
class DataLoader(object):
    def __init__(self, bert_path: str, dict_dataset: Dict):
        """Load the BERT tokenizer, entity base and all dataset splits.

        bert_path: folder of the pre-trained BERT model (must contain vocab.txt)
        dict_dataset: maps split names ('train', 'dev', 'test', 'zs_test') to
            file paths relative to BASEPATH
        """
        # vocabulary file of the pre-trained BERT model
        self.__bert_path = f"{bert_path}/vocab.txt"
        self.__tokenizer = self.__load_vocabulary()
        self.__dict_dataset = dict_dataset
        self.__entity_base = EntityBase()
        # NER tag inventory; "X" is a placeholder tag (presumably for sub-word
        # or padding positions; real tags likely added by __parse_data -- verify)
        self.__dict_ner_label = ["X"]
        self.__max_seq_len = 100  # max tokens per sentence
        self.__max_ent_len = 16  # max tokens per entity mention
        self.__batch_size = 6
        # parse every split up front ("zs" presumably = zero-shot -- verify)
        self._train_data = self.__parse_data(f"{BASEPATH}{dict_dataset['train']}")
        self._devel_data = self.__parse_data(f"{BASEPATH}{dict_dataset['dev']}")
        self._test_data = self.__parse_data(f"{BASEPATH}{dict_dataset['test']}")
        self._zs_test_data = self.__parse_data(f"{BASEPATH}{dict_dataset['zs_test']}")
def resampling_data(self, dtype):
if dtype == "train":
self._train_data = self.__parse_data(
f"{BASEPATH}{self.__dict_dataset['train']}"
)
elif dtype == "devel":
self._devel_data = self.__parse_data(
f"{BASEPATH}{self.__dict_dataset['dev']}"
)
elif dtype == "test":
self._test_data = self.__parse_data(
f"{BASEPATH}{self.__dict_dataset['test']}"
)
def parse_idx_tokens(self, ind):
return self.__tokenizer.decode(ind)
def parse_idx_ner_labels(self, ner):
return [self.__dict_ner_label[i] for i in ner]
def __parse_idx_sequence(self, pred, label):
res_pred, res_label = [], []
for i in range(len(pred)):
tmp_pred, tmp_label = [], []
for p, l in zip(pred[i], label[i]):
if self.__dict_ner_label[l] != "X":
tmp_label.append(self.__dict_ner_label[l])
if self.__dict_ner_label[p] == "X":
tmp_pred.append("O")
else:
tmp_pred.append(self.__dict_ner_label[p])
res_pred.append(tmp_pred)
res_label.append(tmp_label)
return res_pred, res_label
    def evaluate_ner(self, logits, label, real_len):
        """Compute chunk-level NER precision/recall/F1 as fractions in [0, 1].

        logits: model output scores (argmaxed over the last axis to get tags)
        label: gold label indices per token
        real_len: true (unpadded) token count per sentence
        """
        pred = tf.argmax(logits, axis=-1)
        pred, true = self.__parse_idx_sequence(pred, label)
        y_real, pred_real = [], []
        records = []
        for i in range(len(real_len)):
            # de-duplicate sentences keyed by (gold tags + length) before
            # scoring; NOTE(review): presumably guards against resampled
            # duplicates -- verify against the data loader's sampling
            record = " ".join(true[i]) + str(real_len[i])
            if record not in records:
                records.append(record)
                # skip position 0 (assumed [CLS] -- TODO confirm) and trim padding
                y_real.extend(true[i][1 : 1 + real_len[i]])
                pred_real.extend(pred[i][1 : 1 + real_len[i]])
        # conlleval returns percentages; convert to fractions
        prec, rec, f1 = evaluate(y_real, pred_real, verbose=False)
        return (prec / 100, rec / 100, f1 / 100)
def __restore_ner_label(self, ner_logits, ner_label, real_len):
ner_pred = tf.argmax(ner_logits, axis=-1)
ner_pred, ner_truth = self.__parse_idx_sequence(ner_pred, ner_label)
ner_label_real, ner_pred_real = [], []
for i in range(len(real_len)):
ner_label_real.append(ner_truth[i][1 : 1 + real_len[i]])
ner_pred_real.append(ner_pred[i][1 : 1 + real_len[i]])
return ner_label_real, ner_pred_real
def __extract_entity_by_index(self, label_sequence, index):
length = len(label_sequence)
entity = []
tmp_index = index
while (
tmp_index >= 0 and tmp_index < length and label_sequence[tmp_index] != "O"
):
entity.insert(0, tmp_index)
if "B-" in label_sequence[tmp_index]:
break
tmp_index -= 1
tmp_index = index + 1
while tmp_index < length and label_sequence[tmp_index] != "O":
if "B-" in label_sequence[tmp_index]:
break
entity.append(tmp_index)
tmp_index += 1
return entity
    def evaluate_nen(
        self,
        ner_logits,
        ner_label,
        cpt_ner_logits,
        cpt_ner_label,
        real_len,
        nen_logits,
        nen_label,
    ):
        """Evaluate entity normalization (NEN) as weighted precision/recall/F1.

        Expands the sentence-level normalization labels/predictions to the
        entity level using the NER outputs: a predicted "1" only counts for an
        entity whose mention-level tags are correct AND whose concept-level
        (cpt) span matches the gold concept span.

        ner_logits / ner_label: mention-level NER scores and gold indices
        cpt_ner_logits / cpt_ner_label: concept-level NER scores and gold indices
        real_len: true token count per sentence
        nen_logits / nen_label: sentence-level normalization scores and labels
        """
        ner_label_real, ner_pred_real = self.__restore_ner_label(
            ner_logits, ner_label, real_len
        )
        cpt_ner_label_real, cpt_ner_pred_real = self.__restore_ner_label(
            cpt_ner_logits, cpt_ner_label, real_len
        )
        nen_pred = tf.argmax(nen_logits, axis=-1).numpy().tolist()
        nen_label = nen_label.numpy().tolist()
        tmp_nen_pred = []
        tmp_nen_label = []
        for i in range(len(nen_label)):
            n_entity = 0
            if nen_label[i] == 1:
                # count gold entities to expand the sentence-level label;
                # NOTE(review): `e` is a whole tag row (a list), so `"B-" in e`
                # tests list membership of the literal string "B-" -- possibly
                # `ner_label_real[i]` with a per-tag check was intended; verify
                for e in ner_label_real:
                    if "B-" in e:
                        n_entity += 1
                tmp_nen_label.extend([1] * n_entity)
            else:
                tmp_nen_label.append(0)
            if nen_pred[i] == 0:
                # model predicted "no normalization": one miss per counted entity
                tmp_nen_pred.extend([0] * n_entity)
            else:
                index = 0
                flag = False  # True while inside an already-scored entity span
                for p, t in zip(ner_pred_real[i], ner_label_real[i]):
                    if "B-" in p or "I-" in p:
                        if p == t:
                            if not flag:
                                # the entity counts as normalized only when the
                                # concept-level spans also agree
                                if self.__extract_entity_by_index(
                                    cpt_ner_pred_real[i], index
                                ) == self.__extract_entity_by_index(
                                    cpt_ner_label_real[i], index
                                ):
                                    tmp_nen_pred.append(1)
                                else:
                                    tmp_nen_pred.append(0)
                                flag = True
                        else:
                            if not flag:
                                tmp_nen_pred.append(0)
                                flag = True
                    else:
                        flag = False
                    index += 1
            # pad whichever side is shorter so both lists stay aligned
            if len(tmp_nen_label) < len(tmp_nen_pred):
                size = len(tmp_nen_label)
                for _ in range(len(tmp_nen_pred) - size):
                    tmp_nen_label.append(nen_label[i])
            elif len(tmp_nen_label) > len(tmp_nen_pred):
                size = len(tmp_nen_pred)
                for _ in range(len(tmp_nen_label) - size):
                    tmp_nen_pred.append(0)
        # drop positions that are true negatives on both sides before scoring
        filtered_nen_label, filtered_nen_pred = [], []
        for i in range(len(tmp_nen_label)):
            if tmp_nen_label[i] == 0 and tmp_nen_pred[i] == 0:
                continue
            filtered_nen_label.append(tmp_nen_label[i])
            filtered_nen_pred.append(tmp_nen_pred[i])
        reca = recall_score(filtered_nen_label, filtered_nen_pred, average="weighted")
        prec = precision_score(
            filtered_nen_label, filtered_nen_pred, average="weighted"
        )
        f1 = f1_score(filtered_nen_label, filtered_nen_pred, average="weighted")
        return (prec, reca, f1)
@property
def LABEL_SIZE(self):
return len(self.__dict_ner_label)
def Data(self, dtype: str):
return getattr(self, f"_{dtype}_data")
def __load_vocabulary(self):
token_dict = {}
with codecs.open(self.__bert_path, "r", "utf8") as reader:
for line in reader:
token = line.strip()
token_dict[token] = len(token_dict)
return Tokenizer(token_dict)
def __tokenize_entity(self, entities):
indices = []
segments = []
for e in entities:
ind, seg = self.__tokenizer.encode(first=e)
indices.append(ind)
segments.append(seg)
indices = pad_sequences(indices, self.__max_ent_len, value=0, padding="post")
segments = pad_sequences(segments, self.__max_ent_len, value=0, padding="post")
return (indices, segments)
def __tokenize_sample(self, sentence, label, cpt_label):
labels = []
cpt_labels = []
indices = []
segments = []
for i in range(len(sentence)):
s = sentence[i][: self.__max_seq_len - 2]