File size: 8,201 Bytes
667dd50 07beb35 667dd50 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 |
from seqeval.metrics import classification_report
from seqeval.scheme import IOB2
import numpy as np
import spacy
# Preload the spaCy English pipeline once at import time so every call to
# evaluate_FOCAL_seqeval reuses the same tokenizer (model loading is slow).
nlp = spacy.load("en_core_web_sm")
# Only the tokenizer component is used below; token.idx gives each token's
# character offset into the paragraph, which maps span chars back to tokens.
tokenizer = nlp.tokenizer
def _span_labels_to_char_labels(paragraph, spans, texts, labels):
    '''
    Build per-character IOB2 labels ('O' by default) for one paragraph from
    (start, end) spans, their recorded surface texts and their class labels.
    Raises AssertionError if a span's recorded text does not match the
    paragraph slice it points to (data-integrity check, as in the original).
    '''
    char_labels = ['O'] * len(paragraph)
    for (start, end), text, label in zip(spans, texts, labels):
        # the recorded span text must match the paragraph section [start:end]
        assert(paragraph[start:end] == text)
        char_labels[start] = 'B-' + label
        for position in range(start + 1, end):
            char_labels[position] = 'I-' + label
    return char_labels


def _char_labels_to_token_labels(tokens, char_labels):
    '''
    Project character-level IOB2 labels onto spaCy tokens.

    Heuristic: each token takes the first non-'O' label found among its
    characters. Returns (token_labels, generic_token_labels), where the
    generic version collapses every class to 'LABEL' while keeping the
    B-/I- prefix (span detection without the function class).
    '''
    token_labels = ['O'] * len(tokens)
    generic_labels = ['O'] * len(tokens)
    for token_idx, token in enumerate(tokens):
        # token_idx is the position in tokens; token.idx the character offset
        label = next((x for x in char_labels[token.idx: token.idx + len(token)] if x != 'O'), 'O')
        if label != 'O':
            # If a span starts on whitespace the tokenizer skipped, the B-
            # character can fall between tokens and the first token we see
            # carries I-. Promote that dangling I- to B- whenever the previous
            # token does not continue the same entity type.
            # BUGFIX: the original compared the whole label list to one element
            # (always True), which promoted EVERY I- to B- and shattered all
            # multi-token spans into single-token entities.
            if label[:2] == 'I-':
                if token_idx == 0 or token_labels[token_idx - 1][2:] != label[2:]:
                    label = 'B-' + label[2:]
            token_labels[token_idx] = label
            # generic label keeps only the B-/I- prefix
            generic_labels[token_idx] = label[:2] + 'LABEL'
        # by construction we should never emit an I- label that does not
        # continue the previous token's entity
        if token_idx == 0:
            assert(label == 'O' or label.startswith('B-'))
        elif label.startswith('I-'):
            assert(label[2:] == token_labels[token_idx - 1][2:])
    return token_labels, generic_labels


def _seqeval_report(y_true, y_pred, print_report):
    '''
    seqeval classification_report as a dict; optionally print the
    human-readable string form first. The string report is only computed
    when it is actually printed (the original always computed both).
    '''
    if print_report:
        print(classification_report(y_true=y_true,
                                    y_pred=y_pred,
                                    scheme=IOB2,
                                    zero_division=0.0,
                                    output_dict=False))
    return classification_report(y_true=y_true,
                                 y_pred=y_pred,
                                 scheme=IOB2,
                                 zero_division=0.0,
                                 output_dict=True)


def evaluate_FOCAL_seqeval(references_jsonl, predictions_jsonl, print_reports=False):
    '''
    Computes SEQEVAL scores.
    1. convert the text into 'word' tokens using the default spaCy tokenizer
    2. turn the references and the predictions into IOB2 style labels (one label per token, 'O' by default)
    3. compute f1-scores using SEQEVAL

    Parameters:
        references_jsonl, predictions_jsonl: lists of dicts with keys
            'Identifier', 'Paragraph', 'Functions Text', 'Functions Label'
            and 'Functions Start End'; both must cover the same paragraphs.
        print_reports: if True, also print the human-readable reports.

    Returns 2 dictionaries in classification_report style, the first one with full seqeval scores,
    the second converting all the labels to a generic LABEL.
    In plain English, this 2nd one checks that you correctly found the parts of the paragraph that explain
    the function of the citation, without checking if you correctly predicted the reason(s) a given
    citation was made (the function labels).

    Raises AssertionError if refs/preds paragraphs differ or a span's
    recorded text does not match its paragraph slice.
    '''
    # align refs and preds on their unique ID
    references_jsonl = sorted(references_jsonl, key=lambda x: x['Identifier'])
    predictions_jsonl = sorted(predictions_jsonl, key=lambda x: x['Identifier'])
    # check that ref and pred text is the same
    ref_paragraphs = [e['Paragraph'] for e in references_jsonl]
    pred_paragraphs = [e['Paragraph'] for e in predictions_jsonl]
    assert(ref_paragraphs == pred_paragraphs)
    # what will be used by classification_report
    y_true_all = []
    y_pred_all = []
    y_true_generic = []
    y_pred_generic = []
    for ref, pred in zip(references_jsonl, predictions_jsonl):
        p = ref['Paragraph']
        # char-level IOB2 labels for refs and preds
        ref_chars = _span_labels_to_char_labels(
            p, ref['Functions Start End'], ref['Functions Text'], ref['Functions Label'])
        pred_chars = _span_labels_to_char_labels(
            p, pred['Functions Start End'], pred['Functions Text'], pred['Functions Label'])
        # tokenize once and project both label sets onto the tokens
        tokens = tokenizer(p)
        ref_tok, ref_tok_generic = _char_labels_to_token_labels(tokens, ref_chars)
        pred_tok, pred_tok_generic = _char_labels_to_token_labels(tokens, pred_chars)
        y_true_all.append(ref_tok)
        y_pred_all.append(pred_tok)
        y_true_generic.append(ref_tok_generic)
        y_pred_generic.append(pred_tok_generic)
    # now we can evaluate using seqeval
    report_dict_all = _seqeval_report(y_true_all, y_pred_all, print_reports)
    report_dict_generic = _seqeval_report(y_true_generic, y_pred_generic, print_reports)
    return (report_dict_all, report_dict_generic)
|