|
from seqeval.metrics import classification_report |
|
from seqeval.scheme import IOB2 |
|
import numpy as np |
|
import spacy |
|
|
|
|
|
# Load the small English spaCy pipeline; only its rule-based tokenizer is
# used below, to split paragraphs into word tokens so character-level span
# annotations can be aligned with token-level IOB2 labels for seqeval.
nlp = spacy.load("en_core_web_sm")

tokenizer = nlp.tokenizer
|
|
|
def _char_labels_from_spans(paragraph, spans, span_texts, span_labels):
    """Return one IOB2 label per character of *paragraph*.

    Characters outside every span get 'O'; the first character of a span
    gets 'B-<label>' and the remaining characters get 'I-<label>'.
    """
    char_labels = ['O' for _ in paragraph]
    for j, (start, end) in enumerate(spans):
        # The recorded (start, end) offsets must reproduce the recorded text.
        assert paragraph[start:end] == span_texts[j]
        char_labels[start] = 'B-' + span_labels[j]
        for position in range(start + 1, end):
            char_labels[position] = 'I-' + span_labels[j]
    return char_labels


def _token_labels_from_char_labels(tokens, char_labels):
    """Collapse character-level IOB2 labels onto spaCy tokens.

    Each token takes the first non-'O' label among the characters it covers.
    Returns (token_labels, generic_token_labels); the generic list replaces
    every entity type with the single type 'LABEL', so only span boundaries
    are compared, not the function labels themselves.
    """
    token_labels = ['O' for _ in tokens]
    generic_labels = ['O' for _ in tokens]
    for token_idx, token in enumerate(tokens):
        covered = char_labels[token.idx: token.idx + len(token)]
        label = next((x for x in covered if x != 'O'), 'O')
        if label != 'O':
            if label.startswith('I-'):
                # IOB2 repair: an 'I-' token that does not continue the
                # previous token's entity (first token, previous is 'O', or a
                # different entity type) must start a new chunk with 'B-'.
                # NOTE: the original compared the whole label LIST against a
                # single label (always True), so every 'I-' was turned into
                # 'B-', fragmenting multi-token entities; fixed here.
                previous = token_labels[token_idx - 1] if token_idx > 0 else 'O'
                if previous[2:] != label[2:]:
                    label = 'B-' + label[2:]
            token_labels[token_idx] = label
            generic_labels[token_idx] = label[:2] + 'LABEL'
        # IOB2 validity checks: a sequence starts with 'O' or 'B-', and an
        # 'I-' label must continue the same entity type as its predecessor.
        if token_idx == 0:
            assert label == 'O' or label.startswith('B-')
        elif label.startswith('I-'):
            assert label[2:] == token_labels[token_idx - 1][2:]
    return token_labels, generic_labels


def _seqeval_report(y_true, y_pred, print_report):
    """Run seqeval's classification_report and return its dict form.

    Optionally prints the human-readable string form of the same report.
    """
    report_string = classification_report(y_true=y_true,
                                          y_pred=y_pred,
                                          scheme=IOB2,
                                          zero_division=0.0,
                                          output_dict=False)
    report_dict = classification_report(y_true=y_true,
                                        y_pred=y_pred,
                                        scheme=IOB2,
                                        zero_division=0.0,
                                        output_dict=True)
    if print_report:
        print(report_string)
    return report_dict


def evaluate_FOCAL_seqeval(references_jsonl, predictions_jsonl, print_reports=False):
    '''
    Computes SEQEVAL scores.
    1. convert the text into 'word' tokens using default spaCy tokenizer
    2. turn the references and the predictions into IOB2 style labels (one label per token, 'O' by default)
    3. compute f1-scores using SEQEVAL

    Returns 2 dictionaries in classification_report style, the first one with full seqeval scores,
    the second converting all the labels to a generic LABEL.

    In plain English, this 2nd one checks that you correctly found the parts of the paragraph that explain the function of the citation,
    without checking if you correctly predicted the reason(s) a given citation was made (the function labels).
    '''
    # Align references and predictions on their shared 'Identifier' field.
    references_jsonl = sorted(references_jsonl, key=lambda x: x['Identifier'])
    predictions_jsonl = sorted(predictions_jsonl, key=lambda x: x['Identifier'])

    ref_paragraphs = [e['Paragraph'] for e in references_jsonl]
    pred_paragraphs = [e['Paragraph'] for e in predictions_jsonl]
    # Both sides must describe the same paragraphs in the same order;
    # otherwise the per-paragraph label alignment below is meaningless.
    assert ref_paragraphs == pred_paragraphs

    y_true_all = []
    y_pred_all = []
    y_true_generic = []
    y_pred_generic = []

    for i, paragraph in enumerate(ref_paragraphs):
        ref = references_jsonl[i]
        pred = predictions_jsonl[i]

        # Character-level IOB2 labels, then collapsed onto spaCy tokens.
        ref_labels_char = _char_labels_from_spans(paragraph,
                                                  ref['Functions Start End'],
                                                  ref['Functions Text'],
                                                  ref['Functions Label'])
        pred_labels_char = _char_labels_from_spans(paragraph,
                                                   pred['Functions Start End'],
                                                   pred['Functions Text'],
                                                   pred['Functions Label'])

        tokens = tokenizer(paragraph)

        ref_tokens, ref_generic = _token_labels_from_char_labels(tokens, ref_labels_char)
        pred_tokens, pred_generic = _token_labels_from_char_labels(tokens, pred_labels_char)

        y_true_all.append(ref_tokens)
        y_pred_all.append(pred_tokens)
        y_true_generic.append(ref_generic)
        y_pred_generic.append(pred_generic)

    report_dict_all = _seqeval_report(y_true_all, y_pred_all, print_reports)
    report_dict_generic = _seqeval_report(y_true_generic, y_pred_generic, print_reports)

    return (report_dict_all, report_dict_generic)
|
|