# FOCAL/scoring_scripts/score_focal_seqeval.py
from seqeval.metrics import classification_report
from seqeval.scheme import IOB2
import numpy as np
import spacy
# preload the tokenizer
nlp = spacy.load("en_core_web_sm")
tokenizer = nlp.tokenizer
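# note: the alignment heuristic below relies on two spaCy Token properties:
#   token.idx  -> character offset of the token within the original paragraph
#   len(token) -> number of characters in the token
# e.g. for tokenizer("cited in [1]"), the token "[" has token.idx == 9 and len == 1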
def evaluate_FOCAL_seqeval(references_jsonl, predictions_jsonl, print_reports=False):
    '''
    Computes SEQEVAL scores.
    1. convert the text into 'word' tokens using the default spaCy tokenizer
    2. turn the references and the predictions into IOB2-style labels (one label per token, 'O' by default)
    3. compute f1-scores using SEQEVAL
    Returns 2 dictionaries in classification_report style: the first one with full seqeval scores,
    the second with all labels converted to a single generic LABEL.
    In plain English, this 2nd report checks that you correctly found the parts of the paragraph that explain the function of the citation,
    without checking whether you correctly predicted the reason(s) a given citation was made (the function labels).
    '''
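    # Illustrative example of step 2 (toy text, hypothetical label, not from the dataset):
    #   paragraph: "as shown in [1], dust emission dominates"
    #   reference span (12, 15) = "[1]" with label "Background"
    #   spaCy tokens: as | shown | in | [            | 1            | ]            | , | dust | ...
    #   IOB2 labels:  O  | O     | O  | B-Background | I-Background | I-Background | O | O    | ...
    #   generic form: O  | O     | O  | B-LABEL      | I-LABEL      | I-LABEL      | O | O    | ...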
    # sort the refs and pred by unique ID
    references_jsonl = sorted(references_jsonl, key=lambda x: x['Identifier'])
    predictions_jsonl = sorted(predictions_jsonl, key=lambda x: x['Identifier'])
    # list of columns for easier manipulation
    ref_functions_texts = [e['Functions Text'] for e in references_jsonl]
    ref_functions_labels = [e['Functions Label'] for e in references_jsonl]
    ref_functions_start_end = [e['Functions Start End'] for e in references_jsonl]
    ref_paragraphs = [e['Paragraph'] for e in references_jsonl]
    pred_functions_texts = [e['Functions Text'] for e in predictions_jsonl]
    pred_functions_labels = [e['Functions Label'] for e in predictions_jsonl]
    pred_functions_start_end = [e['Functions Start End'] for e in predictions_jsonl]
    pred_paragraphs = [e['Paragraph'] for e in predictions_jsonl]
    # what will be used by classification_report
    y_true_all = []
    y_pred_all = []
    y_true_generic = []
    y_pred_generic = []
    # check that ref and pred text is the same
    assert(ref_paragraphs == pred_paragraphs)
    # go through each paragraph
    for i, p in enumerate(ref_paragraphs):
        # assign to each character a ref_label and pred_label
        ref_labels_char = ['O' for _ in p]
        pred_labels_char = ['O' for _ in p]
        # go through each ref function to verify the data
        for j, (start, end) in enumerate(ref_functions_start_end[i]):
            # check that the text of the ref function matches the paragraph [start:end] section defined by the ref's start:end
            assert(p[start:end] == ref_functions_texts[i][j])
            # fill in the char level labels
            ref_labels_char[start] = 'B-' + ref_functions_labels[i][j]
            for position in range(start+1, end):
                ref_labels_char[position] = 'I-' + ref_functions_labels[i][j]
        # do the same for the pred functions
        for j, (start, end) in enumerate(pred_functions_start_end[i]):
            # check that the text of the pred function matches the paragraph [start:end] section defined by the pred's start:end
            assert(p[start:end] == pred_functions_texts[i][j])
            # fill in the char level labels
            pred_labels_char[start] = 'B-' + pred_functions_labels[i][j]
            for position in range(start+1, end):
                pred_labels_char[position] = 'I-' + pred_functions_labels[i][j]
        # tokenize the text
        tokens = tokenizer(p)
        # assign to each token a ref_label and a pred_label
        ref_labels_tokens = ['O' for _ in tokens]
        pred_labels_tokens = ['O' for _ in tokens]
        # same but with making all labels the same generic label
        ref_labels_tokens_generic = ['O' for _ in tokens]
        pred_labels_tokens_generic = ['O' for _ in tokens]
        for token_idx, token in enumerate(tokens):
            # note that token_idx is the position in tokens
            # and token.idx the position in characters
            # heuristic to assign a label:
            # assign the first non-'O' label we find inside the token span
            # for refs
            label = next((x for x in ref_labels_char[token.idx: token.idx+len(token)] if x != 'O'), 'O')
            if label != 'O':
                # if the label starts on a white space, we might miss the B- since the tokenizer often skips whitespaces
                # check if we need to change an I- into a B-
                if label[:2] == 'I-':
                    # only promote to B- if the previous token does not already carry the same entity type
                    if token_idx == 0 or ref_labels_tokens[token_idx-1][2:] != label[2:]:
                        label = 'B-' + label[2:]
                ref_labels_tokens[token_idx] = label
                # use the B- or I- portion of the label for the generic label
                ref_labels_tokens_generic[token_idx] = label[:2] + 'LABEL'
            # based on construction, we should never have an I- label without either an I- or B- label before
            if token_idx == 0:
                assert(label == 'O' or label.startswith('B-'))
            else:
                if label.startswith('I-'):
                    # check prev label is same
                    assert(label[2:] == ref_labels_tokens[token_idx-1][2:])
            # for preds
            label = next((x for x in pred_labels_char[token.idx: token.idx+len(token)] if x != 'O'), 'O')
            if label != 'O':
                if label[:2] == 'I-':
                    # only promote to B- if the previous token does not already carry the same entity type
                    if token_idx == 0 or pred_labels_tokens[token_idx-1][2:] != label[2:]:
                        label = 'B-' + label[2:]
                pred_labels_tokens[token_idx] = label
                # use the B- or I- portion of the label for the generic label
                pred_labels_tokens_generic[token_idx] = label[:2] + 'LABEL'
            # based on construction, we should never have an I- label without either an I- or B- label before
            if token_idx == 0:
                assert(label == 'O' or label.startswith('B-'))
            else:
                if label.startswith('I-'):
                    # check prev label is same
                    assert(label[2:] == pred_labels_tokens[token_idx-1][2:])
        y_true_all.append(ref_labels_tokens)
        y_pred_all.append(pred_labels_tokens)
        y_true_generic.append(ref_labels_tokens_generic)
        y_pred_generic.append(pred_labels_tokens_generic)
    # now we can evaluate using seqeval
    # build report for printing
    report_string_all = classification_report(y_true=y_true_all,
                                               y_pred=y_pred_all,
                                               scheme=IOB2,
                                               zero_division=0.0,
                                               output_dict=False
                                               )
    # return report as dict (can't do both at the same time? slight waste of compute)
    report_dict_all = classification_report(y_true=y_true_all,
                                             y_pred=y_pred_all,
                                             scheme=IOB2,
                                             zero_division=0.0,
                                             output_dict=True
                                             )
    if print_reports:
        print(report_string_all)
    report_string_generic = classification_report(y_true=y_true_generic,
                                                   y_pred=y_pred_generic,
                                                   scheme=IOB2,
                                                   zero_division=0.0,
                                                   output_dict=False
                                                   )
    # return report as dict (can't do both at the same time? slight waste of compute)
    report_dict_generic = classification_report(y_true=y_true_generic,
                                                 y_pred=y_pred_generic,
                                                 scheme=IOB2,
                                                 zero_division=0.0,
                                                 output_dict=True
                                                 )
    if print_reports:
        print(report_string_generic)
    return (report_dict_all, report_dict_generic)
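

# Minimal usage sketch (illustrative only): the toy record below is invented and the
# function label 'Background' is an assumption, not necessarily a FOCAL label.
# In practice the references and predictions are read from JSONL files with one record
# per paragraph, following the field names used above.
if __name__ == "__main__":
    paragraph = "We follow the method of Smith et al. (2020) to calibrate the data."
    span_text = "to calibrate the data"
    start = paragraph.index(span_text)
    record = {
        'Identifier': 'toy-0',
        'Paragraph': paragraph,
        'Functions Text': [span_text],
        'Functions Label': ['Background'],
        'Functions Start End': [[start, start + len(span_text)]],
    }
    # score a "prediction" identical to the reference: all f1-scores should come out as 1.0
    report_all, report_generic = evaluate_FOCAL_seqeval([record], [dict(record)], print_reports=True)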