Datasets:
File size: 1,027 Bytes
ee3ae9f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 |
import re
# Matches a single standalone answer letter A-D (word-boundary delimited),
# e.g. the "B" in "The answer is B" but not the "B" in "ABS".
PATTERN = re.compile(r'\b[A-D]\b')
def find_answer(s):
    """Return the first standalone letter A-D found in *s*, or None.

    "Standalone" means word-boundary delimited: the "B" in
    "The answer is B" matches, the "B" in "ABS" does not.
    """
    found = re.search(r'\b[A-D]\b', s)
    return found.group() if found is not None else None
def accuracy_score(prediction, ground_truth):
    """Return True if *prediction* contains the same answer letter (A-D)
    as *ground_truth*.

    Args:
        prediction: Model output; coerced to ``str`` before letter extraction.
        ground_truth: Reference string that must contain a standalone A-D.

    Raises:
        ValueError: If no standalone letter A-D can be extracted from
            *ground_truth*.
    """
    letter_ground_truth = find_answer(ground_truth)
    # `assert` is stripped under `python -O`, so validate with an explicit raise.
    if letter_ground_truth not in ("A", "B", "C", "D"):
        raise ValueError(f"Invalid ground truth: {ground_truth}")
    letter_prediction = find_answer(str(prediction))
    return letter_prediction == letter_ground_truth
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Score *prediction* against every ground truth with *metric_fn* and
    return the best (maximum) score.

    Raises ``ValueError`` if *ground_truths* is empty (``max`` of nothing).
    """
    return max(metric_fn(prediction, truth) for truth in ground_truths)
def compute_accuracy(predictions, references):
    """Return the percentage (0.0-100.0) of predictions whose extracted
    answer letter matches at least one of their references.

    Args:
        predictions: Sequence of model outputs, one per example.
        references: Sequence of ground-truth lists, aligned with
            *predictions* (extra items on either side are ignored by ``zip``).

    Returns:
        The accuracy as a float percentage; 0.0 for an empty *predictions*
        (instead of raising ``ZeroDivisionError``).
    """
    if not predictions:
        return 0.0
    # booleans sum as 0/1, so this counts the correct predictions
    correct = sum(
        metric_max_over_ground_truths(accuracy_score, prediction, ground_truths)
        for prediction, ground_truths in zip(predictions, references)
    )
    return 100.0 * correct / len(predictions)
|