Calculate metric using evaluate instead of datasets

#5
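This switches the SCROLLS metric script from the datasets metric API, which has since been deprecated upstream, to the standalone evaluate library; the computed metrics are unchanged. A minimal sketch of the caller-side difference, assuming the script is loaded from a local checkout (the metrics/scrolls.py path below is illustrative, not part of this PR):

import evaluate

# Previously (deprecated datasets metric API):
#   import datasets
#   scrolls_metric = datasets.load_metric("metrics/scrolls.py", "gov_report")

# With this change, the same script is loaded through evaluate:
scrolls_metric = evaluate.load("metrics/scrolls.py", "gov_report")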
Files changed (1)
  1. metrics/scrolls.py +6 -5
metrics/scrolls.py CHANGED
@@ -3,6 +3,7 @@
from collections import defaultdict
from copy import deepcopy
import datasets
+import evaluate

# fmt: off
from .rouge import compute_rouge, postprocess_text as rouge_postprocess_text # From: https://huggingface.co/datasets/tau/scrolls/raw/main/metrics/rouge.py
@@ -58,20 +59,20 @@ Examples:
    predictions = ["exact match example", "hello there", "general kenobi"] # List[str]
    references = [["exact match example"], ["hello", "hi there"], ["commander kenobi"]] # List[List[str]]

-    >>> scrolls_metric = datasets.load_metric(scrolls_metric_path, 'gov_report') # 'gov_report' or any of ["qmsum", "summ_screen_fd"]
+    >>> scrolls_metric = evaluate.load(scrolls_metric_path, 'gov_report') # 'gov_report' or any of ["qmsum", "summ_screen_fd"]
    >>> results = scrolls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'rouge/rouge1': 72.2222, 'rouge/rouge2': 33.3333, 'rouge/rougeL': 72.2222, 'rouge/rougeLsum': 72.2222, 'rouge/geometric_mean': 55.8136,
    'num_predicted': 3, 'mean_prediction_length_characters': 14.6667, 'scrolls_score': 55.8136,
    'display_keys': ['rouge/rouge1', 'rouge/rouge2', 'rouge/rougeL'], 'display': [72.2222, 33.3333, 72.2222]}

-    >>> scrolls_metric = datasets.load_metric(scrolls_metric_path, 'contract_nli') # 'contract_nli' or "quality"
+    >>> scrolls_metric = evaluate.load(scrolls_metric_path, 'contract_nli') # 'contract_nli' or "quality"
    >>> results = scrolls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 33.3333, 'num_predicted': 3, 'mean_prediction_length_characters': 14.6667, 'scrolls_score': 33.3333,
    'display_keys': ['exact_match'], 'display': [33.3333]}

-    >>> scrolls_metric = datasets.load_metric(scrolls_metric_path, 'narrative_qa') # 'narrative_qa' or "qasper"
+    >>> scrolls_metric = evaluate.load(scrolls_metric_path, 'narrative_qa') # 'narrative_qa' or "qasper"
    >>> results = scrolls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'f1': 72.2222, 'num_predicted': 3, 'mean_prediction_length_characters': 14.6667, 'scrolls_score': 72.2222,
@@ -123,7 +124,7 @@ DATASET_TO_METRICS = {


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
-class Scrolls(datasets.Metric):
+class Scrolls(evaluate.Metric):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

@@ -173,7 +174,7 @@ class Scrolls(datasets.Metric):
        self._metrics_to_compute = DATASET_TO_METRICS[self.config_name]["metrics_to_compute"]

    def _info(self):
-        return datasets.MetricInfo(
+        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
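Read end to end, the updated docstring example becomes the following; a minimal usage sketch, assuming evaluate is installed and the script is available locally (the relative path here is illustrative):

import evaluate

# Sample inputs copied from the docstring examples above.
predictions = ["exact match example", "hello there", "general kenobi"]
references = [["exact match example"], ["hello", "hi there"], ["commander kenobi"]]

# Load the local metric script through evaluate rather than datasets.load_metric.
scrolls_metric = evaluate.load("metrics/scrolls.py", "gov_report")

results = scrolls_metric.compute(predictions=predictions, references=references)
print(results["scrolls_score"])  # full result dict shown in the docstring above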