shounakpaul95 committed on
Commit
c00c6b5
1 Parent(s): 74b3358

Update eval_utils.py

Browse files
Files changed (1) hide show
  1. eval_utils.py +3 -3
eval_utils.py CHANGED
@@ -60,11 +60,12 @@ def evaluate_cjpe(gold_data, pred_data):
60
 
61
  f1 = f1_score(gold_labels, pred_labels, average="macro")
62
  prediction_result = {"cjpe-eval": f1}
63
-
 
64
  R = []
65
  B = []
66
  rl_evaluator = rouge.Rouge(metrics=['rouge-l'], max_n=2, limit_length=False, apply_avg=True)
67
- for x in range(1, 6):
68
  gold_explanations = []
69
  pred_explanations = []
70
  for k,v in gold_data['explanation'].items():
@@ -84,7 +85,6 @@ def evaluate_cjpe(gold_data, pred_data):
84
  "bleu": bleu_score,
85
  }
86
  }
87
- print("Macro-F1 on ILDC test:", prediction_result)
88
  print("Explanability for ILDC Expert:", explanation_result)
89
  return {**prediction_result, **explanation_result}
90
 
 
60
 
61
  f1 = f1_score(gold_labels, pred_labels, average="macro")
62
  prediction_result = {"cjpe-eval": f1}
63
+ print("Macro-F1 on ILDC test:", prediction_result)
64
+
65
  R = []
66
  B = []
67
  rl_evaluator = rouge.Rouge(metrics=['rouge-l'], max_n=2, limit_length=False, apply_avg=True)
68
+ for x in tqdm(range(1, 6), desc="cjpe explanation expert-wise"):
69
  gold_explanations = []
70
  pred_explanations = []
71
  for k,v in gold_data['explanation'].items():
 
85
  "bleu": bleu_score,
86
  }
87
  }
 
88
  print("Explanability for ILDC Expert:", explanation_result)
89
  return {**prediction_result, **explanation_result}
90