shounakpaul95 committed
Commit: 4bed9ae
Parent: 267a185

Update eval_utils.py

Files changed (1): eval_utils.py (+5, -4)
eval_utils.py CHANGED
@@ -256,13 +256,14 @@ def evaluate_summ(gold_data, pred_data):
         pred_summaries.append(pred_summary)


-    rl_evaluator = rouge.Rouge(metrics=['rouge-l'], max_n=2, limit_length=False, apply_avg=True)
-    rl_scores = rl_evaluator.get_scores(pred_summaries, gold_summaries)
-    print("Rouge:", {k:v['f'] for k,v in rl_scores.items()}, flush=True)
+    # rl_evaluator = rouge.Rouge(metrics=['rouge-l'], max_n=2, limit_length=False, apply_avg=True)
+    # rl_scores = rl_evaluator.get_scores(pred_summaries, gold_summaries)
+    # print("Rouge:", {k:v['f'] for k,v in rl_scores.items()}, flush=True)

     _, _, bs = bert_score.score(pred_summaries, gold_summaries, lang="en", verbose=True)
     print("BERTSCORE:", bs.mean().item())
-    return {'ROUGE-L': rl_scores['rouge-l']['f'] * 100, 'BERTSCORE': bs.mean().item() * 100}
+    # return {'ROUGE-L': rl_scores['rouge-l']['f'] * 100, 'BERTSCORE': bs.mean().item() * 100}
+    return {'ROUGE-L': '-', 'BERTSCORE': bs.mean().item() * 100}

 def evaluate_lmt(gold_data, pred_data):
     tokenizer = AutoTokenizer.from_pretrained("ai4bharat/indic-bert", use_fast=False)
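
For context, a minimal sketch of the scoring path as it behaves after this commit, assuming the bert-score package provides bert_score.score as used in the diff; the gold and predicted summaries below are hypothetical placeholders, not taken from the dataset:

import bert_score

# Hypothetical example summaries, for illustration only.
gold_summaries = ["The appeal was dismissed by the court."]
pred_summaries = ["The court dismissed the appeal."]

# bert_score.score returns per-example precision, recall and F1 tensors;
# only the F1 tensor (bs) is kept, mirroring evaluate_summ above.
_, _, bs = bert_score.score(pred_summaries, gold_summaries, lang="en", verbose=True)

# After this commit the rouge-based path is commented out, so ROUGE-L is
# reported as '-' and only the mean BERTScore F1, scaled to 0-100, is returned.
print({'ROUGE-L': '-', 'BERTSCORE': bs.mean().item() * 100})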