Datasets:

Languages:
English
Size:
< 1K
ArXiv:
Libraries:
Datasets
License:
asahi417 committed on
Commit
43c22bf
·
1 Parent(s): 4f83459
experiments/baseline_lm_lc.py CHANGED
@@ -1,6 +1,8 @@
1
  import os
2
  import json
 
3
 
 
4
  import pandas as pd
5
  from datasets import load_dataset
6
  from lmppl import EncoderDecoderLM, LM, OpenAI
@@ -17,29 +19,29 @@ prompt_dict = {
17
  data = load_dataset("cardiffnlp/relentless", split="test")
18
  full_result = []
19
  for lm, ppl_class, batch, pretty_name in [
20
- ("t5-small", EncoderDecoderLM, 256, "T5\textsubscript{SMALL}"),
21
- ("t5-base", EncoderDecoderLM, 128, "T5\textsubscript{BASE}"),
22
- ("t5-large", EncoderDecoderLM, 32, "T5\textsubscript{LARGE}"),
23
- ("t5-3b", EncoderDecoderLM, 1, "T5\textsubscript{XL}"),
24
- ("t5-11b", EncoderDecoderLM, 1, "T5\textsubscript{XXL}"),
25
- ("google/flan-t5-small", EncoderDecoderLM, 256, "Flan-T5\textsubscript{SMALL}"),
26
- ("google/flan-t5-base", EncoderDecoderLM, 128, "Flan-T5\textsubscript{BASE}"),
27
- ("google/flan-t5-large", EncoderDecoderLM, 32, "Flan-T5\textsubscript{LARGE}"),
28
- ("google/flan-t5-xl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XL}"),
29
- ("google/flan-t5-xxl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XXL}"),
30
  ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
31
- ("facebook/opt-125m", LM, 256, "OPT\textsubscript{125M}"),
32
- ("facebook/opt-350m", LM, 128, "OPT\textsubscript{350M}"),
33
- ("facebook/opt-1.3b", LM, 1, "OPT\textsubscript{1.3B}"),
34
- ("facebook/opt-2.7b", LM, 1, "OPT\textsubscript{2.7B}"),
35
- ("facebook/opt-6.7b", LM, 1, "OPT\textsubscript{6.7B}"),
36
- ("facebook/opt-13b", LM, 1, "OPT\textsubscript{13B}"),
 
 
 
 
 
37
  ("facebook/opt-30b", LM, 1, "OPT\textsubscript{30B}"),
38
- ("facebook/opt-66b", LM, 1, "OPT\textsubscript{66B}"),
39
- ("facebook/opt-iml-1.3b", LM, 1, "OPT-IML\textsubscript{1.3B}"),
 
 
 
 
40
  ("facebook/opt-iml-30b", LM, 1, "OPT-IML\textsubscript{30B}"),
41
- ("facebook/opt-iml-max-1.3b", LM, 1, "OPT-IML\textsubscript{MAX-1.3B}"),
42
  ("facebook/opt-iml-max-30b", LM, 1, "OPT-IML\textsubscript{MAX-30B}"),
 
43
  # ("davinci", OpenAI, None, "GPT-3\textsubscript{davinci}")
44
  ]:
45
  os.makedirs(f"experiments/results/lm_lc/{os.path.basename(lm)}", exist_ok=True)
@@ -82,6 +84,9 @@ for lm, ppl_class, batch, pretty_name in [
82
  tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
83
  cor = tmp.corr("spearman").values[0, 1]
84
  full_result.append({"model": pretty_name, "relation_type": d['relation_type'], "correlation": cor})
 
 
 
85
 
86
  df = pd.DataFrame(full_result)
87
  models = df['model'].unique()
 
1
  import os
2
  import json
3
+ import gc
4
 
5
+ import torch
6
  import pandas as pd
7
  from datasets import load_dataset
8
  from lmppl import EncoderDecoderLM, LM, OpenAI
 
19
  data = load_dataset("cardiffnlp/relentless", split="test")
20
  full_result = []
21
  for lm, ppl_class, batch, pretty_name in [
 
 
 
 
 
 
 
 
 
 
22
  ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
23
+ ("google/flan-t5-xxl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XXL}"),
24
+ ("google/flan-t5-xl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XL}"),
25
+ ("google/flan-t5-large", EncoderDecoderLM, 32, "Flan-T5\textsubscript{LARGE}"),
26
+ ("google/flan-t5-base", EncoderDecoderLM, 128, "Flan-T5\textsubscript{BASE}"),
27
+ ("google/flan-t5-small", EncoderDecoderLM, 256, "Flan-T5\textsubscript{SMALL}"),
28
+ ("t5-11b", EncoderDecoderLM, 1, "T5\textsubscript{XXL}"),
29
+ ("t5-3b", EncoderDecoderLM, 1, "T5\textsubscript{XL}"),
30
+ ("t5-large", EncoderDecoderLM, 32, "T5\textsubscript{LARGE}"),
31
+ ("t5-base", EncoderDecoderLM, 128, "T5\textsubscript{BASE}"),
32
+ ("t5-small", EncoderDecoderLM, 256, "T5\textsubscript{SMALL}"),
33
+ # ("facebook/opt-66b", LM, 1, "OPT\textsubscript{66B}"),
34
  ("facebook/opt-30b", LM, 1, "OPT\textsubscript{30B}"),
35
+ ("facebook/opt-13b", LM, 1, "OPT\textsubscript{13B}"),
36
+ ("facebook/opt-6.7b", LM, 1, "OPT\textsubscript{6.7B}"),
37
+ ("facebook/opt-2.7b", LM, 1, "OPT\textsubscript{2.7B}"),
38
+ ("facebook/opt-1.3b", LM, 1, "OPT\textsubscript{1.3B}"),
39
+ ("facebook/opt-350m", LM, 128, "OPT\textsubscript{350M}"),
40
+ ("facebook/opt-125m", LM, 256, "OPT\textsubscript{125M}"),
41
  ("facebook/opt-iml-30b", LM, 1, "OPT-IML\textsubscript{30B}"),
42
+ ("facebook/opt-iml-1.3b", LM, 1, "OPT-IML\textsubscript{1.3B}"),
43
  ("facebook/opt-iml-max-30b", LM, 1, "OPT-IML\textsubscript{MAX-30B}"),
44
+ ("facebook/opt-iml-max-1.3b", LM, 1, "OPT-IML\textsubscript{MAX-1.3B}"),
45
  # ("davinci", OpenAI, None, "GPT-3\textsubscript{davinci}")
46
  ]:
47
  os.makedirs(f"experiments/results/lm_lc/{os.path.basename(lm)}", exist_ok=True)
 
84
  tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
85
  cor = tmp.corr("spearman").values[0, 1]
86
  full_result.append({"model": pretty_name, "relation_type": d['relation_type'], "correlation": cor})
87
+ del scorer
88
+ gc.collect()
89
+ torch.cuda.empty_cache()
90
 
91
  df = pd.DataFrame(full_result)
92
  models = df['model'].unique()
experiments/baseline_lm_qa.py CHANGED
@@ -1,6 +1,8 @@
1
  import os
2
  import json
 
3
 
 
4
  import pandas as pd
5
  from datasets import load_dataset
6
  from lmppl import EncoderDecoderLM, LM, OpenAI
@@ -17,30 +19,30 @@ prompt_dict = {
17
  data = load_dataset("cardiffnlp/relentless", split="test")
18
  full_result = []
19
  for lm, ppl_class, batch, pretty_name in [
20
- ("t5-small", EncoderDecoderLM, 256, "T5\textsubscript{SMALL}"),
21
- ("t5-base", EncoderDecoderLM, 128, "T5\textsubscript{BASE}"),
22
- ("t5-large", EncoderDecoderLM, 32, "T5\textsubscript{LARGE}"),
23
- ("t5-3b", EncoderDecoderLM, 1, "T5\textsubscript{XL}"),
24
- ("t5-11b", EncoderDecoderLM, 1, "T5\textsubscript{XXL}"),
25
- ("google/flan-t5-small", EncoderDecoderLM, 256, "Flan-T5\textsubscript{SMALL}"),
26
- ("google/flan-t5-base", EncoderDecoderLM, 128, "Flan-T5\textsubscript{BASE}"),
27
- ("google/flan-t5-large", EncoderDecoderLM, 32, "Flan-T5\textsubscript{LARGE}"),
28
- ("google/flan-t5-xl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XL}"),
29
- ("google/flan-t5-xxl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XXL}"),
30
  ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
31
- ("facebook/opt-125m", LM, 256, "OPT\textsubscript{125M}"),
32
- ("facebook/opt-350m", LM, 128, "OPT\textsubscript{350M}"),
33
- ("facebook/opt-1.3b", LM, 1, "OPT\textsubscript{1.3B}"),
34
- ("facebook/opt-2.7b", LM, 1, "OPT\textsubscript{2.7B}"),
35
- ("facebook/opt-6.7b", LM, 1, "OPT\textsubscript{6.7B}"),
36
- ("facebook/opt-13b", LM, 1, "OPT\textsubscript{13B}"),
 
 
 
 
 
37
  ("facebook/opt-30b", LM, 1, "OPT\textsubscript{30B}"),
38
- ("facebook/opt-66b", LM, 1, "OPT\textsubscript{66B}"),
39
- ("facebook/opt-iml-1.3b", LM, 1, "OPT-IML\textsubscript{1.3B}"),
 
 
 
 
40
  ("facebook/opt-iml-30b", LM, 1, "OPT-IML\textsubscript{30B}"),
41
- ("facebook/opt-iml-max-1.3b", LM, 1, "OPT-IML\textsubscript{MAX-1.3B}"),
42
  ("facebook/opt-iml-max-30b", LM, 1, "OPT-IML\textsubscript{MAX-30B}"),
43
- ("davinci", OpenAI, None, "GPT-3\textsubscript{davinci}")
 
44
  ]:
45
  os.makedirs(f"experiments/results/lm_qa/{os.path.basename(lm)}", exist_ok=True)
46
  scorer = None
@@ -80,6 +82,9 @@ for lm, ppl_class, batch, pretty_name in [
80
  tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
81
  cor = tmp.corr("spearman").values[0, 1]
82
  full_result.append({"model": pretty_name, "relation_type": d['relation_type'], "correlation": cor})
 
 
 
83
 
84
  df = pd.DataFrame(full_result)
85
  models = df['model'].unique()
 
1
  import os
2
  import json
3
+ import gc
4
 
5
+ import torch
6
  import pandas as pd
7
  from datasets import load_dataset
8
  from lmppl import EncoderDecoderLM, LM, OpenAI
 
19
  data = load_dataset("cardiffnlp/relentless", split="test")
20
  full_result = []
21
  for lm, ppl_class, batch, pretty_name in [
 
 
 
 
 
 
 
 
 
 
22
  ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
23
+ ("google/flan-t5-xxl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XXL}"),
24
+ ("google/flan-t5-xl", EncoderDecoderLM, 1, "Flan-T5\textsubscript{XL}"),
25
+ ("google/flan-t5-large", EncoderDecoderLM, 32, "Flan-T5\textsubscript{LARGE}"),
26
+ ("google/flan-t5-base", EncoderDecoderLM, 128, "Flan-T5\textsubscript{BASE}"),
27
+ ("google/flan-t5-small", EncoderDecoderLM, 256, "Flan-T5\textsubscript{SMALL}"),
28
+ ("t5-11b", EncoderDecoderLM, 1, "T5\textsubscript{XXL}"),
29
+ ("t5-3b", EncoderDecoderLM, 1, "T5\textsubscript{XL}"),
30
+ ("t5-large", EncoderDecoderLM, 32, "T5\textsubscript{LARGE}"),
31
+ ("t5-base", EncoderDecoderLM, 128, "T5\textsubscript{BASE}"),
32
+ ("t5-small", EncoderDecoderLM, 256, "T5\textsubscript{SMALL}"),
33
+ # ("facebook/opt-66b", LM, 1, "OPT\textsubscript{66B}"),
34
  ("facebook/opt-30b", LM, 1, "OPT\textsubscript{30B}"),
35
+ ("facebook/opt-13b", LM, 1, "OPT\textsubscript{13B}"),
36
+ ("facebook/opt-6.7b", LM, 1, "OPT\textsubscript{6.7B}"),
37
+ ("facebook/opt-2.7b", LM, 1, "OPT\textsubscript{2.7B}"),
38
+ ("facebook/opt-1.3b", LM, 1, "OPT\textsubscript{1.3B}"),
39
+ ("facebook/opt-350m", LM, 128, "OPT\textsubscript{350M}"),
40
+ ("facebook/opt-125m", LM, 256, "OPT\textsubscript{125M}"),
41
  ("facebook/opt-iml-30b", LM, 1, "OPT-IML\textsubscript{30B}"),
42
+ ("facebook/opt-iml-1.3b", LM, 1, "OPT-IML\textsubscript{1.3B}"),
43
  ("facebook/opt-iml-max-30b", LM, 1, "OPT-IML\textsubscript{MAX-30B}"),
44
+ ("facebook/opt-iml-max-1.3b", LM, 1, "OPT-IML\textsubscript{MAX-1.3B}"),
45
+ # ("davinci", OpenAI, None, "GPT-3\textsubscript{davinci}")
46
  ]:
47
  os.makedirs(f"experiments/results/lm_qa/{os.path.basename(lm)}", exist_ok=True)
48
  scorer = None
 
82
  tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
83
  cor = tmp.corr("spearman").values[0, 1]
84
  full_result.append({"model": pretty_name, "relation_type": d['relation_type'], "correlation": cor})
85
+ del scorer
86
+ gc.collect()
87
+ torch.cuda.empty_cache()
88
 
89
  df = pd.DataFrame(full_result)
90
  models = df['model'].unique()