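# Rank candidate entity pairs on cardiffnlp/relentless by LM perplexity:
# each pair is appended to a list-completion prompt seeded with prototypical
# examples, scored with lmppl, and the induced ranking is compared to the
# gold ranking via Spearman's rho.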
import os
import json
import gc
import torch
import pandas as pd
from datasets import load_dataset
from lmppl import EncoderDecoderLM, LM, OpenAI
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", None)
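# Instruction header per relation type; prototypical examples and each
# candidate pair are appended below it as JSON-style list items.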
prompt_dict = {
    "friend/ally of": "Complete the following list with examples of entities that are friends or allies",
    "competitor/rival of": "Complete the following list with examples of entities that are competitors or rivals",
    "known for": "Complete the following list with examples of what entities are known for",
    "influenced by": "Complete the following list with examples of what has influenced different entities",
    "similar to": "Complete the following list with examples of entities that are similar"
}
data = load_dataset("cardiffnlp/relentless", split="test")
full_result = []
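# Each entry: (HF model name or OpenAI engine, lmppl scorer class, batch size,
# display name used in the result tables). Other model configurations are left
# commented out.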
for lm, ppl_class, batch, pretty_name in [
    # ("google/flan-ul2", EncoderDecoderLM, 1, "Flan-UL2"),
    # ("google/flan-t5-xxl", EncoderDecoderLM, 1, r"Flan-T5\textsubscript{XXL}"),
    # ("google/flan-t5-xl", EncoderDecoderLM, 1, r"Flan-T5\textsubscript{XL}"),
    # ("google/flan-t5-large", EncoderDecoderLM, 32, r"Flan-T5\textsubscript{LARGE}"),
    # ("google/flan-t5-base", EncoderDecoderLM, 128, r"Flan-T5\textsubscript{BASE}"),
    # ("google/flan-t5-small", EncoderDecoderLM, 256, r"Flan-T5\textsubscript{SMALL}"),
    # ("t5-11b", EncoderDecoderLM, 1, r"T5\textsubscript{XXL}"),
    # ("t5-3b", EncoderDecoderLM, 1, r"T5\textsubscript{XL}"),
    # ("t5-large", EncoderDecoderLM, 32, r"T5\textsubscript{LARGE}"),
    # ("t5-base", EncoderDecoderLM, 128, r"T5\textsubscript{BASE}"),
    # ("t5-small", EncoderDecoderLM, 256, r"T5\textsubscript{SMALL}"),
    # ("facebook/opt-66b", LM, 1, r"OPT\textsubscript{66B}"),
    ("facebook/opt-30b", LM, 1, r"OPT\textsubscript{30B}"),
    ("facebook/opt-13b", LM, 1, r"OPT\textsubscript{13B}"),
    ("facebook/opt-6.7b", LM, 1, r"OPT\textsubscript{6.7B}"),
    ("facebook/opt-2.7b", LM, 1, r"OPT\textsubscript{2.7B}"),
    ("facebook/opt-1.3b", LM, 1, r"OPT\textsubscript{1.3B}"),
    ("facebook/opt-350m", LM, 128, r"OPT\textsubscript{350M}"),
    ("facebook/opt-125m", LM, 256, r"OPT\textsubscript{125M}"),
    ("facebook/opt-iml-30b", LM, 1, r"OPT-IML\textsubscript{30B}"),
    ("facebook/opt-iml-1.3b", LM, 1, r"OPT-IML\textsubscript{1.3B}"),
    ("facebook/opt-iml-max-30b", LM, 1, r"OPT-IML\textsubscript{MAX-30B}"),
    ("facebook/opt-iml-max-1.3b", LM, 1, r"OPT-IML\textsubscript{MAX-1.3B}"),
    ("davinci", OpenAI, None, r"GPT-3\textsubscript{davinci}")
]:
    os.makedirs(f"results/lm_lc/{os.path.basename(lm)}", exist_ok=True)
    scorer = None
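    # Score one relation type at a time; cached JSONL files let interrupted
    # runs resume without re-scoring, and the scorer is loaded lazily so fully
    # cached models never touch GPU memory.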
    for d in data:
        ppl_file = f"results/lm_lc/{os.path.basename(lm)}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl"
        if not os.path.exists(ppl_file):
            if scorer is None:
                if ppl_class is OpenAI:
                    scorer = ppl_class(OPENAI_API_KEY, model=lm)
                else:
                    scorer = ppl_class(lm, device_map='auto', low_cpu_mem_usage=True, offload_folder=f"./offload_folder/{os.path.basename(lm)}")
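            # Build the prompt: the relation instruction followed by its
            # prototypical example pairs as list items.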
            content = "\n".join([f'* ["{a}", "{b}"]' for a, b in d['prototypical_examples']])
            prompt_input = f"{prompt_dict[d['relation_type']]}:\n{content}"
            if ppl_class is LM:
                # Decoder-only LMs: score the full prompt with the candidate pair appended.
                prompt_input = [f'{prompt_input}\n* ["{x}", "{y}"]' for x, y in d['pairs']]
                ppl = scorer.get_perplexity(input_texts=prompt_input, batch=batch)
                output = [{"perplexity": p, "input": i, "output": ""} for p, i in zip(ppl, prompt_input)]
            elif ppl_class is EncoderDecoderLM:
                # Encoder-decoder LMs: the prompt is the encoder input and the
                # candidate pair is the decoder output being scored.
                prompt_output = [f'* ["{x}", "{y}"]' for x, y in d['pairs']]
                ppl = scorer.get_perplexity(input_texts=[prompt_input] * len(prompt_output), output_texts=prompt_output, batch=batch)
                output = [{"perplexity": p, "input": prompt_input, "output": o} for p, o in zip(ppl, prompt_output)]
            else:
                # OpenAI API: same concatenated input as decoder-only LMs, no batching.
                prompt_input = [f'{prompt_input}\n* ["{x}", "{y}"]' for x, y in d['pairs']]
                ppl = scorer.get_perplexity(input_texts=prompt_input)
                output = [{"perplexity": p, "input": i, "output": ""} for p, i in zip(ppl, prompt_input)]
            with open(ppl_file, "w") as f:
                f.write("\n".join([json.dumps(i) for i in output]))
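        # Rank candidate pairs by ascending perplexity and correlate the
        # predicted ranking with the gold ranks (Spearman's rho).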
        with open(ppl_file) as f:
            ppl = [json.loads(i)['perplexity'] for i in f.read().split("\n") if len(i) > 0]
        true_rank = d['ranks']
        assert len(true_rank) == len(ppl), f"Mismatch in number of examples: {len(true_rank)} vs {len(ppl)}"
        rank_map = {p: n for n, p in enumerate(sorted(ppl), 1)}
        prediction = [rank_map[p] for p in ppl]
        tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
        cor = tmp.corr("spearman").values[0, 1]
        full_result.append({"model": pretty_name, "relation_type": d['relation_type'], "correlation": cor})
    # Free the current model before loading the next one.
    del scorer
    gc.collect()
    torch.cuda.empty_cache()
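# Aggregate per-relation correlations into a model x relation-type table
# (plus a row-wise average) and export it as CSV, Markdown, and LaTeX.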
df = pd.DataFrame(full_result)
models = df['model'].unique()
print(df)
df = df.pivot(columns="relation_type", index="model", values="correlation")
df = df.T[models].T
df['average'] = df.mean(1)
df.to_csv("results/lm_lc/lm.csv")
df = (100 * df).round()
print(df.to_markdown())
print(df.to_latex(escape=False))