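# Build the LaTeX accuracy table (top / middle / bottom ranking bins) for each language model
# and relation type on the cardiffnlp/relentless_full test split.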
import json
from random import uniform, seed
from statistics import mean

import pandas as pd
from datasets import load_dataset
from scipy.stats import spearmanr

# Show full cell contents when printing pandas objects.
pd.set_option("display.max_colwidth", 1000)

# LaTeX display names for the evaluated language models (table columns).
target = {
    "flan-t5-xxl": r"Flan-T5\textsubscript{XXL}",
    "opt-13b": r"OPT\textsubscript{13B}",
    "davinci": r"GPT-3\textsubscript{davinci}"
}
# Short labels for the relation types (table rows).
pretty_name = {
    'is competitor/rival of': "Rival",
    'is friend/ally of': "Ally",
    'is influenced by': "Inf",
    'is known for': "Know",
    'is similar to': "Sim",
    'average': "Avg",
}


def format_text(_x, _y, _z):
    """Format the three bin accuracies as 'top / middle / bottom', coloring the best value blue and the worst red."""
    bf = max(_x, _y, _z)
    wf = str(min(_x, _y, _z))
    _x = r"\textcolor{blue}{" + str(_x) + "}" if _x == bf else str(_x)
    _y = r"\textcolor{blue}{" + str(_y) + "}" if _y == bf else str(_y)
    _z = r"\textcolor{blue}{" + str(_z) + "}" if _z == bf else str(_z)
    _x = r"\textcolor{red}{" + str(_x) + "}" if _x == wf else str(_x)
    _y = r"\textcolor{red}{" + str(_y) + "}" if _y == wf else str(_y)
    _z = r"\textcolor{red}{" + str(_z) + "}" if _z == wf else str(_z)
    return f"{_x} / {_y} / {_z}"

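
# Evaluate every model on every relation type under both prompt templates ('qa' and 'lc').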
data = load_dataset("cardiffnlp/relentless_full", split="test")
table_full = []
for prompt in ['qa', 'lc']:
    output = []
    for d in data:
        for i in target.keys():
            # Rank the candidate pairs of this relation type by ascending perplexity (rank 1 = lowest perplexity).
            with open(f"experiments/results/lm_{prompt}/{i}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl") as f:
                ppl = [json.loads(x)['perplexity'] for x in f.read().split("\n") if len(x) > 0]
            rank_map = {p: n for n, p in enumerate(sorted(ppl), 1)}
            prediction = [rank_map[p] for p in ppl]

            # Bin boundaries over the gold ranking: top, middle, and bottom segments.
            total_n = len(d['ranks'])
            p = int(total_n / 3)
            top_n = [0, int(total_n * p / 100) + 1]
            top_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[top_n[0]: top_n[1]]]
            bottom_n = [total_n - int(total_n * p / 100), total_n]
            bottom_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[bottom_n[0]: bottom_n[1]]]
            mid_n = [top_n[1], bottom_n[0]]
            mid_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[mid_n[0]: mid_n[1]]]

            # Accuracy per bin: share of the gold bin that the model's ranking also places in that bin.
            top_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[top_n[0]: top_n[1]]]
            top_acc = len(set(top_pred).intersection(set(top_label))) / len(top_label) * 100

            mid_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[mid_n[0]: mid_n[1]]]
            mid_acc = len(set(mid_pred).intersection(set(mid_label))) / len(mid_label) * 100

            bottom_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[bottom_n[0]: bottom_n[1]]]
            bottom_acc = len(set(bottom_pred).intersection(set(bottom_label))) / len(bottom_label) * 100

            output.append({"model": i, "relation_type": d['relation_type'], "top": round(top_acc, 1), "bottom": round(bottom_acc, 1), "middle": round(mid_acc, 1)})
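
    # Append a per-model average row over the relation types for this prompt template.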
    for i in target.keys():
        output.append({
            "model": i, "relation_type": "average",
            "top": round(mean([o['top'] for o in output if o['model'] == i]), 0),
            "bottom": round(mean([o['bottom'] for o in output if o['model'] == i]), 0),
            "middle": round(mean([o['middle'] for o in output if o['model'] == i]), 0)
        })
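
    # Pivot into a relation-by-model table and keep only the LaTeX body rows.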
    df = pd.DataFrame(output)
    df['accuracy'] = [format_text(x, y, z) for x, y, z in zip(df['top'], df['middle'], df['bottom'])]
    table = df.pivot(index="relation_type", columns="model", values="accuracy")
    table.columns.name = None
    table.index.name = None
    table = table[target.keys()]
    table.columns = [target[i] for i in table.columns]
    table.index = [pretty_name[i] for i in table.index]
    table = table.T[list(pretty_name.values())]
    table = table.T
    table = table.to_latex(escape=False)
    table = table.split(r"\midrule")[1].split(r"\bottomrule")[0]
    table = r"\multicolumn{4}{l}{\emph{" + prompt.upper() + r" template}} \\ " + table
    table_full.append(table)

table_full = r"\midrule".join(table_full)
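
# Print the assembled LaTeX rows: the 'qa' block followed by the 'lc' block, separated by \midrule.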
print()
print()
print(table_full)