relentless/experiments/analysis/get_error_in_top_bottom.py
import json
from random import uniform, seed
from statistics import mean
import pandas as pd
from datasets import load_dataset
from scipy.stats import spearmanr
# show full cell contents so the LaTeX markup in wide cells is not truncated
pd.set_option("display.max_colwidth", 1000)
# baseline LMs to report (model id -> LaTeX display name)
target = {
    "flan-t5-xxl": r"Flan-T5\textsubscript{XXL}",
    "opt-13b": r"OPT\textsubscript{13B}",
    "davinci": r"GPT-3\textsubscript{davinci}"
}
pretty_name = {
    'is competitor/rival of': "Rival",
    'is friend/ally of': "Ally",
    'is influenced by': "Inf",
    'is known for': "Know",
    'is similar to': "Sim",
    'average': "Avg",
}
# def get_iaa(scores_all):
#     avg = [[mean(__s for _m, __s in enumerate(_s) if _m != _n) for _s in scores_all] for _n in range(7)]
#     single = [[_s[_n] for _s in scores_all] for _n in range(7)]
#     tmptmp = []
#     ps = []
#     for a, s in zip(avg, single):
#         c = round(pd.DataFrame([a, s]).T.corr("spearman").values[0][1] * 100, 1)
#         ps.append(spearmanr(a, s)[1] < 0.05)
#         # if str(c) == "nan":
#         #     seed(0)
#         #     c_tmp = []
#         #     for _ in range(1000):
#         #         s_tmp = [_s + uniform(-0.5, 0.5) for _s in s]
#         #         c_tmp.append(round(pd.DataFrame([a, s_tmp]).T.corr("spearman").values[0][1] * 100, 1))
#         #     c = mean(c_tmp)
#         tmptmp.append(c)
#     list(zip(tmptmp, ps))
#     return mean(tmptmp)
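# (the disabled get_iaa above appears to compute a leave-one-annotator-out agreement:
#  for each of the 7 annotators, the Spearman correlation between that annotator's
#  scores and the mean score of the remaining annotators, averaged over annotators)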
def format_text(_x, _y, _z):
    # colour the best of the three scores blue and the worst red (LaTeX markup)
    bf = max(_x, _y, _z)
    wf = str(min(_x, _y, _z))
    _x = r"\textcolor{blue}{" + str(_x) + "}" if _x == bf else str(_x)
    _y = r"\textcolor{blue}{" + str(_y) + "}" if _y == bf else str(_y)
    _z = r"\textcolor{blue}{" + str(_z) + "}" if _z == bf else str(_z)
    _x = r"\textcolor{red}{" + str(_x) + "}" if _x == wf else str(_x)
    _y = r"\textcolor{red}{" + str(_y) + "}" if _y == wf else str(_y)
    _z = r"\textcolor{red}{" + str(_z) + "}" if _z == wf else str(_z)
    return f"{_x} / {_y} / {_z}"
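# usage sketch (hypothetical scores): format_text(41.2, 35.0, 28.9)
# returns '\textcolor{blue}{41.2} / 35.0 / \textcolor{red}{28.9}',
# i.e. the best value in blue and the worst in red, shown as top / middle / bottom.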
data = load_dataset("cardiffnlp/relentless_full", split="test")
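# each record is expected to carry at least 'relation_type' and the gold candidate
# ranking 'ranks' (plus per-annotator 'scores_all', used only by the disabled IAA code)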
table_full = []
for prompt in ['qa', 'lc']:
    output = []
    for d in data:
        for i in target.keys():
            with open(f"experiments/results/lm_{prompt}/{i}/ppl.{d['relation_type'].replace(' ', '_').replace('/', '__')}.jsonl") as f:
                ppl = [json.loads(x)['perplexity'] for x in f.read().split("\n") if len(x) > 0]
            # rank candidates by perplexity (rank 1 = lowest perplexity)
            rank_map = {p: n for n, p in enumerate(sorted(ppl), 1)}
            prediction = [rank_map[p] for p in ppl]
            # get index boundaries of the top / middle / bottom slices of the gold ranking
            total_n = len(d['ranks'])
            p = int(total_n / 3)
            top_n = [0, int(total_n * p / 100) + 1]
            top_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[top_n[0]: top_n[1]]]
            bottom_n = [total_n - int(total_n * p / 100), total_n]
            bottom_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[bottom_n[0]: bottom_n[1]]]
            mid_n = [top_n[1], bottom_n[0]]
            mid_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[mid_n[0]: mid_n[1]]]
            # top
            top_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[top_n[0]: top_n[1]]]
            top_acc = len(set(top_pred).intersection(set(top_label))) / len(top_label) * 100
            # middle
            mid_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[mid_n[0]: mid_n[1]]]
            mid_acc = len(set(mid_pred).intersection(set(mid_label))) / len(mid_label) * 100
            # bottom
            bottom_pred = [x for x, _ in sorted(enumerate(prediction), key=lambda x: x[1])[bottom_n[0]: bottom_n[1]]]
            bottom_acc = len(set(bottom_pred).intersection(set(bottom_label))) / len(bottom_label) * 100
            output.append({"model": i, "relation_type": d['relation_type'], "top": round(top_acc, 1), "bottom": round(bottom_acc, 1), "middle": round(mid_acc, 1)})
    # macro-average over relation types for each model
    for i in target.keys():
        output.append({
            "model": i, "relation_type": "average",
            "top": round(mean([o['top'] for o in output if o['model'] == i]), 0),
            "bottom": round(mean([o['bottom'] for o in output if o['model'] == i]), 0),
            "middle": round(mean([o['middle'] for o in output if o['model'] == i]), 0)
        })
    df = pd.DataFrame(output)
    df['accuracy'] = [format_text(x, y, z) for x, y, z in zip(df['top'], df['middle'], df['bottom'])]
    table = df.pivot(index="relation_type", columns="model", values="accuracy")
    table.columns.name = None
    table.index.name = None
    table = table[list(target.keys())]
    table.columns = [target[i] for i in table.columns]
    table.index = [pretty_name[i] for i in table.index]
    table = table.T[list(pretty_name.values())]
    table = table.T
    table = table.to_latex(escape=False)
    table = table.split(r"\midrule")[1].split(r"\bottomrule")[0]
    table = r"\multicolumn{4}{l}{\emph{" + prompt.upper() + r" template}} \\ " + table
    table_full.append(table)
table_full = "\midrule".join(table_full)
#
# output = []
# top_all = []
# mid_all = []
# bottom_all = []
#
# for d in data:
#     if d['relation_type'] == "is influenced by":
#         break
#     total_n = len(d['ranks'])
#     p = int(total_n / 3)
#     top_n = [0, int(total_n * p / 100) + 1]
#     top_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[top_n[0]: top_n[1]]]
#     bottom_n = [total_n - int(total_n * p / 100), total_n]
#     bottom_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[bottom_n[0]: bottom_n[1]]]
#     mid_n = [top_n[1], bottom_n[0]]
#     mid_label = [x for x, _ in sorted(enumerate(d['ranks']), key=lambda x: x[1])[mid_n[0]: mid_n[1]]]
#
#     output.append({
#         "model": "IAA", "relation_type": d['relation_type'],
#         "top": round(get_iaa([d['scores_all'][_i] for _i in top_label]), 1),
#         "bottom": round(get_iaa([d['scores_all'][_i] for _i in mid_label]), 1),
#         "middle": round(get_iaa([d['scores_all'][_i] for _i in bottom_label]), 1)
#     })
#     top_all += [d['scores_all'][_i] for _i in top_label]
#     mid_all += [d['scores_all'][_i] for _i in mid_label]
#     bottom_all += [d['scores_all'][_i] for _i in bottom_label]
# output.append({
#     "model": "IAA", "relation_type": "average",
#     "top": round(get_iaa(top_all), 1),
#     "bottom": round(get_iaa(mid_all), 1),
#     "middle": round(get_iaa(bottom_all), 1)
# })
#
# df = pd.DataFrame(output)
# df['accuracy'] = [format_text(x, y, z) for x, y, z in zip(df['top'], df['middle'], df['bottom'])]
# table = df.pivot(index="relation_type", columns="model", values="accuracy")
# table.columns.name = None
# table.index.name = None
# table.index = [pretty_name[i] for i in table.index]
# table = table.T[list(pretty_name.values())]
# table = table.to_latex(escape=False)
# table = table.split(r"\midrule")[1].split(r"\bottomrule")[0]
# # table = r"\multicolumn{4}{l}{\emph{" + prompt.upper() + r" template}} \\ " + table
# table_full = table_full + table
print()
print()
print(table_full)
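# for reference, the printed table_full is the raw body of a LaTeX table, roughly of
# the form below (hypothetical values):
#   \multicolumn{4}{l}{\emph{QA template}} \\
#   Rival & \textcolor{blue}{41.2} / 35.0 / \textcolor{red}{28.9} & ... & ... \\
#   ...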