Dataset: cardiffnlp/relentless
Languages: English
Size: < 1K
Libraries: Datasets
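The script below evaluates gpt-3.5-turbo and gpt-4 on the RelEntLess test split: for each relation type it prompts the model to sort candidate entity pairs by how well they instantiate the relation, parses the returned ordering, and reports the Spearman correlation against the gold ranks (together with the percentage of pairs the model returned in parseable form) as a LaTeX table.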
import os
from time import sleep
import pandas as pd
import openai
from datasets import load_dataset

# Load the RelEntLess test split and configure the OpenAI client (legacy, pre-v1 SDK).
data = load_dataset("cardiffnlp/relentless", split="test")
openai.api_key = os.getenv("OPENAI_API_KEY")
pretty_name = {
    "competitor/rival of": "Rival",
    "friend/ally of": "Ally",
    "influenced by": "Inf",
    "known for": "Know",
    "similar to": "Sim"
}
pretty_model = {"gpt-3.5-turbo": "GPT-3.5", "gpt-4": "GPT-4"}


def get_reply(model, text):
    """Query the chat model, retrying every 10 seconds until the API call succeeds."""
    while True:
        try:
            reply = openai.ChatCompletion.create(model=model, messages=[{"role": "user", "content": text}])
            break
        except Exception as e:
            # Broad retry: rate limits are the common case, but any API error is retried.
            print(f'API error ({e}). Waiting for 10 seconds.')
            sleep(10)
    return reply['choices'][0]['message']['content']


# Human-readable phrasing of each relation type, used to fill the prompt template.
prompt_dict = {
    "friend/ally of": "entities that are friends or allies",
    "competitor/rival of": "entities that are competitors or rivals",
    "known for": "what entities are known for",
    "influenced by": "what has influenced different entities",
    "similar to": "entities that are similar"
}


def get_prompt(_data):
    """Build the ranking prompt: prototypical reference pairs first, then the pairs to sort."""
    ref = "\n".join([str(_i) for _i in _data["prototypical_examples"]])
    prefix = f'Consider the following reference list of {prompt_dict[_data["relation_type"]]}, \n{ref}\n' \
             f'Now sort the entity pairs from the following list based on the extent to which they also represent ' \
             f'{prompt_dict[_data["relation_type"]]} in descending order. Do not include the pairs from the reference list. ' \
             f'The output should contain all the entity pairs from the following list and no duplicates:\n'
    x = "\n".join([str(_i) for _i in _data["pairs"]])
    return f'{prefix}\n\n{x}'
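
# Illustrative sketch of the prompt get_prompt produces; the relation type and
# pairs here are made-up placeholders, not actual dataset entries:
#
#   Consider the following reference list of entities that are similar,
#   ['car', 'automobile']
#   Now sort the entity pairs from the following list based on the extent to
#   which they also represent entities that are similar in descending order. ...
#
#   ['cat', 'feline']
#   ['table', 'chair']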


if __name__ == '__main__':
    os.makedirs('results/chat', exist_ok=True)

    full_result = []
    valid_count = []
    for target_model in ['gpt-3.5-turbo', 'gpt-4']:

        for d in data:
            output_file = f"results/chat/{target_model}.{d['relation_type'].replace(' ', '_').replace('/', '-')}.json"
            # Query the API only once per (model, relation type); cached replies
            # on disk are reused on subsequent runs.
            if not os.path.exists(output_file):
                print(target_model, d['relation_type'])
                i = get_prompt(d)
                out = get_reply(target_model, i)
                with open(output_file, 'w') as f:
                    f.write(out)
            with open(output_file) as f:
                string_pairs = [str(_i) for _i in d["pairs"]]
                out = [i for i in f.read().split("\n") if len(i) > 0]
            # Parse the reply: keep the first bracketed pair on each line, normalise it
            # via eval/str so it matches string_pairs, and skip duplicates and junk lines.
            new_out = []
            for i in out:
                try:
                    i = "[" + i.replace("],", "]").split("[")[1]
                    i = i.split("]")[0] + "]"
                    i = str(eval(i))
                    if i not in new_out:
                        new_out.append(i)
                except Exception:
                    continue
            # Pairs the model failed to return are appended at the bottom of the ranking;
            # the share of pairs it did return is logged as the "valid" percentage.
            ex = [i for i in string_pairs if i not in new_out]
            valid_n = len(d['pairs']) - len(ex)
            valid_count.append({"model": target_model, "relation_type": d['relation_type'], "valid": 100 * valid_n / len(d['pairs'])})
            new_out = new_out + ex
            maps = {x: n + 1 for n, x in enumerate(new_out)}
            prediction = [maps[i] for i in string_pairs]

            # Spearman correlation between the gold ranks and the predicted ordering.
            true_rank = d['ranks']
            tmp = pd.DataFrame([true_rank, prediction], index=['true', 'pred']).T
            cor = tmp.corr(method="spearman").values[0, 1]
            full_result.append({"model": target_model, "relation_type": d['relation_type'], "correlation": cor})

    # Correlation table: one row per model, one column per relation type, in percent.
    df = pd.DataFrame(full_result)
    df = df.pivot(columns="relation_type", index="model", values="correlation")
    df['Avg'] = df.mean(axis=1)
    df = (df * 100).round(1)

    # Matching table of "valid" percentages (pairs returned in parseable form).
    df_cnt = pd.DataFrame(valid_count)
    df_cnt = df_cnt.pivot(index='model', columns='relation_type', values='valid')
    df_cnt['Avg'] = df_cnt.mean(axis=1)
    df_cnt = df_cnt.round(1)

    # Merge into "correlation (valid%)" cells and print as a LaTeX table.
    df = pd.DataFrame(
        df.astype(str).values + " (" + df_cnt.astype(str).values + "%)",
        columns=[pretty_name.get(c, c) for c in df.columns],
        index=df.index
    )
    df.index = [pretty_model[m] for m in df.index]
    df = df.T
    print(df.to_latex())
    # df.to_csv("results/chat/chat.csv")
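
For reference, a minimal sketch of how to inspect the records the script consumes. The field names (relation_type, prototypical_examples, pairs, ranks) are taken from their usage in the script above; the comments describe the expected shapes, not verified values.

from datasets import load_dataset

# Peek at one record of the test split used by the script above.
data = load_dataset("cardiffnlp/relentless", split="test")
d = data[0]
print(d["relation_type"])                # one of the five relation types
print(d["prototypical_examples"][:2])    # reference pairs shown in the prompt
print(len(d["pairs"]), len(d["ranks"]))  # candidate pairs and their gold ranks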