import json
import os
from itertools import combinations
from random import shuffle, seed
import pandas as pd
from datasets import load_dataset
def get_stats(filename):
    """Summarize a JSONL analogy file.

    Returns a tuple of (number of instances, distinct choice-list sizes,
    number of distinct relation prefixes).
    """
    with open(filename) as f:
        records = [json.loads(line) for line in f.read().splitlines()]
    choice_sizes = {len(record['choice']) for record in records}
    prefixes = {record['prefix'] for record in records}
    return len(records), list(choice_sizes), len(prefixes)
def lexical_overlap(word_a, word_b):
    """Return True if the two phrases share at least one token, case-insensitively.

    Tokens are whitespace-separated words. Uses `str.split()` (not
    `split(" ")`) so repeated/leading spaces do not yield empty-string
    tokens: the original version treated two phrases that each contained
    a double space as overlapping because "" == "".
    """
    tokens_a = {token.lower() for token in word_a.split()}
    tokens_b = {token.lower() for token in word_b.split()}
    # Set disjointness replaces the original O(n*m) nested loop.
    return not tokens_a.isdisjoint(tokens_b)
def create_analogy(_data, output_path, negative_per_relation, instance_per_relation=100):
    """Build a multiple-choice analogy dataset and write it as JSONL.

    Each line is {"stem", "choice", "answer", "prefix"}: the stem is a word
    pair, choice[0] is the correct analogous pair from the same relation,
    choice[1] is that pair reversed, and the rest are pairs sampled from
    the other relations.

    Parameters
    ----------
    _data : HuggingFace dataset with 'relation_type' and 'positives' columns
        (positives: sequence of 2-element arrays with a `.tolist()` method).
    output_path : destination JSONL file; parent directories are created.
    negative_per_relation : negatives sampled from each *other* relation.
    instance_per_relation : cap on analogy instances per relation.
    """
    df = _data.to_pandas()
    # Seed once up front so the whole output file is reproducible. The
    # original seeded only inside the truncation branch, which left the
    # negative-sampling shuffles dependent on the caller's RNG state.
    seed(42)
    # Hoist the per-relation positive pairs out of the loops: the original
    # re-filtered the dataframe for every (instance, other-relation) pair.
    # Assumes relation_type is unique per row, as values[0] did implicitly.
    positives_by_relation = {
        row['relation_type']: [pair.tolist() for pair in row['positives']]
        for _, row in df.iterrows()
    }
    analogy_data = []
    for _, i in df.iterrows():
        # Candidate (stem, choice) pairs whose words share no tokens, to
        # avoid trivially solvable instances.
        target = [(q.tolist(), c.tolist()) for q, c in combinations(i['positives'], 2)
                  if not any(lexical_overlap(c[0], y) or lexical_overlap(c[1], y) for y in q)]
        if not target:
            continue
        if len(target) > instance_per_relation:
            shuffle(target)
            target = target[:instance_per_relation]
        for q, c in target:
            negative = []
            for relation, pairs in positives_by_relation.items():
                if relation == i['relation_type']:
                    continue
                sampled = list(pairs)  # copy so shuffling never mutates the cache
                shuffle(sampled)
                negative += sampled[:negative_per_relation]
            analogy_data.append({
                "stem": q,
                "choice": [c, c[::-1]] + negative,  # correct, reversed-correct, negatives
                "answer": 0,  # index of the correct choice
                "prefix": i["relation_type"]
            })
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, "w") as f:
        f.write("\n".join(json.dumps(entry) for entry in analogy_data))
# One summary row per dataset; rendered as a markdown table at the end.
stat = []

###################################################################
# create analogy from `relbert/semeval2012_relational_similarity` #
###################################################################
# This split ships its own negatives, so instances are assembled directly
# rather than via `create_analogy`. Only the validation split is used, and
# the file is only built if it does not already exist.
if not os.path.exists("dataset/semeval2012_relational_similarity/valid.jsonl"):
    data = load_dataset("relbert/semeval2012_relational_similarity", split="validation")
    # NOTE(review): "answer": 2 assumes every instance has exactly two
    # negatives (the correct pair is appended last) -- confirm against the dataset.
    analogy_data = [{
        "stem": i['positives'][0], "choice": i["negatives"] + [i['positives'][1]], "answer": 2, "prefix": i["relation_type"]
    } for i in data]
    os.makedirs("dataset/semeval2012_relational_similarity", exist_ok=True)
    with open("dataset/semeval2012_relational_similarity/valid.jsonl", "w") as f:
        f.write("\n".join([json.dumps(i) for i in analogy_data]))
v_size, v_num_choice, v_relation_type = get_stats("dataset/semeval2012_relational_similarity/valid.jsonl")
stat.append({
    "name": "`semeval2012_relational_similarity`",
    "Size (valid/test)": f"{v_size}/-",
    "Num of choice (valid/test)": f"{','.join([str(n) for n in v_num_choice])}/-",
    "Num of relation group (valid/test)": f"{v_relation_type}/-",
    "Original Reference": "[relbert/semeval2012_relational_similarity](https://huggingface.co/datasets/relbert/semeval2012_relational_similarity)"
})

#############################################################
# create analogy from `relbert/t_rex_relational_similarity` #
#############################################################
# Different dataset configs per split; the test split takes two negatives
# per other relation, the validation split one.
data = load_dataset("relbert/t_rex_relational_similarity", "filter_unified.min_entity_1_max_predicate_100", split="test")
create_analogy(data, "dataset/t_rex_relational_similarity/test.jsonl", negative_per_relation=2)
data = load_dataset("relbert/t_rex_relational_similarity", "filter_unified.min_entity_4_max_predicate_100", split="validation")
create_analogy(data, "dataset/t_rex_relational_similarity/valid.jsonl", negative_per_relation=1)
t_size, t_num_choice, t_relation_type = get_stats("dataset/t_rex_relational_similarity/test.jsonl")
v_size, v_num_choice, v_relation_type = get_stats("dataset/t_rex_relational_similarity/valid.jsonl")
stat.append({
    "name": "`t_rex_relational_similarity`",
    "Size (valid/test)": f"{v_size}/{t_size}",
    "Num of choice (valid/test)": f"{','.join([str(n) for n in v_num_choice])}/{','.join([str(n) for n in t_num_choice])}",
    "Num of relation group (valid/test)": f"{v_relation_type}/{t_relation_type}",
    "Original Reference": "[relbert/t_rex_relational_similarity](https://huggingface.co/datasets/relbert/t_rex_relational_similarity)"
})

##################################################################
# create analogy from `relbert/conceptnet_relational_similarity` #
##################################################################
data = load_dataset("relbert/conceptnet_relational_similarity", split="test")
create_analogy(data, "dataset/conceptnet_relational_similarity/test.jsonl", negative_per_relation=1)
data = load_dataset("relbert/conceptnet_relational_similarity", split="validation")
create_analogy(data, "dataset/conceptnet_relational_similarity/valid.jsonl", negative_per_relation=1)
t_size, t_num_choice, t_relation_type = get_stats("dataset/conceptnet_relational_similarity/test.jsonl")
v_size, v_num_choice, v_relation_type = get_stats("dataset/conceptnet_relational_similarity/valid.jsonl")
stat.append({
    "name": "`conceptnet_relational_similarity`",
    "Size (valid/test)": f"{v_size}/{t_size}",
    "Num of choice (valid/test)": f"{','.join([str(n) for n in v_num_choice])}/{','.join([str(n) for n in t_num_choice])}",
    "Num of relation group (valid/test)": f"{v_relation_type}/{t_relation_type}",
    "Original Reference": "[relbert/conceptnet_relational_similarity](https://huggingface.co/datasets/relbert/conceptnet_relational_similarity)"
})

############################################################
# create analogy from `relbert/nell_relational_similarity` #
############################################################
# (banner fixed: this section builds the NELL dataset; the original comment
# was copy-pasted from the ConceptNet section above)
data = load_dataset("relbert/nell_relational_similarity", split="test")
create_analogy(data, "dataset/nell_relational_similarity/test.jsonl", negative_per_relation=1)
data = load_dataset("relbert/nell_relational_similarity", split="validation")
create_analogy(data, "dataset/nell_relational_similarity/valid.jsonl", negative_per_relation=1)
t_size, t_num_choice, t_relation_type = get_stats("dataset/nell_relational_similarity/test.jsonl")
v_size, v_num_choice, v_relation_type = get_stats("dataset/nell_relational_similarity/valid.jsonl")
stat.append({
    "name": "`nell_relational_similarity`",
    "Size (valid/test)": f"{v_size}/{t_size}",
    "Num of choice (valid/test)": f"{','.join([str(n) for n in v_num_choice])}/{','.join([str(n) for n in t_num_choice])}",
    "Num of relation group (valid/test)": f"{v_relation_type}/{t_relation_type}",
    "Original Reference": "[relbert/nell_relational_similarity](https://huggingface.co/datasets/relbert/nell_relational_similarity)"
})

# Render the collected per-dataset statistics as a markdown table.
print(pd.DataFrame(stat).to_markdown(index=False))