import json
import os
import tarfile
import zipfile
import gzip
import requests
from glob import glob
import gdown

k = 10  # 3rd level: a pair ranked at least k positions below a positive pair is treated as its negative
m = 5  # 3rd level: only the top-ranked positive pairs (index <= m) serve as anchor pairs
top_n = 10  # number of top-rated positive pairs kept per relation for the 1st and 2nd level contrasts


def wget(url, cache_dir: str = './cache', gdrive_filename: str = None):
    """ Download a file (gdown for Google Drive URLs, requests otherwise) and uncompress it """
    os.makedirs(cache_dir, exist_ok=True)
    if url.startswith('https://drive.google.com'):
        assert gdrive_filename is not None, 'please provide a filename for the gdrive download'
        gdown.download(url, f'{cache_dir}/{gdrive_filename}', quiet=False)
        filename = gdrive_filename
    else:
        filename = os.path.basename(url)
        with open(f'{cache_dir}/{filename}', "wb") as f:
            r = requests.get(url)
            f.write(r.content)
    path = f'{cache_dir}/{filename}'
    if path.endswith('.tar.gz') or path.endswith('.tgz') or path.endswith('.tar'):
        if path.endswith('.tar'):
            tar = tarfile.open(path)
        else:
            tar = tarfile.open(path, "r:gz")
        tar.extractall(cache_dir)
        tar.close()
        os.remove(path)
    elif path.endswith('.zip'):
        with zipfile.ZipFile(path, 'r') as zip_ref:
            zip_ref.extractall(cache_dir)
        os.remove(path)
    elif path.endswith('.gz'):
        with gzip.open(path, 'rb') as f:
            with open(path.replace('.gz', ''), 'wb') as f_write:
                f_write.write(f.read())
        os.remove(path)


def get_training_data(return_validation_set: bool = False):
    """ Get RelBERT training data built from the SemEval-2012 platinum ratings.

    Returns
    -------
    A list of contrastive examples, one dict per relation (1st/2nd level) or per anchor pair (3rd level):
    {"positives": [...], "negatives": [...], "relation_type": "1b",
     "level": "parent" | "child" | "child_prototypical"}
    where each entry in "positives"/"negatives" is a word pair such as ('office', 'desk') at the
    parent/child levels, and an (anchor_pair, candidate_pair) tuple at the child_prototypical level.
    """
    cache_dir = 'cache'
    os.makedirs(cache_dir, exist_ok=True)
    remove_relation = None
    path_answer = f'{cache_dir}/Phase2Answers'
    path_scale = f'{cache_dir}/Phase2AnswersScaled'
    url = 'https://drive.google.com/u/0/uc?id=0BzcZKTSeYL8VYWtHVmxUR3FyUmc&export=download'
    filename = 'SemEval-2012-Platinum-Ratings.tar.gz'
    if not (os.path.exists(path_scale) and os.path.exists(path_answer)):
        wget(url, gdrive_filename=filename, cache_dir=cache_dir)
    # sort so that the two file lists line up deterministically (glob order is not guaranteed)
    files_answer = sorted(os.path.basename(i) for i in glob(f'{path_answer}/*.txt'))
    files_scale = sorted(os.path.basename(i) for i in glob(f'{path_scale}/*.txt'))
    assert files_answer == files_scale, f'files are not matched: {files_scale} vs {files_answer}'
    positives = {}
    negatives = {}
    all_relation_type = {}
    positives_score = {}
    # score_range = [90.0, 88.7]  # the absolute value of max/min prototypicality rating
    for i in files_scale:
        relation_id = i.split('-')[-1].replace('.txt', '')
        if remove_relation and int(relation_id[:-1]) in remove_relation:
            continue
        with open(f'{path_answer}/{i}', 'r') as f:
            lines_answer = [_l.replace('"', '').split('\t') for _l in f.read().split('\n')
                            if not _l.startswith('#') and len(_l)]
        relation_type = list(set(list(zip(*lines_answer))[-1]))
        assert len(relation_type) == 1, relation_type
        relation_type = relation_type[0]
        with open(f'{path_scale}/{i}', 'r') as f:
            # list of [score, "word_a:word_b"] parsed from the fixed-width rating file
            scales = [[float(_l[:5]), _l[6:].replace('"', '')] for _l in f.read().split('\n')
                      if not _l.startswith('#') and len(_l)]
        scales = sorted(scales, key=lambda _x: _x[0])
        # positive pairs sorted in descending order of prototypicality score
        positive_pairs = [[s, tuple(p.split(':'))] for s, p in filter(lambda _x: _x[0] > 0, scales)]
        positive_pairs = sorted(positive_pairs, key=lambda x: x[0], reverse=True)
        if return_validation_set:
            # the validation split keeps the positive pairs below the training cut-off
            positive_pairs = positive_pairs[min(top_n, len(positive_pairs)):]
            if len(positive_pairs) == 0:
                continue
        else:
            positive_pairs = positive_pairs[:min(top_n, len(positive_pairs))]
        positives_score[relation_id] = positive_pairs
        positives[relation_id] = list(list(zip(*positive_pairs))[1])
        negatives[relation_id] = [tuple(p.split(':')) for s, p in filter(lambda _x: _x[0] < 0, scales)]
        all_relation_type[relation_id] = relation_type
    parent = list(set([i[:-1] for i in all_relation_type.keys()]))

    # 1st level relation contrast (among parent relations)
    relation_pairs_1st = []
    for p in parent:
        child_positive = list(filter(lambda x: x.startswith(p), list(all_relation_type.keys())))
        child_negative = list(filter(lambda x: not x.startswith(p), list(all_relation_type.keys())))
        positive_pairs = []
        negative_pairs = []
        for c in child_positive:
            positive_pairs += positives[c]
            # negative_pairs += negatives[c]
        for c in child_negative:
            negative_pairs += positives[c]
            # negative_pairs += negatives[c]
        relation_pairs_1st += [{
            "positives": positive_pairs, "negatives": negative_pairs, "relation_type": p, "level": "parent"
        }]

    # 2nd level relation contrast (among child relations) & 3rd level relation contrast (within child relations)
    relation_pairs_2nd = []
    relation_pairs_3rd = []
    for p in all_relation_type.keys():
        positive_pairs = positives[p]
        negative_pairs = []
        # negative_pairs = negatives[p]
        for n in all_relation_type.keys():
            if p == n:
                continue
            negative_pairs += positives[n]
        relation_pairs_2nd += [{
            "positives": positive_pairs, "negatives": negative_pairs, "relation_type": p, "level": "child"
        }]
        negative_pairs = positive_pairs + negatives[p]
        for n, anchor in enumerate(positive_pairs):
            if n > m:
                continue
            for _n, posi in enumerate(positive_pairs):
                if n < _n and len(negative_pairs) > _n + k:
                    relation_pairs_3rd += [{
                        "positives": [(anchor, posi)],
                        "negatives": [(anchor, neg) for neg in negative_pairs[_n + k:]],
                        "relation_type": p,
                        "level": "child_prototypical"
                    }]
    return relation_pairs_1st + relation_pairs_2nd + relation_pairs_3rd


if __name__ == '__main__':
    os.makedirs('dataset', exist_ok=True)
    data_train = get_training_data(return_validation_set=False)
    with open('dataset/train.jsonl', 'w') as f_writer:
        f_writer.write('\n'.join([json.dumps(i) for i in data_train]))
    data_valid = get_training_data(return_validation_set=True)
    with open('dataset/valid.jsonl', 'w') as f_writer:
        f_writer.write('\n'.join([json.dumps(i) for i in data_valid]))
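
    # Illustrative sanity check (an addition, not part of the original pipeline): reload the
    # JSONL files just written and report how many contrastive examples exist per level.
    for split in ('train', 'valid'):
        with open(f'dataset/{split}.jsonl') as f_reader:
            records = [json.loads(line) for line in f_reader if line.strip()]
        level_counts = {}
        for record in records:
            level_counts[record['level']] = level_counts.get(record['level'], 0) + 1
        print(f'{split}: {len(records)} examples, per level: {level_counts}')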