asahi417 committed · Commit c18e48a · 1 Parent(s): f015c01
README.md CHANGED
@@ -38,6 +38,8 @@ This dataset contains 5 different word analogy questions used in [Analogy Langua
 | `t_rex_relational_similarity` | 496/183 | 74/48 | 60/19 | [relbert/t_rex_relational_similarity](https://huggingface.co/datasets/relbert/t_rex_relational_similarity) |
 | `conceptnet_relational_similarity` | 1112/1192 | 19/17 | 18/16 | [relbert/conceptnet_relational_similarity](https://huggingface.co/datasets/relbert/conceptnet_relational_similarity) |
 | `nell_relational_similarity` | 400/600 | 5/7 | 4/6 | [relbert/nell_relational_similarity](https://huggingface.co/datasets/relbert/nell_relational_similarity) |
+| `scan` | 178/1616 | 3,36,136,10,45,78,15,21,55,120,153,91,28/3,36,136,10,45,78,15,21,55,120,153,91,28 | 2/2 | [relbert/scientific_and_creative_analogy](https://huggingface.co/datasets/relbert/scientific_and_creative_analogy) |
+
 
 ## Dataset Structure
 ### Data Instances
add_new_analogy_2.py ADDED
@@ -0,0 +1,66 @@
+import json
+import os
+from itertools import combinations
+from random import seed, randint, shuffle
+
+import pandas as pd
+from datasets import load_dataset
+
+
+def get_stats(filename):
+    with open(filename) as f:
+        _data = [json.loads(i) for i in f.read().splitlines()]
+    return len(_data), list(set([len(i['choice']) for i in _data])), len(list(set([i['prefix'] for i in _data])))
+
+
+def create_analogy(_data):
+    analogy_data = []
+    seed(12)
+    for i in _data:
+        source = []
+        target = []
+        for s, t in zip(i['source'], i['target']):
+            if s not in source and t not in target:
+                source.append(s)
+                target.append(t)
+        assert len(source) == len(target), f"{len(source)} != {len(target)}"
+        all_combinations = list(combinations(range(len(source)), 2))
+        for n, (q_h_id, q_t_id) in enumerate(all_combinations):
+            choice = [[target[x], target[y]] for m, (x, y) in enumerate(all_combinations) if m != n]
+            answer_id = randint(0, len(source) - 1)
+            choice = choice[:answer_id] + [[target[q_h_id], target[q_t_id]]] + choice[answer_id:]
+            assert choice[answer_id] == [target[q_h_id], target[q_t_id]]
+            analogy_data.append({
+                "stem": [source[q_h_id], source[q_t_id]],
+                "choice": choice,
+                "answer": answer_id,
+                "prefix": i["type"]
+            })
+    return analogy_data
+
+data = load_dataset("relbert/scientific_and_creative_analogy", split='test')
+data = create_analogy(data)
+data_m = [i for i in data if i['prefix'] == 'metaphor']
+data_s = [i for i in data if i['prefix'] != 'metaphor']
+seed(12)
+shuffle(data_m)
+shuffle(data_s)
+validation = data_s[:int(0.1 * len(data_s))] + data_m[:int(0.1 * len(data_m))]
+test = data_s[int(0.1 * len(data_s)):] + data_m[int(0.1 * len(data_m)):]
+os.makedirs("dataset/scan", exist_ok=True)
+with open("dataset/scan/valid.jsonl", "w") as f:
+    f.write("\n".join([json.dumps(i) for i in validation]))
+
+with open("dataset/scan/test.jsonl", "w") as f:
+    f.write("\n".join([json.dumps(i) for i in test]))
+
+t_size, t_num_choice, t_relation_type = get_stats("dataset/scan/test.jsonl")
+v_size, v_num_choice, v_relation_type = get_stats("dataset/scan/valid.jsonl")
+stat = [{
+    "name": "`scan`",
+    "Size (valid/test)": f"{v_size}/{t_size}",
+    "Num of choice (valid/test)": f"{','.join([str(n) for n in v_num_choice])}/{','.join([str(n) for n in t_num_choice])}",
+    "Num of relation group (valid/test)": f"{v_relation_type}/{t_relation_type}",
+    "Original Reference": "[relbert/scientific_and_creative_analogy](https://huggingface.co/datasets/relbert/scientific_and_creative_analogy)"
+}]
+print(pd.DataFrame(stat).to_markdown(index=False))
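
For readers skimming the diff, the toy sketch below illustrates the question construction performed by `create_analogy` above: every pair of source terms becomes a `stem`, the aligned target pairs of the remaining combinations become distractors, and the correct target pair is inserted at a random index that is stored as `answer`. The word pairs here are invented for illustration and are not taken from the dataset.

from itertools import combinations
from random import randint, seed

# Hypothetical source/target domains, standing in for one entry of
# relbert/scientific_and_creative_analogy (fields "source", "target", "type").
seed(12)
source = ["sun", "planet", "gravity"]
target = ["nucleus", "electron", "electromagnetic force"]

pairs = list(combinations(range(len(source)), 2))  # [(0, 1), (0, 2), (1, 2)]
n, (q_h_id, q_t_id) = 0, pairs[0]                  # query pair: ("sun", "planet")
# Distractors: the aligned target pairs of every other combination.
choice = [[target[x], target[y]] for m, (x, y) in enumerate(pairs) if m != n]
# Insert the correct target pair at a random index and record it as the answer.
answer_id = randint(0, len(source) - 1)
choice = choice[:answer_id] + [[target[q_h_id], target[q_t_id]]] + choice[answer_id:]
print({"stem": [source[q_h_id], source[q_t_id]], "choice": choice, "answer": answer_id})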
analogy_questions.py CHANGED
@@ -5,7 +5,7 @@ import datasets
 logger = datasets.logging.get_logger(__name__)
 _DESCRIPTION = """[Analogy Question](https://aclanthology.org/2021.acl-long.280/)"""
 _NAME = "analogy_questions"
-_VERSION = "2.0.3"
+_VERSION = "2.0.5"
 _CITATION = """
 @inproceedings{ushio-etal-2021-bert,
     title = "{BERT} is to {NLP} what {A}lex{N}et is to {CV}: Can Pre-Trained Language Models Identify Analogies?",
@@ -31,13 +31,14 @@ _URLS = {
     'bats': [f'{_URL}/bats/test.jsonl'],
     'google': [f'{_URL}/google/test.jsonl'],
     'sat': [f'{_URL}/sat/test.jsonl'],
-    'sat_metaphor': [f'{_URL}/sat_metaphor/test.jsonl'],
+    # 'sat_metaphor': [f'{_URL}/sat_metaphor/test.jsonl'],
     'sat_full': [f'{_URL}/sat/test.jsonl', f'{_URL}/sat/valid.jsonl'],
     'u2': [f'{_URL}/u2/test.jsonl'],
     'u4': [f'{_URL}/u4/test.jsonl'],
     "t_rex_relational_similarity": [f'{_URL}/t_rex_relational_similarity/test.jsonl'],
     "conceptnet_relational_similarity": [f'{_URL}/conceptnet_relational_similarity/test.jsonl'],
-    "nell_relational_similarity": [f'{_URL}/nell_relational_similarity/test.jsonl']
+    "nell_relational_similarity": [f'{_URL}/nell_relational_similarity/test.jsonl'],
+    'scan': [f'{_URL}/scan/test.jsonl'],
     },
     str(datasets.Split.VALIDATION): {
         'bats': [f'{_URL}/bats/valid.jsonl'],
@@ -48,7 +49,8 @@ _URLS = {
     "semeval2012_relational_similarity": [f'{_URL}/semeval2012_relational_similarity/valid.jsonl'],
     "t_rex_relational_similarity": [f'{_URL}/t_rex_relational_similarity/valid.jsonl'],
     "conceptnet_relational_similarity": [f'{_URL}/conceptnet_relational_similarity/valid.jsonl'],
-    "nell_relational_similarity": [f'{_URL}/nell_relational_similarity/valid.jsonl']
+    "nell_relational_similarity": [f'{_URL}/nell_relational_similarity/valid.jsonl'],
+    'scan': [f'{_URL}/scan/valid.jsonl'],
     }
 }
 
dataset/scan/test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
dataset/scan/valid.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
dataset/t_rex_relational_similarity/test.jsonl CHANGED
The diff for this file is too large to render. See raw diff
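
With the `scan` entries registered in `analogy_questions.py` and the new `dataset/scan/*.jsonl` files in place, the config should load like any other one. A minimal usage sketch, assuming the dataset is published under the repository id `relbert/analogy_questions` (inferred from `_NAME` and the relbert namespace used elsewhere in this repo):

from datasets import load_dataset

# Hypothetical usage of the config added in this commit; repository id assumed.
scan_valid = load_dataset("relbert/analogy_questions", "scan", split="validation")
scan_test = load_dataset("relbert/analogy_questions", "scan", split="test")
print(len(scan_valid), len(scan_test))  # 178 / 1616 according to the README row above
print(scan_test[0])                     # fields: stem, choice, answer, prefix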