rojagtap committed
Commit
6e94fbe
1 Parent(s): d67bc5b

Create subsets to separate long answers, short answers, and either, where available


In the original dataset, not all questions have an answer available: a substantial number of questions have no answer in the context. To train a model without biasing it toward "not available" answers, it is useful to have subsets that contain only answered questions.

Hence we create 3 subsets:
1. long: will only contain samples with a long answer
2. short: will only contain samples with a short answer
3. either: will contain one of the long/short answers, based on availability; when both are present, the long answer is preferred (see the sketch below)
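As a minimal sketch of the selection rule for the "either" subset (the field names follow the schemas in filternq.py below; the sample values here are made up):

# toy sample where both answer types are available
sample = {
    "short_answers": ["42"],
    "long_answer_candidates": ["The answer to everything is 42."],
    "long_answer_candidate_index": 0,
}

answer = ""
if sample["short_answers"][0]:
    answer = sample["short_answers"][0]  # short answer found
if sample["long_answer_candidate_index"] != -1:
    # the long answer overwrites the short one when both exist
    answer = sample["long_answer_candidates"][sample["long_answer_candidate_index"]]

print(answer)  # -> "The answer to everything is 42."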

Files changed (1)
  1. filternq.py +148 -0
filternq.py ADDED
@@ -0,0 +1,148 @@
+ """
+ Create three subsets that contain only answered questions:
+
+ long:
+
+ {
+     "document": "",
+     "question": "",
+     "long_answer_candidates": ["", "", ""],
+     "long_answer_candidate_index": 0
+ }
+
+ short:
+
+ {
+     "document": "",
+     "question": "",
+     "short_answer": ""
+ }
+
+ either:
+
+ {
+     "document": "",
+     "question": "",
+     "answer": ""
+ }
+ """
+
+ import os
+ import sys
+
+ import jsonlines
+ from datasets import load_dataset
+ from huggingface_hub import HfApi
+
+
+ def filter_answered(raw, short_path, long_path, either_path):
+     # make sure the output directories exist before opening the files
+     for path in (short_path, long_path, either_path):
+         os.makedirs(os.path.dirname(path), exist_ok=True)
+
+     fps = open(short_path, "a")
+     writers = jsonlines.Writer(fps)
+
+     fpl = open(long_path, "a")
+     writerl = jsonlines.Writer(fpl)
+
+     fpe = open(either_path, "a")
+     writere = jsonlines.Writer(fpe)
+
+     count = 0
+     long = []
+     short = []
+     either = []
+     for sample in raw:
+         try:
+             answer = ""
+
+             if sample["short_answers"][0]:
+                 answer = sample["short_answers"][0]
+
+                 short.append({
+                     "document": sample["document"],
+                     "question": sample["question"],
+                     "short_answer": answer
+                 })
+
+             if sample["long_answer_candidate_index"] != -1:
+                 # the long answer takes precedence over the short answer
+                 answer = sample["long_answer_candidates"][sample["long_answer_candidate_index"]]
+
+                 long.append({
+                     "document": sample["document"],
+                     "question": sample["question"],
+                     "long_answer_candidates": sample["long_answer_candidates"],
+                     "long_answer_candidate_index": sample["long_answer_candidate_index"]
+                 })
+
+             if answer:
+                 count += 1  # count only samples that have an answer
+
+                 either.append({
+                     "document": sample["document"],
+                     "question": sample["question"],
+                     "answer": answer
+                 })
+
+         except Exception as ex:
+             print("Exception: " + str(ex))
+
+         # flush the buffers to disk every 1000 answered samples
+         if count > 0 and count % 1000 == 0:
+             if either:
+                 writere.write_all(either)
+                 either = []
+
+             if short:
+                 writers.write_all(short)
+                 short = []
+
+             if long:
+                 writerl.write_all(long)
+                 long = []
+
+             print("Done: " + str(count), end="\r")
+
+     # flush whatever is left once the stream is exhausted
+     if either:
+         writere.write_all(either)
+
+     if short:
+         writers.write_all(short)
+
+     if long:
+         writerl.write_all(long)
+
+     writere.close()
+     fpe.close()
+
+     writers.close()
+     fps.close()
+
+     writerl.close()
+     fpl.close()
+
+
+ if __name__ == "__main__":
+     if len(sys.argv) < 2:
+         raise AttributeError("Missing required argument: repository id")
+
+     repo = sys.argv[1]
+
+     api = HfApi()
+
+     train_data = load_dataset(repo, split="train", streaming=True)
+     filter_answered(raw=train_data, short_path="data/short/train.jsonl", long_path="data/long/train.jsonl", either_path="data/either/train.jsonl")
+
+     val_data = load_dataset(repo, split="validation", streaming=True)
+     filter_answered(raw=val_data, short_path="data/short/validation.jsonl", long_path="data/long/validation.jsonl", either_path="data/either/validation.jsonl")
+
+     api.upload_folder(
+         folder_path="data/",
+         repo_id=repo,
+         repo_type="dataset",
+         multi_commits=True,
+         multi_commits_verbose=True
+     )