"""
{
    "document": "",
    "question": "",
    "long_answer_candidates": ["", "", ""],
    "long_answer_candidate_index": 0,
    "short_answers": ["", "", ""]
}
"""


import os
import sys

import jsonlines
from datasets import load_dataset
from huggingface_hub import HfApi


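# How clean() reconstructs text from NQ's byte offsets, on a toy token table
# (illustrative values, not real dataset contents):
#
#   token      = ["<p>", "Hello", "world", "</p>"]
#   start_byte = [  0,     4,      10,      16  ]
#   end_byte   = [  3,     9,      15,      20  ]
#   is_html    = [  1,     0,       0,       1  ]
#
# start2token[4] -> {"token": "Hello", "is_html": 0}, end2start[20] -> 16.
# A candidate spanning bytes [0, 20] is rendered by scanning start2token for
# j in range(0, end2start[20]) and keeping non-HTML tokens -> "Hello world".
# (The exclusive bound means the candidate's final token, "</p>" here, is
# never emitted.)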
def clean(raw, path):
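    """Stream records from `raw`, flatten each into the schema documented
    above, and append them to `path` as JSON lines."""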
    # Create the output directory if needed; the file is opened in append
    # mode, so re-running the script extends any existing output.
    os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
    fp = open(path, "a")
    writer = jsonlines.Writer(fp)

    count = 0
    dataset = []
    for data in raw:
        try:
            document = ""

            startmax = max(data["document"]["tokens"]["start_byte"])
            endmax = max(data["document"]["tokens"]["end_byte"])

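            # Byte-offset lookups: start2token[b] holds the token that starts
            # at byte b (-1 where no token starts); end2start[b] holds the
            # start byte of the token that ends at byte b.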
            start2token = [-1] * (startmax + 1)
            end2start = [-1] * (endmax + 1)

            tokens = data["document"]["tokens"]
            for i in range(len(tokens["token"])):
                start2token[tokens["start_byte"][i]] = {
                    "token": tokens["token"][i],
                    "is_html": tokens["is_html"][i]
                }

                end2start[tokens["end_byte"][i]] = tokens["start_byte"][i]

                if not tokens["is_html"][i]:
                    document += tokens["token"][i] + " "

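            # Render each long-answer candidate by walking its byte span and
            # keeping non-HTML tokens. end2start maps the candidate's end byte
            # to the start byte of its final token, which the exclusive range
            # bound below leaves out.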
            candidates = []
            cand = data["long_answer_candidates"]
            for i in range(len(cand["start_byte"])):
                span = range(cand["start_byte"][i], end2start[cand["end_byte"][i]])
                candidates.append(" ".join(
                    start2token[j]["token"] for j in span
                    if start2token[j] != -1 and not start2token[j]["is_html"]
                ))

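            # First short-answer text per annotation, or "" when none exists.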
            short_answers = [ans["text"][0] if ans["text"] else "" for ans in data["annotations"]["short_answers"]]

            dataset.append({
                "id": data["id"],
                "document": document,
                "question": data["question"]["text"],
                "long_answer_candidates": candidates,
                "long_answer_candidate_index": data["annotations"]["long_answer"][0]["candidate_index"],
                "short_answers": short_answers
            })
        except Exception as ex:
            # raise ex
            print("Exception: " + str(ex))

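        # Flush to disk every 1,000 records so the in-memory batch stays small.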
        if (count + 1) % 1000 == 0:
            writer.write_all(dataset)
            dataset = []

        print("Done: " + str(count), end="\r")
        count += 1

    if dataset:
        writer.write_all(dataset)

    writer.close()
    fp.close()


if __name__ == "__main__":
    if len(sys.argv) < 2:
        raise ValueError("Missing required argument: repository id")

    repo = sys.argv[1]

    api = HfApi()
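    # HfApi picks up credentials from the locally cached Hugging Face token
    # (e.g. after `huggingface-cli login`).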

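    # Stream the split so the whole dump never needs to be materialized
    # locally before cleaning begins.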
    train = load_dataset("natural_questions", split="train", streaming=True)
    train_path = "data/train.jsonl"
    clean(train, train_path)
    
    api.upload_file(
        path_or_fileobj=train_path,
        path_in_repo="raw/train.jsonl",
        repo_id=repo,
        repo_type="dataset",
    )

    val = load_dataset("natural_questions", split="validation", streaming=True)
    val_path = "data/validation.jsonl"
    clean(val, val_path)

    api.upload_file(
        path_or_fileobj=val_path,
        path_in_repo="raw/validation.jsonl",
        repo_id=repo,
        repo_type="dataset",
    )
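
# Example invocation (the script and repo names below are placeholders --
# point it at a dataset repo you can write to):
#
#   python clean_nq.py your-username/natural-questions-cleaned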