# -*- coding: utf-8 -*-
"""
@author: XuMing(xuming624@qq.com)
@description: Natural Language Generation Chinese corpus (medical).
"""

import json

import datasets

_DESCRIPTION = """Plain-text Chinese medical dataset, containing encyclopedia data for pre-training, instruction fine-tuning data, and reward-model data."""
_HOMEPAGE = "https://github.com/shibing624/MedicalGPT"
_CITATION = ""
_LICENSE = ""
_BASE_URL = "https://huggingface.co/datasets/shibing624/medical/resolve/main/"
# file url: https://huggingface.co/datasets/shibing624/medical/resolve/main/finetune/test_zh_0.json
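# Example usage (a minimal sketch; assumes the `datasets` library is installed
# and the files above are reachable on the Hugging Face Hub):
#
#   from datasets import load_dataset
#   ds = load_dataset("shibing624/medical", "finetune")
#   print(ds["train"][0])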

class MedicalDataset(datasets.GeneratorBasedBuilder):
    """Chinese medical corpus with pretrain, finetune, and reward configurations."""

    VERSION = datasets.Version("1.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="pretrain", version=VERSION, description="pretrain data"),
        datasets.BuilderConfig(name="finetune", version=VERSION, description="finetune data"),
        datasets.BuilderConfig(name="reward", version=VERSION, description="reward data"),
    ]
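    # Note: each config name doubles as the subdirectory under _BASE_URL from
    # which its data files are downloaded (see `_split_generators`).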

    def _info(self):
        if self.config.name == "pretrain":
            features = datasets.Features(
                {
                    "text": datasets.Value("string")
                }
            )
        elif self.config.name == "finetune":
            features = datasets.Features(
                {
                    "instruction": datasets.Value("string"),
                    "input": datasets.Value("string"),
                    "output": datasets.Value("string")
                }
            )
        elif self.config.name == "reward":
            features = datasets.Features(
                {
                    "question": datasets.Value("string"),
                    "response_chosen": datasets.Value("string"),
                    "response_rejected": datasets.Value("string")
                }
            )
        else:
            raise ValueError(f"Unknown config name: {self.config.name}")

        return datasets.DatasetInfo(
            # The description that will appear on the dataset page.
            description=_DESCRIPTION,
            # Column names and types differ between the three configurations
            # defined above.
            features=features,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            # License for the dataset if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_url = _BASE_URL + self.config.name
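        # `dl_manager.download_and_extract` accepts either a single URL or a
        # list of URLs and returns local cached path(s) of the same shape.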

        if self.config.name == "pretrain":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": dl_manager.download_and_extract(f"{data_url}/train_encyclopedia.json"),
                        "split": "train"
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": dl_manager.download_and_extract(f"{data_url}/valid_encyclopedia.json"),
                        "split": "dev"
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": dl_manager.download_and_extract(f"{data_url}/test_encyclopedia.json"),
                        "split": "test"
                    },
                ),
            ]
        elif self.config.name == "finetune":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": dl_manager.download_and_extract([f"{data_url}/train_zh_0.json", f"{data_url}/train_en_1.json"]),
                        "split": "train"
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": dl_manager.download_and_extract([f"{data_url}/valid_zh_0.json", f"{data_url}/valid_en_1.json"]),
                        "split": "dev"
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": dl_manager.download_and_extract([f"{data_url}/test_zh_0.json", f"{data_url}/test_en_1.json"]),
                        "split": "test"
                    },
                ),
            ]
        elif self.config.name == "reward":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": dl_manager.download_and_extract(f"{data_url}/train.json"),
                        "split": "train"
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": dl_manager.download_and_extract(f"{data_url}/valid.json"),
                        "split": "dev"
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "filepath": dl_manager.download_and_extract(f"{data_url}/test.json"),
                        "split": "test"
                    },
                ),
            ]
        
    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
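    # Each downloaded file is JSON Lines: one JSON object per line, with fields
    # matching the features declared in `_info`, e.g. (hypothetical records):
    #   pretrain: {"text": "..."}
    #   finetune: {"instruction": "...", "input": "...", "output": "..."}
    #   reward:   {"question": "...", "response_chosen": "...", "response_rejected": "..."}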
    def _generate_examples(self, filepath, split):
        """Yields (key, example) tuples; `filepath` may be one path or a list of paths."""
        idx = 0  # running example id, unique across all files
        if isinstance(filepath, str):
            filepath = [filepath]
        for file in filepath:
            with open(file, encoding="utf-8") as f:
                for row in f:
                    data = json.loads(row)
                    if self.config.name == "pretrain":
                        yield idx, {
                            "text": data["text"]
                        }
                    elif self.config.name == "finetune":
                        yield idx, {
                            "instruction": data["instruction"],
                            "input": data["input"],
                            "output": data["output"]
                        }
                    elif self.config.name == "reward":
                        yield idx, {
                            "question": data["question"],
                            "response_chosen": data["response_chosen"],
                            "response_rejected": data["response_rejected"]
                        }
                    idx += 1