CMB / CMB.py

Modalities: Text
Formats: json
Languages: Chinese
Size: < 1K
Libraries: Datasets, pandas
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The General Language Understanding Evaluation (GLUE) benchmark."""
import csv
import os
import sys
import json
import io
import textwrap
import numpy as np
import datasets
_CMB_CITATION = """\
coming soon~
"""
_CMB_DESCRIPTION = """\
coming soon~
"""
_DATASETS_FILE = "https://huggingface.co/datasets/FreedomIntelligence/CMB/resolve/main/CMB-datasets.zip"
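
# Based on the data_dir values and file paths used in _split_generators below, the
# archive at _DATASETS_FILE is expected to unpack into three top-level folders:
#   CMB-main/            train / val / test multiple-choice questions
#   CMB-test-exampaper/  past authentic exam papers
#   CMB-test-qa/         exam questions in QA format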
class CMBConfig(datasets.BuilderConfig):
"""BuilderConfig for GLUE."""
def __init__(
self,
features,
data_url,
data_dir,
citation,
url,
**kwargs,
):
super(CMBConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
self.features = features
self.data_url = data_url
self.data_dir = data_dir
self.citation = citation
self.url = url
class CMB(datasets.GeneratorBasedBuilder):
"""The General Language Understanding Evaluation (GLUE) benchmark."""
BUILDER_CONFIGS = [
CMBConfig(
name="main",
description=textwrap.dedent(
"""\
                The main dataset, consisting of train, val, and test splits."""
),
features=datasets.Features(
{
"id": datasets.Value("string"),
"exam_type": datasets.Value("string"),
"exam_class": datasets.Value("string"),
"chapter": datasets.Value("string"),
"exam_subject": datasets.Value("string"),
"exercise": datasets.Value("string"),
"question": datasets.Value("string"),
"question_type": datasets.Value("string"),
"option": datasets.Value("string"),
"answer": datasets.Value("string"),
"explanation": datasets.Value("string")
}
),
data_url=_DATASETS_FILE,
data_dir="CMB-main",
            citation="",  # per-config citation not yet available; _CMB_CITATION is appended in _info()
url="https://github.com/FreedomIntelligence/CMB",
),
CMBConfig(
name="paper-exampaper",
description=textwrap.dedent(
"""\
                Past authentic exam papers."""
),
features=datasets.Features(
{
"id": datasets.Value("string"),
"source": datasets.Value("string"),
"exam_type": datasets.Value("string"),
"exam_class": datasets.Value("string"),
"exam_subject": datasets.Value("string"),
"question": datasets.Value("string"),
"question_type": datasets.Value("string"),
"option": datasets.Value("string"),
"answer": datasets.Value("string")
}
),
data_url=_DATASETS_FILE,
data_dir="CMB-test-exampaper",
            citation="",
url="https://github.com/FreedomIntelligence/CMB",
),
CMBConfig(
name="qa",
description=textwrap.dedent(
"""\
                Exam questions in QA format.
"""
),
features=datasets.Features(
{
"id": datasets.Value("string"),
"title": datasets.Value("string"),
"description": datasets.Value("string"),
"QA_pairs": datasets.Value("string")
}
),
data_url=_DATASETS_FILE,
data_dir="CMB-test-qa",
            citation="",
url="https://github.com/FreedomIntelligence/CMB",
),
]
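
    # Each BuilderConfig above is selected by name when loading the dataset. Assuming
    # the script is served from the FreedomIntelligence/CMB Hub repo (as the
    # _DATASETS_FILE URL suggests), usage looks like:
    #   load_dataset("FreedomIntelligence/CMB", "main")             # train / validation / test
    #   load_dataset("FreedomIntelligence/CMB", "paper-exampaper")  # test only
    #   load_dataset("FreedomIntelligence/CMB", "qa")               # test only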
def _info(self):
return datasets.DatasetInfo(
description=_CMB_DESCRIPTION,
features=self.config.features,
homepage=self.config.url,
citation=self.config.citation + "\n" + _CMB_CITATION,
)
    def _split_generators(self, dl_manager):
        # All configs ship in the same archive; each config reads JSON files
        # from its own sub-directory (self.config.data_dir).
        data_file = dl_manager.extract(self.config.data_url)
        main_data_dir = os.path.join(data_file, self.config.data_dir)
        if self.config.name == "main":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_file": os.path.join(main_data_dir, "CMB-train", "CMB-train-merge.json"),
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "data_file": os.path.join(main_data_dir, "CMB-val", "CMB-val-merge.json"),
                        "split": "val",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "data_file": os.path.join(main_data_dir, "CMB-test", "CMB-test-choice-question-merge.json"),
                        "split": "test",
                    },
                ),
            ]
        if self.config.name == "paper-exampaper":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "data_file": os.path.join(main_data_dir, "CMB-test-zhenti-merge.json"),
                        "split": "test",
                    },
                ),
            ]
        if self.config.name == "qa":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "data_file": os.path.join(main_data_dir, "CMB-test-qa.json"),
                        "split": "test",
                    },
                ),
            ]
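
    # The gen_kwargs above are passed verbatim to _generate_examples, so each split
    # hands over one merged JSON file plus its split name.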
    def _generate_examples(self, data_file, split):
        # Every config stores its examples as a single JSON list of records.
        with open(data_file, "r", encoding="utf-8") as f:
            examples = json.load(f)
        if self.config.name == "main":
            for idx, vals in enumerate(examples):
                # Fill optional fields with defaults so every row matches the declared features.
                vals["explanation"] = vals.get("explanation", "")
                vals["exercise"] = vals.get("exercise", "")
                vals["chapter"] = vals.get("chapter", "")
                vals["answer"] = vals.get("answer", "")
                vals["id"] = str(vals.get("id", idx))  # "id" is declared as a string feature
                yield idx, vals
        elif self.config.name == "paper-exampaper":
            for idx, vals in enumerate(examples):
                vals["answer"] = vals.get("answer", "")
                vals["source"] = vals.get("source", "")
                vals["id"] = str(vals.get("id", idx))
                yield idx, vals
        elif self.config.name == "qa":
            for idx, vals in enumerate(examples):
                vals["id"] = str(vals.get("id", idx))
                yield idx, vals
if __name__ == "__main__":
    # Quick local smoke test: load the "main" config directly from this script.
    from datasets import load_dataset

    dataset = load_dataset("CMB.py", "main")
    # dataset = load_dataset("CMB.py", "qa")
    print(dataset)
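
    # Optional inspection sketch (a minimal illustration; assumes the archive at
    # _DATASETS_FILE downloaded and extracted successfully):
    print(dataset["train"].features)  # schema declared in the "main" CMBConfig
    print(dataset["train"][0])        # first multiple-choice example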