import json
import os

import datasets


class TestBConfig(datasets.BuilderConfig):
    """BuilderConfig for TestB."""

    def __init__(self, features, data_url, **kwargs):
        """BuilderConfig for TestB.

        Args:
            features: *list[string]*, list of the features that will appear in the
                feature dict. Should not include "label".
            data_url: *string*, url or local path of the data to load.
            **kwargs: keyword arguments forwarded to super.
        """
        # Version history:
        # 1.0.2: Fixed nondeterminism in ReCoRD.
        # 1.0.1: Change from the pre-release trial version of SuperGLUE (v1.9) to
        #        the full release (v2.0).
        # 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
        # 0.0.2: Initial version.
        super().__init__(version=datasets.Version("1.0.2"), **kwargs)
        self.features = features
        self.data_url = data_url


class TestB(datasets.GeneratorBasedBuilder):
    """The TestB dataset builder."""

    BUILDER_CONFIGS = [
        TestBConfig(
            name="data1",
            # The feature name below is an illustrative placeholder; replace it
            # with the actual fields of the JSON-lines files in the data directory.
            features=["text"],
            data_url="./data1/",
        ),
        TestBConfig(
            name="data2",
            features=["text"],
            data_url="./data2",
        ),
    ]

    def _info(self):
        # Every feature listed in the config is exposed as a plain string column.
        features = {feature: datasets.Value("string") for feature in self.config.features}

        return datasets.DatasetInfo(
            description="desc",
            features=datasets.Features(features),
        )
         
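    # A split generator is required by GeneratorBasedBuilder. This is a minimal
    # sketch that assumes each data_url directory contains one JSON-lines file
    # per split, named train.jsonl and test.jsonl; adjust the file names to
    # whatever actually ships with the dataset.
    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(self.config.data_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_file": os.path.join(data_dir, "train.jsonl"), "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_file": os.path.join(data_dir, "test.jsonl"), "split": "test"},
            ),
        ]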

    def _generate_examples(self, data_file, split):
        # Each line is a single JSON object; the line index serves as the
        # unique example key expected by GeneratorBasedBuilder.
        with open(data_file, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                row = json.loads(line)
                yield idx, row
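

# Example usage sketch: loading this builder locally with the datasets library.
# The script file name "test_b.py" is an assumption; pass the path where this
# file is actually saved.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("./test_b.py", "data1", trust_remote_code=True)
    print(ds["train"][0])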