# Copyright 2020 The HuggingFace Datasets Authors.
# Copyright 2023 Bingbin Liu, Jordan Ash, Surbhi Goel, Akshay Krishnamurthy, and Cyril Zhang.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datasets
import numpy as np

_CITATION = """\
"""

_DESCRIPTION = """\
Online dataset mockup: examples are sampled on the fly from simple automata
(parity, flip-flop) rather than read from files.
"""

_HOMEPAGE = ""

_LICENSE = ""

_URLS = {}


class MockupDataset(datasets.GeneratorBasedBuilder):
    """Online automaton dataset mockup: each example pairs an input sequence with its state sequence."""

    VERSION = datasets.Version("0.0.0")

    BUILDER_CONFIGS = []

    def __init__(self, name=None, data_config=None, **kwargs):
        super().__init__(**kwargs)

        # Set default configs.
        if name is None:
            name = "parity"
        if data_config is None:
            data_config = {}
        data_config.setdefault("length", 20)
        data_config.setdefault("size", 100)
        self.data_config = data_config

        # Pick the sampler class registered under `name`.
        self.sampler = dataset_map[name](data_config)

    def _info(self):
        features = datasets.Features(
            {
                "x": datasets.Sequence(datasets.Value("int32"), length=-1),
                "y": datasets.Sequence(datasets.Value("int32"), length=-1),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split": "train",
                },
            )
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, split):
        for i in range(self.data_config["size"]):
            x, y = self.sampler.sample()
            yield i, {"x": x, "y": y}


class AutomatonSampler:
    """Base class: samples an input sequence x and the corresponding state sequence y = f(x)."""

    def __init__(self, data_config):
        self.data_config = data_config

        if "seed" in self.data_config:
            self.np_rng = np.random.default_rng(self.data_config["seed"])
        else:
            self.np_rng = np.random.default_rng()

        # Default to 2 states so configs that omit `n_states` (e.g. parity) still work.
        self.n_states = data_config.get("n_states", 2)
        self.T = self.data_config["length"]

    def f(self, x):
        """Get the output sequence for an input sequence x."""
        raise NotImplementedError()

    def sample(self):
        raise NotImplementedError()


class ParitySampler(AutomatonSampler):
    def __init__(self, data_config):
        super().__init__(data_config)
        self.name = "parity"

    def f(self, x):
        # State = running parity of the bits seen so far.
        return np.cumsum(x) % 2

    def sample(self):
        x = self.np_rng.binomial(1, 0.5, size=self.T)
        return x, self.f(x)


class FlipflopSampler(AutomatonSampler):
    def __init__(self, data_config):
        super().__init__(data_config)
        self.name = "flipflop"

        # Action 0 is "read" (keep the current state); action i > 0 is "write i".
        # transition[state, action] gives the next state.
        self.n_actions = self.n_states + 1
        self.transition = np.array(
            [list(range(self.n_actions))]
            + [[i + 1] * self.n_actions for i in range(self.n_states)]
        ).T

    def f(self, x):
        state, states = 0, []
        for action in x:
            state = self.transition[state, action]
            states.append(state)
        return np.array(states)

    def sample(self):
        # Use the seeded generator so a `seed` in data_config is respected.
        rand = self.np_rng.uniform(size=self.T)
        nonzero_pos = (rand < 0.5).astype(np.int64)
        writes = self.np_rng.choice(np.arange(1, self.n_states + 1), size=self.T)
        x = writes * nonzero_pos
        return x, self.f(x)


dataset_map = {
    "parity": ParitySampler,
    "flipflop": FlipflopSampler,
    # TODO: more datasets
}
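

# --- Usage sketch (assumption: not part of the original script) ---
# A minimal check of the samplers above when this file is run directly.
# Loading through `datasets.load_dataset` would normally point at this
# script's path (shown as a hypothetical path in the comment below); here
# the samplers are exercised on their own, which needs no download step.
if __name__ == "__main__":
    parity = ParitySampler({"length": 10, "seed": 0})
    x, y = parity.sample()
    print("parity   x:", x, "y:", y)

    flipflop = FlipflopSampler({"length": 10, "seed": 0, "n_states": 3})
    x, y = flipflop.sample()
    print("flipflop x:", x, "y:", y)

    # e.g. datasets.load_dataset("path/to/this_script.py", split="train")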