# Copyright 2020 The HuggingFace Datasets Authors. # Copyright 2023 Bingbin Liu, Jordan Ash, Surbhi Goel, Akshay Krishnamurthy, and Cyril Zhang. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv import json import os import itertools from sympy.combinatorics.permutations import Permutation import datasets import numpy as np from copy import copy # check python version import sys major, minor = sys.version_info[:2] version = major + 0.1*minor OLD_PY_VERSION = 1 if version < 3.8 else 0 _CITATION = """\ """ _DESCRIPTION = """\ Online dataset mockup. """ _HOMEPAGE = "" _LICENSE = "" _URLS = {} class SyntheticAutomataDataset(datasets.GeneratorBasedBuilder): """TODO: Short description of my dataset.""" VERSION = datasets.Version("0.0.0") BUILDER_CONFIGS = [] def __init__(self, config={}, **kwargs): super().__init__(**kwargs) """ Set default configs """ if 'name' not in config: config['name'] = 'parity' # if 'length' not in config: # sequence length # config['length'] = 20 if 'size' not in config: # number of sequences config['size'] = -1 self.data_config = config self.sampler = dataset_map[config['name']](config) def _info(self): features = datasets.Features( { "input_ids": datasets.Sequence(datasets.Value("int32"), length=-1), "label_ids": datasets.Sequence(datasets.Value("int32"), length=-1) } ) return datasets.DatasetInfo( description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION, ) def _split_generators(self, dl_manager): return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={ "split": "train", }, ) ] def _generate_examples(self, split): for i in itertools.count(start=0): if i == self.data_config['size']: break x, y = self.sampler.sample() yield i, { "input_ids": x, "label_ids": y } class AutomatonSampler: """ This is a parent class that must be inherited. """ def __init__(self, data_config): self.data_config = data_config if 'seed' in self.data_config: self.np_rng = np.random.default_rng(self.data_config['seed']) else: self.np_rng = np.random.default_rng() if 'length' not in data_config: # sequence length data_config['length'] = 20 self.T = self.data_config['length'] if 'random_length' not in data_config: data_config['random_length'] = 0 self.random_length = data_config['random_length'] self.__info__ = " - T (int): sequence length.\n" \ + " - random_length (int in {0, 1}): whether to randomly sample a length per sample.\n" def f(self, x): """ Get output sequence given an input seq """ raise NotImplementedError() def sample(self): raise NotImplementedError() def sample_length(self): if self.random_length: return self.np_rng.choice(range(1, self.T+1)) return self.T def help(self): print(self.__info__) class BinaryInputSampler(AutomatonSampler): """ This is a parent class that must be inherited. Subclasses: ParitySampler, GridworldSampler, ABABSampler """ def __init__(self, data_config): super().__init__(data_config) if 'prob1' not in data_config: data_config['prob1'] = 0.5 self.prob1 = data_config['prob1'] self.__info__ = " - prob1 (float in [0,1]): probability of token 1\n" \ + self.__info__ def f(self, x): raise NotImplementedError() def sample(self): T = self.sample_length() x = self.np_rng.binomial(1, self.prob1, size=T) return x, self.f(x) class ParitySampler(BinaryInputSampler): def __init__(self, data_config): super().__init__(data_config) self.name = 'parity' self.__info__ = "Parity machine with 2 states: \n" \ + "- Inputs: binary strings\n" \ + "- Labels: binary strings of the partial parity\n" \ + "- Config: \n" \ + self.__info__ def f(self, x): return np.cumsum(x) % 2 class GridworldSampler(BinaryInputSampler): """ Note: gridworld currently doesn't include a no-op. """ def __init__(self, data_config): super().__init__(data_config) if 'n' not in data_config: data_config['n'] = 9 """ NOTE: n is the number of states, and S is the id (0-indexing) of the rightmost state. i.e. the states are 0,1,2,...,S, where S=n-1. """ self.n = data_config['n'] self.S = self.n - 1 if 'label_type' not in data_config: # Options: state, parity, boundary data_config['label_type'] = 'state' self.label_type = data_config['label_type'] self.name = f'Grid{self.n}' self.__info__ = f"1d Gridworld of n={self.n} states:\n" \ + "- Inputs: binary strings, i.e. move left(0) or right(1)\n" \ + "- Labels: depending on 'label_type'. \n" \ + "- Config: \n" \ + " - n (int): number of states; i.e. the states are 0,1,2,...,n-1.\n" \ + " - label_type (str): choosing from the following options:\n" \ + " - 'state' (default): the state id, i.e. 0 to n-1.\n" \ + " - 'parity': the state id mod 2.\n" \ + " - 'boundary': whether the current state is in {0, n-1} or not.\n" \ + self.__info__ def f(self, x): x = copy(x) x[x == 0] = -1 if OLD_PY_VERSION: # NOTE: for Python 3.7 or below, accumulate doesn't have the 'initial' argument. x = np.concatenate([np.array([0]), x]).astype(np.int64) states = list(itertools.accumulate(x, lambda a,b: max(min(a+b, self.S), 0))) states = states[1:] else: states = list(itertools.accumulate(x, lambda a,b: max(min(a+b, self.S), 0), initial=0)) states = states[1:] # remove the 1st entry with is the (meaningless) initial value 0 return np.array(states).astype(np.int64) class ABABSampler(BinaryInputSampler): def __init__(self, data_config): super().__init__(data_config) self.name = 'abab' if 'prob_abab_pos_sample' not in data_config: # The probability of having a positive sequence, i.e. 010101010101... data_config['prob_abab_pos_sample'] = 0.25 if 'label_type' not in data_config: # Options: 'state', 'boundary' data_config['label_type'] = 'state' self.prob_abab_pos_sample = data_config['prob_abab_pos_sample'] self.label_type = data_config['label_type'] self.transition = np.array( [[4, 1], # state 0 [2, 4], # state 1 [4, 3], # state 2 [0, 4], # state 3 [4, 4], # state 4 ]) self.__info__ = "abab: an automaton with 4 states + 1 absorbing state:\n" \ + "- Inputs: binary strings\n" \ + "- Labels: depending on 'label_type'.\n" \ + "- Config:\n" \ + " - prob_abab_pos_sample (float in [0,1]): probability of having a 'positive' sequence, i.e. 01010101010...\n" \ + " - label_type (str): choosing from the following options:\n" \ + " - 'state' (default): the state id.\n" \ + " - 'boundary': whether the state is in state 3 (the states are 0,1,2,3).\n" \ + self.__info__ def f(self, x): labels = [] curr_state = 3 for each in x: curr_state = self.transition[curr_state, each] labels += curr_state, labels = np.array(labels).astype(np.int64) if self.label_type == 'boundary': labels = (labels == 3).astype(np.int64) return labels def sample(self): pos_sample = self.np_rng.random() < self.prob_abab_pos_sample if pos_sample: T = self.sample_length() x = [0,1,0,1] * (T//4) x += [0,1,0,1][:(T%4)] x = np.array(x) return x, self.f(x) else: return super().sample() class FlipFlopSampler(AutomatonSampler): def __init__(self, data_config): super().__init__(data_config) self.name = 'flipflop' if 'n' not in data_config: data_config['n'] = 2 self.n_states = data_config['n'] self.n_actions = self.n_states + 1 self.transition = np.array([list(range(self.n_actions))] + [[i+1]*self.n_actions for i in range(self.n_states)]).T self.__info__ = f"Flipflop with n={self.n_states} states:\n" \ +f"- Inputs: tokens are either 0 (read) or 1:{self.n} (write).\n" \ + "- Labels: depending on 'label_type'.\n" \ + "- Config:\n" \ + " - n (int): number of write states; i.e. the states are 1,2,...,n, plus a default start state 0.\n" \ + self.__info__ def f(self, x): state, states = 0, [] for action_id in x: state = self.transition[state, action_id] states += state, return np.array(states) def sample(self): T = self.sample_length() rand = self.np_rng.uniform(size=T) nonzero_pos = (rand < 0.5).astype(np.int64) writes = self.np_rng.choice(range(1, self.n_states+1), size=T) x = writes * nonzero_pos return x, self.f(x) class PermutationSampler(AutomatonSampler): """ This is a parent class that must be inherited. Subclasses: SymmetricSampler, AlternatingSampler """ def __init__(self, data_config): super().__init__(data_config) if 'n' not in data_config: data_config['n'] = 5 if 'label_type' not in data_config: # Options: 'state', 'first_chair' data_config['label_type'] = 'state' self.n = data_config['n'] # the symmetric group Sn self.label_type = data_config['label_type'] self.__info__ = \ " - label_type (str): choosing from the following options:\n" \ + " - 'state' (default): the state id.\n" \ + " - 'first_chair': the element in the first position of the permutation.\n" \ + " e.g. if the current permutation is [2,1,4,3], then 'first_chair' is 2.\n" \ + self.__info__ def get_state_label(self, state): enc = self.state_encode(state) return self.state_label_map[enc] def f(self, x): curr_state = np.arange(self.n) labels = [] for action_id in x: curr_state = self.actions[action_id].dot(curr_state) if self.label_type == 'state': labels += self.get_state_label(curr_state), elif self.label_type == 'first_chair': labels += curr_state[0], return np.array(labels) def sample(self): T = self.sample_length() x = self.np_rng.choice(range(self.n_actions), replace=True, size=T) return x, self.f(x) class SymmetricSampler(PermutationSampler): """ TODO: add options for labels as functions of states - parity (whether a state is even): this may need packages (e.g. Permutation from sympy) - position / toggle: for S3 ~ D6, we can add labels for substructures as in Dihedral groups. """ def __init__(self, data_config): super().__init__(data_config) self.name = f'S{self.n}' """ Get states """ self.state_encode = lambda state: ''.join([str(int(each)) for each in state]) self.state_label_map = {} for si, state in enumerate(itertools.permutations(range(self.n))): enc = self.state_encode(state) self.state_label_map[enc] = si """ Get actions (3 defaults: id, shift-by-1, swap-first-two) """ if 'n_actions' not in data_config: data_config['n_actions'] = 3 self.n_actions = data_config['n_actions'] self.actions = {0: np.eye(self.n)} # shift all elements to the right by 1 shift_idx = list(range(1, self.n)) + [0] self.actions[1] = np.eye(self.n)[shift_idx] # swap the first 2 elements shift_idx = [1, 0] + list(range(2, self.n)) self.actions[2] = np.eye(self.n)[shift_idx] if self.n_actions > 3: # add permutations in the order given by itertools.permutations self.all_permutations = list(itertools.permutations(range(self.n)))[1:] cnt = 2 for each in self.all_permutations: action = np.eye(self.n)[list(each)] if np.linalg.norm(action - self.actions[0]) == 0: continue elif np.linalg.norm(action - self.actions[1]) == 0: continue self.actions[cnt] = action cnt += 1 if cnt == self.n_actions: break self.__info__ = f"Symmetric group on n={self.n} objects:\n" \ +f"- Inputs: tokens are either 0 (no-op), or 1:{self.n_actions} (corresponding to {self.n_actions} permutations).\n" \ + "- Labels: depending on 'label_type'.\n" \ + "- Config:\n" \ + " - n (int): number of objects, i.e. there are n! states.\n" \ + " - n_actions (int): number of permutations to include in the generator set;\n" \ + " the ordering is given by itertools.permutations, and the first 'n_actions' permutations will be included.\n" \ + self.__info__ class AlternatingSampler(PermutationSampler): """ TODO: other choices of generators (currently using (12x))? """ def __init__(self, data_config): super().__init__(data_config) self.name = f'A{self.n}' """ Get states """ self.state_label_map = {} self.state_encode = lambda state: ''.join([str(int(each)) for each in state]) cnt = 0 for si, state in enumerate(itertools.permutations(range(self.n))): if not Permutation(state).is_even: continue enc = self.state_encode(state) self.state_label_map[enc] = cnt cnt += 1 """ Get actions: all 3 cycles of the form (12x) """ self.actions = {0: np.eye(self.n)} for idx in range(2, self.n): # (1, 2, idx) shift_idx = list(range(self.n)) shift_idx[0],shift_idx[1], shift_idx[idx] = shift_idx[1], shift_idx[idx], shift_idx[0] self.actions[idx-1] = np.eye(self.n)[shift_idx] self.n_actions = len(self.actions) self.__info__ = f"Alternating group on n={self.n} objects:\n" \ +f"- Inputs: tokens from 0 to n-3, corresponding to all 3-cycles of the form (12x).\n" \ + "- Labels: depending on 'label_type'.\n" \ + "- Config:\n" \ + " - n (int): number of objects, i.e. there are n!/2 states.\n" \ + self.__info__ class CyclicSampler(AutomatonSampler): def __init__(self, data_config): super().__init__(data_config) if 'n' not in data_config: data_config['n'] = 5 self.n = data_config['n'] """ Get actions: shift by i positions, for i = 0 to n_actions-1 """ if 'n_actions' not in data_config: data_config['n_actions'] = 2 self.n_actions = data_config['n_actions'] shift_idx = list(range(1, self.n)) + [0] self.actions = {} for i in range(self.n_actions): shift_idx = list(range(i, self.n)) + list(range(0, i)) self.actions[i] = np.eye(self.n)[shift_idx] def f(self, x): if OLD_PY_VERSION: # NOTE: for Python 3.7 or below, accumulate doesn't have the 'initial' argument. x_padded = np.concatenate([np.array([0]), x]).astype(np.int64) states = list(itertools.accumulate(x_padded, lambda a,b: (a+b)%self.n )) states = states[1:] else: states = list(itertools.accumulate(x, lambda a,b: (a+b)%self.n, initial=0)) states = states[1:] # remove the 1st entry with is the (meaningless) initial value 0 return np.array(states).astype(np.int64) def sample(self): T = self.sample_length() x = self.np_rng.choice(range(self.n_actions), replace=True, size=T) return x, self.f(x) dataset_map = { 'abab': ABABSampler, 'alternating': AlternatingSampler, 'cyclic': CyclicSampler, 'flipflop': FlipFlopSampler, 'gridworld': GridworldSampler, 'parity': ParitySampler, 'symmetric': SymmetricSampler, # TODO: more datasets }