Columns: text (string, lengths 1 to 1.02k), class_index (int64, values 0 to 271), source (string, 76 distinct values).
All rows shown below share class_index = 4 and source = /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py, so only the text column is listed; each row is one chunk of that file.
Args: features ([`Features`]): New features to cast the dataset to. The name of the fields in the features must match the current column names. The type of the data must also be convertible from one type to the other. For non-trivial conversion, e.g. `str` <-> `ClassLabel` you should use [`~datasets.Dataset.map`] to update the Dataset. batch_size (`int`, defaults to `1000`): Number of examples per batch provided to cast. If `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to cast. keep_in_memory (`bool`, defaults to `False`): Whether to copy the data in-memory. load_from_cache_file (`bool`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing.
cache_file_name (`str`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running [`~datasets.Dataset.map`]. num_proc (`int`, *optional*, defaults to `None`): Number of processes for multiprocessing. By default it doesn't use multiprocessing.
Returns: [`Dataset`]: A copy of the dataset with casted features. Example:
```py >>> from datasets import load_dataset, ClassLabel, Value >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.features {'label': ClassLabel(names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> new_features = ds.features.copy() >>> new_features['label'] = ClassLabel(names=['bad', 'good']) >>> new_features['text'] = Value('large_string') >>> ds = ds.cast(new_features) >>> ds.features {'label': ClassLabel(names=['bad', 'good'], id=None), 'text': Value(dtype='large_string', id=None)} ``` """ if sorted(features) != sorted(self._data.column_names): raise ValueError( f"The columns in features ({list(features)}) must be identical " f"as the columns in the dataset: {self._data.column_names}" )
schema = features.arrow_schema format = self.format dataset = self.with_format("arrow") # capture the PyArrow version here to make the lambda serializable on Windows dataset = dataset.map( partial(table_cast, schema=schema), batched=True, batch_size=batch_size, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, num_proc=num_proc, features=features, desc="Casting the dataset", ) dataset = dataset.with_format(**format) return dataset @fingerprint_transform(inplace=False) def cast_column(self, column: str, feature: FeatureType, new_fingerprint: Optional[str] = None) -> "Dataset": """Cast column to feature for decoding.
Args: column (`str`): Column name. feature (`FeatureType`): Target feature. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`] Example:
```py >>> from datasets import load_dataset, ClassLabel >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.features {'label': ClassLabel(names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good'])) >>> ds.features {'label': ClassLabel(names=['bad', 'good'], id=None), 'text': Value(dtype='string', id=None)} ``` """ if hasattr(feature, "decode_example"): dataset = copy.deepcopy(self) dataset._info.features[column] = feature dataset._fingerprint = new_fingerprint dataset._data = dataset._data.cast(dataset.features.arrow_schema) dataset._data = update_metadata_with_features(dataset._data, dataset.features) return dataset else: features = self.features features[column] = feature
return self.cast(features)
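The branch above that checks for a `decode_example` method covers decodable feature types such as `Audio` and `Image`: for those, only the Arrow schema is cast in place instead of running `map`. A minimal usage sketch, assuming a dataset that actually has an "audio" column (the dataset name here is only an illustration):

```py
>>> from datasets import load_dataset, Audio
>>> # any dataset with an "audio" column works the same way
>>> ds = load_dataset("PolyAI/minds14", "en-US", split="train")
>>> # Audio defines `decode_example`, so only the schema is cast, without a `map` pass
>>> ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
```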
@transmit_format @fingerprint_transform(inplace=False) def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset": """ Remove one or several column(s) in the dataset and the features associated to them. You can also remove a column using [`~datasets.Dataset.map`] with `remove_columns` but the present method doesn't copy the data of the remaining columns and is thus faster. Args: column_names (`Union[str, List[str]]`): Name of the column(s) to remove. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset object without the columns to remove. Example:
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds = ds.remove_columns('label') Dataset({ features: ['text'], num_rows: 1066 }) >>> ds = ds.remove_columns(column_names=ds.column_names) # Removing all the columns returns an empty dataset with the `num_rows` property set to 0 Dataset({ features: [], num_rows: 0 }) ``` """ dataset = copy.deepcopy(self) if isinstance(column_names, str): column_names = [column_names] missing_columns = set(column_names) - set(self._data.column_names) if missing_columns: raise ValueError( f"Column name {list(missing_columns)} not in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" )
for column_name in column_names: del dataset._info.features[column_name] dataset._data = dataset._data.drop(column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset @fingerprint_transform(inplace=False) def rename_column( self, original_column_name: str, new_column_name: str, new_fingerprint: Optional[str] = None ) -> "Dataset": """ Rename a column in the dataset, and move the features associated to the original column under the new column name.
Args: original_column_name (`str`): Name of the column to rename. new_column_name (`str`): New name for the column. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset with a renamed column. Example:
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds = ds.rename_column('label', 'label_new') Dataset({ features: ['text', 'label_new'], num_rows: 1066 }) ``` """ dataset = copy.deepcopy(self) if original_column_name not in dataset._data.column_names: raise ValueError( f"Original column name {original_column_name} not in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) if new_column_name in dataset._data.column_names: raise ValueError( f"New column name {new_column_name} already in the dataset. " f"Please choose a column name which is not already in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" ) if not new_column_name:
raise ValueError("New column name is empty.")
def rename(columns): return [new_column_name if col == original_column_name else col for col in columns] new_column_names = rename(self._data.column_names) if self._format_columns is not None: dataset._format_columns = rename(self._format_columns) dataset._info.features = Features( { new_column_name if col == original_column_name else col: feature for col, feature in self._info.features.items() } ) dataset._data = dataset._data.rename_columns(new_column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset
@fingerprint_transform(inplace=False) def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint: Optional[str] = None) -> "Dataset": """ Rename several columns in the dataset, and move the features associated to the original columns under the new column names. Args: column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset with renamed columns Example:
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds = ds.rename_columns({'text': 'text_new', 'label': 'label_new'}) Dataset({ features: ['text_new', 'label_new'], num_rows: 1066 }) ``` """ dataset = copy.deepcopy(self) extra_columns = set(column_mapping.keys()) - set(dataset.column_names) if extra_columns: raise ValueError( f"Original column names {extra_columns} not in the dataset. " f"Current columns in the dataset: {dataset._data.column_names}" )
number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values())) if number_of_duplicates_in_new_columns != 0: raise ValueError( "New column names must all be different, but this column mapping " f"has {number_of_duplicates_in_new_columns} duplicates" ) empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col] if empty_new_columns: raise ValueError(f"New column names {empty_new_columns} are empty.") def rename(columns): return [column_mapping[col] if col in column_mapping else col for col in columns] new_column_names = rename(self._data.column_names) if self._format_columns is not None: dataset._format_columns = rename(self._format_columns)
dataset._info.features = Features( { column_mapping[col] if col in column_mapping else col: feature for col, feature in (self._info.features or {}).items() } ) dataset._data = dataset._data.rename_columns(new_column_names) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset @transmit_format @fingerprint_transform(inplace=False) def select_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset": """Select one or several column(s) in the dataset and the features associated to them.
Args: column_names (`Union[str, List[str]]`): Name of the column(s) to keep. new_fingerprint (`str`, *optional*): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. Returns: [`Dataset`]: A copy of the dataset object which only consists of selected columns. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.select_columns(['text']) Dataset({ features: ['text'], num_rows: 1066 }) ``` """ if isinstance(column_names, str): column_names = [column_names]
missing_columns = set(column_names) - set(self._data.column_names) if missing_columns: raise ValueError( f"Column name {list(missing_columns)} not in the " "dataset. Current columns in the dataset: " f"{self._data.column_names}." ) dataset = copy.deepcopy(self) dataset._data = dataset._data.select(column_names) dataset._info.features = Features({col: self._info.features[col] for col in dataset._data.column_names}) dataset._data = update_metadata_with_features(dataset._data, dataset.features) dataset._fingerprint = new_fingerprint return dataset def __len__(self): """Number of rows in the dataset. Example:
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.__len__ <bound method Dataset.__len__ of Dataset({ features: ['text', 'label'], num_rows: 1066 })> ``` """ return self.num_rows def __iter__(self): """Iterate through the examples.
If a formatting is set with [`Dataset.set_format`] rows will be returned with the selected format. """ if self._indices is None: # Fast iteration # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch) format_kwargs = self._format_kwargs if self._format_kwargs is not None else {} formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs) batch_size = config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER for pa_subtable in table_iter(self.data, batch_size=batch_size): for i in range(pa_subtable.num_rows): pa_subtable_ex = pa_subtable.slice(i, 1) formatted_output = format_table( pa_subtable_ex, 0, formatter=formatter, format_columns=self._format_columns,
output_all_columns=self._output_all_columns, ) yield formatted_output else: for i in range(self.num_rows): yield self._getitem( i, )
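As a quick sketch of the fast-iteration path above, a plain `for` loop yields one formatted example per step:

```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> for example in ds:
...     # each example is a dict with one value per column, e.g. {"text": ..., "label": ...}
...     break
```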
def iter(self, batch_size: int, drop_last_batch: bool = False): """Iterate through the batches of size `batch_size`. If a formatting is set with [`~datasets.Dataset.set_format`] rows will be returned with the selected format.
Args: batch_size (:obj:`int`): size of each batch to yield. drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be dropped """ if self._indices is None: # Fast iteration # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch) format_kwargs = self._format_kwargs if self._format_kwargs is not None else {} formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs) for pa_subtable in table_iter(self.data, batch_size=batch_size, drop_last_batch=drop_last_batch): formatted_batch = format_table( pa_subtable, range(pa_subtable.num_rows), formatter=formatter, format_columns=self._format_columns, output_all_columns=self._output_all_columns,
) yield formatted_batch else: num_rows = self.num_rows if not drop_last_batch else self.num_rows // batch_size * batch_size for i in range(0, num_rows, batch_size): yield self._getitem( slice(i, i + batch_size), )
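A small sketch of batched iteration with `iter`, which yields one dict of columns per batch rather than single examples:

```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> for batch in ds.iter(batch_size=4):
...     print(len(batch["text"]))  # each batch holds batch_size rows per column
...     break
4
```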
def __repr__(self): return f"Dataset({{\n features: {list(self._info.features.keys())},\n num_rows: {self.num_rows}\n}})" @property def format(self): return { "type": self._format_type, "format_kwargs": self._format_kwargs, "columns": self.column_names if self._format_columns is None else self._format_columns, "output_all_columns": self._output_all_columns, } @contextlib.contextmanager def formatted_as( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """To be used in a `with` statement. Set `__getitem__` return format (type and columns).
Args: type (`str`, *optional*): Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__`` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. """ old_format_type = self._format_type old_format_kwargs = self._format_kwargs old_format_columns = self._format_columns old_output_all_columns = self._output_all_columns try:
self.set_format(type, columns, output_all_columns, **format_kwargs) yield finally: self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs)
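Because `formatted_as` restores the previous format in the `finally` block, it is handy for temporarily switching formats. A minimal sketch:

```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> with ds.formatted_as(type="numpy", columns=["label"]):
...     labels = ds["label"]      # numpy array while inside the block
>>> ds.format["type"] is None     # the previous (python objects) format is back on exit
True
```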
@fingerprint_transform(inplace=True) def set_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly. The format `type` (for example "numpy") is used to format batches when using `__getitem__`. It's also possible to use custom transforms for formatting using [`~datasets.Dataset.set_transform`].
Args: type (`str`, *optional*): Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
It is possible to call [`~datasets.Dataset.map`] after calling `set_format`. Since `map` may add new columns, then the list of formatted columns gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted as: ``` new formatted columns = (all columns - previously unformatted columns) ``` Example:
```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds.set_format(type='numpy', columns=['text', 'label']) >>> ds.format {'type': 'numpy', 'format_kwargs': {}, 'columns': ['text', 'label'], 'output_all_columns': False} ``` """ format_kwargs.update(format_kwargs.pop("format_kwargs", {})) # allow to use self.set_format(**self.format) # Check that the format_type and format_kwargs are valid and make it possible to have a Formatter type = get_format_type_from_alias(type) get_formatter(type, features=self._info.features, **format_kwargs)
# Check filter column if isinstance(columns, str): columns = [columns] if isinstance(columns, tuple): columns = list(columns) if columns is not None: missing_columns = set(columns) - set(self._data.column_names) if missing_columns: raise ValueError( f"Columns {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) if columns is not None: columns = columns.copy() # Ensures modifications made to the list after this call don't cause bugs
self._format_type = type self._format_kwargs = format_kwargs self._format_columns = columns self._output_all_columns = output_all_columns logger.debug( "Set __getitem__(key) output type to %s for %s columns " " (when key is int or slice) and %s output other (un-formatted) columns.", "python objects" if type is None else type, "no" if columns is None else str(columns), "do" if output_all_columns else "don't", ) def reset_format(self): """Reset `__getitem__` return format to python objects and all columns. Same as `self.set_format()` Example:
```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds.set_format(type='numpy', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label']) >>> ds.format {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'], 'format_kwargs': {}, 'output_all_columns': False, 'type': 'numpy'} >>> ds.reset_format() >>> ds.format {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], 'format_kwargs': {}, 'output_all_columns': False, 'type': None} ``` """ self.set_format()
def set_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ): """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called. As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
Args: transform (`Callable`, *optional*): User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`]. A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch. This function is applied right before returning the objects in `__getitem__`. columns (`List[str]`, *optional*): Columns to format in the output. If specified, then the input batch of the transform only contains those columns. output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). If set to True, then the other un-formatted columns are kept with the output of the transform. Example:
```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') >>> def encode(batch): ... return tokenizer(batch['text'], padding=True, truncation=True, return_tensors='pt') >>> ds.set_transform(encode) >>> ds[0] {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 'input_ids': tensor([ 101, 29353, 2135, 15102, 1996, 9428, 20868, 2890, 8663, 6895, 20470, 2571, 3663, 2090, 4603, 3017, 3008, 1998, 2037, 24211, 5637, 1998, 11690, 2336, 1012, 102]), 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])} ``` """
self.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
def with_format( self, type: Optional[str] = None, columns: Optional[List] = None, output_all_columns: bool = False, **format_kwargs, ): """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly. The format `type` (for example "numpy") is used to format batches when using `__getitem__`. It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`]. Contrary to [`~datasets.Dataset.set_format`], `with_format` returns a new [`Dataset`] object.
Args: type (`str`, *optional*): Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`. `None` means `__getitem__` returns python objects (default). columns (`List[str]`, *optional*): Columns to format in the output. `None` means `__getitem__` returns all columns (default). output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). **format_kwargs (additional keyword arguments): Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`. Example:
```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True) >>> ds.format {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], 'format_kwargs': {}, 'output_all_columns': False, 'type': None} >>> ds = ds.with_format("torch") >>> ds.format {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'], 'format_kwargs': {}, 'output_all_columns': False, 'type': 'torch'} >>> ds[0] {'text': 'compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .', 'label': tensor(1),
'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617, 1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105, 1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), 'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])} ``` """ dataset = copy.deepcopy(self) dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) return dataset
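Since `with_format` returns a new `Dataset` and leaves the original untouched, it composes naturally with a PyTorch `DataLoader`. A sketch, assuming `torch` is installed (columns are restricted to `label` here just to keep collation trivial):

```py
>>> from torch.utils.data import DataLoader
>>> ds_torch = ds.with_format("torch", columns=["label"])
>>> dataloader = DataLoader(ds_torch, batch_size=8)
>>> batch = next(iter(dataloader))
>>> batch["label"].shape
torch.Size([8])
```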
def with_transform( self, transform: Optional[Callable], columns: Optional[List] = None, output_all_columns: bool = False, ): """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called. As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`]. Contrary to [`~datasets.Dataset.set_transform`], `with_transform` returns a new [`Dataset`] object.
Args: transform (`Callable`, `optional`): User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`]. A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch. This function is applied right before returning the objects in `__getitem__`. columns (`List[str]`, `optional`): Columns to format in the output. If specified, then the input batch of the transform only contains those columns. output_all_columns (`bool`, defaults to `False`): Keep un-formatted columns as well in the output (as python objects). If set to `True`, then the other un-formatted columns are kept with the output of the transform. Example:
```py >>> from datasets import load_dataset >>> from transformers import AutoTokenizer >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") >>> def encode(example): ... return tokenizer(example["text"], padding=True, truncation=True, return_tensors='pt') >>> ds = ds.with_transform(encode) >>> ds[0] {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617, 1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105, 1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102]), 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])} ``` """
dataset = copy.deepcopy(self) dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns) return dataset
def _getitem(self, key: Union[int, slice, str, ListLike[int]], **kwargs) -> Union[Dict, List]: """ Can be used to index columns (by string names) or rows (by integer, slice, or list-like of integer indices) """ if isinstance(key, bool): raise TypeError("dataset index must be int, str, slice or collection of int, not bool") format_type = kwargs["format_type"] if "format_type" in kwargs else self._format_type format_columns = kwargs["format_columns"] if "format_columns" in kwargs else self._format_columns output_all_columns = ( kwargs["output_all_columns"] if "output_all_columns" in kwargs else self._output_all_columns ) format_kwargs = kwargs["format_kwargs"] if "format_kwargs" in kwargs else self._format_kwargs format_kwargs = format_kwargs if format_kwargs is not None else {} formatter = get_formatter(format_type, features=self._info.features, **format_kwargs)
pa_subtable = query_table(self._data, key, indices=self._indices) formatted_output = format_table( pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns ) return formatted_output
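The same `_getitem` path backs every indexing form of `__getitem__`: integers, slices, column names, and lists of indices. A short sketch:

```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> row = ds[0]           # dict with one value per column
>>> head = ds[:3]         # dict of lists (a batch of the first 3 rows)
>>> texts = ds["text"]    # list holding the whole column
>>> some = ds[[0, 2, 5]]  # dict of lists for the selected rows
>>> sorted(row)
['label', 'text']
```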
@overload def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict: # noqa: F811 ... @overload def __getitem__(self, key: str) -> List: # noqa: F811 ... def __getitem__(self, key): # noqa: F811 """Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools).""" return self._getitem(key) def __getitems__(self, keys: List) -> List: """Can be used to get a batch using a list of integers indices.""" batch = self.__getitem__(keys) n_examples = len(batch[next(iter(batch))]) return [{col: array[i] for col, array in batch.items()} for i in range(n_examples)] def cleanup_cache_files(self) -> int: """Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one. Be careful when running this command that no other process is currently using other cache files.
Returns: `int`: Number of removed files. Example:
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> ds.cleanup_cache_files() 10 ``` """ current_cache_files = [os.path.abspath(cache_file["filename"]) for cache_file in self.cache_files] if not current_cache_files: return 0 cache_directory = os.path.dirname(current_cache_files[0]) logger.info(f"Listing files in {cache_directory}") files: List[str] = os.listdir(cache_directory) files_to_remove = [] for f_name in files: full_name = os.path.abspath(os.path.join(cache_directory, f_name)) if f_name.startswith("cache-") and f_name.endswith(".arrow"): if full_name in current_cache_files: logger.info(f"Keeping currently used cache file at {full_name}") continue files_to_remove.append(full_name) for file_path in files_to_remove:
logger.info(f"Removing {file_path}") os.remove(file_path) return len(files_to_remove)
def _get_cache_file_path(self, fingerprint): if is_caching_enabled() and self.cache_files: cache_file_name = "cache-" + fingerprint + ".arrow" cache_directory = os.path.dirname(self.cache_files[0]["filename"]) else: cache_file_name = "cache-" + generate_random_fingerprint() + ".arrow" cache_directory = get_temporary_cache_files_directory() cache_file_path = os.path.join(cache_directory, cache_file_name) return cache_file_path
@transmit_format def map( self, function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[Union[str, List[str]]] = None, keep_in_memory: bool = False, load_from_cache_file: Optional[bool] = None, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, fn_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, suffix_template: str = "_{rank:05d}_of_{num_proc:05d}", new_fingerprint: Optional[str] = None, desc: Optional[str] = None, ) -> "Dataset": """
Apply a function to all the examples in the table (individually or in batches) and update the table. If your function returns a column that already exists, then it overwrites it.
You can specify whether the function should be batched or not with the `batched` parameter: - If batched is `False`, then the function takes 1 example in and should return 1 example. An example is a dictionary, e.g. `{"text": "Hello there !"}`. - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples. A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`. - If batched is `True` and `batch_size` is `n > 1`, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples. Note that the last batch may have less than `n` examples. A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`. Args: function (`Callable`): Function with one of the following signatures:
- `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, default to identity function: `lambda x: x`. with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (`bool`, defaults to `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): The columns to be passed into `function`
as positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`. batch_size (`int`, *optional*, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`. If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`. drop_last_batch (`bool`, defaults to `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
columns with names in `remove_columns`, these columns will be kept. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file. load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function` can be identified, use it instead of recomputing. cache_file_name (`str`, *optional*, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed.
Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Disallow null values in the table. fn_kwargs (`Dict`, *optional*, defaults to `None`): Keyword arguments to be passed to `function`. num_proc (`int`, *optional*, defaults to `None`): Max number of processes when generating cache. Already cached shards are loaded sequentially. suffix_template (`str`): If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each. Defaults to `"_{rank:05d}_of_{num_proc:05d}"`. For example, if `cache_file_name` is "processed.arrow", then for
`rank=1` and `num_proc=4`, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix. new_fingerprint (`str`, *optional*, defaults to `None`): The new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments. desc (`str`, *optional*, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while mapping examples.
Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> def add_prefix(example): ... example["text"] = "Review: " + example["text"] ... return example >>> ds = ds.map(add_prefix) >>> ds[0:3]["text"] ['Review: compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .', 'Review: the soundtrack alone is worth the price of admission .', 'Review: rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .']
# process a batch of examples >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True) # set number of processors >>> ds = ds.map(add_prefix, num_proc=4) ``` """ if keep_in_memory and cache_file_name is not None: raise ValueError("Please use either `keep_in_memory` or `cache_file_name` but not both.") if num_proc is not None and num_proc <= 0: raise ValueError("num_proc must be an integer > 0.")
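The batched mode documented above also lets the mapped function return a different number of rows than it received, which is convenient for chunking long texts. A sketch (the `chunk` helper is illustrative, not part of the library):

```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> def chunk(batch):
...     # split every review into 100-character pieces; the output batch can be larger than the input batch
...     pieces = [text[i : i + 100] for text in batch["text"] for i in range(0, len(text), 100)]
...     return {"text": pieces}
>>> chunked = ds.map(chunk, batched=True, remove_columns=ds.column_names)
>>> len(chunked) >= len(ds)
True
```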
# If the array is empty we do nothing (but we make sure to handle an empty indices mapping and remove the requested columns anyway) if len(self) == 0: if self._indices is not None: # empty indices mapping self = Dataset( self.data.slice(0, 0), info=self.info.copy(), split=self.split, fingerprint=new_fingerprint, ) if remove_columns: return self.remove_columns(remove_columns) else: return self if function is None: function = lambda x: x # noqa: E731 if isinstance(input_columns, str): input_columns = [input_columns]
if input_columns is not None: missing_columns = set(input_columns) - set(self._data.column_names) if missing_columns: raise ValueError( f"Input column {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) if isinstance(remove_columns, str): remove_columns = [remove_columns] if remove_columns is not None: missing_columns = set(remove_columns) - set(self._data.column_names) if missing_columns: raise ValueError( f"Column to remove {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}" ) load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() if fn_kwargs is None: fn_kwargs = {}
if num_proc is not None and num_proc > len(self): num_proc = len(self) logger.warning( f"num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}." ) dataset_kwargs = { "shard": self, "function": function, "with_indices": with_indices, "with_rank": with_rank, "input_columns": input_columns, "batched": batched, "batch_size": batch_size, "drop_last_batch": drop_last_batch, "remove_columns": remove_columns, "keep_in_memory": keep_in_memory, "writer_batch_size": writer_batch_size, "features": features, "disable_nullable": disable_nullable, "fn_kwargs": fn_kwargs, }
if new_fingerprint is None: # we create a unique hash from the function, # current dataset file and the mapping args transform = format_transform_for_fingerprint(Dataset._map_single) kwargs_for_fingerprint = format_kwargs_for_fingerprint(Dataset._map_single, (), dataset_kwargs) kwargs_for_fingerprint["fingerprint_name"] = "new_fingerprint" new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint) else: validate_fingerprint(new_fingerprint) dataset_kwargs["new_fingerprint"] = new_fingerprint if self.cache_files: if cache_file_name is None: cache_file_name = self._get_cache_file_path(new_fingerprint) dataset_kwargs["cache_file_name"] = cache_file_name
def load_processed_shard_from_cache(shard_kwargs): """Load a processed shard from cache if it exists, otherwise throw an error.""" shard = shard_kwargs["shard"] # Check if we've already cached this computation (indexed by a hash) if shard_kwargs["cache_file_name"] is not None: if os.path.exists(shard_kwargs["cache_file_name"]) and load_from_cache_file: info = shard.info.copy() info.features = features return Dataset.from_file(shard_kwargs["cache_file_name"], info=info, split=shard.split) raise NonExistentDatasetError num_shards = num_proc if num_proc is not None else 1 if batched and drop_last_batch: pbar_total = len(self) // num_shards // batch_size * num_shards * batch_size else: pbar_total = len(self)
shards_done = 0 if num_proc is None or num_proc == 1: transformed_dataset = None try: transformed_dataset = load_processed_shard_from_cache(dataset_kwargs) logger.info(f"Loading cached processed dataset at {dataset_kwargs['cache_file_name']}") except NonExistentDatasetError: pass if transformed_dataset is None: with hf_tqdm( unit=" examples", total=pbar_total, desc=desc or "Map", ) as pbar: for rank, done, content in Dataset._map_single(**dataset_kwargs): if done: shards_done += 1 logger.debug(f"Finished processing shard number {rank} of {num_shards}.") transformed_dataset = content else: pbar.update(content)
assert transformed_dataset is not None, "Failed to retrieve the result from map" # update fingerprint if the dataset changed if transformed_dataset._fingerprint != self._fingerprint: transformed_dataset._fingerprint = new_fingerprint return transformed_dataset else:
def format_cache_file_name( cache_file_name: Optional[str], rank: Union[int, Literal["*"]], # noqa: F722 ) -> Optional[str]: if not cache_file_name: return cache_file_name sep = cache_file_name.rindex(".") base_name, extension = cache_file_name[:sep], cache_file_name[sep:] if isinstance(rank, int): cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension logger.info(f"Process #{rank} will write at {cache_file_name}") else: cache_file_name = ( base_name + suffix_template.replace("{rank:05d}", "{rank}").format(rank=rank, num_proc=num_proc) + extension ) return cache_file_name
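To make the per-process cache naming in `format_cache_file_name` concrete, here is the same string arithmetic in isolation, using the default `suffix_template` and the example values from the docstring above:

```py
>>> suffix_template = "_{rank:05d}_of_{num_proc:05d}"
>>> cache_file_name = "processed.arrow"
>>> sep = cache_file_name.rindex(".")
>>> base_name, extension = cache_file_name[:sep], cache_file_name[sep:]
>>> base_name + suffix_template.format(rank=1, num_proc=4) + extension
'processed_00001_of_00004.arrow'
```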
def format_new_fingerprint(new_fingerprint: str, rank: int) -> str: new_fingerprint = new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc) validate_fingerprint(new_fingerprint) return new_fingerprint
prev_env = deepcopy(os.environ) # check if parallelism if off # from https://github.com/huggingface/tokenizers/blob/bb668bc439dc34389b71dbb8ce0c597f15707b53/tokenizers/src/utils/parallelism.rs#L22 if prev_env.get("TOKENIZERS_PARALLELISM", "false").lower() not in ( "", "off", "false", "f", "no", "n", "0", ): logger.warning("Setting TOKENIZERS_PARALLELISM=false for forked processes.") os.environ["TOKENIZERS_PARALLELISM"] = "false" shards = [ self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory) for rank in range(num_proc) ] kwargs_per_job = [ { **dataset_kwargs, "shard": shards[rank],
"cache_file_name": format_cache_file_name(cache_file_name, rank), "rank": rank, "offset": sum(len(s) for s in shards[:rank]), "new_fingerprint": format_new_fingerprint(new_fingerprint, rank), } for rank in range(num_shards) ]
transformed_shards = [None] * num_shards for rank in range(num_shards): try: transformed_shards[rank] = load_processed_shard_from_cache(kwargs_per_job[rank]) kwargs_per_job[rank] = None except NonExistentDatasetError: pass kwargs_per_job = [kwargs for kwargs in kwargs_per_job if kwargs is not None]
# We try to create a pool with as many workers as dataset not yet cached. if kwargs_per_job: if len(kwargs_per_job) < num_shards: logger.info( f"Reprocessing {len(kwargs_per_job)}/{num_shards} shards because some of them were missing from the cache." ) with Pool(len(kwargs_per_job)) as pool: os.environ = prev_env logger.info(f"Spawning {num_proc} processes") with hf_tqdm( unit=" examples", total=pbar_total, desc=(desc or "Map") + f" (num_proc={num_proc})", ) as pbar: for rank, done, content in iflatmap_unordered( pool, Dataset._map_single, kwargs_iterable=kwargs_per_job ): if done: shards_done += 1
logger.debug(f"Finished processing shard number {rank} of {num_shards}.") transformed_shards[rank] = content else: pbar.update(content) # Avoids PermissionError on Windows (the error: https://github.com/huggingface/datasets/actions/runs/4026734820/jobs/6921621805) for kwargs in kwargs_per_job: del kwargs["shard"] else: logger.info(f"Loading cached processed dataset at {format_cache_file_name(cache_file_name, '*')}") assert None not in transformed_shards, ( f"Failed to retrieve results from map: result list {transformed_shards} still contains None - at least one worker failed to return its results" ) logger.info(f"Concatenating {num_proc} shards") result = _concatenate_map_style_datasets(transformed_shards)
# update fingerprint if the dataset changed if any( transformed_shard._fingerprint != shard._fingerprint for transformed_shard, shard in zip(transformed_shards, shards) ): result._fingerprint = new_fingerprint else: result._fingerprint = self._fingerprint return result
@staticmethod def _map_single( shard: "Dataset", function: Optional[Callable] = None, with_indices: bool = False, with_rank: bool = False, input_columns: Optional[List[str]] = None, batched: bool = False, batch_size: Optional[int] = 1000, drop_last_batch: bool = False, remove_columns: Optional[List[str]] = None, keep_in_memory: bool = False, cache_file_name: Optional[str] = None, writer_batch_size: Optional[int] = 1000, features: Optional[Features] = None, disable_nullable: bool = False, fn_kwargs: Optional[dict] = None, new_fingerprint: Optional[str] = None, rank: Optional[int] = None, offset: int = 0, ) -> Iterable[Tuple[int, bool, Union[int, "Dataset"]]]: """Apply a function to all the elements in the table (individually or in batches) and update the table (if function does update examples).
Args: shard (`datasets.Dataset`): Dataset to map the transform on. function (`Callable`): with one of the following signature: - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False` - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each) - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False` - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
For advanced usage, the function can also return a `pyarrow.Table`. Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged. If no function is provided, default to identity function: lambda x: x with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`. with_rank (`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`. input_columns (`Optional[List[str]]`, defaults to `None`): The columns to be passed into `function` as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument. batched (`bool`, defaults to `False`): Provide batch of examples to `function`
batch_size (`int`, optional, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True` `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function` drop_last_batch (`bool`, default: `False`): Whether a last batch smaller than the batch_size should be dropped instead of being processed by the function. remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping. Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding columns with names in `remove_columns`, these columns will be kept. keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file.
cache_file_name (`str`, optional, defaults to `None`): Provide the name of a path for the cache file. It is used to store the results of the computation instead of the automatically generated cache file name. writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer. This value is a good trade-off between memory usage during the processing, and processing speed. Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`. features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file instead of the automatically generated one. disable_nullable (`bool`, defaults to `False`): Disallow null values in the table. fn_kwargs (`Dict`, optional, defaults to `None`): Keyword arguments to be passed to `function`
new_fingerprint (`str`, optional, defaults to `None`): the new fingerprint of the dataset after transform. If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments rank: (`int`, optional, defaults to `None`): If specified, this is the process rank when doing multiprocessing offset: (`int`, defaults to 0): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`. """ if fn_kwargs is None: fn_kwargs = {}
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
# If we do batch computation but no batch size is provided, default to the full dataset
if batched and (batch_size is None or batch_size <= 0):
    batch_size = shard.num_rows

# We set this variable to True after processing the first example/batch in
# `apply_function_on_filtered_inputs` if the map function returns a dict.
# If set to False, no new arrow table will be created
update_data = None

format_kwargs = shard._format_kwargs.copy()
# Lazy formatting is only available for the default format (None/python)
if not input_columns and shard._format_type is None:
    format_kwargs["lazy"] = True
input_formatter = get_formatter(
    shard._format_type,
    features=shard.features,
    **format_kwargs,
)

class NumExamplesMismatchError(Exception):
    pass
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
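The first branch above is what makes a `None` or non-positive `batch_size` mean "one batch for the whole shard". A hedged, user-level illustration of that behavior (toy data):

```py
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"]})
# With batched=True and batch_size=None, `function` sees the entire dataset at once,
# so the batch length equals the number of rows.
ds = ds.map(
    lambda batch: {"rows_in_batch": [len(batch["text"])] * len(batch["text"])},
    batched=True,
    batch_size=None,
)
print(ds["rows_in_batch"])  # [3, 3, 3]
```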
def validate_function_output(processed_inputs, indices):
    """Validate output of the map function."""
    allowed_processed_inputs_types = (Mapping, pa.Table, pd.DataFrame)
    if config.POLARS_AVAILABLE and "polars" in sys.modules:
        import polars as pl
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
        allowed_processed_inputs_types += (pl.DataFrame,)
    if processed_inputs is not None and not isinstance(processed_inputs, allowed_processed_inputs_types):
        raise TypeError(
            f"Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects."
        )
    elif isinstance(indices, list) and isinstance(processed_inputs, Mapping):
        allowed_batch_return_types = (list, np.ndarray, pd.Series)
        if config.POLARS_AVAILABLE and "polars" in sys.modules:
            import polars as pl

            allowed_batch_return_types += (pl.Series, pl.DataFrame)
        if config.TF_AVAILABLE and "tensorflow" in sys.modules:
            import tensorflow as tf
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
            allowed_batch_return_types += (tf.Tensor,)
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            allowed_batch_return_types += (torch.Tensor,)
        if config.JAX_AVAILABLE and "jax" in sys.modules:
            import jax.numpy as jnp

            allowed_batch_return_types += (jnp.ndarray,)
        all_dict_values_are_lists = all(
            isinstance(value, allowed_batch_return_types) for value in processed_inputs.values()
        )
        if all_dict_values_are_lists is False:
            raise TypeError(
                f"Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`."
            )
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
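In short, a batched `function` must return a mapping whose values are column-like (lists, NumPy arrays, pandas Series, and optionally polars/TensorFlow/PyTorch/JAX containers); anything else should trip the checks above. A hedged sketch of what passes and what fails (toy data, assuming default formatting):

```py
import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({"x": [1, 2, 3]})

# OK: a dict of column-like values in batched mode.
ds.map(lambda batch: {"y": np.asarray(batch["x"]) * 2}, batched=True)

# Expected to raise TypeError: a batched function must return columns, not scalars.
try:
    ds.map(lambda batch: {"y": 42}, batched=True)
except TypeError as e:
    print(type(e).__name__)  # TypeError
```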
def apply_function_on_filtered_inputs(pa_inputs, indices, check_same_num_examples=False, offset=0):
    """Utility to apply the function on a selection of columns."""
    nonlocal update_data
    inputs = format_table(
        pa_inputs,
        0 if not batched else range(pa_inputs.num_rows),
        format_columns=input_columns,
        formatter=input_formatter,
    )
    fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns]
    if offset == 0:
        effective_indices = indices
    else:
        effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset
    additional_args = ()
    if with_indices:
        additional_args += (effective_indices,)
    if with_rank:
        additional_args += (rank,)
    processed_inputs = function(*fn_args, *additional_args, **fn_kwargs)
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
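The `effective_indices` logic above is what shifts indices when a shard does not start at global row 0 (e.g. when the dataset is split across processes). A standalone sketch mirroring that arithmetic; `shift_indices` is a hypothetical helper, not part of the library:

```py
# Hypothetical helper mirroring the `effective_indices` computation above.
def shift_indices(indices, offset):
    if offset == 0:
        return indices
    return [i + offset for i in indices] if isinstance(indices, list) else indices + offset

print(shift_indices([0, 1, 2], 10))  # [10, 11, 12]  (batched: a list of indices)
print(shift_indices(2, 10))          # 12            (single-example index)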
    if isinstance(processed_inputs, LazyDict):
        processed_inputs = {
            k: v for k, v in processed_inputs.data.items() if k not in processed_inputs.keys_to_format
        }
        returned_lazy_dict = True
    else:
        returned_lazy_dict = False
    if update_data is None:
        # Check if the function returns updated examples
        updatable_types = (Mapping, pa.Table, pd.DataFrame)
        if config.POLARS_AVAILABLE and "polars" in sys.modules:
            import polars as pl
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
            updatable_types += (pl.DataFrame,)
        update_data = isinstance(processed_inputs, updatable_types)
        validate_function_output(processed_inputs, indices)
    if not update_data:
        return None  # Nothing to update, let's move on
    if shard._format_type or input_columns:
        # TODO(QL, MS): ideally the behavior should be the same even if the dataset is formatted (may require major release)
        inputs_to_merge = dict(zip(pa_inputs.column_names, pa_inputs.itercolumns()))
    elif isinstance(inputs, LazyDict):
        inputs_to_merge = {
            k: (v if k not in inputs.keys_to_format else pa_inputs[k]) for k, v in inputs.data.items()
        }
    else:
        inputs_to_merge = inputs
    if remove_columns is not None:
        for column in remove_columns:
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
            # `function` can modify input in-place causing column to be already removed.
            if column in inputs_to_merge:
                inputs_to_merge.pop(column)
            if returned_lazy_dict and column in processed_inputs:
                processed_inputs.pop(column)
    if check_same_num_examples:
        input_num_examples = len(pa_inputs)
        processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))])
        if input_num_examples != processed_inputs_num_examples:
            raise NumExamplesMismatchError()
    if isinstance(inputs, Mapping) and isinstance(processed_inputs, Mapping):
        # The .map() transform *updates* the dataset:
        # the output dictionary contains both the input data and the output data.
        # The output dictionary may contain Arrow values from `inputs_to_merge` so that we can re-write them efficiently.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
        return {**inputs_to_merge, **processed_inputs}
    else:
        return processed_inputs
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
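The returned mapping is therefore the input row(s) updated with the function's output: columns produced by `function` override existing ones, and new columns are appended. A toy sketch of that merge semantics with plain dicts (not the library's table objects):

```py
# Toy illustration of `{**inputs_to_merge, **processed_inputs}`.
inputs_to_merge = {"text": ["good movie"], "label": [1]}
processed_inputs = {"label": [0], "n_chars": [10]}
merged = {**inputs_to_merge, **processed_inputs}
print(merged)  # {'text': ['good movie'], 'label': [0], 'n_chars': [10]}
```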
def init_buffer_and_writer():
    # Prepare output buffer and batched writer in memory or on file if we update the table
    writer_features = features
    if writer_features is None:
        writer_features = shard.features
        update_features = True
    else:
        update_features = False
    if keep_in_memory or cache_file_name is None:
        buf_writer = pa.BufferOutputStream()
        tmp_file = None
        writer = ArrowWriter(
            features=writer_features,
            stream=buf_writer,
            writer_batch_size=writer_batch_size,
            update_features=update_features,
            fingerprint=new_fingerprint,
            disable_nullable=disable_nullable,
        )
    else:
        buf_writer = None
        logger.info(f"Caching processed dataset at {cache_file_name}")
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
        cache_dir = os.path.dirname(cache_file_name)
        os.makedirs(cache_dir, exist_ok=True)
        tmp_file = tempfile.NamedTemporaryFile("wb", dir=cache_dir, delete=False)
        writer = ArrowWriter(
            features=writer_features,
            path=tmp_file.name,
            writer_batch_size=writer_batch_size,
            update_features=update_features,
            fingerprint=new_fingerprint,
            disable_nullable=disable_nullable,
        )
    return buf_writer, writer, tmp_file
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
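`init_buffer_and_writer` picks between an in-memory Arrow buffer (`keep_in_memory=True` or no cache file name) and a temporary file next to the cache file. A hedged sketch of the same two sinks using plain pyarrow IPC streams rather than the library's `ArrowWriter`, just to show what each branch amounts to:

```py
import os
import tempfile

import pyarrow as pa

table = pa.table({"text": ["a", "b"], "label": [0, 1]})

# In-memory branch: stream the Arrow data into a buffer.
buf_writer = pa.BufferOutputStream()
with pa.ipc.new_stream(buf_writer, table.schema) as writer:
    writer.write_table(table)
in_memory_bytes = buf_writer.getvalue()

# On-disk branch: write to a temporary file inside a (hypothetical) cache directory.
cache_dir = tempfile.mkdtemp()
tmp_file = tempfile.NamedTemporaryFile("wb", dir=cache_dir, delete=False)
with pa.ipc.new_stream(tmp_file, table.schema) as writer:
    writer.write_table(table)
tmp_file.close()
print(os.path.getsize(tmp_file.name), "bytes written")
```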