Missing "languages" kwarg error

#6
by neo-nlp-dev - opened

Stack trace provided below:
```python

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
Cell In[2], line 1
----> 1 mc4random = load_dataset(
2 "bertin-project/mc4-sampling", "es",
3 split="train",
4 streaming=True,
5 sampling_method="random",
6 factor=0.5,
7 )

File ~/.conda/envs/sample_env/lib/python3.9/site-packages/datasets/load.py:1735, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, num_proc, **config_kwargs)
1732 ignore_verifications = ignore_verifications or save_infos
1734 # Create a dataset builder
-> 1735 builder_instance = load_dataset_builder(
1736 path=path,
1737 name=name,
1738 data_dir=data_dir,
1739 data_files=data_files,
1740 cache_dir=cache_dir,
1741 features=features,
1742 download_config=download_config,
1743 download_mode=download_mode,
1744 revision=revision,
1745 use_auth_token=use_auth_token,
1746 **config_kwargs,
1747 )
1749 # Return iterable dataset in case of streaming
1750 if streaming:

File ~/.conda/envs/sample_env/lib/python3.9/site-packages/datasets/load.py:1519, in load_dataset_builder(path, name, data_dir, data_files, cache_dir, features, download_config, download_mode, revision, use_auth_token, **config_kwargs)
1516 raise ValueError(error_msg)
1518 # Instantiate the dataset builder
-> 1519 builder_instance: DatasetBuilder = builder_cls(
1520 cache_dir=cache_dir,
1521 config_name=config_name,
1522 data_dir=data_dir,
1523 data_files=data_files,
1524 hash=hash,
1525 features=features,
1526 use_auth_token=use_auth_token,
1527 **builder_kwargs,
1528 **config_kwargs,
1529 )
1531 return builder_instance

File ~/.cache/huggingface/modules/datasets_modules/datasets/bertin-project--mc4-sampling/ae7065df866e6fa45f5bdf929ecc5400934c9d2d9dd5db39393d47a8459048b5/mc4-sampling.py:316, in Mc4Sampling.__init__(self, writer_batch_size, *args, **kwargs)
311 self.should_keep_doc = self._should_keep_doc_step
312 init_kwargs = {
313 prop: kwargs.get(prop)
314 for prop in ("name", "version", "data_dir", "data_files", "description")
315 }
--> 316 super().__init__(*args, writer_batch_size=writer_batch_size, **init_kwargs)

File ~/.conda/envs/sample_env/lib/python3.9/site-packages/datasets/builder.py:1357, in GeneratorBasedBuilder.__init__(self, writer_batch_size, *args, **kwargs)
1356 def __init__(self, *args, writer_batch_size=None, **kwargs):
-> 1357 super().__init__(*args, **kwargs)
1358 # Batch size used by the ArrowWriter
1359 # It defines the number of samples that are kept in memory before writing them
1360 # and also the length of the arrow chunks
1361 # None means that the ArrowWriter will use its default value
1362 self._writer_batch_size = writer_batch_size or self.DEFAULT_WRITER_BATCH_SIZE

File ~/.conda/envs/sample_env/lib/python3.9/site-packages/datasets/builder.py:322, in DatasetBuilder.__init__(self, cache_dir, config_name, hash, base_path, info, features, use_auth_token, repo_id, data_files, data_dir, name, **config_kwargs)
320 if data_dir is not None:
321 config_kwargs["data_dir"] = data_dir
--> 322 self.config, self.config_id = self._create_builder_config(
323 config_name=config_name,
324 custom_features=features,
325 **config_kwargs,
326 )
328 # prepare info: DatasetInfo are a standardized dataclass across all datasets
329 # Prefill datasetinfo
330 if info is None:

File ~/.conda/envs/sample_env/lib/python3.9/site-packages/datasets/builder.py:475, in DatasetBuilder._create_builder_config(self, config_name, custom_features, **config_kwargs)
473 if "version" not in config_kwargs and hasattr(self, "VERSION") and self.VERSION:
474 config_kwargs["version"] = self.VERSION
--> 475 builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs)
477 # otherwise use the config_kwargs to overwrite the attributes
478 else:
479 builder_config = copy.deepcopy(builder_config)

TypeError: __init__() missing 1 required keyword-only argument: 'languages'
```

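For context on the failure: `Mc4SamplingConfig` requires `languages` as a keyword-only argument, but by the time the library constructs the config, that kwarg has been dropped. A minimal sketch of this failure shape, using hypothetical classes rather than the real `datasets` internals:

```python
# Minimal sketch of the failure shape (hypothetical classes, not the real
# `datasets` internals): a config class requires a keyword-only argument,
# but the builder filters kwargs through an allow-list that omits it.

class Config:
    def __init__(self, *, languages, name=None, version=None):
        self.languages = languages

class Builder:
    def __init__(self, **config_kwargs):
        # Stand-in for datasets' `self.BUILDER_CONFIG_CLASS(**config_kwargs)`
        self.config = Config(**config_kwargs)

kwargs = {"name": "es", "languages": ["es"]}
# "languages" is missing from the allow-list, so it never reaches Config:
init_kwargs = {prop: kwargs.get(prop) for prop in ("name", "version")}
Builder(**init_kwargs)
# TypeError: __init__() missing 1 required keyword-only argument: 'languages'
```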
**Note:** Including "languages" in the `init_kwargs` at https://huggingface.co/datasets/bertin-project/mc4-sampling/blob/main/mc4-sampling.py#L314 seems to solve the problem (partially), as long as the "languages" kwarg is passed to the `load_dataset` call with a single entry:


```python
311         self.should_keep_doc = self._should_keep_doc_step
312         init_kwargs = {
313             prop: kwargs.get(prop)
314             for prop in ("name", "version", "data_dir", "data_files", "description", "languages")
315         }
316         super().__init__(*args, writer_batch_size=writer_batch_size, **init_kwargs)
```

Modified call:

```python
mc4random = load_dataset(
    "bertin-project/mc4-sampling", languages=["es"],
    split="train",
    streaming=True,
    sampling_method="random",
    factor=0.5,
)
```

I still got the error: `TypeError: Mc4SamplingConfig.__init__() missing 1 required keyword-only argument: 'languages'`.
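For reference, the error message corresponds to a config class that declares `languages` as keyword-only, roughly this shape (a guess at the relevant signature, not a copy of the actual script):

```python
import datasets

class Mc4SamplingConfig(datasets.BuilderConfig):
    """Sketch: a config whose constructor requires `languages` by keyword."""

    def __init__(self, *args, languages, **kwargs):
        # Any code path that instantiates this class without an explicit
        # `languages=` kwarg raises the TypeError reported above.
        super().__init__(*args, **kwargs)
        self.languages = languages
```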

BERTIN Project org

Should be fixed now:

```python
In [1]: from datasets import load_dataset
   ...:
   ...: sampled_mc4 = load_dataset("bertin-project/mc4-sampling", "es", trust_remote_code=True, streaming=True)
   ...: for item in sampled_mc4["train"]:
   ...:     print(item)
   ...:     break
   ...:
{'text': 'Comprar Zapatillas para niña en chancla con goma...', 'timestamp': '2019-01-18T17:11:30Z', 'url': 'https://www.calzadoslabalear.com/es/zapatillas-mujer/136-comprar-Zapatillas-para-nina-en-chancla-con-goma-por-detras-Gioseppo-en-rosa-online.html'}

In [2]: from datasets import load_dataset
   ...:
   ...: sampled_mc4 = load_dataset("bertin-project/mc4-sampling", languages=["en", "es"], trust_remote_code=True, streaming=True)
   ...: for item in sampled_mc4["train"]:
   ...:     print(item)
   ...:     break
   ...:
{'text': 'Posts 4,362\tMore Info\nOkay so to those of you that were very helpful...', 'timestamp': '2014-03-09T04:06:28Z', 'url': 'http://www.polkaudio.com/forums/showthread.php?58429-Are-my-speakers-magnetically-shielded&goto=nextoldest'}
```
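With the fix in, the original random-sampling call from the top of the thread should presumably go through as well; an untested sketch combining the original kwargs with the `trust_remote_code=True` flag used in the sessions above:

```python
from datasets import load_dataset

# Original report's call, plus trust_remote_code as in the verification
# sessions above; sampling_method and factor are the script's own kwargs.
mc4random = load_dataset(
    "bertin-project/mc4-sampling", "es",
    split="train",
    streaming=True,
    trust_remote_code=True,
    sampling_method="random",
    factor=0.5,
)

# Peek at the first sampled document from the streaming dataset.
print(next(iter(mc4random)))
```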
