c4-en-10k / dataset_infos.json
{"plain_text": {"description": "This is a small subset representing 10K records from the original C4 dataset, \"unshuffled_deduplicated_en\" subset - created for testing. The records were extracted after having been shuffled.\n\nThe full 1TB+ dataset is at https://huggingface.co/datasets/c4.\n", "citation": "@inproceedings{OrtizSuarezSagotRomary2019,\n author = {Pedro Javier {Ortiz Su{'a}rez} and Benoit Sagot and Laurent Romary},\n title = {Asynchronous pipelines for processing huge corpora on medium to low resource infrastructures},\n series = {Proceedings of the Workshop on Challenges in the Management of Large Corpora (CMLC-7) 2019. Cardiff, 22nd July 2019},\n editor = {Piotr Ba\u0144ski and Adrien Barbaresi and Hanno Biber and Evelyn Breiteneder and Simon Clematide and Marc Kupietz and Harald L{\"u}ngen and Caroline Iliadi},\n publisher = {Leibniz-Institut f{\"u}r Deutsche Sprache},\n address = {Mannheim},\n doi = {10.14618/ids-pub-9021},\n url = {http://nbn-resolving.de/urn:nbn:de:bsz:mh39-90215},\n pages = {9 -- 16},\n year = {2019},\n abstract = {Common Crawl is a considerably large, heterogeneous multilingual corpus comprised of crawled documents from the internet, surpassing 20TB of data and distributed as a set of more than 50 thousand plain text files where each contains many documents written in a wide variety of languages. Even though each document has a metadata block associated to it, this data lacks any information about the language in which each document is written, making it extremely difficult to use Common Crawl for monolingual applications. We propose a general, highly parallel, multithreaded pipeline to clean and classify Common Crawl by language; we specifically design it so that it runs efficiently on medium to low resource infrastructures where I/O speeds are the main constraint. We develop the pipeline so that it can be easily reapplied to any kind of heterogeneous corpus and so that it can be parameterised to a wide range of infrastructures. We also distribute a 6.3TB version of Common Crawl, filtered, classified by language, shuffled at line level in order to avoid copyright issues, and ready to be used for NLP applications.},\n language = {en}\n}\n", "homepage": "https://c4-corpus.com/", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "c4_en10k", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 21498951, "num_examples": 10000, "dataset_name": "c4_en10k"}}, "download_checksums": {"https://cdn-datasets.huggingface.co/nlp/datasets/c4/c4-en-10k.tar.xz": {"num_bytes": 6627716, "checksum": "d743c7a76595877c5810427bb6254c7af73211a483810b32016026225231fdd3"}}, "download_size": 6627716, "post_processing_size": null, "dataset_size": 21498951, "size_in_bytes": 28126667}}