pere committed on
Commit
e0ec849
1 Parent(s): f1deb67

first with data loader

Files changed (1)
  1. italian_tweets_1M.py +96 -0
italian_tweets_1M.py ADDED
@@ -0,0 +1,96 @@
+ """Italian tweets 1M dataset."""
+ import gzip
+ import json
+
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _DESCRIPTION = """Italian tweets."""
+
+ _URL = "https://huggingface.co/datasets/pere/italian_tweets_1M"
+
+ # No citation is available for this dataset yet.
+ _CITATION = ""
+
+ _DATA_URL = "https://huggingface.co/datasets/pere/italian_tweets_1M/resolve/main/data/{split_suffix}-shard-{index:04d}-of-{n_shards:04d}.json.gz"
+
+ _N_SHARDS_PER_SPLIT = {
+     "train": 1,
+     "validation": 1,
+ }
+
+
+ class italian_tweets_1MConfig(datasets.BuilderConfig):
+     """BuilderConfig for italian_tweets_1M."""
+
+     def __init__(self, *args, **kwargs):
+         """BuilderConfig for italian_tweets_1M.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(
+             *args,
+             name="italian_tweets_1M",
+             **kwargs,
+         )
+
+
+ class italian_tweets_1M(datasets.GeneratorBasedBuilder):
+     """Dataset builder for Italian tweets 1M."""
+
+     BUILDER_CONFIGS = [italian_tweets_1MConfig()]
+     BUILDER_CONFIG_CLASS = italian_tweets_1MConfig
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "doc_type": datasets.Value("string"),
+                     "publish_year": datasets.Value("int32"),
+                     "lang_fasttext": datasets.Value("string"),
+                     "lang_fasttext_conf": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Build the shard URL list for each split, then download the shards.
+         data_urls = {}
+         for split in ["train", "validation"]:
+             data_urls[split] = [
+                 _DATA_URL.format(
+                     split_suffix=split,
+                     index=index,
+                     n_shards=_N_SHARDS_PER_SPLIT[split],
+                 )
+                 for index in range(1, _N_SHARDS_PER_SPLIT[split] + 1)
+             ]
+         train_downloaded_files = dl_manager.download(data_urls["train"])
+         validation_downloaded_files = dl_manager.download(data_urls["validation"])
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}
+             ),
+         ]
+
+     def _generate_examples(self, filepaths):
+         """Yield examples in raw (text) form by iterating over all shard files."""
+         id_ = 0
+         for filepath in filepaths:
+             logger.info("generating examples from = %s", filepath)
+             with gzip.open(filepath, "rt", encoding="utf-8") as f:
+                 for line in f:
+                     if line:
+                         example = json.loads(line)
+                         yield id_, example
+                         id_ += 1
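For reference, here is a minimal sketch of how this loader is consumed once the script and the gzipped JSON-lines shards are pushed to the repo. The sample record is a hypothetical illustration matching the features declared in _info(), not an actual row from the dataset, and the trust_remote_code flag applies only to datasets versions that still execute loading scripts:

# Each shard is a gzipped JSON-lines file. A record could hypothetically
# look like this (field names come from the features in _info()):
# {"id": "12345", "doc_type": "tweet", "publish_year": 2021,
#  "lang_fasttext": "it", "lang_fasttext_conf": "0.98", "text": "Ciao a tutti!"}
from datasets import load_dataset

# Recent datasets versions gate loading-script execution behind this flag.
dataset = load_dataset("pere/italian_tweets_1M", trust_remote_code=True)

# Splits come from _split_generators(); examples carry the declared features.
print(dataset["train"][0]["text"])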