Convert dataset to Parquet

#3
by albertvillanova - opened
README.md CHANGED
@@ -30,10 +30,15 @@ dataset_info:
  '1': negative
  splits:
  - name: train
- num_bytes: 114670811
+ num_bytes: 114670791
  num_examples: 50000
- download_size: 31510992
- dataset_size: 114670811
+ download_size: 56234303
+ dataset_size: 114670791
+ configs:
+ - config_name: default
+   data_files:
+   - split: train
+     path: data/train-*
  ---

  # Dataset Card for ImDB Urdu Reviews
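
With the configs block pointing at data/train-*, the Hub can serve the Parquet shard directly and no loading script is needed. A minimal sketch of loading the converted dataset, assuming the repository id is mirfan899/imdb_urdu_reviews (the id is not stated in this diff):

    from datasets import load_dataset

    # Repo id assumed for illustration; use the actual dataset repository on the Hub.
    ds = load_dataset("mirfan899/imdb_urdu_reviews", split="train")

    print(ds.num_rows)        # expected 50000, matching num_examples above
    print(ds[0]["sentence"])  # first translated Urdu review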
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df742bf617460342a5ef9691e66be05b4ce9787cc3a12d6875d53b8c24e515a0
+ size 56234303
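
The added file is a Git LFS pointer: only the sha256 and the size (56234303 bytes, matching download_size in the README) are committed, while the Parquet blob itself is stored on LFS. A rough sketch of inspecting the shard once it has been fetched locally, for example with huggingface_hub.hf_hub_download (local path and repo id are assumptions):

    import pandas as pd

    # Assumed local path to the downloaded LFS object.
    df = pd.read_parquet("data/train-00000-of-00001.parquet")

    print(df.shape)                        # expected (50000, 2)
    print(df.columns.tolist())             # expected ['sentence', 'sentiment']
    print(df["sentiment"].value_counts())  # sentiment is stored as class indices (0/1)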
imdb_urdu_reviews.py DELETED
@@ -1,73 +0,0 @@
- """IMDB Urdu movie reviews dataset."""
-
-
- import csv
- import os
-
- import datasets
- from datasets.tasks import TextClassification
-
-
- _CITATION = """
- @InProceedings{maas-EtAl:2011:ACL-HLT2011,
-   author    = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. and Potts, Christopher},
-   title     = {Learning Word Vectors for Sentiment Analysis},
-   month     = {June},
-   year      = {2011},
-   address   = {Portland, Oregon, USA},
-   publisher = {Association for Computational Linguistics},
-   pages     = {142--150},
-   url       = {http://www.aclweb.org/anthology/P11-1015}
- }
- """
-
- _DESCRIPTION = """
- Large Movie Translated Urdu Reviews Dataset.
- This is a dataset for binary sentiment classification containing substantially more data than previous
- benchmark datasets. We provide a set of 40,000 highly polar movie reviews for training and 10,000 for testing.
- To increase the availability of sentiment analysis datasets for a low-resource language like Urdu,
- we opted to use the already available IMDB dataset and translated it with Google Translate.
- This is a binary classification dataset with two classes, positive and negative.
- The reason behind using this dataset is the high polarity of each class.
- It contains 50k samples equally divided into the two classes.
- """
-
- _URL = "https://github.com/mirfan899/Urdu/blob/master/sentiment/imdb_urdu_reviews.csv.tar.gz?raw=true"
-
- _HOMEPAGE = "https://github.com/mirfan899/Urdu"
-
-
- class ImdbUrduReviews(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("1.0.0")
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "sentence": datasets.Value("string"),
-                     "sentiment": datasets.ClassLabel(names=["positive", "negative"]),
-                 }
-             ),
-             citation=_CITATION,
-             homepage=_HOMEPAGE,
-             task_templates=[TextClassification(text_column="sentence", label_column="sentiment")],
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         dl_path = dl_manager.download_and_extract(_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_path, "imdb_urdu_reviews.csv")}
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         """Yields examples."""
-         with open(filepath, encoding="utf-8") as f:
-             reader = csv.reader(f, delimiter=",")
-             for id_, row in enumerate(reader):
-                 if id_ == 0:
-                     continue
-                 yield id_, {"sentiment": row[1], "sentence": row[0]}
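
With the script removed, the positive/negative label mapping now comes from the dataset_info YAML in the README rather than from the ClassLabel declared in _info. A small sketch, reusing the ds object from the load_dataset example above, of recovering the string labels from the stored class indices:

    # Continues the earlier sketch; ds is the loaded "train" split.
    label_feature = ds.features["sentiment"]

    print(label_feature.names)                         # ['positive', 'negative'], as in the YAML above
    print(label_feature.int2str(0))                    # 'positive'
    print(label_feature.int2str(ds[0]["sentiment"]))   # label of the first example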