Datasets: HausaNLP/NaijaSenti-Twitter
Commit: Multiple updates

Files changed:
- NaijaSenti-Twitter.py  +2 -5
- README.md  +2 -2
NaijaSenti-Twitter.py
CHANGED
@@ -49,7 +49,6 @@ _CITATION = """\
 """
 
 
-import csv
 import textwrap
 import pandas as pd
 
@@ -105,8 +104,7 @@ class NaijaSenti(datasets.GeneratorBasedBuilder):
 NaijaSentiConfig(
     name=lang,
     description=textwrap.dedent(
-        f"""
-        {lang} dataset."""
+        f"""{_DESCRIPTION}"""
     ),
     text_features={"tweet": "tweet"},
     label_classes=["positive", "neutral", "negative"],
@@ -115,8 +113,7 @@ class NaijaSenti(datasets.GeneratorBasedBuilder):
     valid_url=f"https://raw.githubusercontent.com/hausanlp/NaijaSenti/main/data/annotated_tweets/{lang}/dev.tsv",
     test_url=f"https://raw.githubusercontent.com/hausanlp/NaijaSenti/main/data/annotated_tweets/{lang}/test.tsv",
     citation=textwrap.dedent(
-        f"""
-        {lang} citation"""
+        f"""{_CITATION}"""
     ),
 ),
 )
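For context, here is a minimal sketch of the pattern these hunks converge on: both `description` and `citation` now reuse the shared module-level `_DESCRIPTION` and `_CITATION` constants instead of per-language f-strings. The `NaijaSentiConfig` field list, the language codes, and the placeholder constant values below are assumptions inferred from the diff and the README, not the script's actual contents.

```python
import textwrap

import datasets

# Placeholder values; in the real script these module-level constants hold the
# dataset description and the BibTeX citation.
_DESCRIPTION = "NaijaSenti Twitter sentiment dataset (placeholder description)."
_CITATION = "Placeholder citation text."

# Assumed from the README language tags.
_LANGUAGES = ["hau", "ibo", "pcm", "yor"]


class NaijaSentiConfig(datasets.BuilderConfig):
    """BuilderConfig carrying per-language URLs and label metadata (fields inferred from the diff)."""

    def __init__(self, text_features=None, label_classes=None,
                 valid_url=None, test_url=None, citation=None, **kwargs):
        super().__init__(**kwargs)
        self.text_features = text_features
        self.label_classes = label_classes
        self.valid_url = valid_url
        self.test_url = test_url
        self.citation = citation


# After the commit, every language config reuses the shared constants instead
# of per-language strings like f"""{lang} dataset.""".
BUILDER_CONFIGS = [
    NaijaSentiConfig(
        name=lang,
        description=textwrap.dedent(f"""{_DESCRIPTION}"""),
        text_features={"tweet": "tweet"},
        label_classes=["positive", "neutral", "negative"],
        valid_url=f"https://raw.githubusercontent.com/hausanlp/NaijaSenti/main/data/annotated_tweets/{lang}/dev.tsv",
        test_url=f"https://raw.githubusercontent.com/hausanlp/NaijaSenti/main/data/annotated_tweets/{lang}/test.tsv",
        citation=textwrap.dedent(f"""{_CITATION}"""),
    )
    for lang in _LANGUAGES
]
```

Centralizing the description and citation keeps the metadata identical across all language configs and avoids repeating boilerplate per language.
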
README.md
CHANGED
@@ -21,7 +21,7 @@ language:
 - ibo
 - pcm
 - yor
-pretty_name:
+pretty_name: NaijaSenti
 ---
 
 <p align="center">
@@ -102,7 +102,7 @@ The NaijaSenti dataset has 3 splits: train, validation, and test. Below are the
 ```python
 from datasets import load_dataset
 
-# you can load specific languages (e.g.,
+# you can load specific languages (e.g., Hausa). This downloads the train, validation, and test sets.
 ds = load_dataset("HausaNLP/NaijaSenti-Twitter", "hau")
 
 # train set only