"""ViHOS - Vietnamese Hate and Offensive Spans dataset"""
import pandas as pd
import datasets
_DESCRIPTION = """\
ViHOS is a dataset of Vietnamese hate and offensive spans annotated in social media texts.
"""
_HOMEPAGE = "https://huggingface.co/datasets/phusroyal/ViHOS"
_LICENSE = "mit"

# Raw CSV files for the dev, train, and test splits, in that order.
_URLS = [
    "https://raw.githubusercontent.com/phusroyal/ViHOS/master/data/Span_Extraction_based_version/dev.csv",
    "https://raw.githubusercontent.com/phusroyal/ViHOS/master/data/Span_Extraction_based_version/train.csv",
    "https://raw.githubusercontent.com/phusroyal/ViHOS/master/data/Test_data/test.csv",
]

class ViHOSConfig(datasets.BuilderConfig):
    """BuilderConfig for ViHOS."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class ViHOS(datasets.GeneratorBasedBuilder):
    """Span-extraction builder for the ViHOS dataset."""

    BUILDER_CONFIGS = [
        ViHOSConfig(name="ViHOS", version=datasets.Version("2.0.0"), description=_DESCRIPTION),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # The comment text and its annotated span ids, both stored as strings.
                    "content": datasets.Value("string"),
                    "span_ids": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )
    def _split_generators(self, dl_manager):
        # _URLS is ordered dev, train, test; download_and_extract
        # returns the local paths in the same order.
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir[1],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir[0],
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir[2],
                    "split": "test",
                },
            ),
        ]
    def _generate_examples(self, filepath, split):
        # Skip the header row; the remaining columns are index, content, span_ids.
        data = pd.read_csv(filepath, header=None, sep=",", on_bad_lines="skip", skiprows=[0])
        for i in range(len(data)):
            content = str(data.loc[i, 1])
            # Rows without annotated spans come through as NaN;
            # normalize them to an empty string.
            span_ids = "" if pd.isna(data.loc[i, 2]) else str(data.loc[i, 2])
            yield i, {
                "content": content,
                "span_ids": span_ids,
            }
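
# A minimal usage sketch (an assumption, not part of the original script):
# if this file is saved locally as e.g. ViHOS.py, the three splits defined
# above can be loaded through the standard `datasets.load_dataset` entry
# point. The local path is hypothetical, and newer versions of `datasets`
# may additionally require `trust_remote_code=True` for script-based loaders.
if __name__ == "__main__":
    from datasets import load_dataset

    vihos = load_dataset("ViHOS.py")  # hypothetical local path to this script
    print(vihos)  # DatasetDict with train/validation/test splits
    print(vihos["train"][0])  # {"content": ..., "span_ids": ...}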