Tasks: Text Classification
Formats: json
Sub-tasks: multi-label-classification
Languages: Finnish
Size: 100K - 1M
Tags: toxicity, multi-label
License:
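For reference, a minimal loading sketch (this assumes the `datasets` library is installed; the repository id is taken from the download URLs in the script below, and recent `datasets` versions may additionally require `trust_remote_code=True` for script-based datasets):

```python
from datasets import load_dataset

# Downloads and parses both splits via the builder script below.
ds = load_dataset("TurkuNLP/wikipedia-toxicity-data-fi")
print(ds["train"][0]["text"])
```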
"""Comments from Jigsaw Toxic Comment Classification Kaggle Competition """ | |

import json

import datasets

_DESCRIPTION = """\
This dataset consists of a large number of Wikipedia comments, translated to Finnish, which have been labeled by human raters for toxic behavior.
"""

_HOMEPAGE = "https://turkunlp.org/"

_URLS = {
    "train": "https://huggingface.co/datasets/TurkuNLP/wikipedia-toxicity-data-fi/resolve/main/train_fi_deepl.jsonl.bz2",
    "test": "https://huggingface.co/datasets/TurkuNLP/wikipedia-toxicity-data-fi/resolve/main/test_fi_deepl.jsonl.bz2",
}


class JigsawToxicityPred(datasets.GeneratorBasedBuilder):
    """This is a dataset of comments from Wikipedia’s talk page edits, translated to Finnish, which have been labeled by human raters for toxic behavior."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label_toxicity": datasets.ClassLabel(names=["false", "true"]),
                    "label_severe_toxicity": datasets.ClassLabel(names=["false", "true"]),
                    "label_obscene": datasets.ClassLabel(names=["false", "true"]),
                    "label_threat": datasets.ClassLabel(names=["false", "true"]),
                    "label_insult": datasets.ClassLabel(names=["false", "true"]),
                    "label_identity_attack": datasets.ClassLabel(names=["false", "true"]),
                }
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
        )
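
    # For reference, a record matching this schema, as yielded by
    # `_generate_examples` below, looks like (id and label values are
    # illustrative, not taken from the data):
    #   ("123", {"text": "...", "label_toxicity": 0, "label_severe_toxicity": 0,
    #            "label_obscene": 1, "label_threat": 0, "label_insult": 1,
    #            "label_identity_attack": 0})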

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # This method downloads/extracts the data and defines the splits.
        # If several configurations are possible (listed in BUILDER_CONFIGS),
        # the configuration selected by the user is in self.config.name.
        urls_to_download = _URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        # This method receives the `gen_kwargs` defined in `_split_generators`.
        # It opens the given file and yields (key, example) tuples; the key is
        # not important and is kept for legacy reasons (from tfds).
        # `download_and_extract` has already decompressed the .jsonl.bz2
        # archives, so `filepath` points at plain JSON Lines text.
        with open(filepath, "r", encoding="utf-8") as json_file:
            for jline in json_file:
                data = json.loads(jline)
                example = {"text": data["text"]}
                for label in [
                    "label_toxicity",
                    "label_severe_toxicity",
                    "label_obscene",
                    "label_threat",
                    "label_insult",
                    "label_identity_attack",
                ]:
                    # The JSONL stores booleans; ClassLabel expects the integer
                    # index (0 = "false", 1 = "true").
                    example[label] = int(data[label])
                yield data["id"], example
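
Once loaded, the stored integers can be mapped back to their `ClassLabel` names, and the six binary columns can be combined for multi-label training. A minimal sketch (the row index and the `multi_hot` packing are illustrative, not part of the builder):

```python
from datasets import load_dataset

ds = load_dataset("TurkuNLP/wikipedia-toxicity-data-fi", split="train")

label_cols = [
    "label_toxicity", "label_severe_toxicity", "label_obscene",
    "label_threat", "label_insult", "label_identity_attack",
]

row = ds[0]
# ClassLabel.int2str maps the stored integer back to "false"/"true".
for col in label_cols:
    print(col, ds.features[col].int2str(row[col]))

# Pack the six binary labels into one multi-hot vector per example.
multi_hot = [float(row[col]) for col in label_cols]
```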