# Loading script for the CaSSA dataset.


import json

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """ """


_DESCRIPTION = """  The CaSSA dataset is a corpus of 6,400 reviews and forum messages annotated with polar expressions. Each piece of text is annotated with all the expressions of polarity that it contains. For each polar expression, we annotated the expression itself, the target (the object of the expression), and the source (the subject expressing the sentiment). 25,453 polar expressions have been annotated.
 """


_HOMEPAGE = """ https://huggingface.co/datasets/projecte-aina/CaSSA-catalan-structured-sentiment-analysis/ """



_URL = "https://huggingface.co/datasets/projecte-aina/CaSSA-catalan-structured-sentiment-analysis/resolve/main/"
_FILE = "data.jsonl"


class CaSSAConfig(datasets.BuilderConfig):
    """ Builder config for the CaSSA dataset """

    def __init__(self, **kwargs):
        """BuilderConfig for CaSSA.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(CaSSAConfig, self).__init__(**kwargs)


class CaSSA(datasets.GeneratorBasedBuilder):
    """ CaSSA Dataset """


    BUILDER_CONFIGS = [
        CaSSAConfig(
            name="CaSSA",
            version=datasets.Version("1.0.0"),
            description="CaSSA dataset",
        ),
    ]


    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "sent_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    # Each span field (Source, Target, Polar_expression) is a nested
                    # list of strings, hence Sequence(Sequence(string)).
                    "opinions": [
                        {
                            "Source": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
                            "Target": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
                            "Polar_expression": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
                            "Polarity": datasets.Value("string"),
                            "Intensity": datasets.Value("string"),
                        }
                    ],
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "data": f"{_URL}{_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["data"]}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            data = [json.loads(line) for line in f]
            for id_, example in enumerate(data):
                yield id_, {
                    "sent_id": example["sent_id"],
                    "text": example["text"],
                    # Empty span annotations ([[], []]) are mapped to None.
                    "opinions": [
                        {
                            "Source": opinion["Source"] if opinion["Source"] != [[], []] else None,
                            "Target": opinion["Target"] if opinion["Target"] != [[], []] else None,
                            "Polar_expression": opinion["Polar_expression"] if opinion["Polar_expression"] != [[], []] else None,
                            "Polarity": opinion["Polarity"],
                            "Intensity": opinion["Intensity"],
                        }
                        for opinion in example["opinions"]
                    ],
                }
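

# Usage sketch (illustrative, not part of the original loading script): once this
# script and data.jsonl are hosted in the
# projecte-aina/CaSSA-catalan-structured-sentiment-analysis repository on the
# Hugging Face Hub, the dataset can be loaded as shown below. Depending on the
# installed `datasets` version, script-based datasets may also require
# trust_remote_code=True.
if __name__ == "__main__":
    cassa = datasets.load_dataset(
        "projecte-aina/CaSSA-catalan-structured-sentiment-analysis", split="train"
    )
    print(cassa[0]["text"])
    print(cassa[0]["opinions"])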