holylovenia committed
Commit 271ba2d
1 Parent(s): f320c1b

Upload ph_fake_news_corpus.py with huggingface_hub
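The commit message indicates the script was pushed with the huggingface_hub client. A minimal sketch of such an upload, assuming a dataset repo id of SEACrowd/ph_fake_news_corpus and a local copy of the script (both are illustrative placeholders, not taken from this commit):

    from huggingface_hub import HfApi

    api = HfApi()

    # Push the loader script to the dataset repository; repo_id and the
    # local path are hypothetical placeholders for this commit.
    api.upload_file(
        path_or_fileobj="ph_fake_news_corpus.py",
        path_in_repo="ph_fake_news_corpus.py",
        repo_id="SEACrowd/ph_fake_news_corpus",
        repo_type="dataset",
        commit_message="Upload ph_fake_news_corpus.py with huggingface_hub",
    )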

Files changed (1)
  1. ph_fake_news_corpus.py +116 -0
ph_fake_news_corpus.py ADDED
@@ -0,0 +1,116 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """
+ @inproceedings{hernandez-devaraj-2019-phfakenews,
+     author = {Fernandez, Aaron Carl T. and Devaraj, Madhavi},
+     title = {Computing the Linguistic-Based Cues of Fake News in the Philippines Towards its Detection},
+     booktitle = {Proceedings of the 9th International Conference on Web Intelligence, Mining and Semantics},
+     publisher = {Association for Computing Machinery},
+     year = {2019},
+     url = {https://dl.acm.org/doi/abs/10.1145/3326467.3326490},
+     doi = {10.1145/3326467.3326490},
+     pages = {1-9},
+ }
+ """
+
+ _LOCAL = False
+ _LANGUAGES = ["eng"]
+ _DATASETNAME = "ph_fake_news_corpus"
+ _DESCRIPTION = """
+ The Philippine Fake News Corpus consists of news headlines and content from various "credible" and "non-credible"
+ national news outlets. "Credible" sources were national broadsheets available in the National Library of the
+ Philippines, while "non-credible" sources were sources included in lists of websites with fake or unverified content
+ provided by government and private institutions.
+ """
+
+ _HOMEPAGE = "https://github.com/aaroncarlfernandez/Philippine-Fake-News-Corpus"
+ _LICENSE = Licenses.UNKNOWN.value
+ _URL = "https://github.com/aaroncarlfernandez/Philippine-Fake-News-Corpus/raw/master/Philippine%20Fake%20News%20Corpus.zip"
+
+ _SUPPORTED_TASKS = [Tasks.FACT_CHECKING]
+ _SOURCE_VERSION = "1.0.0"
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class PhilippineFakeNewsDataset(datasets.GeneratorBasedBuilder):
+     """
+     Dataset of English news articles from the Philippines manually annotated as "credible" or
+     "non-credible" based on source.
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=_DATASETNAME,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         features = datasets.Features(
+             {
+                 "Headline": datasets.Value("string"),
+                 "Content": datasets.Value("string"),
+                 "Authors": datasets.Value("string"),
+                 "Date": datasets.Value("string"),
+                 "URL": datasets.Value("string"),
+                 "Brand": datasets.Value("string"),
+                 "Label": datasets.Value("string"),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         data_dir = dl_manager.download_and_extract(_URL)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "Philippine Fake News Corpus.csv"),
+                     "split": "train",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         df = pd.read_csv(filepath, index_col=None, header="infer", encoding="utf-8")
+         for index, example in df.iterrows():
+             yield index, example.to_dict()
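
Once the script is in place, the corpus can be loaded through the standard datasets API. A minimal sketch, assuming the seacrowd package is installed and the command is run from a directory containing ph_fake_news_corpus.py (the config name comes from DEFAULT_CONFIG_NAME above):

    import datasets

    # Load the source-schema config defined in BUILDER_CONFIGS; recent
    # versions of the datasets library ask for trust_remote_code when
    # running script-based datasets.
    ds = datasets.load_dataset(
        "ph_fake_news_corpus.py",
        name="ph_fake_news_corpus_source",
        split="train",
        trust_remote_code=True,
    )

    # Each row carries the seven string fields declared in _info().
    print(ds.column_names)  # ['Headline', 'Content', 'Authors', 'Date', 'URL', 'Brand', 'Label']
    print(ds[0]["Label"])   # credibility label for the article's source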