Commit 7f7442a
Parent(s): 791806a

Convert dataset to Parquet (#4)

- Convert dataset to Parquet (db66342589f75b35be57cb96597f0525072581d6)
- Delete loading script (b0c6f3e06cb9c5f26fc344f3f4d2a133f0a020f9)

Files changed:
- README.md +8 -3
- data/train-00000-of-00001.parquet +3 -0
- fake_news_english.py +0 -91
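
With the data stored as Parquet, the dataset now loads without executing any loading script. A minimal sketch of the post-conversion workflow, assuming the hub repo id is "fake_news_english" (substitute this repository's actual namespace/name):

# Sketch: load the Parquet-backed dataset; the repo id is an assumption.
from datasets import load_dataset

ds = load_dataset("fake_news_english", split="train")
print(len(ds))      # 492 examples, per the README metadata below
print(ds.features)  # article_number, url_of_article, fake_or_satire, url_of_rebutting_article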
README.md CHANGED
@@ -33,10 +33,15 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 78070
     num_examples: 492
-  download_size:
-  dataset_size:
+  download_size: 43101
+  dataset_size: 78070
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
 ---
 
 # Dataset Card for Fake News English
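
The added configs block is what maps the default config to the Parquet shard via the data/train-* glob. As a side note, the shard is an ordinary Parquet file, so it can also be read directly; a sketch assuming a local checkout with LFS files pulled and pyarrow installed:

# Sketch: read the shard without the datasets library (assumes an LFS-pulled checkout).
import pandas as pd

df = pd.read_parquet("data/train-00000-of-00001.parquet")
print(df.columns.tolist())  # the four columns the deleted script used to define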
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fd158cef23f0d477e9986b19b99558d81750cdae9a9e81c1277f99d9aef075b
+size 43101
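
The three added lines are a Git LFS pointer rather than the data itself; after git lfs pull, the materialized file's SHA-256 should match the oid and its byte count should match size. A quick integrity check, assuming a local checkout:

# Sketch: verify the shard against the LFS pointer above.
import hashlib

with open("data/train-00000-of-00001.parquet", "rb") as fh:
    data = fh.read()
print(len(data))                         # expect 43101
print(hashlib.sha256(data).hexdigest())  # expect 1fd158cef23f0d477e9986b19b99558d81750cdae9a9e81c1277f99d9aef075b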
fake_news_english.py DELETED
@@ -1,91 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Fake News vs Satire: A Dataset and Analysis."""
-
-
-import os
-
-import openpyxl  # noqa: requires this pandas optional dependency for reading xlsx files
-import pandas as pd
-
-import datasets
-
-
-_CITATION = """
-@inproceedings{inproceedings,
-  author = {Golbeck, Jennifer and Everett, Jennine and Falak, Waleed and Gieringer, Carl and Graney, Jack and Hoffman, Kelly and Huth, Lindsay and Ma, Zhenya and Jha, Mayanka and Khan, Misbah and Kori, Varsha and Mauriello, Matthew and Lewis, Elo and Mirano, George and IV, William and Mussenden, Sean and Nelson, Tammie and Mcwillie, Sean and Pant, Akshat and Cheakalos, Paul},
-  year = {2018},
-  month = {05},
-  pages = {17-21},
-  title = {Fake News vs Satire: A Dataset and Analysis},
-  doi = {10.1145/3201064.3201100}
-}
-"""
-
-_DESCRIPTION = """
-Fake news has become a major societal issue and a technical challenge for social media companies to identify. This content is difficult to identify because the term "fake news" covers intentionally false, deceptive stories as well as factual errors, satire, and sometimes, stories that a person just does not like. Addressing the problem requires clear definitions and examples. In this work, we present a dataset of fake news and satire stories that are hand coded, verified, and, in the case of fake news, include rebutting stories. We also include a thematic content analysis of the articles, identifying major themes that include hyperbolic support or condemnation of a figure, conspiracy theories, racist themes, and discrediting of reliable sources. In addition to releasing this dataset for research use, we analyze it and show results based on language that are promising for classification purposes. Overall, our contribution of a dataset and initial analysis are designed to support future work by fake news researchers.
-"""
-
-_HOMEPAGE = "https://dl.acm.org/doi/10.1145/3201064.3201100"
-
-# _LICENSE = ""
-
-_URLs = "https://github.com/jgolbeck/fakenews/raw/master/FakeNewsData.zip"
-
-
-class FakeNewsEnglish(datasets.GeneratorBasedBuilder):
-    """Fake News vs Satire: A Dataset and Analysis"""
-
-    VERSION = datasets.Version("1.1.0")
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "article_number": datasets.Value("int32"),
-                "url_of_article": datasets.Value("string"),
-                "fake_or_satire": datasets.ClassLabel(names=["Satire", "Fake"]),
-                "url_of_rebutting_article": datasets.Value("string"),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(_URLs)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "FakeNewsData", "Fake News Stories.xlsx")},
-            )
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        with open(filepath, "rb") as f:
-            f = pd.read_excel(f, engine="openpyxl")
-            for id_, row in f.iterrows():
-                yield id_, {
-                    "article_number": row["Article Number"],
-                    "url_of_article": str(row["URL of article"]),
-                    "fake_or_satire": str(row["Fake or Satire?"]),
-                    "url_of_rebutting_article": str(row["URL of rebutting article"]),
-                }
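
The deleted script declared fake_or_satire as ClassLabel(names=["Satire", "Fake"]), and the Parquet conversion should preserve that feature in the file's schema metadata, so the column holds integer indices rather than strings. A sketch of mapping them back, with the repo id again an assumption:

# Sketch: recover string labels from the ClassLabel indices; repo id is hypothetical.
from datasets import load_dataset

ds = load_dataset("fake_news_english", split="train")
label = ds.features["fake_or_satire"]
print(label.int2str(ds[0]["fake_or_satire"]))  # "Satire" or "Fake"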