Initialize (#1)
* add wrime.py
* add poetry files
* add files for tests
* add gitignore
* add workflow file for the CI
* remove unused imports
* fix for the test
* fix for the CI
* add README.md
* add workflow for pushing the dataset to HF dataset
* fix the repository name
- .github/workflows/ci.yaml +47 -0
- .github/workflows/push_to_hub.yaml +26 -0
- .gitignore +170 -0
- README.md +3 -0
- poetry.lock +0 -0
- pyproject.toml +23 -0
- tests/__init__.py +0 -0
- tests/wrime_test.py +29 -0
- wrime.py +223 -0
.github/workflows/ci.yaml
ADDED
@@ -0,0 +1,47 @@
+name: CI
+
+on:
+  push:
+    branches: [main]
+    paths-ignore:
+      - 'README.md'
+  pull_request:
+    branches: [main]
+    paths-ignore:
+      - 'README.md'
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ['3.8', '3.9', '3.10']
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: |
+          pip install -U pip setuptools wheel poetry
+          poetry install
+      - name: Format
+        run: |
+          poetry run black --check .
+      - name: Lint
+        run: |
+          poetry run flake8 . --ignore=E501,W503,E203
+      - name: Type check
+        run: |
+          poetry run mypy . \
+            --ignore-missing-imports \
+            --no-strict-optional \
+            --no-site-packages \
+            --cache-dir=/dev/null
+
+      - name: Run tests
+        run: |
+          poetry run pytest --color=yes -rf
.github/workflows/push_to_hub.yaml
ADDED
@@ -0,0 +1,26 @@
+name: Sync to Hugging Face Hub
+
+on:
+  workflow_run:
+    workflows:
+      - CI
+    branches:
+      - main
+    types:
+      - completed
+
+jobs:
+  push_to_hub:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v2
+
+      - name: Push to Huggingface hub
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          HF_USERNAME: ${{ secrets.HF_USERNAME }}
+        run: |
+          git fetch --unshallow
+          git push --force https://${HF_USERNAME}:${HF_TOKEN}@huggingface.co/datasets/${HF_USERNAME}/wrime main
.gitignore
ADDED
@@ -0,0 +1,170 @@
+# Created by https://www.toptal.com/developers/gitignore/api/python
+# Edit at https://www.toptal.com/developers/gitignore?templates=python
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+### Python Patch ###
+# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+poetry.toml
+
+# End of https://www.toptal.com/developers/gitignore/api/python
README.md
ADDED
@@ -0,0 +1,3 @@
+# Dataset Card for WRIME
+
+[![CI](https://github.com/shunk031/huggingface-datasets_wrime/actions/workflows/ci.yaml/badge.svg)](https://github.com/shunk031/huggingface-datasets_wrime/actions/workflows/ci.yaml)
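
For reference, a minimal usage sketch (not part of this commit): once the CI workflow succeeds and the sync workflow pushes this repository to the Hugging Face Hub, the dataset should be loadable with datasets.load_dataset. The repo id "shunk031/wrime" below is inferred from the CI badge and the ${HF_USERNAME}/wrime push target, so treat it as an assumption.

import datasets as ds

# Assumed Hub repo id; substitute the actual ${HF_USERNAME}/wrime target.
dataset = ds.load_dataset("shunk031/wrime", name="ver1")  # name="ver2" selects the second release
print(dataset)  # expected: a DatasetDict with "train", "validation", and "test" splits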
poetry.lock
ADDED
The diff for this file is too large to render.
pyproject.toml
ADDED
@@ -0,0 +1,23 @@
+[tool.poetry]
+name = "huggingface-datasets-wrime"
+version = "0.1.0"
+description = ""
+authors = ["Shunsuke KITADA <shunsuke.kitada.0831@gmail.com>"]
+readme = "README.md"
+packages = []
+
+[tool.poetry.dependencies]
+python = ">=3.8.1,<4.0"
+datasets = "^2.8.0"
+
+
+[tool.poetry.group.dev.dependencies]
+black = "^22.12.0"
+isort = "^5.11.4"
+flake8 = "^6.0.0"
+mypy = "^0.991"
+pytest = "^7.2.0"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
tests/__init__.py
ADDED
File without changes
tests/wrime_test.py
ADDED
@@ -0,0 +1,29 @@
+import datasets as ds
+import pytest
+
+
+@pytest.fixture
+def dataset_path() -> str:
+    return "wrime.py"
+
+
+@pytest.mark.parametrize(
+    "dataset_name, expected_train_num_rows, expected_val_num_rows, expected_test_num_rows,",
+    (
+        ("ver1", 40000, 1200, 2000),
+        ("ver2", 30000, 2500, 2500),
+    ),
+)
+def test_load_dataset(
+    dataset_path: str,
+    dataset_name: str,
+    expected_train_num_rows: int,
+    expected_val_num_rows: int,
+    expected_test_num_rows: int,
+) -> None:
+
+    dataset = ds.load_dataset(path=dataset_path, name=dataset_name)
+
+    assert dataset["train"].num_rows == expected_train_num_rows  # type: ignore
+    assert dataset["validation"].num_rows == expected_val_num_rows  # type: ignore
+    assert dataset["test"].num_rows == expected_test_num_rows  # type: ignore
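
As a rough illustration (not part of this commit), the checks performed by the CI "Run tests" step can also be invoked from Python through pytest's programmatic entry point; the path below assumes the repository root as the working directory.

import pytest

# Roughly equivalent to `poetry run pytest --color=yes -rf` from the CI workflow.
exit_code = pytest.main(["--color=yes", "-rf", "tests/wrime_test.py"])
print(exit_code)  # 0 when both parametrized cases pass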
wrime.py
ADDED
@@ -0,0 +1,223 @@
+import logging
+from typing import TypedDict
+
+import datasets as ds
+import pandas as pd
+
+logger = logging.getLogger(__name__)
+
+_CITATION = """\
+@inproceedings{kajiwara-etal-2021-wrime,
+    title = "{WRIME}: A New Dataset for Emotional Intensity Estimation with Subjective and Objective Annotations",
+    author = "Kajiwara, Tomoyuki  and
+      Chu, Chenhui  and
+      Takemura, Noriko  and
+      Nakashima, Yuta  and
+      Nagahara, Hajime",
+    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
+    month = jun,
+    year = "2021",
+    address = "Online",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/2021.naacl-main.169",
+    doi = "10.18653/v1/2021.naacl-main.169",
+    pages = "2095--2104",
+    abstract = "We annotate 17,000 SNS posts with both the writer{'}s subjective emotional intensity and the reader{'}s objective one to construct a Japanese emotion analysis dataset. In this study, we explore the difference between the emotional intensity of the writer and that of the readers with this dataset. We found that the reader cannot fully detect the emotions of the writer, especially anger and trust. In addition, experimental results in estimating the emotional intensity show that it is more difficult to estimate the writer{'}s subjective labels than the readers{'}. The large gap between the subjective and objective emotions imply the complexity of the mapping from a post to the subjective emotion intensities, which also leads to a lower performance with machine learning models.",
+}
+"""
+
+_DESCRIPTION = """\
+WRIME dataset is a new dataset for emotional intensity estimation with subjective and objective annotations.
+"""
+
+_HOMEPAGE = "https://github.com/ids-cv/wrime"
+
+_LICENSE = """\
+- The dataset is available for research purposes only.
+- Redistribution of the dataset is prohibited.
+"""
+
+
+class URLs(TypedDict):
+    ver1: str
+    ver2: str
+
+
+_URLS: URLs = {
+    "ver1": "https://raw.githubusercontent.com/ids-cv/wrime/master/wrime-ver1.tsv",
+    "ver2": "https://raw.githubusercontent.com/ids-cv/wrime/master/wrime-ver2.tsv",
+}
+
+
+def _fix_typo_in_dataset(df: pd.DataFrame) -> pd.DataFrame:
+    # ref. https://github.com/ids-cv/wrime/pull/4
+    df = df.rename(
+        columns={
+            "Reader2_Saddness": "Reader2_Sadness",
+            "Reader3_Saddness": "Reader3_Sadness",
+        }
+    )
+    return df
+
+
+def _load_tsv(tsv_path: str) -> pd.DataFrame:
+    logger.info(f"Load TSV file from {tsv_path}")
+    df = pd.read_csv(tsv_path, delimiter="\t")
+
+    df = _fix_typo_in_dataset(df)
+
+    return df
+
+
+class WrimeDataset(ds.GeneratorBasedBuilder):
+    BUILDER_CONFIGS = [
+        ds.BuilderConfig(
+            name="ver1",
+            version=ds.Version("1.0.0"),
+            description="WRIME dataset ver. 1",
+        ),
+        ds.BuilderConfig(
+            name="ver2",
+            version=ds.Version("2.0.0"),
+            description="WRIME dataset ver. 2",
+        ),
+    ]
+
+    def _info(self) -> ds.DatasetInfo:
+        features = ds.Features(
+            {
+                "sentence": ds.Value("string"),
+                "user_id": ds.Value("string"),
+                "datetime": ds.Value("string"),
+                "writer": {
+                    "joy": ds.Value("uint8"),
+                    "sadness": ds.Value("uint8"),
+                    "anticipation": ds.Value("uint8"),
+                    "surprise": ds.Value("uint8"),
+                    "anger": ds.Value("uint8"),
+                    "fear": ds.Value("uint8"),
+                    "disgust": ds.Value("uint8"),
+                    "trust": ds.Value("uint8"),
+                },
+                "reader1": {
+                    "joy": ds.Value("uint8"),
+                    "sadness": ds.Value("uint8"),
+                    "anticipation": ds.Value("uint8"),
+                    "surprise": ds.Value("uint8"),
+                    "anger": ds.Value("uint8"),
+                    "fear": ds.Value("uint8"),
+                    "disgust": ds.Value("uint8"),
+                    "trust": ds.Value("uint8"),
+                },
+                "reader2": {
+                    "joy": ds.Value("uint8"),
+                    "sadness": ds.Value("uint8"),
+                    "anticipation": ds.Value("uint8"),
+                    "surprise": ds.Value("uint8"),
+                    "anger": ds.Value("uint8"),
+                    "fear": ds.Value("uint8"),
+                    "disgust": ds.Value("uint8"),
+                    "trust": ds.Value("uint8"),
+                },
+                "reader3": {
+                    "joy": ds.Value("uint8"),
+                    "sadness": ds.Value("uint8"),
+                    "anticipation": ds.Value("uint8"),
+                    "surprise": ds.Value("uint8"),
+                    "anger": ds.Value("uint8"),
+                    "fear": ds.Value("uint8"),
+                    "disgust": ds.Value("uint8"),
+                    "trust": ds.Value("uint8"),
+                },
+                "avg_readers": {
+                    "joy": ds.Value("uint8"),
+                    "sadness": ds.Value("uint8"),
+                    "anticipation": ds.Value("uint8"),
+                    "surprise": ds.Value("uint8"),
+                    "anger": ds.Value("uint8"),
+                    "fear": ds.Value("uint8"),
+                    "disgust": ds.Value("uint8"),
+                    "trust": ds.Value("uint8"),
+                },
+            }
+        )
+        return ds.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager: ds.DownloadManager):
+        wrime_datasets = dl_manager.download_and_extract(_URLS)
+        major_version_name = f"ver{self.config.version.major}"  # type: ignore
+
+        wrime_df = _load_tsv(tsv_path=wrime_datasets[major_version_name])
+        tng_wrime_df = wrime_df[wrime_df["Train/Dev/Test"] == "train"]
+        dev_wrime_df = wrime_df[wrime_df["Train/Dev/Test"] == "dev"]
+        tst_wrime_df = wrime_df[wrime_df["Train/Dev/Test"] == "test"]
+
+        return [
+            ds.SplitGenerator(
+                name=ds.Split.TRAIN,  # type: ignore
+                gen_kwargs={"df": tng_wrime_df},
+            ),
+            ds.SplitGenerator(
+                name=ds.Split.VALIDATION,  # type: ignore
+                gen_kwargs={"df": dev_wrime_df},
+            ),
+            ds.SplitGenerator(
+                name=ds.Split.TEST,  # type: ignore
+                gen_kwargs={"df": tst_wrime_df},
+            ),
+        ]
+
+    def _generate_examples(  # type: ignore[override]
+        self,
+        df: pd.DataFrame,
+    ):
+        for i in range(len(df)):
+            row_df = df.iloc[i]
+
+            example_dict = {
+                "sentence": row_df["Sentence"],
+                "user_id": row_df["UserID"],
+                "datetime": row_df["Datetime"],
+            }
+
+            example_dict["writer"] = {
+                "joy": row_df["Writer_Joy"],
+                "sadness": row_df["Writer_Sadness"],
+                "anticipation": row_df["Writer_Anticipation"],
+                "surprise": row_df["Writer_Surprise"],
+                "anger": row_df["Writer_Anger"],
+                "fear": row_df["Writer_Fear"],
+                "disgust": row_df["Writer_Disgust"],
+                "trust": row_df["Writer_Trust"],
+            }
+
+            for reader_num in range(1, 4):
+                example_dict[f"reader{reader_num}"] = {
+                    "joy": row_df[f"Reader{reader_num}_Joy"],
+                    "sadness": row_df[f"Reader{reader_num}_Sadness"],
+                    "anticipation": row_df[f"Reader{reader_num}_Anticipation"],
+                    "surprise": row_df[f"Reader{reader_num}_Surprise"],
+                    "anger": row_df[f"Reader{reader_num}_Anger"],
+                    "fear": row_df[f"Reader{reader_num}_Fear"],
+                    "disgust": row_df[f"Reader{reader_num}_Disgust"],
+                    "trust": row_df[f"Reader{reader_num}_Trust"],
+                }
+
+            example_dict["avg_readers"] = {
+                "joy": row_df["Avg. Readers_Joy"],
+                "sadness": row_df["Avg. Readers_Sadness"],
+                "anticipation": row_df["Avg. Readers_Anticipation"],
+                "surprise": row_df["Avg. Readers_Surprise"],
+                "anger": row_df["Avg. Readers_Anger"],
+                "fear": row_df["Avg. Readers_Fear"],
+                "disgust": row_df["Avg. Readers_Disgust"],
+                "trust": row_df["Avg. Readers_Trust"],
+            }
+
+            yield i, example_dict
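
A minimal sketch (assumptions noted in comments) of how the nested features declared in _info() and emitted by _generate_examples() look when the script is loaded locally, the same way tests/wrime_test.py loads it:

import datasets as ds

# Assumes the files from this commit are in the current working directory.
dataset = ds.load_dataset(path="wrime.py", name="ver1")

example = dataset["train"][0]
print(example["sentence"])              # raw SNS post text
print(example["writer"]["joy"])         # writer's subjective emotion intensity
print(example["avg_readers"]["sadness"])  # averaged readers' objective intensity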