url | repository_url | labels_url | comments_url | events_url | html_url | id | node_id | number | title | user | labels | state | locked | assignee | assignees | milestone | comments | created_at | updated_at | closed_at | author_association | active_lock_reason | draft | pull_request | body | reactions | timeline_url | performed_via_github_app | state_reason | is_pull_request |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
https://api.github.com/repos/huggingface/datasets/issues/2634 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2634/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2634/comments | https://api.github.com/repos/huggingface/datasets/issues/2634/events | https://github.com/huggingface/datasets/pull/2634 | 942,805,621 | MDExOlB1bGxSZXF1ZXN0Njg4NDk2Mzc2 | 2,634 | Inject ASR template for lj_speech dataset | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,626,156,294,000 | 1,626,167,109,000 | 1,626,167,109,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2634",
"html_url": "https://github.com/huggingface/datasets/pull/2634",
"diff_url": "https://github.com/huggingface/datasets/pull/2634.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2634.patch",
"merged_at": 1626167109000
} | Related to: #2565, #2633.
cc: @lewtun | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2634/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 1,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2634/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2633 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2633/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2633/comments | https://api.github.com/repos/huggingface/datasets/issues/2633/events | https://github.com/huggingface/datasets/pull/2633 | 942,396,414 | MDExOlB1bGxSZXF1ZXN0Njg4MTMwOTA5 | 2,633 | Update ASR tags | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,626,119,911,000 | 1,626,155,126,000 | 1,626,155,113,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2633",
"html_url": "https://github.com/huggingface/datasets/pull/2633",
"diff_url": "https://github.com/huggingface/datasets/pull/2633.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2633.patch",
"merged_at": 1626155113000
} | This PR updates the ASR tags of the 5 datasets added in #2565 following the change of task categories in #2620 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2633/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2633/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2632 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2632/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2632/comments | https://api.github.com/repos/huggingface/datasets/issues/2632/events | https://github.com/huggingface/datasets/pull/2632 | 942,293,727 | MDExOlB1bGxSZXF1ZXN0Njg4MDQyMjcw | 2,632 | add image-classification task template | {
"login": "nateraw",
"id": 32437151,
"node_id": "MDQ6VXNlcjMyNDM3MTUx",
"avatar_url": "https://avatars.githubusercontent.com/u/32437151?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/nateraw",
"html_url": "https://github.com/nateraw",
"followers_url": "https://api.github.com/users/nateraw/followers",
"following_url": "https://api.github.com/users/nateraw/following{/other_user}",
"gists_url": "https://api.github.com/users/nateraw/gists{/gist_id}",
"starred_url": "https://api.github.com/users/nateraw/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/nateraw/subscriptions",
"organizations_url": "https://api.github.com/users/nateraw/orgs",
"repos_url": "https://api.github.com/users/nateraw/repos",
"events_url": "https://api.github.com/users/nateraw/events{/privacy}",
"received_events_url": "https://api.github.com/users/nateraw/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"Awesome!",
"Thanks for adding a new task template - great work @nateraw ๐ !"
] | 1,626,111,663,000 | 1,626,191,068,000 | 1,626,190,096,000 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2632",
"html_url": "https://github.com/huggingface/datasets/pull/2632",
"diff_url": "https://github.com/huggingface/datasets/pull/2632.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2632.patch",
"merged_at": 1626190095000
} | Snippet below is the tl;dr, but you can try it out directly here:
[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/gist/nateraw/005c025d41f0e48ae3d4ee61c0f20b70/image-classification-task-template-demo.ipynb)
```python
from datasets import load_dataset
ds = load_dataset('nateraw/image-folder', data_files='PetImages/')
# DatasetDict({
# train: Dataset({
# features: ['file', 'labels'],
# num_rows: 23410
# })
# })
ds = ds.prepare_for_task('image-classification')
# DatasetDict({
# train: Dataset({
# features: ['image_file_path', 'labels'],
# num_rows: 23410
# })
# })
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2632/reactions",
"total_count": 3,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 3,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2632/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2631 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2631/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2631/comments | https://api.github.com/repos/huggingface/datasets/issues/2631/events | https://github.com/huggingface/datasets/pull/2631 | 942,242,271 | MDExOlB1bGxSZXF1ZXN0Njg3OTk3MzM2 | 2,631 | Delete extracted files when loading dataset | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"Sure @stas00, it is still a draft pull request. :)",
"Yes, I noticed it after reviewing - my apologies.",
"The problem with this approach is that it also deletes the downloaded files (if they need not be extracted). ๐ ",
"> The problem with this approach is that it also deletes the downloaded files (if they need not be extracted). worried\r\n\r\nRight! These probably should not be deleted by default, but having an option for those users who are tight on disc space?",
"> Right! These probably should not be deleted by default, but having an option for those users who are tight on disc space?\r\n\r\nI propose leaving that for another PR, and leave this one handling only with \"extracted\" files. Is it OK for you? :) ",
"Awesome thanks !\r\nI just have one question: what about image/audio datasets for which we store the path to the extracted file on the arrow data ?\r\nIn this case the default should be to keep the extracted files.\r\n\r\nSo for now I would just make `keep_extracted=True` by default until we have a way to separate extracted files that can be deleted and extracted files that are actual resources of the dataset.",
"@lhoestq, current implementation only deletes extracted \"files\", not extracted \"directories\", as it uses: `os.remove(path)`. I'm going to add a filter on files, so that this line does not throw an exception when passed a directory.\r\n\r\nFor audio datasets, the audio files are inside the extracted \"directory\", so they are not deleted.",
"I'm still more in favor of having `keep_extracted=True` by default:\r\n- When working with a dataset, you call `load_dataset` many times. By default we want to keep objects extracted to not extract them over and over again (it can take a long time). Then once you know what you're doing and you want to optimize disk space, you can do `keep_extracted=False`. Deleting the extracted files by default is a regression that can lead to slow downs for people calling `load_dataset` many times, which is common when experimenting\r\n- This behavior doesn't sound natural as a default behavior. In the rest of the library, things are cached and not removed unless you explicitly say do (`map` caching for example). Moreover the function in the download manager is called `download_and_extract`, not `download_and_extract_and_remove_extracted_files`\r\n\r\nLet me know what you think !",
"I think the main issue is that after doing some work users typically move on to other datasets and the amount of disc space used keeps on growing. So your logic is very sound and perhaps what's really needed is a cleansweep function that can go through **all** datasets and clean them up to the desired degree:\r\n\r\n- delete all extracted files\r\n- delete all sources\r\n- delete all caches\r\n- delete all caches that haven't been accessed in 6 months\r\n- delete completely old datasets that haven't been accessed in 6 months\r\n- more?\r\n\r\nSo a user can launch a little application, choose what they want to clean up and voila they have just freed up a huge amount of disc space. Makes me think of Ubuntu Tweak's Janitor app - very useful.\r\n\r\nAt the moment, this process of linting is very daunting and error-prone, especially due to all those dirs/files with hash names.",
"@stas00 I've had the same idea. Instead of the full-fledged app, a simpler approach would be to add a new command to the CLI.",
"oh, CLI would be perfect. I didn't mean to request a GUI-one specifically, was just using it as an example.\r\n\r\nOne could even do a crontab to delete old datasets that haven't been accesses in X months.",
"@lhoestq I totally agree with you. I'm addressing that change.\r\n\r\n@stas00, @mariosasko, that could eventually be addressed in another pull request. The objective of this PR is:\r\n- add an option to pass to `load_dataset`, so that extracted files are deleted\r\n- do this deletion file per file, once the file has been already used to generate the cache Arrow file",
"I also like the idea of having a CLI tool to help users clean their cache and save disk space, good idea !"
] | 1,626,107,973,000 | 1,626,685,699,000 | 1,626,685,699,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2631",
"html_url": "https://github.com/huggingface/datasets/pull/2631",
"diff_url": "https://github.com/huggingface/datasets/pull/2631.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2631.patch",
"merged_at": 1626685698000
} | Close #2481, close #2604, close #2591.
cc: @stas00, @thomwolf, @BirgerMoell | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2631/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2631/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2630 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2630/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2630/comments | https://api.github.com/repos/huggingface/datasets/issues/2630/events | https://github.com/huggingface/datasets/issues/2630 | 942,102,956 | MDU6SXNzdWU5NDIxMDI5NTY= | 2,630 | Progress bars are not properly rendered in Jupyter notebook | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"To add my experience when trying to debug this issue:\r\n\r\nSeems like previously the workaround given [here](https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308) worked around this issue. But with the latest version of jupyter/tqdm I still get terminal warnings that IPython tried to send a message from a forked process.",
"Hi @mludv, thanks for the hint!!! :) \r\n\r\nWe will definitely take it into account to try to fix this issue... It seems somehow related to `multiprocessing` and `tqdm`..."
] | 1,626,098,833,000 | 1,643,903,733,000 | 1,643,903,733,000 | MEMBER | null | null | null | ## Describe the bug
The progress bars are not Jupyter widgets; regular progress bars appear (like in a terminal).
## Steps to reproduce the bug
```python
from datasets import load_dataset

ds = load_dataset("imdb", split="train")  # hypothetical dataset; any dataset reproduces this
tokenize = lambda example: example  # stand-in for any per-example function
ds.map(tokenize, num_proc=10)
```
## Expected results
Jupyter widgets displaying the progress bars.
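For context, a minimal sketch of how notebook-aware bars are typically obtained (whether `datasets` routes its bars through this is an assumption here):
```python
from tqdm.auto import tqdm  # selects the ipywidgets-based bar when running inside Jupyter

for _ in tqdm(range(10)):
    pass  # renders as a Jupyter widget in a notebook, a plain text bar in a terminal
```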
## Actual results
Simple plain progress bars.
Reported by @thomwolf. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2630/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2630/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2629 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2629/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2629/comments | https://api.github.com/repos/huggingface/datasets/issues/2629/events | https://github.com/huggingface/datasets/issues/2629 | 941,819,205 | MDU6SXNzdWU5NDE4MTkyMDU= | 2,629 | Load datasets from the Hub without requiring a dataset script | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"This is so cool, let us know if we can help with anything on the hub side (@Pierrci @elishowk) ๐ "
] | 1,626,079,517,000 | 1,629,901,088,000 | 1,629,901,088,000 | MEMBER | null | null | null | As a user I would like to be able to upload my csv/json/text/parquet/etc. files in a dataset repository on the Hugging Face Hub and be able to load this dataset with `load_dataset` without having to implement a dataset script.
Moreover I would like to be able to specify which file goes into which split using the `data_files` argument.
This feature should be compatible with private repositories and dataset streaming.
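A minimal sketch of the envisioned usage (the repository name and file names are hypothetical):
```python
from datasets import load_dataset

# The loader would infer the packaged builder (csv/json/text/parquet/...)
# from the extensions of the files in the repository.
dataset = load_dataset(
    "username/my-dataset-repo",  # hypothetical dataset repository on the Hub
    data_files={"train": "train.csv", "test": "test.csv"},
)
```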
This can be implemented by checking the extension of the files in the dataset repository and then by using the right dataset builder that is already packaged in the library (csv/json/text/parquet/etc.). | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2629/reactions",
"total_count": 11,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 2,
"confused": 0,
"heart": 7,
"rocket": 2,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2629/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2628 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2628/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2628/comments | https://api.github.com/repos/huggingface/datasets/issues/2628/events | https://github.com/huggingface/datasets/pull/2628 | 941,676,404 | MDExOlB1bGxSZXF1ZXN0Njg3NTE0NzQz | 2,628 | Use ETag of remote data files | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,626,066,610,000 | 1,626,098,914,000 | 1,626,079,207,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2628",
"html_url": "https://github.com/huggingface/datasets/pull/2628",
"diff_url": "https://github.com/huggingface/datasets/pull/2628.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2628.patch",
"merged_at": 1626079207000
} | Use ETag of remote data files to create config ID.
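A hedged sketch of the idea (the helper and hashing scheme are illustrative assumptions, not the actual implementation):
```python
import hashlib

import requests

def config_id_suffix(data_file_urls):
    # Hash the ETags of the remote files together, so the resulting config ID
    # changes whenever any remote data file changes on the server.
    etags = [
        requests.head(url, allow_redirects=True).headers.get("ETag", "")
        for url in data_file_urls
    ]
    return hashlib.sha256("".join(etags).encode("utf-8")).hexdigest()[:16]
```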
Related to #2616. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2628/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2628/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2627 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2627/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2627/comments | https://api.github.com/repos/huggingface/datasets/issues/2627/events | https://github.com/huggingface/datasets/pull/2627 | 941,503,349 | MDExOlB1bGxSZXF1ZXN0Njg3MzczMDg1 | 2,627 | Minor fix tests with Windows paths | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,626,026,148,000 | 1,626,098,927,000 | 1,626,078,890,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2627",
"html_url": "https://github.com/huggingface/datasets/pull/2627",
"diff_url": "https://github.com/huggingface/datasets/pull/2627.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2627.patch",
"merged_at": 1626078890000
} | Minor fix tests with Windows paths. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2627/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2627/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2626 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2626/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2626/comments | https://api.github.com/repos/huggingface/datasets/issues/2626/events | https://github.com/huggingface/datasets/pull/2626 | 941,497,830 | MDExOlB1bGxSZXF1ZXN0Njg3MzY4OTMz | 2,626 | Use correct logger in metrics.py | {
"login": "mariosasko",
"id": 47462742,
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariosasko",
"html_url": "https://github.com/mariosasko",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,626,024,150,000 | 1,626,098,934,000 | 1,626,069,269,000 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2626",
"html_url": "https://github.com/huggingface/datasets/pull/2626",
"diff_url": "https://github.com/huggingface/datasets/pull/2626.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2626.patch",
"merged_at": 1626069269000
} | Fixes #2624 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2626/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2626/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2625 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2625/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2625/comments | https://api.github.com/repos/huggingface/datasets/issues/2625/events | https://github.com/huggingface/datasets/issues/2625 | 941,439,922 | MDU6SXNzdWU5NDE0Mzk5MjI= | 2,625 | โ๏ธ๐โ๏ธ๐ | {
"login": "hustlen0mics",
"id": 50596661,
"node_id": "MDQ6VXNlcjUwNTk2NjYx",
"avatar_url": "https://avatars.githubusercontent.com/u/50596661?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/hustlen0mics",
"html_url": "https://github.com/hustlen0mics",
"followers_url": "https://api.github.com/users/hustlen0mics/followers",
"following_url": "https://api.github.com/users/hustlen0mics/following{/other_user}",
"gists_url": "https://api.github.com/users/hustlen0mics/gists{/gist_id}",
"starred_url": "https://api.github.com/users/hustlen0mics/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/hustlen0mics/subscriptions",
"organizations_url": "https://api.github.com/users/hustlen0mics/orgs",
"repos_url": "https://api.github.com/users/hustlen0mics/repos",
"events_url": "https://api.github.com/users/hustlen0mics/events{/privacy}",
"received_events_url": "https://api.github.com/users/hustlen0mics/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,626,005,674,000 | 1,626,069,359,000 | 1,626,069,359,000 | NONE | null | null | null | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2625/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2625/timeline | null | completed | false |
|
https://api.github.com/repos/huggingface/datasets/issues/2624 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2624/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2624/comments | https://api.github.com/repos/huggingface/datasets/issues/2624/events | https://github.com/huggingface/datasets/issues/2624 | 941,318,247 | MDU6SXNzdWU5NDEzMTgyNDc= | 2,624 | can't set verbosity for `metric.py` | {
"login": "thomas-happify",
"id": 66082334,
"node_id": "MDQ6VXNlcjY2MDgyMzM0",
"avatar_url": "https://avatars.githubusercontent.com/u/66082334?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/thomas-happify",
"html_url": "https://github.com/thomas-happify",
"followers_url": "https://api.github.com/users/thomas-happify/followers",
"following_url": "https://api.github.com/users/thomas-happify/following{/other_user}",
"gists_url": "https://api.github.com/users/thomas-happify/gists{/gist_id}",
"starred_url": "https://api.github.com/users/thomas-happify/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/thomas-happify/subscriptions",
"organizations_url": "https://api.github.com/users/thomas-happify/orgs",
"repos_url": "https://api.github.com/users/thomas-happify/repos",
"events_url": "https://api.github.com/users/thomas-happify/events{/privacy}",
"received_events_url": "https://api.github.com/users/thomas-happify/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"Thanks @thomas-happify for reporting and thanks @mariosasko for the fix."
] | 1,625,948,625,000 | 1,626,069,269,000 | 1,626,069,269,000 | NONE | null | null | null | ## Describe the bug
```
[2021-07-10 20:13:11,528][datasets.utils.filelock][INFO] - Lock 139705371374976 acquired on /root/.cache/huggingface/metrics/seqeval/default/default_experiment-1-0.arrow.lock
[2021-07-10 20:13:11,529][datasets.arrow_writer][INFO] - Done writing 32 examples in 6100 bytes /root/.cache/huggingface/metrics/seqeval/default/default_experiment-1-0.arrow.
[2021-07-10 20:13:11,531][datasets.arrow_dataset][INFO] - Set __getitem__(key) output type to python objects for no columns (when key is int or slice) and don't output other (un-formatted) columns.
[2021-07-10 20:13:11,543][/conda/envs/myenv/lib/python3.8/site-packages/datasets/metric.py][INFO] - Removing /root/.cache/huggingface/metrics/seqeval/default/default_experiment-1-0.arrow
```
As you can see, `datasets` logging comes from different places.
`filelock`, `arrow_writer` & `arrow_dataset` come from `datasets.*`, which is expected.
However, `metric.py` logging comes from `/conda/envs/myenv/lib/python3.8/site-packages/datasets/`.
So when setting `datasets.utils.logging.set_verbosity_error()`, it still logs the last message, which is annoying during evaluation.
I had to do
```python
import logging

logging.getLogger("/conda/envs/myenv/lib/python3.8/site-packages/datasets/metric").setLevel(logging.ERROR)
```
to fully mute these messages.
## Expected results
it shouldn't log these messages when setting `datasets.utils.logging.set_verbosity_error()`
## Environment info
<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: tried both 1.8.0 & 1.9.0
- Platform: Ubuntu 18.04.5 LTS
- Python version: 3.8.10
- PyArrow version: 3.0.0
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2624/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2624/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2623 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2623/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2623/comments | https://api.github.com/repos/huggingface/datasets/issues/2623/events | https://github.com/huggingface/datasets/pull/2623 | 941,265,342 | MDExOlB1bGxSZXF1ZXN0Njg3MTk0MjM3 | 2,623 | [Metrics] added wiki_split metrics | {
"login": "bhadreshpsavani",
"id": 26653468,
"node_id": "MDQ6VXNlcjI2NjUzNDY4",
"avatar_url": "https://avatars.githubusercontent.com/u/26653468?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/bhadreshpsavani",
"html_url": "https://github.com/bhadreshpsavani",
"followers_url": "https://api.github.com/users/bhadreshpsavani/followers",
"following_url": "https://api.github.com/users/bhadreshpsavani/following{/other_user}",
"gists_url": "https://api.github.com/users/bhadreshpsavani/gists{/gist_id}",
"starred_url": "https://api.github.com/users/bhadreshpsavani/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/bhadreshpsavani/subscriptions",
"organizations_url": "https://api.github.com/users/bhadreshpsavani/orgs",
"repos_url": "https://api.github.com/users/bhadreshpsavani/repos",
"events_url": "https://api.github.com/users/bhadreshpsavani/events{/privacy}",
"received_events_url": "https://api.github.com/users/bhadreshpsavani/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | {
"login": "patrickvonplaten",
"id": 23423619,
"node_id": "MDQ6VXNlcjIzNDIzNjE5",
"avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/patrickvonplaten",
"html_url": "https://github.com/patrickvonplaten",
"followers_url": "https://api.github.com/users/patrickvonplaten/followers",
"following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}",
"gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}",
"starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions",
"organizations_url": "https://api.github.com/users/patrickvonplaten/orgs",
"repos_url": "https://api.github.com/users/patrickvonplaten/repos",
"events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}",
"received_events_url": "https://api.github.com/users/patrickvonplaten/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "patrickvonplaten",
"id": 23423619,
"node_id": "MDQ6VXNlcjIzNDIzNjE5",
"avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/patrickvonplaten",
"html_url": "https://github.com/patrickvonplaten",
"followers_url": "https://api.github.com/users/patrickvonplaten/followers",
"following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}",
"gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}",
"starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions",
"organizations_url": "https://api.github.com/users/patrickvonplaten/orgs",
"repos_url": "https://api.github.com/users/patrickvonplaten/repos",
"events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}",
"received_events_url": "https://api.github.com/users/patrickvonplaten/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Looks all good to me thanks :)\r\nJust did some minor corrections in the docstring"
] | 1,625,928,710,000 | 1,626,272,893,000 | 1,626,129,271,000 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2623",
"html_url": "https://github.com/huggingface/datasets/pull/2623",
"diff_url": "https://github.com/huggingface/datasets/pull/2623.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2623.patch",
"merged_at": 1626129271000
} | Fixes: #2606
This pull request adds a combined metric for the WikiSplit (English sentence splitting) task.
Reviewer: @patrickvonplaten | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2623/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2623/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2622 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2622/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2622/comments | https://api.github.com/repos/huggingface/datasets/issues/2622/events | https://github.com/huggingface/datasets/issues/2622 | 941,127,785 | MDU6SXNzdWU5NDExMjc3ODU= | 2,622 | Integration with AugLy | {
"login": "Darktex",
"id": 890615,
"node_id": "MDQ6VXNlcjg5MDYxNQ==",
"avatar_url": "https://avatars.githubusercontent.com/u/890615?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Darktex",
"html_url": "https://github.com/Darktex",
"followers_url": "https://api.github.com/users/Darktex/followers",
"following_url": "https://api.github.com/users/Darktex/following{/other_user}",
"gists_url": "https://api.github.com/users/Darktex/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Darktex/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Darktex/subscriptions",
"organizations_url": "https://api.github.com/users/Darktex/orgs",
"repos_url": "https://api.github.com/users/Darktex/repos",
"events_url": "https://api.github.com/users/Darktex/events{/privacy}",
"received_events_url": "https://api.github.com/users/Darktex/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
}
] | open | false | null | [] | null | [
"Hi,\r\n\r\nyou can define your own custom formatting with `Dataset.set_transform()` and then run the tokenizer with the batches of augmented data as follows:\r\n```python\r\ndset = load_dataset(\"imdb\", split=\"train\") # Let's say we are working with the IMDB dataset\r\ndset.set_transform(lambda ex: {\"text\": augly_text_augmentation(ex[\"text\"])}, columns=\"text\", output_all_columns=True)\r\ndataloader = torch.utils.data.DataLoader(dset, batch_size=32)\r\nfor epoch in range(5):\r\n for batch in dataloader:\r\n tokenizer_output = tokenizer(batch.pop(\"text\"), padding=True, truncation=True, return_tensors=\"pt\")\r\n batch.update(tokenizer_output)\r\n output = model(**batch)\r\n ...\r\n```"
] | 1,625,875,389,000 | 1,626,023,291,000 | null | NONE | null | null | null | **Is your feature request related to a problem? Please describe.**
Facebook recently launched a library, [AugLy](https://github.com/facebookresearch/AugLy), that has a unified API for augmentations for image, video and text.
It would be pretty exciting to have it hooked up to HF libraries so that we can make NLP models robust to misspellings, punctuation changes, emojis, etc. Plus, with Transformers supporting more CV use cases, having augmentation support becomes crucial.
**Describe the solution you'd like**
The biggest difference between augmentations and preprocessing is that preprocessing happens only once, but you are running augmentations once per epoch. AugLy operates on text directly, so this breaks the typical workflow where we would run the tokenizer once, set format to pt tensors and be ready for the Dataloader.
**Describe alternatives you've considered**
One possible way of implementing these is to make a custom Dataset class where `__getitem__(i)` runs the augmentation and the tokenizer every time, though this would slow training down considerably given we wouldn't even run the tokenizer in batches.
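For illustration, a minimal sketch of that alternative (the `"text"` column and the transform are hypothetical; note the tokenizer runs on one example at a time, which is the slowdown described):
```python
from torch.utils.data import Dataset

class AugmentingDataset(Dataset):
    """Re-augments and re-tokenizes each example on every access."""

    def __init__(self, hf_dataset, augment_fn, tokenizer):
        self.hf_dataset = hf_dataset
        self.augment_fn = augment_fn  # e.g. an AugLy text transform
        self.tokenizer = tokenizer

    def __len__(self):
        return len(self.hf_dataset)

    def __getitem__(self, i):
        # A fresh augmentation on every access, but tokenization is per-example (slow).
        text = self.augment_fn(self.hf_dataset[i]["text"])
        encoding = self.tokenizer(text, truncation=True, return_tensors="pt")
        return {k: v.squeeze(0) for k, v in encoding.items()}
```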
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2622/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2622/timeline | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/2621 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2621/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2621/comments | https://api.github.com/repos/huggingface/datasets/issues/2621/events | https://github.com/huggingface/datasets/pull/2621 | 940,916,446 | MDExOlB1bGxSZXF1ZXN0Njg2OTE1Mzcw | 2,621 | Use prefix to allow exceed Windows MAX_PATH | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"Does this mean the `FileNotFoundError` that avoids infinite loop can be removed?",
"Yes, I think so...",
"Or maybe we could leave it in case a relative path exceeds the MAX_PATH limit?",
" > Or maybe we could leave it in case a relative path exceeds the MAX_PATH limit?\r\n\r\nWhat about converting relative paths to absolute?",
"Nice ! Have you had a chance to test it on a windows machine with the max path limit enabled ? Afaik the CI doesn't have the path limit",
"Sure @lhoestq: I've tested on my machine... And this fixes most of the tests... ๐
"
] | 1,625,848,793,000 | 1,626,449,292,000 | 1,626,449,291,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2621",
"html_url": "https://github.com/huggingface/datasets/pull/2621",
"diff_url": "https://github.com/huggingface/datasets/pull/2621.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2621.patch",
"merged_at": 1626449291000
} | By using this prefix, you can exceed the Windows MAX_PATH limit.
See: https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
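For reference, a minimal sketch of applying the prefix (the helper name is hypothetical):
```python
import os

def extended_length_path(path: str) -> str:
    # On Windows, the "\\?\" prefix tells the Win32 API to bypass the
    # 260-character MAX_PATH limit; it must be applied to an absolute path.
    path = os.path.abspath(path)
    if os.name == "nt" and not path.startswith("\\\\?\\"):
        path = "\\\\?\\" + path
    return path
```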
Related to #2524, #2220. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2621/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 1,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2621/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2620 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2620/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2620/comments | https://api.github.com/repos/huggingface/datasets/issues/2620/events | https://github.com/huggingface/datasets/pull/2620 | 940,893,389 | MDExOlB1bGxSZXF1ZXN0Njg2ODk3MDky | 2,620 | Add speech processing tasks | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"Are there any `task_categories:automatic-speech-recognition` dataset for which we should update the tags ?",
"> Are there any `task_categories:automatic-speech-recognition` dataset for which we should update the tags ?\r\n\r\nYes there's a few - I'll fix them tomorrow :)"
] | 1,625,846,849,000 | 1,626,114,779,000 | 1,626,111,122,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2620",
"html_url": "https://github.com/huggingface/datasets/pull/2620",
"diff_url": "https://github.com/huggingface/datasets/pull/2620.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2620.patch",
"merged_at": 1626111122000
} | This PR replaces the `automatic-speech-recognition` task category with a broader `speech-processing` category.
The tasks associated with this category are derived from the [SUPERB benchmark](https://arxiv.org/abs/2105.01051), and ASR is included in this set. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2620/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2620/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2619 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2619/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2619/comments | https://api.github.com/repos/huggingface/datasets/issues/2619/events | https://github.com/huggingface/datasets/pull/2619 | 940,858,236 | MDExOlB1bGxSZXF1ZXN0Njg2ODY3NDA4 | 2,619 | Add ASR task for SUPERB | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [
"Wait until #2620 is merged before pushing the README tags in this PR",
"> Thanks!\r\n> \r\n> One question: aren't you adding `task_templates` to the `_info` method (and to the `dataset_infos.json`?\r\n\r\ngreat catch! i've now added the asr task template (along with a mapping from superb task -> template) and updated the `dataset_infos.json` :) ",
"> Good!\r\n> \r\n> I have a suggested refactoring... Tell me what you think! :)\r\n\r\nyour approach is much more elegant - i've included your suggestions ๐ "
] | 1,625,843,985,000 | 1,626,339,358,000 | 1,626,180,018,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2619",
"html_url": "https://github.com/huggingface/datasets/pull/2619",
"diff_url": "https://github.com/huggingface/datasets/pull/2619.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2619.patch",
"merged_at": 1626180018000
} | This PR starts building up the SUPERB benchmark by including the ASR task as described in the [SUPERB paper](https://arxiv.org/abs/2105.01051) and `s3prl` [instructions](https://github.com/s3prl/s3prl/tree/v0.2.0/downstream#asr-automatic-speech-recognition).
Usage:
```python
from datasets import load_dataset
asr = load_dataset("superb", "asr")
# DatasetDict({
# train: Dataset({
# features: ['file', 'text', 'speaker_id', 'chapter_id', 'id'],
# num_rows: 28539
# })
# validation: Dataset({
# features: ['file', 'text', 'speaker_id', 'chapter_id', 'id'],
# num_rows: 2703
# })
# test: Dataset({
# features: ['file', 'text', 'speaker_id', 'chapter_id', 'id'],
# num_rows: 2620
# })
# })
```
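For reference, individual examples then expose the columns listed above (a minimal usage sketch):
```python
from datasets import load_dataset

asr = load_dataset("superb", "asr")
sample = asr["train"][0]  # columns: file, text, speaker_id, chapter_id, id
print(sample["file"], sample["text"])  # audio file path and its transcription
```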
I've used the GLUE benchmark as a guide for filling out the README.
To move fast during the evaluation PoC I propose to merge one task at a time, so we can continue building the training / evaluation framework in parallel.
Note: codewise this PR is ready for review - I'll add the missing YAML tags once #2620 is merged :) | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2619/reactions",
"total_count": 2,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 2,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2619/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2618 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2618/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2618/comments | https://api.github.com/repos/huggingface/datasets/issues/2618/events | https://github.com/huggingface/datasets/issues/2618 | 940,852,640 | MDU6SXNzdWU5NDA4NTI2NDA= | 2,618 | `filelock.py` Error | {
"login": "liyucheng09",
"id": 27999909,
"node_id": "MDQ6VXNlcjI3OTk5OTA5",
"avatar_url": "https://avatars.githubusercontent.com/u/27999909?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/liyucheng09",
"html_url": "https://github.com/liyucheng09",
"followers_url": "https://api.github.com/users/liyucheng09/followers",
"following_url": "https://api.github.com/users/liyucheng09/following{/other_user}",
"gists_url": "https://api.github.com/users/liyucheng09/gists{/gist_id}",
"starred_url": "https://api.github.com/users/liyucheng09/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/liyucheng09/subscriptions",
"organizations_url": "https://api.github.com/users/liyucheng09/orgs",
"repos_url": "https://api.github.com/users/liyucheng09/repos",
"events_url": "https://api.github.com/users/liyucheng09/events{/privacy}",
"received_events_url": "https://api.github.com/users/liyucheng09/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | null | [] | null | [
"Hi @liyucheng09, thanks for reporting.\r\n\r\nApparently this issue has to do with your environment setup. One question: is your data in an NFS share? Some people have reported this error when using `fcntl` to write to an NFS share... If this is the case, then it might be that your NFS just may not be set up to provide file locks. You should ask your system administrator, or try these commands in the terminal:\r\n```shell\r\nsudo systemctl enable rpc-statd\r\nsudo systemctl start rpc-statd\r\n```"
] | 1,625,843,569,000 | 1,626,070,830,000 | null | NONE | null | null | null | ## Describe the bug
It seems that `filelock.py` raised an error.
```
>>> ds=load_dataset('xsum')
^CTraceback (most recent call last):
File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 402, in _acquire
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
OSError: [Errno 37] No locks available
```
According to the error log, it is an OSError, but there is an `except` clause in the `_acquire` function.
```
def _acquire(self):
open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
fd = os.open(self._lock_file, open_mode)
except (IOError, OSError):
pass
else:
self._lock_file_fd = fd
return None
```
I don't know why it got stuck rather than hitting the `pass` branch directly.
I am not quite familiar with file locking, so any help is highly appreciated.
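Note that in the snippet above, the `try` only guards `os.open`, while the traceback raises from `fcntl.flock` at line 402, so the failing call appears to live in a different `_acquire` implementation than the one quoted. A minimal probe to check whether the filesystem supports `fcntl` locks at all (the path below is hypothetical):
```python
import fcntl
import os

# Try to take a non-blocking exclusive lock on a scratch file
# (point the hypothetical path at the datasets cache directory)
fd = os.open("/path/to/cache/test.lock", os.O_WRONLY | os.O_CREAT)
try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    print("locks available")
except OSError as err:
    print(f"locks not available: {err}")  # e.g. [Errno 37] No locks available
finally:
    os.close(fd)
```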
## Steps to reproduce the bug
```python
ds = load_dataset('xsum')
```
## Expected results
A clear and concise description of the expected results.
## Actual results
```
>>> ds=load_dataset('xsum')
^CTraceback (most recent call last):
File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 402, in _acquire
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
OSError: [Errno 37] No locks available
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/load.py", line 818, in load_dataset
use_auth_token=use_auth_token,
File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/load.py", line 470, in prepare_module
with FileLock(lock_path):
File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 323, in __enter__
self.acquire()
File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 272, in acquire
self._acquire()
File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 402, in _acquire
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
KeyboardInterrupt
```
## Environment info
<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: 1.9.0
- Platform: Linux-4.15.0-135-generic-x86_64-with-debian-buster-sid
- Python version: 3.6.13
- PyArrow version: 4.0.1
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2618/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2618/timeline | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/2617 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2617/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2617/comments | https://api.github.com/repos/huggingface/datasets/issues/2617/events | https://github.com/huggingface/datasets/pull/2617 | 940,846,847 | MDExOlB1bGxSZXF1ZXN0Njg2ODU3NzQz | 2,617 | Fix missing EOL issue in to_json for old versions of pandas | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,625,843,145,000 | 1,626,098,940,000 | 1,625,844,513,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2617",
"html_url": "https://github.com/huggingface/datasets/pull/2617",
"diff_url": "https://github.com/huggingface/datasets/pull/2617.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2617.patch",
"merged_at": 1625844513000
Some versions of pandas don't add an EOL at the end of the output of `to_json`.
Therefore, users could end up with two samples on the same line.
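A minimal sketch of the idea (an assumption about the fix, not necessarily this PR's exact code):
```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2]})
json_str = df.to_json(orient="records", lines=True)
# Some pandas versions omit the trailing newline, so append it if missing
if not json_str.endswith("\n"):
    json_str += "\n"
```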
Close https://github.com/huggingface/datasets/issues/2615 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2617/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2617/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2616 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2616/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2616/comments | https://api.github.com/repos/huggingface/datasets/issues/2616/events | https://github.com/huggingface/datasets/pull/2616 | 940,799,038 | MDExOlB1bGxSZXF1ZXN0Njg2ODE3NjYz | 2,616 | Support remote data files | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
}
] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [
"@lhoestq maybe we could also use (if available) the ETag of the remote file in `create_config_id`?",
"> @lhoestq maybe we could also use (if available) the ETag of the remote file in `create_config_id`?\r\n\r\nSure ! We can get the ETag with\r\n```python\r\nheaders = get_authentication_headers_for_url(url, use_auth_token=use_auth_token) # auth for private repos\r\netag = http_head(url, headers=headers).headers.get(\"ETag\")\r\n```\r\n\r\nSince the computation of the `config_id` is done in the `DatasetBuilder.__init__`, then this means that we need to add a new parameter `use_auth_token` in `DatasetBuilder.__init__`\r\n\r\nDoes that sound good ? We can add this in a following PR"
] | 1,625,839,658,000 | 1,625,847,221,000 | 1,625,847,221,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2616",
"html_url": "https://github.com/huggingface/datasets/pull/2616",
"diff_url": "https://github.com/huggingface/datasets/pull/2616.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2616.patch",
"merged_at": 1625847221000
} | Add support for (streaming) remote data files:
```python
data_files = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{relative_file_path}"
ds = load_dataset("json", split="train", data_files=data_files, streaming=True)
```
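Continuing the snippet above, the streamed dataset can then be consumed lazily (a minimal sketch):
```python
# `ds` is the IterableDataset loaded above; records are fetched on demand
for i, example in enumerate(ds):
    print(example)
    if i == 2:  # stop after the first few records
        break
```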
cc: @thomwolf | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2616/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2616/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2615 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2615/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2615/comments | https://api.github.com/repos/huggingface/datasets/issues/2615/events | https://github.com/huggingface/datasets/issues/2615 | 940,794,339 | MDU6SXNzdWU5NDA3OTQzMzk= | 2,615 | Jsonlines export error | {
"login": "TevenLeScao",
"id": 26709476,
"node_id": "MDQ6VXNlcjI2NzA5NDc2",
"avatar_url": "https://avatars.githubusercontent.com/u/26709476?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/TevenLeScao",
"html_url": "https://github.com/TevenLeScao",
"followers_url": "https://api.github.com/users/TevenLeScao/followers",
"following_url": "https://api.github.com/users/TevenLeScao/following{/other_user}",
"gists_url": "https://api.github.com/users/TevenLeScao/gists{/gist_id}",
"starred_url": "https://api.github.com/users/TevenLeScao/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/TevenLeScao/subscriptions",
"organizations_url": "https://api.github.com/users/TevenLeScao/orgs",
"repos_url": "https://api.github.com/users/TevenLeScao/repos",
"events_url": "https://api.github.com/users/TevenLeScao/events{/privacy}",
"received_events_url": "https://api.github.com/users/TevenLeScao/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
{
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Thanks for reporting @TevenLeScao! I'm having a look...",
"(not sure what just happened on the assignations sorry)",
"For some reason this happens (both `datasets` version are on master) only on Python 3.6 and not Python 3.8.",
"@TevenLeScao we are using `pandas` to serialize the dataset to JSON Lines. So it must be due to pandas. Could you please check the pandas version causing the issue?",
"@TevenLeScao I have just checked it: this was a bug in `pandas` and it was fixed in version 1.2: https://github.com/pandas-dev/pandas/pull/36898",
"Thanks ! I'm creating a PR",
"Well I though it was me who has taken on this issue... ๐
",
"Sorry, I was also talking to teven offline so I already had the PR ready before noticing x)",
"I was also already working in my PR... Nevermind. Next time we should pay attention if there is somebody (self-)assigned to an issue and if he/she is still working on it before overtaking it... ๐ ",
"The fix is available on `master` @TevenLeScao , thanks for reporting"
] | 1,625,839,325,000 | 1,625,844,547,000 | 1,625,844,513,000 | MEMBER | null | null | null | ## Describe the bug
When exporting large datasets as JSON Lines (C4 in my case), the created file has an error every 9,999 lines: the 9,999th and 10,000th lines are concatenated, breaking the JSON Lines format. This sounds like it is related to batching, whose batch size is 10,000 by default.
## Steps to reproduce the bug
This what I'm running:
in python:
```
from datasets import load_dataset
ptb = load_dataset("ptb_text_only")
ptb["train"].to_json("ptb.jsonl")
```
then, outside of Python:
```
head -10000 ptb.jsonl
```
## Expected results
Properly separated lines
## Actual results
The last line is a concatenation of two lines
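A quick way to confirm this programmatically (a sketch that assumes the `ptb.jsonl` file written above):
```python
import json

with open("ptb.jsonl") as f:
    for i, line in enumerate(f, start=1):
        try:
            json.loads(line)
        except json.JSONDecodeError:
            # Two concatenated objects on one line raise "Extra data"
            print(f"malformed line at {i}")
```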
## Environment info
<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: 1.9.1.dev0
- Platform: Linux-5.4.0-1046-gcp-x86_64-with-Ubuntu-18.04-bionic
- Python version: 3.6.9
- PyArrow version: 4.0.1 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2615/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2615/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2614 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2614/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2614/comments | https://api.github.com/repos/huggingface/datasets/issues/2614/events | https://github.com/huggingface/datasets/pull/2614 | 940,762,427 | MDExOlB1bGxSZXF1ZXN0Njg2Nzg2NTg3 | 2,614 | Convert numpy scalar to python float in Pearsonr output | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,625,836,975,000 | 1,626,099,182,000 | 1,625,839,478,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2614",
"html_url": "https://github.com/huggingface/datasets/pull/2614",
"diff_url": "https://github.com/huggingface/datasets/pull/2614.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2614.patch",
"merged_at": 1625839478000
} | Follow-up to https://github.com/huggingface/datasets/pull/2612 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2614/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2614/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2613 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2613/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2613/comments | https://api.github.com/repos/huggingface/datasets/issues/2613/events | https://github.com/huggingface/datasets/pull/2613 | 940,759,852 | MDExOlB1bGxSZXF1ZXN0Njg2Nzg0MzY0 | 2,613 | Use ndarray.item instead of ndarray.tolist | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,625,836,775,000 | 1,626,099,177,000 | 1,625,838,605,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2613",
"html_url": "https://github.com/huggingface/datasets/pull/2613",
"diff_url": "https://github.com/huggingface/datasets/pull/2613.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2613.patch",
"merged_at": 1625838605000
} | This PR follows up on #2612 to use `numpy.ndarray.item` instead of `numpy.ndarray.tolist` as the latter is somewhat confusing to the developer (even though it works).
Judging from the `numpy` docs, `ndarray.item` is closer to what we want: https://numpy.org/doc/stable/reference/generated/numpy.ndarray.item.html#numpy-ndarray-item
PS. Sorry for the duplicate work here. I should have read the numpy docs more carefully in #2612
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2613/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2613/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2612 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2612/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2612/comments | https://api.github.com/repos/huggingface/datasets/issues/2612/events | https://github.com/huggingface/datasets/pull/2612 | 940,604,512 | MDExOlB1bGxSZXF1ZXN0Njg2NjUwMjk3 | 2,612 | Return Python float instead of numpy.float64 in sklearn metrics | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [
"I opened an issue on the `sklearn` repo to understand why `numpy.float64` is the default: https://github.com/scikit-learn/scikit-learn/discussions/20490",
"It could be surprising at first to use `tolist()` on numpy scalars but it works ^^",
"did the same for Pearsonr here: https://github.com/huggingface/datasets/pull/2614"
] | 1,625,824,089,000 | 1,626,099,173,000 | 1,625,835,834,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2612",
"html_url": "https://github.com/huggingface/datasets/pull/2612",
"diff_url": "https://github.com/huggingface/datasets/pull/2612.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2612.patch",
"merged_at": 1625835834000
} | This PR converts the return type of all `sklearn` metrics to be Python `float` instead of `numpy.float64`.
The reason behind this is that our Hub evaluation framework relies on converting benchmark-specific metrics to YAML ([example](https://huggingface.co/datasets/autonlp/autonlp-benchmark-raft-neelalex__raft-test-neelalex__raft-predictions-3/blob/main/README.md#L11)) and the `numpy.float64` format produces garbage like:
```python
import yaml
from datasets import load_metric
metric = load_metric("accuracy")
score = metric.compute(predictions=[0,1], references=[0,1])
print(yaml.dump(score["accuracy"])) # output below
# !!python/object/apply:numpy.core.multiarray.scalar
# - !!python/object/apply:numpy.dtype
# args:
# - f8
# - false
# - true
# state: !!python/tuple
# - 3
# - <
# - null
# - null
# - null
# - -1
# - -1
# - 0
# - !!binary |
# AAAAAAAA8D8=
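# After this fix, casting to a built-in float would yield clean YAML instead
# (a hedged sketch of the intended behavior, reusing `score` from above):
# print(yaml.dump(float(score["accuracy"])))  # -> 1.0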
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2612/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2612/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2611 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2611/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2611/comments | https://api.github.com/repos/huggingface/datasets/issues/2611/events | https://github.com/huggingface/datasets/pull/2611 | 940,307,053 | MDExOlB1bGxSZXF1ZXN0Njg2Mzk5MjU3 | 2,611 | More consistent naming | {
"login": "mariosasko",
"id": 47462742,
"node_id": "MDQ6VXNlcjQ3NDYyNzQy",
"avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariosasko",
"html_url": "https://github.com/mariosasko",
"followers_url": "https://api.github.com/users/mariosasko/followers",
"following_url": "https://api.github.com/users/mariosasko/following{/other_user}",
"gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions",
"organizations_url": "https://api.github.com/users/mariosasko/orgs",
"repos_url": "https://api.github.com/users/mariosasko/repos",
"events_url": "https://api.github.com/users/mariosasko/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariosasko/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,625,789,357,000 | 1,626,196,399,000 | 1,626,192,510,000 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2611",
"html_url": "https://github.com/huggingface/datasets/pull/2611",
"diff_url": "https://github.com/huggingface/datasets/pull/2611.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2611.patch",
"merged_at": 1626192510000
} | As per @stas00's suggestion in #2500, this PR inserts a space between the logo and the lib name (`🤗Datasets` -> `🤗 Datasets`) for consistency with the Transformers lib. Additionally, more consistent names are used for Datasets Hub, etc. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2611/reactions",
"total_count": 2,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 2,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2611/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2610 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2610/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2610/comments | https://api.github.com/repos/huggingface/datasets/issues/2610/events | https://github.com/huggingface/datasets/pull/2610 | 939,899,829 | MDExOlB1bGxSZXF1ZXN0Njg2MDUwMzI5 | 2,610 | Add missing WikiANN language tags | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,625,753,281,000 | 1,626,099,136,000 | 1,625,759,044,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2610",
"html_url": "https://github.com/huggingface/datasets/pull/2610",
"diff_url": "https://github.com/huggingface/datasets/pull/2610.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2610.patch",
"merged_at": 1625759044000
} | Add missing language tags for WikiANN datasets. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2610/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2610/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2609 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2609/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2609/comments | https://api.github.com/repos/huggingface/datasets/issues/2609/events | https://github.com/huggingface/datasets/pull/2609 | 939,616,682 | MDExOlB1bGxSZXF1ZXN0Njg1ODA3MTMz | 2,609 | Fix potential DuplicatedKeysError | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [
"Finally, I'm splitting this PR."
] | 1,625,733,484,000 | 1,626,099,196,000 | 1,625,848,928,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2609",
"html_url": "https://github.com/huggingface/datasets/pull/2609",
"diff_url": "https://github.com/huggingface/datasets/pull/2609.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2609.patch",
"merged_at": 1625848928000
} | Fix potential DuplicatedKeysError by ensuring keys are unique.
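For example, generating keys programmatically rather than reading them from the data avoids collisions. A minimal sketch, assuming a hypothetical line-oriented loader (not this PR's exact code):
```python
import json

def _generate_examples(self, filepath):
    with open(filepath, encoding="utf-8") as f:
        for idx, line in enumerate(f):
            # `idx` is unique by construction, unlike e.g. an id read from the data
            yield idx, json.loads(line)
```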
More generally, we should promote this as a good practice: keys should be programmatically generated as unique, instead of read from the data (which might not be unique). | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2609/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2609/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2608 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2608/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2608/comments | https://api.github.com/repos/huggingface/datasets/issues/2608/events | https://github.com/huggingface/datasets/pull/2608 | 938,897,626 | MDExOlB1bGxSZXF1ZXN0Njg1MjAwMDYw | 2,608 | Support streaming JSON files | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,625,664,622,000 | 1,626,099,151,000 | 1,625,760,521,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2608",
"html_url": "https://github.com/huggingface/datasets/pull/2608",
"diff_url": "https://github.com/huggingface/datasets/pull/2608.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2608.patch",
"merged_at": 1625760520000
} | Use open in JSON dataset builder, so that it can be patched with xopen for streaming.
Close #2607. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2608/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2608/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2607 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2607/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2607/comments | https://api.github.com/repos/huggingface/datasets/issues/2607/events | https://github.com/huggingface/datasets/issues/2607 | 938,796,902 | MDU6SXNzdWU5Mzg3OTY5MDI= | 2,607 | Streaming local gzip compressed JSON line files is not working | {
"login": "thomwolf",
"id": 7353373,
"node_id": "MDQ6VXNlcjczNTMzNzM=",
"avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/thomwolf",
"html_url": "https://github.com/thomwolf",
"followers_url": "https://api.github.com/users/thomwolf/followers",
"following_url": "https://api.github.com/users/thomwolf/following{/other_user}",
"gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}",
"starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions",
"organizations_url": "https://api.github.com/users/thomwolf/orgs",
"repos_url": "https://api.github.com/users/thomwolf/repos",
"events_url": "https://api.github.com/users/thomwolf/events{/privacy}",
"received_events_url": "https://api.github.com/users/thomwolf/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Updating to pyarrow-4.0.1 didn't fix the issue",
"Here is an exemple dataset with 2 of these compressed JSON files: https://huggingface.co/datasets/thomwolf/github-python",
"Hi @thomwolf, thanks for reporting.\r\n\r\nIt seems this might be due to the fact that the JSON Dataset builder uses `pyarrow.json` (`paj.read_json`) to read the data without using the Python standard `open(file,...` (which is the one patched with `xopen` to work in streaming mode).\r\n\r\nThis has to be fixed.",
"Sorry for reopening this, but I'm having the same issue as @thomwolf when streaming a gzipped JSON Lines file from the hub. Or is that just not possible by definition?\r\nI installed `datasets`in editable mode from source (so probably includes the fix from #2608 ?): \r\n```\r\n>>> datasets.__version__\r\n'1.9.1.dev0'\r\n```\r\n\r\n```\r\n>>> msmarco = datasets.load_dataset(\"webis/msmarco\", \"corpus\", streaming=True)\r\nUsing custom data configuration corpus-174d3b7155eb68db\r\n>>> msmarco_iter = iter(msmarco['train'])\r\n>>> print(next(msmarco_iter))\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/media/ssd/TREC/msmarco/datasets/src/datasets/iterable_dataset.py\", line 338, in __iter__\r\n for key, example in self._iter():\r\n File \"/media/ssd/TREC/msmarco/datasets/src/datasets/iterable_dataset.py\", line 335, in _iter\r\n yield from ex_iterable\r\n File \"/media/ssd/TREC/msmarco/datasets/src/datasets/iterable_dataset.py\", line 78, in __iter__\r\n for key, example in self.generate_examples_fn(**self.kwargs):\r\n File \"/home/christopher/.cache/huggingface/modules/datasets_modules/datasets/msmarco/eb63dff8d83107168e973c7a655a6082d37e08d71b4ac39a0afada479c138745/msmarco.py\", line 96, in _generate_examples\r\n with gzip.open(file, \"rt\", encoding=\"utf-8\") as f:\r\n File \"/usr/lib/python3.6/gzip.py\", line 53, in open\r\n binary_file = GzipFile(filename, gz_mode, compresslevel)\r\n File \"/usr/lib/python3.6/gzip.py\", line 163, in __init__\r\n fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')\r\nFileNotFoundError: [Errno 2] No such file or directory: 'https://huggingface.co/datasets/webis/msmarco/resolve/main/msmarco_doc_00.gz'\r\n```\r\n\r\nLoading the dataset without streaming set to True, works fine.",
"Hi ! To make the streaming work, we extend `open` in the dataset builder to work with urls.\r\n\r\nTherefore you just need to use `open` before using `gzip.open`:\r\n```diff\r\n- with gzip.open(file, \"rt\", encoding=\"utf-8\") as f:\r\n+ with gzip.open(open(file, \"rb\"), \"rt\", encoding=\"utf-8\") as f:\r\n```\r\n\r\nYou can see that it is the case for oscar.py and c4.py for example:\r\n\r\nhttps://github.com/huggingface/datasets/blob/8814b393984c1c2e1800ba370de2a9f7c8644908/datasets/oscar/oscar.py#L358-L358\r\n\r\nhttps://github.com/huggingface/datasets/blob/8814b393984c1c2e1800ba370de2a9f7c8644908/datasets/c4/c4.py#L88-L88\r\n\r\n",
"@lhoestq Sorry I missed that. Thank you Quentin!"
] | 1,625,657,793,000 | 1,626,774,619,000 | 1,625,760,521,000 | MEMBER | null | null | null | ## Describe the bug
Using streaming to iterate over local gzip-compressed JSON files raises a file-not-found error
## Steps to reproduce the bug
```python
from datasets import load_dataset

# gzip-compressed JSON Lines files; the file name comes from the traceback below
data_files = ["file-000000000000.json.gz"]
streamed_dataset = load_dataset('json', split='train', data_files=data_files, streaming=True)
next(iter(streamed_dataset))
```
## Actual results
```
FileNotFoundError Traceback (most recent call last)
<ipython-input-6-27a664e29784> in <module>
----> 1 next(iter(streamed_dataset))
~/Documents/GitHub/datasets/src/datasets/iterable_dataset.py in __iter__(self)
336
337 def __iter__(self):
--> 338 for key, example in self._iter():
339 if self.features:
340 # we encode the example for ClassLabel feature types for example
~/Documents/GitHub/datasets/src/datasets/iterable_dataset.py in _iter(self)
333 else:
334 ex_iterable = self._ex_iterable
--> 335 yield from ex_iterable
336
337 def __iter__(self):
~/Documents/GitHub/datasets/src/datasets/iterable_dataset.py in __iter__(self)
76
77 def __iter__(self):
---> 78 for key, example in self.generate_examples_fn(**self.kwargs):
79 yield key, example
80
~/Documents/GitHub/datasets/src/datasets/iterable_dataset.py in wrapper(**kwargs)
282 def wrapper(**kwargs):
283 python_formatter = PythonFormatter()
--> 284 for key, table in generate_tables_fn(**kwargs):
285 batch = python_formatter.format_batch(table)
286 for i, example in enumerate(_batch_to_examples(batch)):
~/Documents/GitHub/datasets/src/datasets/packaged_modules/json/json.py in _generate_tables(self, files, original_files)
85 file,
86 read_options=self.config.pa_read_options,
---> 87 parse_options=self.config.pa_parse_options,
88 )
89 except pa.ArrowInvalid as err:
~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/_json.pyx in pyarrow._json.read_json()
~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/_json.pyx in pyarrow._json._get_reader()
~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.get_input_stream()
~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.get_native_file()
~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.OSFile.__cinit__()
~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.OSFile._open_readable()
~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/error.pxi in pyarrow.lib.pyarrow_internal_check_status()
~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/error.pxi in pyarrow.lib.check_status()
FileNotFoundError: [Errno 2] Failed to open local file 'gzip://file-000000000000.json::/Users/thomwolf/github-dataset/file-000000000000.json.gz'. Detail: [errno 2] No such file or directory
```
## Environment info
- `datasets` version: 1.9.1.dev0
- Platform: Darwin-19.6.0-x86_64-i386-64bit
- Python version: 3.7.7
- PyArrow version: 1.0.0 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2607/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2607/timeline | null | completed | false |
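The workaround given in the comments on issue #2607 above — wrapping the builder-patched `open` before `gzip.open` — generalizes to any dataset script that reads gzip-compressed JSON Lines files. A minimal sketch of that pattern (the function name follows the dataset-script convention, and the file handling here is illustrative, not taken verbatim from oscar.py or c4.py):

```python
# Minimal sketch of the streaming-friendly pattern from the comments above.
# Inside a dataset script, `open` is the builder-level wrapper that also
# accepts URLs in streaming mode; `gzip` then decompresses on the fly.
import gzip
import json

def _generate_examples(files):
    """Yield (key, example) pairs from gzip-compressed JSON Lines files."""
    key = 0
    for file in files:
        with gzip.open(open(file, "rb"), "rt", encoding="utf-8") as f:
            for line in f:
                yield key, json.loads(line)
                key += 1
```

Run as a plain script, `open` is just the builtin and works for local paths; only inside a dataset builder does the patched version add URL support.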
https://api.github.com/repos/huggingface/datasets/issues/2606 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2606/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2606/comments | https://api.github.com/repos/huggingface/datasets/issues/2606/events | https://github.com/huggingface/datasets/issues/2606 | 938,763,684 | MDU6SXNzdWU5Mzg3NjM2ODQ= | 2,606 | [Metrics] addition of wiki_split metrics | {
"login": "bhadreshpsavani",
"id": 26653468,
"node_id": "MDQ6VXNlcjI2NjUzNDY4",
"avatar_url": "https://avatars.githubusercontent.com/u/26653468?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/bhadreshpsavani",
"html_url": "https://github.com/bhadreshpsavani",
"followers_url": "https://api.github.com/users/bhadreshpsavani/followers",
"following_url": "https://api.github.com/users/bhadreshpsavani/following{/other_user}",
"gists_url": "https://api.github.com/users/bhadreshpsavani/gists{/gist_id}",
"starred_url": "https://api.github.com/users/bhadreshpsavani/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/bhadreshpsavani/subscriptions",
"organizations_url": "https://api.github.com/users/bhadreshpsavani/orgs",
"repos_url": "https://api.github.com/users/bhadreshpsavani/repos",
"events_url": "https://api.github.com/users/bhadreshpsavani/events{/privacy}",
"received_events_url": "https://api.github.com/users/bhadreshpsavani/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
},
{
"id": 2459308248,
"node_id": "MDU6TGFiZWwyNDU5MzA4MjQ4",
"url": "https://api.github.com/repos/huggingface/datasets/labels/metric%20request",
"name": "metric request",
"color": "d4c5f9",
"default": false,
"description": "Requesting to add a new metric"
}
] | closed | false | {
"login": "bhadreshpsavani",
"id": 26653468,
"node_id": "MDQ6VXNlcjI2NjUzNDY4",
"avatar_url": "https://avatars.githubusercontent.com/u/26653468?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/bhadreshpsavani",
"html_url": "https://github.com/bhadreshpsavani",
"followers_url": "https://api.github.com/users/bhadreshpsavani/followers",
"following_url": "https://api.github.com/users/bhadreshpsavani/following{/other_user}",
"gists_url": "https://api.github.com/users/bhadreshpsavani/gists{/gist_id}",
"starred_url": "https://api.github.com/users/bhadreshpsavani/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/bhadreshpsavani/subscriptions",
"organizations_url": "https://api.github.com/users/bhadreshpsavani/orgs",
"repos_url": "https://api.github.com/users/bhadreshpsavani/repos",
"events_url": "https://api.github.com/users/bhadreshpsavani/events{/privacy}",
"received_events_url": "https://api.github.com/users/bhadreshpsavani/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "bhadreshpsavani",
"id": 26653468,
"node_id": "MDQ6VXNlcjI2NjUzNDY4",
"avatar_url": "https://avatars.githubusercontent.com/u/26653468?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/bhadreshpsavani",
"html_url": "https://github.com/bhadreshpsavani",
"followers_url": "https://api.github.com/users/bhadreshpsavani/followers",
"following_url": "https://api.github.com/users/bhadreshpsavani/following{/other_user}",
"gists_url": "https://api.github.com/users/bhadreshpsavani/gists{/gist_id}",
"starred_url": "https://api.github.com/users/bhadreshpsavani/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/bhadreshpsavani/subscriptions",
"organizations_url": "https://api.github.com/users/bhadreshpsavani/orgs",
"repos_url": "https://api.github.com/users/bhadreshpsavani/repos",
"events_url": "https://api.github.com/users/bhadreshpsavani/events{/privacy}",
"received_events_url": "https://api.github.com/users/bhadreshpsavani/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"#take"
] | 1,625,655,364,000 | 1,626,129,271,000 | 1,626,129,271,000 | CONTRIBUTOR | null | null | null | **Is your feature request related to a problem? Please describe.**
While training a model on the English sentence-splitting task, we need to evaluate the trained model on `Exact Match`, `SARI` and `BLEU` scores,
like this:
![image](https://user-images.githubusercontent.com/26653468/124746876-ff5a3380-df3e-11eb-9a01-4b48db7a6694.png)
While training, we need a metric that can return all three values at once.
Currently, we don't have an exact-match metric that works on normalized text.
**Describe the solution you'd like**
A custom metric for wiki_split that calculates these three values and returns them in a single dictionary.
For exact match, we can refer to [this](https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py)
**Describe alternatives you've considered**
Two of these metrics are already present; one more can be added for the exact match, and then we can run all three metrics in the training script (a rough sketch of such a combined metric appears after this record).
#self-assign | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2606/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2606/timeline | null | completed | false |
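A rough sketch of the combined metric requested in issue #2606 above. Only the normalized exact match is implemented here, in the spirit of the referenced SQuAD metrics; the SARI and BLEU values are assumed to come from the existing metric scripts, and all function names are illustrative:

```python
# Hedged sketch: normalized exact match plus stubbed SARI/BLEU, combined into
# one dictionary as the issue requests. Not the actual wiki_split metric.
import re
import string

def normalize(text: str) -> str:
    """Lowercase, strip punctuation and articles, and squeeze whitespace."""
    text = text.lower()
    text = "".join(ch for ch in text if ch not in string.punctuation)
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

def compute_exact_match(predictions, references) -> float:
    matches = sum(normalize(p) == normalize(r) for p, r in zip(predictions, references))
    return 100.0 * matches / len(predictions)

def compute_wiki_split_metrics(predictions, references, sari_score, bleu_score):
    # `sari_score` and `bleu_score` are assumed computed by the existing metrics
    return {
        "exact": compute_exact_match(predictions, references),
        "sari": sari_score,
        "bleu": bleu_score,
    }
```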
https://api.github.com/repos/huggingface/datasets/issues/2605 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2605/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2605/comments | https://api.github.com/repos/huggingface/datasets/issues/2605/events | https://github.com/huggingface/datasets/pull/2605 | 938,648,164 | MDExOlB1bGxSZXF1ZXN0Njg0OTkyODIz | 2,605 | Make any ClientError trigger retry in streaming mode (e.g. ClientOSError) | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,625,647,643,000 | 1,626,099,027,000 | 1,625,648,353,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2605",
"html_url": "https://github.com/huggingface/datasets/pull/2605",
"diff_url": "https://github.com/huggingface/datasets/pull/2605.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2605.patch",
"merged_at": 1625648353000
} | During the FLAX sprint, some users hit this error when streaming datasets:
```python
aiohttp.client_exceptions.ClientOSError: [Errno 104] Connection reset by peer
```
This error should trigger a retry instead of crashing outright.
Therefore I extended the set of errors that trigger a retry to the base aiohttp error type: `ClientError` (a rough retry sketch appears after this record).
In particular both `ClientOSError` and `ServerDisconnectedError` inherit from `ClientError`. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2605/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2605/timeline | null | null | true |
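To illustrate the PR above: `aiohttp.ClientError` is the common base class of `ClientOSError` and `ServerDisconnectedError`, so catching it covers both. A minimal retry sketch, assuming a bounded retry count and fixed delay (the library's actual retry parameters may differ):

```python
# Hedged sketch of retry-on-ClientError behavior; illustrative values only.
import asyncio
import aiohttp

async def fetch_with_retry(url: str, max_retries: int = 3, delay: float = 1.0) -> bytes:
    for attempt in range(max_retries):
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url) as response:
                    return await response.read()
        except aiohttp.ClientError:
            # covers ClientOSError, ServerDisconnectedError, etc.
            if attempt == max_retries - 1:
                raise
            await asyncio.sleep(delay)
```

Drive it with `asyncio.run(fetch_with_retry(url))`.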
https://api.github.com/repos/huggingface/datasets/issues/2604 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2604/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2604/comments | https://api.github.com/repos/huggingface/datasets/issues/2604/events | https://github.com/huggingface/datasets/issues/2604 | 938,602,237 | MDU6SXNzdWU5Mzg2MDIyMzc= | 2,604 | Add option to delete temporary files (e.g. extracted files) when loading dataset | {
"login": "thomwolf",
"id": 7353373,
"node_id": "MDQ6VXNlcjczNTMzNzM=",
"avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/thomwolf",
"html_url": "https://github.com/thomwolf",
"followers_url": "https://api.github.com/users/thomwolf/followers",
"following_url": "https://api.github.com/users/thomwolf/following{/other_user}",
"gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}",
"starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions",
"organizations_url": "https://api.github.com/users/thomwolf/orgs",
"repos_url": "https://api.github.com/users/thomwolf/repos",
"events_url": "https://api.github.com/users/thomwolf/events{/privacy}",
"received_events_url": "https://api.github.com/users/thomwolf/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
}
] | closed | false | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
}
] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [
"Hi !\r\nIf we want something more general, we could either\r\n1. delete the extracted files after the arrow data generation automatically, or \r\n2. delete each extracted file during the arrow generation right after it has been closed.\r\n\r\nSolution 2 is better to save disk space during the arrow generation. Is it what you had in mind ?\r\n\r\nThe API could look like\r\n```python\r\nload_dataset(..., delete_extracted_files_after_usage=True)\r\n```\r\n\r\nIn terms of implementation, here are some directions we could take for each solution:\r\n1. get the list of the extracted files from the DownloadManager and then delete them after the dataset is processed. This can be implemented in `download_and_prepare` I guess\r\n2. maybe wrap and mock `open` in the builder to make it delete the file when the file is closed.",
"Also, if I delete the extracted files they need to be re-extracted again instead of loading from the Arrow cache files",
"I think we already opened an issue about this topic (suggested by @stas00): duplicated of #2481?\r\n\r\nThis is in our TODO list... ๐
",
"I think the deletion of each extracted file could be implemented in our CacheManager and ExtractManager (once merged to master: #2295, #2277). ๐ ",
"Oh yes sorry, I didn't check if this was a duplicate",
"Nevermind @thomwolf, I just mentioned the other issue so that both appear linked in GitHub and we do not forget to close both once we make the corresponding Pull Request... That was the main reason! ๐ ",
"Ok yes. I think this is an important feature to be able to use large datasets which are pretty much always compressed files.\r\n\r\nIn particular now this requires to keep the extracted file on the drive if you want to avoid reprocessing the dataset so in my case, this require using always ~400GB of drive instead of just 200GB (which is already significant). \r\n\r\nTwo nice features would be to:\r\n- allow to delete the extracted files without loosing the ability to load the dataset from the cached arrow-file\r\n- streamlined decompression when only the currently read file is extracted - this might require to read the list of files from the extracted archives before processing them?",
"Here is a sample dataset with 2 such large compressed JSON files for debugging: https://huggingface.co/datasets/thomwolf/github-python",
"Note that I'm confirming that with the current master branch of dataset, deleting extracted files (without deleting the arrow cache file) lead to **re-extracting** these files when reloading the dataset instead of directly loading the arrow cache file.",
"Hi ! That's weird, it doesn't do that on my side (tested on master on my laptop by deleting the `extracted` folder in the download cache directory). You tested with one of the files at https://huggingface.co/datasets/thomwolf/github-python that you have locally ?",
"Yes itโs when I load local compressed JSON line files with load_dataset(โjsonโ, data_files=โฆ) ",
"@thomwolf I'm sorry but I can't reproduce this problem. I'm also using: \r\n```python\r\nds = load_dataset(\"json\", split=\"train\", data_files=data_files, cache_dir=cache_dir)\r\n```\r\nafter having removed the extracted files:\r\n```python\r\nassert sorted((cache_dir / \"downloads\" / \"extracted\").iterdir()) == []\r\n```\r\n\r\nI get the logging message:\r\n```shell\r\nWARNING datasets.builder:builder.py:531 Reusing dataset json ...\r\n```",
"Do you confirm the extracted folder stays empty after reloading?",
"> \r\n> \r\n> Do you confirm the extracted folder stays empty after reloading?\r\n\r\nYes, I have the above mentioned assertion on the emptiness of the extracted folder:\r\n```python\r\nassert sorted((cache_dir / \"downloads\" / \"extracted\").iterdir()) == []\r\n```\r\n"
] | 1,625,644,576,000 | 1,626,685,698,000 | 1,626,685,698,000 | MEMBER | null | null | null | I'm loading a dataset made up of 44 GB of compressed JSON files.
When loading the dataset with the JSON script, extracting the files creates about 200 GB of uncompressed files before the 180 GB of Arrow cache tables are created.
Having a simple way to delete the extracted files after usage (or even better, to stream extraction/deletion) would be nice to avoid disk clutter (a manual-cleanup sketch appears after this record).
I can maybe tackle this one in the JSON script unless you want a more general solution. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2604/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2604/timeline | null | completed | false |
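A hedged sketch of the manual workaround discussed in issue #2604 above: once the Arrow cache tables exist, delete the `downloads/extracted` folder by hand to reclaim the disk space taken by the uncompressed files. The cache location below is the library's default; adjust it if you pass a custom `cache_dir`:

```python
# Manual cleanup of extracted files; assumes the default cache layout
# (`downloads/extracted`) mentioned in the comments above.
import shutil
from pathlib import Path

cache_dir = Path.home() / ".cache" / "huggingface" / "datasets"
extracted_dir = cache_dir / "downloads" / "extracted"

if extracted_dir.exists():
    shutil.rmtree(extracted_dir)  # frees the ~200 GB of extracted files
```

Whether the next load then re-extracts the files or reuses the Arrow cache was exactly the point under discussion in the comments.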
https://api.github.com/repos/huggingface/datasets/issues/2603 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2603/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2603/comments | https://api.github.com/repos/huggingface/datasets/issues/2603/events | https://github.com/huggingface/datasets/pull/2603 | 938,588,149 | MDExOlB1bGxSZXF1ZXN0Njg0OTQ0ODcz | 2,603 | Fix DuplicatedKeysError in omp | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,625,643,512,000 | 1,626,099,041,000 | 1,625,662,595,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2603",
"html_url": "https://github.com/huggingface/datasets/pull/2603",
"diff_url": "https://github.com/huggingface/datasets/pull/2603.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2603.patch",
"merged_at": 1625662595000
} | Close #2598. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2603/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2603/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2602 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2602/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2602/comments | https://api.github.com/repos/huggingface/datasets/issues/2602/events | https://github.com/huggingface/datasets/pull/2602 | 938,555,712 | MDExOlB1bGxSZXF1ZXN0Njg0OTE5MjMy | 2,602 | Remove import of transformers | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,625,641,098,000 | 1,626,099,022,000 | 1,625,646,531,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2602",
"html_url": "https://github.com/huggingface/datasets/pull/2602",
"diff_url": "https://github.com/huggingface/datasets/pull/2602.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2602.patch",
"merged_at": 1625646531000
} | When pickling a tokenizer within multiprocessing, check that it is an instance of transformers' PreTrainedTokenizerBase without importing transformers.
Related to huggingface/transformers#12549 and #502. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2602/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2602/timeline | null | null | true |
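One common way to perform the check described in PR #2602 above without importing `transformers` is to look the module up in `sys.modules`: if the user never imported it, the object cannot be a tokenizer from it. A sketch of that pattern (the helper name is hypothetical; the actual implementation may differ):

```python
# Hedged sketch: type-check against an optional dependency without importing it.
import sys

def is_pretrained_tokenizer(obj) -> bool:
    transformers = sys.modules.get("transformers")
    if transformers is None:
        return False  # transformers was never imported, so obj can't be one
    return isinstance(obj, transformers.PreTrainedTokenizerBase)
```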
https://api.github.com/repos/huggingface/datasets/issues/2601 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2601/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2601/comments | https://api.github.com/repos/huggingface/datasets/issues/2601/events | https://github.com/huggingface/datasets/pull/2601 | 938,096,396 | MDExOlB1bGxSZXF1ZXN0Njg0NTQyNjY5 | 2,601 | Fix `filter` with multiprocessing in case all samples are discarded | {
"login": "mxschmdt",
"id": 4904985,
"node_id": "MDQ6VXNlcjQ5MDQ5ODU=",
"avatar_url": "https://avatars.githubusercontent.com/u/4904985?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mxschmdt",
"html_url": "https://github.com/mxschmdt",
"followers_url": "https://api.github.com/users/mxschmdt/followers",
"following_url": "https://api.github.com/users/mxschmdt/following{/other_user}",
"gists_url": "https://api.github.com/users/mxschmdt/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mxschmdt/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mxschmdt/subscriptions",
"organizations_url": "https://api.github.com/users/mxschmdt/orgs",
"repos_url": "https://api.github.com/users/mxschmdt/repos",
"events_url": "https://api.github.com/users/mxschmdt/events{/privacy}",
"received_events_url": "https://api.github.com/users/mxschmdt/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,625,591,188,000 | 1,626,099,035,000 | 1,625,662,231,000 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2601",
"html_url": "https://github.com/huggingface/datasets/pull/2601",
"diff_url": "https://github.com/huggingface/datasets/pull/2601.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2601.patch",
"merged_at": 1625662231000
} | Fixes #2600
I also moved the check (added in #2566) for `num_proc` being larger than the dataset size up, so that multiprocessing is not used with a single process. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2601/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2601/timeline | null | null | true |
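A rough sketch of the `num_proc` handling described in the PR body above: cap the process count at the dataset size and skip multiprocessing entirely when only one process would remain. The helper name and return convention are hypothetical:

```python
# Hedged sketch of clamping num_proc; not the verbatim library code.
def effective_num_proc(num_proc, dataset_len):
    if num_proc is not None and num_proc > dataset_len:
        num_proc = dataset_len  # more workers than rows is never useful
    if num_proc is None or num_proc <= 1:
        return None  # signal the caller to skip multiprocessing entirely
    return num_proc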
https://api.github.com/repos/huggingface/datasets/issues/2600 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2600/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2600/comments | https://api.github.com/repos/huggingface/datasets/issues/2600/events | https://github.com/huggingface/datasets/issues/2600 | 938,086,745 | MDU6SXNzdWU5MzgwODY3NDU= | 2,600 | Crash when using multiprocessing (`num_proc` > 1) on `filter` and all samples are discarded | {
"login": "mxschmdt",
"id": 4904985,
"node_id": "MDQ6VXNlcjQ5MDQ5ODU=",
"avatar_url": "https://avatars.githubusercontent.com/u/4904985?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mxschmdt",
"html_url": "https://github.com/mxschmdt",
"followers_url": "https://api.github.com/users/mxschmdt/followers",
"following_url": "https://api.github.com/users/mxschmdt/following{/other_user}",
"gists_url": "https://api.github.com/users/mxschmdt/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mxschmdt/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mxschmdt/subscriptions",
"organizations_url": "https://api.github.com/users/mxschmdt/orgs",
"repos_url": "https://api.github.com/users/mxschmdt/repos",
"events_url": "https://api.github.com/users/mxschmdt/events{/privacy}",
"received_events_url": "https://api.github.com/users/mxschmdt/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [] | 1,625,590,405,000 | 1,625,662,231,000 | 1,625,662,231,000 | CONTRIBUTOR | null | null | null | ## Describe the bug
If `filter` is applied to a dataset using multiprocessing (`num_proc` > 1) and all sharded datasets are empty afterwards (due to all samples being discarded), the program crashes.
## Steps to reproduce the bug
```python
from datasets import Dataset
data = Dataset.from_dict({'id': [0,1]})
data.filter(lambda x: False, num_proc=2)
```
## Expected results
An empty table should be returned without crashing.
## Actual results
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/user/venv/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 185, in wrapper
out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
File "/home/user/venv/lib/python3.8/site-packages/datasets/fingerprint.py", line 397, in wrapper
out = func(self, *args, **kwargs)
File "/home/user/venv/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 2143, in filter
return self.map(
File "/home/user/venv/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 1738, in map
result = concatenate_datasets(transformed_shards)
File "/home/user/venv/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 3267, in concatenate_datasets
table = concat_tables(tables_to_concat, axis=axis)
File "/home/user/venv/lib/python3.8/site-packages/datasets/table.py", line 853, in concat_tables
return ConcatenationTable.from_tables(tables, axis=axis)
File "/home/user/venv/lib/python3.8/site-packages/datasets/table.py", line 713, in from_tables
blocks = to_blocks(tables[0])
IndexError: list index out of range
```
## Environment info
<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: 1.9.0
- Platform: Linux-5.12.11-300.fc34.x86_64-x86_64-with-glibc2.2.5
- Python version: 3.8.10
- PyArrow version: 3.0.0
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2600/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2600/timeline | null | completed | false |
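A hedged sketch of the kind of guard that avoids the crash reported in issue #2600 above: when every shard is empty after `filter`, keep one (empty but schema-carrying) shard instead of concatenating zero tables. This is illustrative only; the actual fix landed in PR #2601:

```python
# Hedged sketch of guarding concatenation when all filtered shards are empty.
from datasets import Dataset, concatenate_datasets

def safe_concatenate(shards):
    non_empty = [s for s in shards if len(s) > 0]
    if non_empty:
        return concatenate_datasets(non_empty)
    # all shards empty: return one of them so the schema is preserved
    return shards[0]

data = Dataset.from_dict({"id": [0, 1]})
shards = [data.filter(lambda x: False) for _ in range(2)]
print(len(safe_concatenate(shards)))  # 0, without crashing
```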
https://api.github.com/repos/huggingface/datasets/issues/2599 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2599/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2599/comments | https://api.github.com/repos/huggingface/datasets/issues/2599/events | https://github.com/huggingface/datasets/pull/2599 | 937,980,229 | MDExOlB1bGxSZXF1ZXN0Njg0NDQ2MTYx | 2,599 | Update processing.rst with other export formats | {
"login": "TevenLeScao",
"id": 26709476,
"node_id": "MDQ6VXNlcjI2NzA5NDc2",
"avatar_url": "https://avatars.githubusercontent.com/u/26709476?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/TevenLeScao",
"html_url": "https://github.com/TevenLeScao",
"followers_url": "https://api.github.com/users/TevenLeScao/followers",
"following_url": "https://api.github.com/users/TevenLeScao/following{/other_user}",
"gists_url": "https://api.github.com/users/TevenLeScao/gists{/gist_id}",
"starred_url": "https://api.github.com/users/TevenLeScao/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/TevenLeScao/subscriptions",
"organizations_url": "https://api.github.com/users/TevenLeScao/orgs",
"repos_url": "https://api.github.com/users/TevenLeScao/repos",
"events_url": "https://api.github.com/users/TevenLeScao/events{/privacy}",
"received_events_url": "https://api.github.com/users/TevenLeScao/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,625,583,038,000 | 1,626,099,016,000 | 1,625,645,148,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2599",
"html_url": "https://github.com/huggingface/datasets/pull/2599",
"diff_url": "https://github.com/huggingface/datasets/pull/2599.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2599.patch",
"merged_at": 1625645148000
} | Add the other supported export formats besides CSV to the docs. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2599/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2599/timeline | null | null | true |
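For reference, the exports that documentation update covers look roughly like this. Which `to_*` methods exist depends on the installed `datasets` version; `to_csv`, `to_json`, `to_pandas` and `to_dict` were available around this release:

```python
# Hedged sketch of dataset export formats; method availability is
# version-dependent, so treat this as illustrative.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
ds.to_csv("data.csv")      # CSV file
ds.to_json("data.jsonl")   # JSON Lines file
df = ds.to_pandas()        # pandas DataFrame
records = ds.to_dict()     # plain Python dict of columns
```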
https://api.github.com/repos/huggingface/datasets/issues/2598 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2598/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2598/comments | https://api.github.com/repos/huggingface/datasets/issues/2598/events | https://github.com/huggingface/datasets/issues/2598 | 937,930,632 | MDU6SXNzdWU5Mzc5MzA2MzI= | 2,598 | Unable to download omp dataset | {
"login": "erikadistefano",
"id": 25797960,
"node_id": "MDQ6VXNlcjI1Nzk3OTYw",
"avatar_url": "https://avatars.githubusercontent.com/u/25797960?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/erikadistefano",
"html_url": "https://github.com/erikadistefano",
"followers_url": "https://api.github.com/users/erikadistefano/followers",
"following_url": "https://api.github.com/users/erikadistefano/following{/other_user}",
"gists_url": "https://api.github.com/users/erikadistefano/gists{/gist_id}",
"starred_url": "https://api.github.com/users/erikadistefano/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/erikadistefano/subscriptions",
"organizations_url": "https://api.github.com/users/erikadistefano/orgs",
"repos_url": "https://api.github.com/users/erikadistefano/repos",
"events_url": "https://api.github.com/users/erikadistefano/events{/privacy}",
"received_events_url": "https://api.github.com/users/erikadistefano/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Hi @erikadistefano , thanks for reporting the issue.\r\n\r\nI have created a Pull Request that should fix it. \r\n\r\nOnce merged into master, feel free to update your installed `datasets` library (either by installing it from our GitHub master branch or waiting until our next release) to be able to load omp dataset."
] | 1,625,580,052,000 | 1,625,662,595,000 | 1,625,662,595,000 | NONE | null | null | null | ## Describe the bug
The omp dataset cannot be downloaded because of a DuplicatedKeysError
## Steps to reproduce the bug
```python
from datasets import load_dataset

omp = load_dataset('omp', 'posts_labeled')
print(omp)
```
## Expected results
This code should download the omp dataset and print the resulting dataset dictionary.
## Actual results
```
Downloading and preparing dataset omp/posts_labeled (download: 1.27 MiB, generated: 13.31 MiB, post-processed: Unknown size, total: 14.58 MiB) to /home/erika_distefano/.cache/huggingface/datasets/omp/posts_labeled/1.1.0/2fe5b067be3bff1d4588d5b0cbb9b5b22ae1b9d5b026a8ff572cd389f862735b...
0 examples [00:00, ? examples/s]2021-07-06 09:43:55.868815: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.11.0
Traceback (most recent call last):
File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/builder.py", line 990, in _prepare_split
writer.write(example, key)
File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/arrow_writer.py", line 338, in write
self.check_duplicate_keys()
File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/arrow_writer.py", line 349, in check_duplicate_keys
raise DuplicatedKeysError(key)
datasets.keyhash.DuplicatedKeysError: FAILURE TO GENERATE DATASET !
Found duplicate Key: 3326
Keys should be unique and deterministic in nature
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "hf_datasets.py", line 32, in <module>
omp = load_dataset('omp', 'posts_labeled')
File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/load.py", line 748, in load_dataset
use_auth_token=use_auth_token,
File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/builder.py", line 575, in download_and_prepare
dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/builder.py", line 652, in _download_and_prepare
self._prepare_split(split_generator, **prepare_split_kwargs)
File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/builder.py", line 992, in _prepare_split
num_examples, num_bytes = writer.finalize()
File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/arrow_writer.py", line 409, in finalize
self.check_duplicate_keys()
File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/arrow_writer.py", line 349, in check_duplicate_keys
raise DuplicatedKeysError(key)
datasets.keyhash.DuplicatedKeysError: FAILURE TO GENERATE DATASET !
Found duplicate Key: 3326
Keys should be unique and deterministic in nature
```
## Environment info
- `datasets` version: 1.8.0
- Platform: Ubuntu 18.04.4 LTS
- Python version: 3.6.9
- PyArrow version: 3.0.0
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2598/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2598/timeline | null | completed | false |
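The usual fix for a `DuplicatedKeysError` like the one above is to yield a key that is unique by construction, e.g. a running index, instead of a data field (here the key `3326` repeats). A sketch of that pattern — the field names are hypothetical, and the actual change for `omp` was made in PR #2603:

```python
# Hedged sketch of yielding unique keys in a dataset script's example generator.
def _generate_examples(rows):
    for idx, row in enumerate(rows):
        # `idx` is unique and deterministic, unlike the duplicated post id
        yield idx, {"post_id": row["post_id"], "text": row["text"]}
```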
https://api.github.com/repos/huggingface/datasets/issues/2597 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2597/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2597/comments | https://api.github.com/repos/huggingface/datasets/issues/2597/events | https://github.com/huggingface/datasets/pull/2597 | 937,917,770 | MDExOlB1bGxSZXF1ZXN0Njg0Mzk0MDIz | 2,597 | Remove redundant prepare_module | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 2851292821,
"node_id": "MDU6TGFiZWwyODUxMjkyODIx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/refactoring",
"name": "refactoring",
"color": "B67A40",
"default": false,
"description": "Restructuring existing code without changing its external behavior"
}
] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,625,579,265,000 | 1,626,099,052,000 | 1,625,662,906,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2597",
"html_url": "https://github.com/huggingface/datasets/pull/2597",
"diff_url": "https://github.com/huggingface/datasets/pull/2597.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2597.patch",
"merged_at": 1625662906000
} | I have noticed that after implementing `load_dataset_builder` (#2500), there is a redundant call to `prepare_module`. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2597/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2597/timeline | null | null | true |
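For context on the PR above, `load_dataset_builder` (added in #2500) resolves the dataset script and exposes the builder's metadata without downloading any data, which is why a second `prepare_module` call was redundant. A small usage sketch (the dataset name is arbitrary):

```python
# Inspect a dataset's metadata without downloading it.
from datasets import load_dataset_builder

builder = load_dataset_builder("ag_news")  # any dataset name works here
print(builder.info.description)
print(builder.info.features)
```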
https://api.github.com/repos/huggingface/datasets/issues/2596 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2596/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2596/comments | https://api.github.com/repos/huggingface/datasets/issues/2596/events | https://github.com/huggingface/datasets/issues/2596 | 937,598,914 | MDU6SXNzdWU5Mzc1OTg5MTQ= | 2,596 | Transformer Class on dataset | {
"login": "arita37",
"id": 18707623,
"node_id": "MDQ6VXNlcjE4NzA3NjIz",
"avatar_url": "https://avatars.githubusercontent.com/u/18707623?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/arita37",
"html_url": "https://github.com/arita37",
"followers_url": "https://api.github.com/users/arita37/followers",
"following_url": "https://api.github.com/users/arita37/following{/other_user}",
"gists_url": "https://api.github.com/users/arita37/gists{/gist_id}",
"starred_url": "https://api.github.com/users/arita37/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/arita37/subscriptions",
"organizations_url": "https://api.github.com/users/arita37/orgs",
"repos_url": "https://api.github.com/users/arita37/repos",
"events_url": "https://api.github.com/users/arita37/events{/privacy}",
"received_events_url": "https://api.github.com/users/arita37/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
}
] | open | false | null | [] | null | [
"Hi ! Do you have an example in mind that shows how this could be useful ?",
"Example:\n\nMerge 2 datasets into one datasets\n\nLabel extraction from dataset\n\ndataset(text, label)\n โ> dataset(text, newlabel)\n\nTextCleaning.\n\n\nFor image dataset, \nTransformation are easier (ie linear algebra).\n\n\n\n\n\n\n> On Jul 6, 2021, at 17:39, Quentin Lhoest ***@***.***> wrote:\n> \n> ๏ปฟ\n> Hi ! Do you have an example in mind that shows how this could be useful ?\n> \n> โ\n> You are receiving this because you authored the thread.\n> Reply to this email directly, view it on GitHub, or unsubscribe.\n",
"There are already a few transformations that you can apply on a dataset using methods like `dataset.map()`.\r\nYou can find examples in the documentation here:\r\nhttps://huggingface.co/docs/datasets/processing.html\r\n\r\nYou can merge two datasets with `concatenate_datasets()` or do label extraction with `dataset.map()` for example",
"Ok, sure.\n\nThanks for pointing on functional part.\nMy question is more\nโPhilosophicalโ/Design perspective.\n\nThere are 2 perspetive:\n Add transformation methods to \n Dataset Class\n\n\n OR Create a Transformer Class\n which operates on Dataset Class.\n\nT(Dataset) โ> Dataset\n\ndatasetnew = MyTransform.transform(dataset)\ndatasetNew.save(path)\n\n\nWhat would be the difficulty\nof implementing a Transformer Class\noperating at dataset level ?\n\n\nthanks\n\n\n\n\n\n\n\n\n\n> On Jul 6, 2021, at 22:00, Quentin Lhoest ***@***.***> wrote:\n> \n> ๏ปฟ\n> There are already a few transformations that you can apply on a dataset using methods like dataset.map().\n> You can find examples in the documentation here:\n> https://huggingface.co/docs/datasets/processing.html\n> \n> You can merge two datasets with concatenate_datasets() or do label extraction with dataset.map() for example\n> \n> โ\n> You are receiving this because you authored the thread.\n> Reply to this email directly, view it on GitHub, or unsubscribe.\n",
"I can imagine that this would be a useful API to implement processing pipelines as transforms. They could be used to perform higher level transforms compared to the atomic transforms allowed by methods like map, filter, etc.\r\n\r\nI guess if you find any transform that could be useful for text dataset processing, image dataset processing etc. we could definitely start having such transforms :)",
"Thanks for reply.\n\nWhat would be the constraints\nto have\nDataset โ> Dataset consistency ?\n\nMain issue would be\nlarger than memory dataset and\nserialization on disk.\n\nTechnically,\none still process at atomic level\nand try to wrap the full results\ninto Datasetโฆ. (!)\n\nWhat would you think ?\n\n\n\n\n\n\n\n\n> On Jul 7, 2021, at 16:51, Quentin Lhoest ***@***.***> wrote:\n> \n> ๏ปฟ\n> I can imagine that this would be a useful API to implement processing pipelines as transforms. They could be used to perform higher level transforms compared to the atomic transforms allowed by methods like map, filter, etc.\n> \n> I guess if you find any transform that could be useful for text dataset processing, image dataset processing etc. we could definitely start having such transforms :)\n> \n> โ\n> You are receiving this because you authored the thread.\n> Reply to this email directly, view it on GitHub, or unsubscribe.\n",
"We can be pretty flexible and not impose any constraints for transforms.\r\n\r\nMoreover, this library is designed to support datasets bigger than memory. The datasets are loaded from the disk via memory mapping, without filling up RAM. Even processing functions like `map` work in a batched fashion to not fill up your RAM. So this shouldn't be an issue",
"Ok thanks.\n\nBut, Dataset has various flavors.\nIn current design of Dataset,\n how the serialization on disk is done (?)\n\n\nThe main issue is serialization \nof newdataset= Transform(Dataset)\n (ie thats why am referring to Out Of memory datasetโฆ):\n\n Should be part of Transform or part of dataset ?\n\n\n\n\nMaybe, not, since the output is aimed to feed model in memory (?)\n\n\n\n\n\n\n\n\n> On Jul 7, 2021, at 18:04, Quentin Lhoest ***@***.***> wrote:\n> \n> ๏ปฟ\n> We can be pretty flexible and not impose any constraints for transforms.\n> \n> Moreover, this library is designed to support datasets bigger than memory. The datasets are loaded from the disk via memory mapping, without filling up RAM. Even processing functions like map work in a batched fashion to not fill up your RAM. So this shouldn't be an issue\n> \n> โ\n> You are receiving this because you authored the thread.\n> Reply to this email directly, view it on GitHub, or unsubscribe.\n",
"I'm not sure I understand, could you elaborate a bit more please ?\r\n\r\nEach dataset is a wrapper of a PyArrow Table that contains all the data. The table is loaded from an arrow file on the disk.\r\nWe have an ArrowWriter and ArrowReader class to write/read arrow tables on disk or in in-memory buffers."
] | 1,625,556,435,000 | 1,625,732,525,000 | null | NONE | null | null | null | Just wondering if you have any intention to create a
TransformerClass:
dataset --> dataset
and make deterministic transformations (i.e. not fit).
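For concreteness, a minimal sketch of what such a class could look like, built on the existing `dataset.map()` primitive (class and column names are purely illustrative):
```python
from datasets import Dataset

class TextCleaning:
    """Hypothetical deterministic transform: Dataset --> Dataset, with no fit step."""

    def transform(self, dataset: Dataset) -> Dataset:
        # a purely deterministic row-wise mapping, e.g. basic text cleaning
        return dataset.map(lambda example: {"text": example["text"].strip().lower()})

# new_dataset = TextCleaning().transform(dataset)
# new_dataset.save_to_disk(path)
```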
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2596/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2596/timeline | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/2595 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2595/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2595/comments | https://api.github.com/repos/huggingface/datasets/issues/2595/events | https://github.com/huggingface/datasets/issues/2595 | 937,483,120 | MDU6SXNzdWU5Mzc0ODMxMjA= | 2,595 | ModuleNotFoundError: No module named 'datasets.tasks' while importing common voice datasets | {
"login": "profsatwinder",
"id": 41314912,
"node_id": "MDQ6VXNlcjQxMzE0OTEy",
"avatar_url": "https://avatars.githubusercontent.com/u/41314912?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/profsatwinder",
"html_url": "https://github.com/profsatwinder",
"followers_url": "https://api.github.com/users/profsatwinder/followers",
"following_url": "https://api.github.com/users/profsatwinder/following{/other_user}",
"gists_url": "https://api.github.com/users/profsatwinder/gists{/gist_id}",
"starred_url": "https://api.github.com/users/profsatwinder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/profsatwinder/subscriptions",
"organizations_url": "https://api.github.com/users/profsatwinder/orgs",
"repos_url": "https://api.github.com/users/profsatwinder/repos",
"events_url": "https://api.github.com/users/profsatwinder/events{/privacy}",
"received_events_url": "https://api.github.com/users/profsatwinder/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"Hi @profsatwinder.\r\n\r\nIt looks like you are using an old version of `datasets`. Please update it with `pip install -U datasets` and indicate if the problem persists.",
"@albertvillanova Thanks for the information. I updated it to 1.9.0 and the issue is resolved. Thanks again. "
] | 1,625,541,655,000 | 1,625,551,189,000 | 1,625,551,189,000 | NONE | null | null | null | Error traceback:
```
---------------------------------------------------------------------------
ModuleNotFoundError                       Traceback (most recent call last)
<ipython-input-8-a7b592d3bca0> in <module>()
      1 from datasets import load_dataset, load_metric
      2
----> 3 common_voice_train = load_dataset("common_voice", "pa-IN", split="train+validation")
      4 common_voice_test = load_dataset("common_voice", "pa-IN", split="test")

9 frames
/root/.cache/huggingface/modules/datasets_modules/datasets/common_voice/078d412587e9efeb0ae2e574da99c31e18844c496008d53dc5c60f4159ed639b/common_voice.py in <module>()
     19
     20 import datasets
---> 21 from datasets.tasks import AutomaticSpeechRecognition
     22
     23

ModuleNotFoundError: No module named 'datasets.tasks'
```
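Per the follow-up comments, `datasets.tasks` only exists in newer releases; after upgrading with `pip install -U datasets` (1.9.0 at the time), the same import succeeds. A quick sanity check:
```python
# after upgrading datasets, the module imports and the script loads again
from datasets import load_dataset
from datasets.tasks import AutomaticSpeechRecognition  # available in datasets >= 1.9.0

common_voice_train = load_dataset("common_voice", "pa-IN", split="train+validation")
```
 | {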
"url": "https://api.github.com/repos/huggingface/datasets/issues/2595/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2595/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2594 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2594/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2594/comments | https://api.github.com/repos/huggingface/datasets/issues/2594/events | https://github.com/huggingface/datasets/pull/2594 | 937,294,772 | MDExOlB1bGxSZXF1ZXN0NjgzODc0NjIz | 2,594 | Fix BibTeX entry | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,625,509,450,000 | 1,625,547,578,000 | 1,625,547,578,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2594",
"html_url": "https://github.com/huggingface/datasets/pull/2594",
"diff_url": "https://github.com/huggingface/datasets/pull/2594.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2594.patch",
"merged_at": 1625547578000
} | Fix BibTeX entry. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2594/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2594/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2593 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2593/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2593/comments | https://api.github.com/repos/huggingface/datasets/issues/2593/events | https://github.com/huggingface/datasets/pull/2593 | 937,242,137 | MDExOlB1bGxSZXF1ZXN0NjgzODMwMjcy | 2,593 | Support pandas 1.3.0 read_csv | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,625,503,204,000 | 1,625,505,254,000 | 1,625,505,254,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2593",
"html_url": "https://github.com/huggingface/datasets/pull/2593",
"diff_url": "https://github.com/huggingface/datasets/pull/2593.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2593.patch",
"merged_at": 1625505254000
} | Workaround for this issue in pandas 1.3.0: https://github.com/pandas-dev/pandas/issues/42387
The csv reader raises an error:
```python
/usr/local/lib/python3.7/dist-packages/pandas/io/parsers/readers.py in _refine_defaults_read(dialect, delimiter, delim_whitespace, engine, sep, error_bad_lines, warn_bad_lines, on_bad_lines, names, prefix, defaults)
1304
1305 if names is not lib.no_default and prefix is not lib.no_default:
-> 1306 raise ValueError("Specified named and prefix; you can only specify one.")
1307
1308 kwds["names"] = None if names is lib.no_default else names
ValueError: Specified named and prefix; you can only specify one.
```
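A minimal sketch of the workaround idea, assuming the fix is simply to stop forwarding both keyword arguments at once (the actual patch lives in the packaged csv loader; the helper below is hypothetical):
```python
import pandas as pd

def read_csv_compat(path, **kwargs):
    # pandas 1.3.0 raises even when `names` and `prefix` are both passed
    # explicitly as None, so drop None-valued kwargs before forwarding
    kwargs = {key: value for key, value in kwargs.items() if value is not None}
    return pd.read_csv(path, **kwargs)
```
 | {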
"url": "https://api.github.com/repos/huggingface/datasets/issues/2593/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2593/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2592 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2592/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2592/comments | https://api.github.com/repos/huggingface/datasets/issues/2592/events | https://github.com/huggingface/datasets/pull/2592 | 937,060,559 | MDExOlB1bGxSZXF1ZXN0NjgzNjc2MjA4 | 2,592 | Add c4.noclean infos | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,625,489,500,000 | 1,625,490,953,000 | 1,625,490,952,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2592",
"html_url": "https://github.com/huggingface/datasets/pull/2592",
"diff_url": "https://github.com/huggingface/datasets/pull/2592.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2592.patch",
"merged_at": 1625490952000
} | Adding the data files checksums and the dataset size of the c4.noclean configuration of the C4 dataset | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2592/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2592/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2591 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2591/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2591/comments | https://api.github.com/repos/huggingface/datasets/issues/2591/events | https://github.com/huggingface/datasets/issues/2591 | 936,957,975 | MDU6SXNzdWU5MzY5NTc5NzU= | 2,591 | Cached dataset overflowing disk space | {
"login": "BirgerMoell",
"id": 1704131,
"node_id": "MDQ6VXNlcjE3MDQxMzE=",
"avatar_url": "https://avatars.githubusercontent.com/u/1704131?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/BirgerMoell",
"html_url": "https://github.com/BirgerMoell",
"followers_url": "https://api.github.com/users/BirgerMoell/followers",
"following_url": "https://api.github.com/users/BirgerMoell/following{/other_user}",
"gists_url": "https://api.github.com/users/BirgerMoell/gists{/gist_id}",
"starred_url": "https://api.github.com/users/BirgerMoell/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/BirgerMoell/subscriptions",
"organizations_url": "https://api.github.com/users/BirgerMoell/orgs",
"repos_url": "https://api.github.com/users/BirgerMoell/repos",
"events_url": "https://api.github.com/users/BirgerMoell/events{/privacy}",
"received_events_url": "https://api.github.com/users/BirgerMoell/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"Hi! I'm transferring this issue over to `datasets`",
"I'm using the datasets concatenate dataset to combine the datasets and then train.\r\ntrain_dataset = concatenate_datasets([dataset1, dataset2, common_voice_train])\r\n\r\n",
"Hi @BirgerMoell.\r\n\r\nYou have several options:\r\n- to set caching to be stored on a different path location, other than the default one (`~/.cache/huggingface/datasets`):\r\n - either setting the environment variable `HF_DATASETS_CACHE` with the path to the new cache location\r\n - or by passing it with the parameter `cache_dir` when loading each of the datasets: `dataset = load_dataset(..., cache_dir=your_new_location)`\r\n\r\n You can get all the information in the docs: https://huggingface.co/docs/datasets/loading_datasets.html#cache-directory\r\n- I wouldn't recommend disabling caching, because current implementation generates cache files anyway, although in a temporary directory and they are deleted when the session closes. See details here: https://huggingface.co/docs/datasets/processing.html#enable-or-disable-caching\r\n- You could alternatively load the datasets in streaming mode. This is a new feature which allows loading the datasets without downloading the entire files. More information here: https://huggingface.co/docs/datasets/dataset_streaming.html",
"Hi @BirgerMoell,\r\n\r\nWe are planning to add a new feature to datasets, which could be interesting in your case: Add the option to delete temporary files (decompressed files) from the cache directory (see: #2481, #2604).\r\n\r\nWe will ping you once this feature is implemented, so that the size of your cache directory will be considerably reduced."
] | 1,625,481,799,000 | 1,626,685,699,000 | 1,626,685,699,000 | CONTRIBUTOR | null | null | null | I'm training a Swedish Wav2vec2 model on a Linux GPU, and the Hugging Face cached dataset folder is completely filling up my disk space (I'm training on a dataset of around 500 GB).
The cache folder is 500 GB (and now my disk space is full).
Is there a way to toggle caching, or to store the cache on a different device? (I have another drive with 4 TB that could hold the cache files.)
This might not technically be a bug, but I was unsure, and the bug template felt like the closest fit.
```
Traceback (most recent call last):
  File "/home/birger/miniconda3/envs/wav2vec2/lib/python3.7/site-packages/multiprocess/pool.py", line 121, in worker
    result = (True, func(*args, **kwds))
  File "/home/birger/miniconda3/envs/wav2vec2/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 186, in wrapper
    out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
  File "/home/birger/miniconda3/envs/wav2vec2/lib/python3.7/site-packages/datasets/fingerprint.py", line 397, in wrapper
    out = func(self, *args, **kwargs)
  File "/home/birger/miniconda3/envs/wav2vec2/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 1983, in _map_single
    writer.finalize()
  File "/home/birger/miniconda3/envs/wav2vec2/lib/python3.7/site-packages/datasets/arrow_writer.py", line 418, in finalize
    self.pa_writer.close()
  File "pyarrow/ipc.pxi", line 402, in pyarrow.lib._CRecordBatchWriter.close
  File "pyarrow/error.pxi", line 97, in pyarrow.lib.check_status
OSError: [Errno 28] Error writing bytes to file. Detail: [errno 28] No space left on device
"""
The above exception was the direct cause of the following exception:
```
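For reference, the workaround suggested in the comments, with a placeholder path standing in for the larger drive:
```python
# redirect the datasets cache to the larger drive (the path is a placeholder);
# HF_DATASETS_CACHE must be set before `datasets` is first imported
import os
os.environ["HF_DATASETS_CACHE"] = "/mnt/bigdrive/huggingface/datasets"

from datasets import load_dataset
# or, equivalently, per call:
dataset = load_dataset("common_voice", "sv-SE", cache_dir="/mnt/bigdrive/huggingface/datasets")
```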
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2591/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2591/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2590 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2590/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2590/comments | https://api.github.com/repos/huggingface/datasets/issues/2590/events | https://github.com/huggingface/datasets/pull/2590 | 936,954,348 | MDExOlB1bGxSZXF1ZXN0NjgzNTg1MDg2 | 2,590 | Add language tags | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,625,481,597,000 | 1,625,482,728,000 | 1,625,482,728,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2590",
"html_url": "https://github.com/huggingface/datasets/pull/2590",
"diff_url": "https://github.com/huggingface/datasets/pull/2590.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2590.patch",
"merged_at": 1625482728000
} | This PR adds some missing language tags needed for ASR datasets in #2565 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2590/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2590/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2589 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2589/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2589/comments | https://api.github.com/repos/huggingface/datasets/issues/2589/events | https://github.com/huggingface/datasets/pull/2589 | 936,825,060 | MDExOlB1bGxSZXF1ZXN0NjgzNDc0OTQ0 | 2,589 | Support multilabel metrics | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [
"Hi ! Thanks for the fix :)\r\n\r\nIf I understand correctly, `OptionalSequence` doesn't have an associated arrow type that we know in advance unlike the other feature types, because it depends on the type of the examples.\r\n\r\nFor example, I tested this and it raises an error:\r\n```python\r\nimport datasets as ds\r\nimport pyarrow as pa\r\n\r\nfeatures = ds.Features({\"a\": ds.features.OptionalSequence(ds.Value(\"int32\"))})\r\nbatch = {\"a\": [[0]]}\r\n\r\nwriter = ds.ArrowWriter(features=features, stream=pa.BufferOutputStream())\r\nwriter.write_batch(batch)\r\n# ArrowInvalid: Could not convert [0] with type list: tried to convert to int\r\n```\r\nThis error happens because `features.type` is `StructType(struct<a: int32>)`.\r\n\r\nAnother way to add support for multilabel would be to have several configurations for these metrics. By default it would set the features without sequences, and for the multi label configuration it would use features with sequences. Let me know what you think",
"Hi @lhoestq, thanks for your feedback :)\r\n\r\nDefinitely, your suggested approach is simpler. I am going to refactor all my PR unless we could envision some other use cases where an OptionalSequence might be convenient, but for now I can't think of any..."
] | 1,625,473,165,000 | 1,626,099,130,000 | 1,625,733,615,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2589",
"html_url": "https://github.com/huggingface/datasets/pull/2589",
"diff_url": "https://github.com/huggingface/datasets/pull/2589.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2589.patch",
"merged_at": 1625733615000
} | Currently, multilabel metrics are not supported because `predictions` and `references` are defined as `Value("int32")`.
This PR creates a new feature type `OptionalSequence` which can act as either `Value("int32")` or `Sequence(Value("int32"))`, depending on the data passed.
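A sketch of the proposed usage, mirroring the example discussed in the comments (note that, per the thread, this approach was later refactored in favor of per-config metric features, so `OptionalSequence` should not be assumed to be part of the released API):
```python
import datasets as ds

# would accept either plain ints (single-label) or lists of ints (multilabel)
features = ds.Features({"predictions": ds.features.OptionalSequence(ds.Value("int32"))})
```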
Close #2554. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2589/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2589/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2588 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2588/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2588/comments | https://api.github.com/repos/huggingface/datasets/issues/2588/events | https://github.com/huggingface/datasets/pull/2588 | 936,795,541 | MDExOlB1bGxSZXF1ZXN0NjgzNDQ5Njky | 2,588 | Fix test_is_small_dataset | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,625,471,186,000 | 1,626,099,011,000 | 1,625,591,370,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2588",
"html_url": "https://github.com/huggingface/datasets/pull/2588",
"diff_url": "https://github.com/huggingface/datasets/pull/2588.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2588.patch",
"merged_at": 1625591370000
} | Remove the environment variable fixture `env_max_in_memory_dataset_size`. This fixture does not work because the environment variable is read in datasets.config when datasets is first loaded, and it is never reread during tests. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2588/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2588/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2587 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2587/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2587/comments | https://api.github.com/repos/huggingface/datasets/issues/2587/events | https://github.com/huggingface/datasets/pull/2587 | 936,771,339 | MDExOlB1bGxSZXF1ZXN0NjgzNDI5NjQy | 2,587 | Add aiohttp to tests extras require | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,625,469,241,000 | 1,625,475,878,000 | 1,625,475,878,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2587",
"html_url": "https://github.com/huggingface/datasets/pull/2587",
"diff_url": "https://github.com/huggingface/datasets/pull/2587.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2587.patch",
"merged_at": 1625475878000
} | Currently, none of the streaming tests are run within our CI test suite, because the streaming tests require aiohttp and it is missing from our tests extras dependencies.
Our CI test suite should be exhaustive and test all the library functionalities.
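A sketch of the kind of change, assuming the test dependencies live in a `TESTS_REQUIRE` list in `setup.py` (the surrounding entries are illustrative):
```python
# setup.py (excerpt; entries other than aiohttp are illustrative)
TESTS_REQUIRE = [
    "pytest",
    "aiohttp",  # needed so the streaming tests actually run in CI
]

EXTRAS_REQUIRE = {
    "tests": TESTS_REQUIRE,
}
```
 | {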
"url": "https://api.github.com/repos/huggingface/datasets/issues/2587/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2587/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2586 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2586/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2586/comments | https://api.github.com/repos/huggingface/datasets/issues/2586/events | https://github.com/huggingface/datasets/pull/2586 | 936,747,588 | MDExOlB1bGxSZXF1ZXN0NjgzNDEwMDU3 | 2,586 | Fix misalignment in SQuAD | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,625,467,340,000 | 1,626,099,070,000 | 1,625,663,931,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2586",
"html_url": "https://github.com/huggingface/datasets/pull/2586",
"diff_url": "https://github.com/huggingface/datasets/pull/2586.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2586.patch",
"merged_at": 1625663931000
} | Fix misalignment between:
- the answer text and
- the answer_start within the context
by keeping the original leading blank spaces in the context.
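A self-contained illustration of the bug being fixed (the context string is made up):
```python
context = "  In 1066, the Normans invaded England."  # raw context with leading spaces
answer = "1066"
answer_start = context.index(answer)  # offsets in the source data refer to the raw text

assert context[answer_start:answer_start + len(answer)] == answer   # aligned
stripped = context.strip()                  # what the old script effectively did
assert stripped[answer_start:answer_start + len(answer)] != answer  # misaligned
```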
Fix #2585. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2586/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2586/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2585 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2585/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2585/comments | https://api.github.com/repos/huggingface/datasets/issues/2585/events | https://github.com/huggingface/datasets/issues/2585 | 936,484,419 | MDU6SXNzdWU5MzY0ODQ0MTk= | 2,585 | squad_v2 dataset contains misalignment between the answer text and the context value at the answer index | {
"login": "mmajurski",
"id": 9354454,
"node_id": "MDQ6VXNlcjkzNTQ0NTQ=",
"avatar_url": "https://avatars.githubusercontent.com/u/9354454?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mmajurski",
"html_url": "https://github.com/mmajurski",
"followers_url": "https://api.github.com/users/mmajurski/followers",
"following_url": "https://api.github.com/users/mmajurski/following{/other_user}",
"gists_url": "https://api.github.com/users/mmajurski/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mmajurski/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mmajurski/subscriptions",
"organizations_url": "https://api.github.com/users/mmajurski/orgs",
"repos_url": "https://api.github.com/users/mmajurski/repos",
"events_url": "https://api.github.com/users/mmajurski/events{/privacy}",
"received_events_url": "https://api.github.com/users/mmajurski/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Hi @mmajurski, thanks for reporting this issue.\r\n\r\nIndeed this misalignment arises because the source dataset context field contains leading blank spaces (and these are counted within the answer_start), while our datasets loading script removes these leading blank spaces.\r\n\r\nI'm going to fix our script so that all leading blank spaces in the source dataset are kept, and there is no misalignment between the answer text and the answer_start within the context.",
"If you are going to be altering the data cleaning from the source Squad dataset, here is one thing to consider.\r\nThere are occasional double spaces separating words which it might be nice to get rid of. \r\n\r\nEither way, thank you."
] | 1,625,413,189,000 | 1,625,663,931,000 | 1,625,663,931,000 | NONE | null | null | null | ## Describe the bug
The built-in huggingface squad_v2 dataset that you can access via datasets.load_dataset contains a misalignment between answers['text'] and the characters in the context at the location specified by answers['answer_start'].
For example:
id = '56d1f453e7d4791d009025bd'
answers = {'text': ['Pure Land'], 'answer_start': [146]}
However, the actual text in the context at location 146 is 'ure Land,', which is an off-by-one error from the correct answer.
## Steps to reproduce the bug
```python
import datasets
def check_context_answer_alignment(example):
for a_idx in range(len(example['answers']['text'])):
# check raw dataset for answer consistency between context and answer
answer_text = example['answers']['text'][a_idx]
a_st_idx = example['answers']['answer_start'][a_idx]
a_end_idx = a_st_idx + len(example['answers']['text'][a_idx])
answer_text_from_context = example['context'][a_st_idx:a_end_idx]
if answer_text != answer_text_from_context:
#print(example['id'])
return False
return True
dataset = datasets.load_dataset('squad_v2', split='train', keep_in_memory=True)
start_len = len(dataset)
dataset = dataset.filter(check_context_answer_alignment,
num_proc=1,
keep_in_memory=True)
end_len = len(dataset)
print('{} instances contain mis-alignment between the answer text and answer index.'.format(start_len - end_len))
```
## Expected results
This code should result in 0 rows being filtered out from the dataset.
## Actual results
This filter command results in 258 rows being flagged as containing a discrepancy between the text contained within answers['text'] and the text in example['context'] at the answers['answer_start'] location.
This code will reproduce the problem and produce the following count:
"258 instances contain mis-alignment between the answer text and answer index."
## Environment info
Steps to rebuild the Conda environment:
```
# create a virtual environment to stuff all these packages into
conda create -n round8 python=3.8 -y
# activate the virtual environment
conda activate round8
# install pytorch (best done through conda to handle cuda dependencies)
conda install pytorch torchvision torchtext cudatoolkit=11.1 -c pytorch-lts -c nvidia
pip install jsonpickle transformers datasets matplotlib
```
OS: Ubuntu 20.04
Python 3.8
Result of `conda env export`:
```
name: round8
channels:
- pytorch-lts
- nvidia
- defaults
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=4.5=1_gnu
- blas=1.0=mkl
- brotlipy=0.7.0=py38h27cfd23_1003
- bzip2=1.0.8=h7b6447c_0
- ca-certificates=2021.5.25=h06a4308_1
- certifi=2021.5.30=py38h06a4308_0
- cffi=1.14.5=py38h261ae71_0
- chardet=4.0.0=py38h06a4308_1003
- cryptography=3.4.7=py38hd23ed53_0
- cudatoolkit=11.1.74=h6bb024c_0
- ffmpeg=4.2.2=h20bf706_0
- freetype=2.10.4=h5ab3b9f_0
- gmp=6.2.1=h2531618_2
- gnutls=3.6.15=he1e5248_0
- idna=2.10=pyhd3eb1b0_0
- intel-openmp=2021.2.0=h06a4308_610
- jpeg=9b=h024ee3a_2
- lame=3.100=h7b6447c_0
- lcms2=2.12=h3be6417_0
- ld_impl_linux-64=2.35.1=h7274673_9
- libffi=3.3=he6710b0_2
- libgcc-ng=9.3.0=h5101ec6_17
- libgomp=9.3.0=h5101ec6_17
- libidn2=2.3.1=h27cfd23_0
- libopus=1.3.1=h7b6447c_0
- libpng=1.6.37=hbc83047_0
- libstdcxx-ng=9.3.0=hd4cf53a_17
- libtasn1=4.16.0=h27cfd23_0
- libtiff=4.2.0=h85742a9_0
- libunistring=0.9.10=h27cfd23_0
- libuv=1.40.0=h7b6447c_0
- libvpx=1.7.0=h439df22_0
- libwebp-base=1.2.0=h27cfd23_0
- lz4-c=1.9.3=h2531618_0
- mkl=2021.2.0=h06a4308_296
- mkl-service=2.3.0=py38h27cfd23_1
- mkl_fft=1.3.0=py38h42c9631_2
- mkl_random=1.2.1=py38ha9443f7_2
- ncurses=6.2=he6710b0_1
- nettle=3.7.3=hbbd107a_1
- ninja=1.10.2=hff7bd54_1
- numpy=1.20.2=py38h2d18471_0
- numpy-base=1.20.2=py38hfae3a4d_0
- olefile=0.46=py_0
- openh264=2.1.0=hd408876_0
- openssl=1.1.1k=h27cfd23_0
- pillow=8.2.0=py38he98fc37_0
- pip=21.1.2=py38h06a4308_0
- pycparser=2.20=py_2
- pyopenssl=20.0.1=pyhd3eb1b0_1
- pysocks=1.7.1=py38h06a4308_0
- python=3.8.10=h12debd9_8
- pytorch=1.8.1=py3.8_cuda11.1_cudnn8.0.5_0
- readline=8.1=h27cfd23_0
- requests=2.25.1=pyhd3eb1b0_0
- setuptools=52.0.0=py38h06a4308_0
- six=1.16.0=pyhd3eb1b0_0
- sqlite=3.35.4=hdfb4753_0
- tk=8.6.10=hbc83047_0
- torchtext=0.9.1=py38
- torchvision=0.9.1=py38_cu111
- typing_extensions=3.7.4.3=pyha847dfd_0
- urllib3=1.26.4=pyhd3eb1b0_0
- wheel=0.36.2=pyhd3eb1b0_0
- x264=1!157.20191217=h7b6447c_0
- xz=5.2.5=h7b6447c_0
- zlib=1.2.11=h7b6447c_3
- zstd=1.4.9=haebb681_0
- pip:
- click==8.0.1
- cycler==0.10.0
- datasets==1.8.0
- dill==0.3.4
- filelock==3.0.12
- fsspec==2021.6.0
- huggingface-hub==0.0.8
- joblib==1.0.1
- jsonpickle==2.0.0
- kiwisolver==1.3.1
- matplotlib==3.4.2
- multiprocess==0.70.12.2
- packaging==20.9
- pandas==1.2.4
- pyarrow==3.0.0
- pyparsing==2.4.7
- python-dateutil==2.8.1
- pytz==2021.1
- regex==2021.4.4
- sacremoses==0.0.45
- tokenizers==0.10.3
- tqdm==4.49.0
- transformers==4.6.1
- xxhash==2.0.2
prefix: /home/mmajurski/anaconda3/envs/round8
```
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2585/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2585/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2584 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2584/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2584/comments | https://api.github.com/repos/huggingface/datasets/issues/2584/events | https://github.com/huggingface/datasets/pull/2584 | 936,049,736 | MDExOlB1bGxSZXF1ZXN0NjgyODY2Njc1 | 2,584 | wi_locness: reference latest leaderboard on codalab | {
"login": "aseifert",
"id": 4944799,
"node_id": "MDQ6VXNlcjQ5NDQ3OTk=",
"avatar_url": "https://avatars.githubusercontent.com/u/4944799?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/aseifert",
"html_url": "https://github.com/aseifert",
"followers_url": "https://api.github.com/users/aseifert/followers",
"following_url": "https://api.github.com/users/aseifert/following{/other_user}",
"gists_url": "https://api.github.com/users/aseifert/gists{/gist_id}",
"starred_url": "https://api.github.com/users/aseifert/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/aseifert/subscriptions",
"organizations_url": "https://api.github.com/users/aseifert/orgs",
"repos_url": "https://api.github.com/users/aseifert/repos",
"events_url": "https://api.github.com/users/aseifert/events{/privacy}",
"received_events_url": "https://api.github.com/users/aseifert/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,625,257,582,000 | 1,625,475,974,000 | 1,625,475,974,000 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2584",
"html_url": "https://github.com/huggingface/datasets/pull/2584",
"diff_url": "https://github.com/huggingface/datasets/pull/2584.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2584.patch",
"merged_at": 1625475974000
} | The dataset's author asked me to put this codalab link into the dataset's README. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2584/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2584/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2583 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2583/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2583/comments | https://api.github.com/repos/huggingface/datasets/issues/2583/events | https://github.com/huggingface/datasets/issues/2583 | 936,034,976 | MDU6SXNzdWU5MzYwMzQ5NzY= | 2,583 | Error iterating over IterableDataset using Torch DataLoader | {
"login": "LeenaShekhar",
"id": 12227436,
"node_id": "MDQ6VXNlcjEyMjI3NDM2",
"avatar_url": "https://avatars.githubusercontent.com/u/12227436?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/LeenaShekhar",
"html_url": "https://github.com/LeenaShekhar",
"followers_url": "https://api.github.com/users/LeenaShekhar/followers",
"following_url": "https://api.github.com/users/LeenaShekhar/following{/other_user}",
"gists_url": "https://api.github.com/users/LeenaShekhar/gists{/gist_id}",
"starred_url": "https://api.github.com/users/LeenaShekhar/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/LeenaShekhar/subscriptions",
"organizations_url": "https://api.github.com/users/LeenaShekhar/orgs",
"repos_url": "https://api.github.com/users/LeenaShekhar/repos",
"events_url": "https://api.github.com/users/LeenaShekhar/events{/privacy}",
"received_events_url": "https://api.github.com/users/LeenaShekhar/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"Hi ! This is because you first need to format the dataset for pytorch:\r\n\r\n```python\r\n>>> import torch\r\n>>> from datasets import load_dataset\r\n>>> dataset = load_dataset('oscar', \"unshuffled_deduplicated_en\", split='train', streaming=True)\r\n>>> torch_iterable_dataset = dataset.with_format(\"torch\")\r\n>>> assert isinstance(torch_iterable_dataset, torch.utils.data.IterableDataset)\r\n>>> dataloader = torch.utils.data.DataLoader(torch_iterable_dataset, batch_size=4)\r\n>>> next(iter(dataloader))\r\n{'id': tensor([0, 1, 2, 3]), 'text': ['Mtendere Village was inspired...]}\r\n```\r\n\r\nThis is because the pytorch dataloader expects a subclass of `torch.utils.data.IterableDataset`. Since you can't pass an arbitrary iterable to a pytorch dataloader, you first need to build an object that inherits from `torch.utils.data.IterableDataset` using `with_format(\"torch\")` for example.\r\n",
"Thank you for that and the example! \r\n\r\nWhat you said makes total sense; I just somehow missed that and assumed HF IterableDataset was a subclass of Torch IterableDataset. "
] | 1,625,255,758,000 | 1,626,771,885,000 | 1,625,528,903,000 | NONE | null | null | null | ## Describe the bug
I have an IterableDataset (created using streaming=True) and I am trying to create batches by passing it to the Torch DataLoader class. This throws the error pasted below. Doing the same with a Torch IterableDataset works. One thing I noticed: in the former case the dataloader.sampler is a torch.utils.data.sampler.SequentialSampler, while in the latter it is a torch.utils.data.dataloader._InfiniteConstantSampler.
I am not sure if this is how it is meant to be used, but that's what seemed reasonable to me.
## Steps to reproduce the bug
1. Does not work.
```python
>>> import torch
>>> from datasets import load_dataset
>>> dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)
>>> dataloader = torch.utils.data.DataLoader(dataset, batch_size=4)
>>> dataloader.sampler
<torch.utils.data.sampler.SequentialSampler object at 0x7f245a510208>
>>> for batch in dataloader:
... print(batch)
```
2. Works.
```python
import torch
from torch.utils.data import Dataset, IterableDataset, DataLoader
class CustomIterableDataset(IterableDataset):
'Characterizes a dataset for PyTorch'
def __init__(self, data):
'Initialization'
self.data = data
def __iter__(self):
return iter(self.data)
data = list(range(12))
dataset = CustomIterableDataset(data)
dataloader = DataLoader(dataset, batch_size=4)
print("dataloader: ", dataloader.sampler)
for batch in dataloader:
print(batch)
```
## Expected results
Batches of data with a batch size of 4. The output below is from the latter example (2); the data source is different there, so the actual data differs.
dataloader: <torch.utils.data.dataloader._InfiniteConstantSampler object at 0x7f1cc29e2c50>
tensor([0, 1, 2, 3])
tensor([4, 5, 6, 7])
tensor([ 8, 9, 10, 11])
## Actual results
<torch.utils.data.sampler.SequentialSampler object at 0x7f245a510208>
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/data/leshekha/lib/HFDatasets/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 435, in __next__
data = self._next_data()
File "/data/leshekha/lib/HFDatasets/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 474, in _next_data
index = self._next_index() # may raise StopIteration
File "/data/leshekha/lib/HFDatasets/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 427, in _next_index
return next(self._sampler_iter) # may raise StopIteration
File "/data/leshekha/lib/HFDatasets/lib/python3.6/site-packages/torch/utils/data/sampler.py", line 227, in __iter__
for idx in self.sampler:
File "/data/leshekha/lib/HFDatasets/lib/python3.6/site-packages/torch/utils/data/sampler.py", line 67, in __iter__
return iter(range(len(self.data_source)))
TypeError: object of type 'IterableDataset' has no len()
## Environment info
- `datasets` version: '1.8.1.dev0'
- Platform: Linux
- Python version: Python 3.6.8
- PyArrow version: '3.0.0'
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2583/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2583/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2582 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2582/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2582/comments | https://api.github.com/repos/huggingface/datasets/issues/2582/events | https://github.com/huggingface/datasets/pull/2582 | 935,859,104 | MDExOlB1bGxSZXF1ZXN0NjgyNzAzNzg3 | 2,582 | Add skip and take | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"@lhoestq looks good. I tried with https://huggingface.co/datasets/vblagoje/wikipedia_snippets_streamed and it worked nicely. I would add more unit tests for edge cases. What happens if the n is larger than the total number of samples? Just to make sure these cases are handled properly. ",
"Yup I'll add the tests thanks ;)\r\n\r\nMoreover, I just noticed something in your wiki snippets code. FYI you're using `++passage_counter ` at https://huggingface.co/datasets/vblagoje/wikipedia_snippets_streamed/blob/main/wikipedia_snippets_streamed.py#L102 but in python this doesn't increment the value @vblagoje ",
"Thanks @lhoestq - not easy to convert after 10+ years of Java"
] | 1,625,238,619,000 | 1,625,501,200,000 | 1,625,501,199,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2582",
"html_url": "https://github.com/huggingface/datasets/pull/2582",
"diff_url": "https://github.com/huggingface/datasets/pull/2582.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2582.patch",
"merged_at": 1625501199000
} | As discussed in https://github.com/huggingface/datasets/pull/2375#discussion_r657084544 I added the `IterableDataset.skip` and `IterableDataset.take` methods that allow basic splitting of iterable datasets.
You can create a new dataset with the first `n` examples using `IterableDataset.take()`, or you can get a dataset with the rest of the examples by skipping the first `n` examples with `IterableDataset.skip()`.
One implementation detail:
Using `take` (or `skip`) prevents future dataset shuffling from shuffling the dataset shards; otherwise the taken examples could come from other shards. In this case it only uses the shuffle buffer.
I would have loved to allow the shards of the taken examples to be shuffled anyway, but since we don't know the length of each shard in advance, we don't know which shards to take or skip.
I think this is OK though, since users can shuffle before doing take or skip. I mentioned this in the documentation.
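For reference, a minimal usage sketch of the new methods described above (the dataset name and sizes are just examples):
```python
from datasets import load_dataset

dataset = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)

# Shuffle first, so that the shards can still be shuffled (see the note above).
shuffled = dataset.shuffle(buffer_size=10_000, seed=42)

validation_head = shuffled.take(1000)  # a new dataset with the first 1000 examples
train_rest = shuffled.skip(1000)       # a new dataset with everything after them
```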
cc @vblagoje @lewtun | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2582/reactions",
"total_count": 2,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 2,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2582/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2581 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2581/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2581/comments | https://api.github.com/repos/huggingface/datasets/issues/2581/events | https://github.com/huggingface/datasets/pull/2581 | 935,783,588 | MDExOlB1bGxSZXF1ZXN0NjgyNjQwMDY4 | 2,581 | Faster search_batch for ElasticsearchIndex due to threading | {
"login": "mwrzalik",
"id": 1376337,
"node_id": "MDQ6VXNlcjEzNzYzMzc=",
"avatar_url": "https://avatars.githubusercontent.com/u/1376337?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mwrzalik",
"html_url": "https://github.com/mwrzalik",
"followers_url": "https://api.github.com/users/mwrzalik/followers",
"following_url": "https://api.github.com/users/mwrzalik/following{/other_user}",
"gists_url": "https://api.github.com/users/mwrzalik/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mwrzalik/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mwrzalik/subscriptions",
"organizations_url": "https://api.github.com/users/mwrzalik/orgs",
"repos_url": "https://api.github.com/users/mwrzalik/repos",
"events_url": "https://api.github.com/users/mwrzalik/events{/privacy}",
"received_events_url": "https://api.github.com/users/mwrzalik/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | {
"url": "https://api.github.com/repos/huggingface/datasets/milestones/6",
"html_url": "https://github.com/huggingface/datasets/milestone/6",
"labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels",
"id": 6836458,
"node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==",
"number": 6,
"title": "1.10",
"description": "Next minor release",
"creator": {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
},
"open_issues": 0,
"closed_issues": 29,
"state": "closed",
"created_at": 1623178113000,
"updated_at": 1626881809000,
"due_on": 1628146800000,
"closed_at": 1626881809000
} | [] | 1,625,233,327,000 | 1,626,099,226,000 | 1,626,083,571,000 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2581",
"html_url": "https://github.com/huggingface/datasets/pull/2581",
"diff_url": "https://github.com/huggingface/datasets/pull/2581.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2581.patch",
"merged_at": 1626083571000
} | Hey,
I think it makes sense to run `search_batch` in threads, so that Elasticsearch can serve the searches in parallel.
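A minimal sketch of the idea, assuming the official `elasticsearch` client and a standard `ThreadPoolExecutor` (the helper names and the index are hypothetical, not the exact patch):
```python
from concurrent.futures import ThreadPoolExecutor

from elasticsearch import Elasticsearch

es = Elasticsearch()  # assumes a locally running instance

def _search_one(query, index="my_index", k=10):
    # Each request blocks on network I/O and releases the GIL,
    # so Elasticsearch can serve the queries of a batch in parallel.
    return es.search(index=index, body={"query": {"match": {"text": query}}, "size": k})

def search_batch(queries, num_threads=8):
    with ThreadPoolExecutor(max_workers=num_threads) as pool:
        return list(pool.map(_search_one, queries))
```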
Cheers! | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2581/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2581/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2580 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2580/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2580/comments | https://api.github.com/repos/huggingface/datasets/issues/2580/events | https://github.com/huggingface/datasets/pull/2580 | 935,767,421 | MDExOlB1bGxSZXF1ZXN0NjgyNjI2MTkz | 2,580 | Fix Counter import | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,625,232,108,000 | 1,625,236,667,000 | 1,625,236,666,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2580",
"html_url": "https://github.com/huggingface/datasets/pull/2580",
"diff_url": "https://github.com/huggingface/datasets/pull/2580.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2580.patch",
"merged_at": 1625236666000
} | Import from `collections` instead of `typing`. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2580/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2580/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2579 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2579/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2579/comments | https://api.github.com/repos/huggingface/datasets/issues/2579/events | https://github.com/huggingface/datasets/pull/2579 | 935,486,894 | MDExOlB1bGxSZXF1ZXN0NjgyMzkyNjYx | 2,579 | Fix BibTeX entry | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,625,209,840,000 | 1,625,211,224,000 | 1,625,211,224,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2579",
"html_url": "https://github.com/huggingface/datasets/pull/2579",
"diff_url": "https://github.com/huggingface/datasets/pull/2579.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2579.patch",
"merged_at": 1625211224000
} | Add missing contributor to BibTeX entry.
cc: @abhishekkrthakur @thomwolf | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2579/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2579/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2578 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2578/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2578/comments | https://api.github.com/repos/huggingface/datasets/issues/2578/events | https://github.com/huggingface/datasets/pull/2578 | 935,187,497 | MDExOlB1bGxSZXF1ZXN0NjgyMTQ0OTY2 | 2,578 | Support Zstandard compressed files | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"> What if people want to run some tests without having zstandard ?\r\n> Usually what we do is add a decorator @require_zstandard for example\r\n\r\n@lhoestq I think I'm missing something here...\r\n\r\nTests are a *development* tool (to ensure we deliver a good quality lib), not something we offer to the end users of the lib. Users of the lib just `pip install datasets` and no tests are delivered with the lib (`tests` directory is outside the `src` code dir). \r\n\r\nOn the contrary, developers (contributors) of the lib do need to be able to run tests (TDD). And because of that, they are required to install datasets differently: `pip install -e .[dev]`, so that all required developing (and testing) dependencies are properly installed (included `zstandard`).\r\n\r\nApart from `zsatandard`, there are many other dev/test required dependencies for running tests, and we do not have a `@require_toto` for each and every of these dependencies in our tests: \r\n- `pytest` and `absl-py` (they are not dependencies in install_requires, but only in TEST_REQUIRE extras_require), \r\n- `boto3` (in test_filesystem.py), \r\n- `seqeval` (in test_metric_common.py), \r\n- `bs4` (used by eli5 and tested in test_hf_gcp.py)\r\n- ...\r\n\r\nSo IMHO, to run tests you should previously install datasets with dev or tests dependencies: either `pip install -e .[dev]` or `pip install -e .[tests]` (the latter to be used in CI testing-only part of the development cycle). And the tests should be written accordingly, assuming all tests dependencies are installed.",
"Hi !\r\nI was saying that because the other dependencies you mentioned are only required for _some_ tests. While here zstd is required for _all_ tests since it's imported in the conftest.py\r\nFeel free to keep it as it is right now, or maybe move the fixture to test_file_utils.py to allow users without zstd to run tests for their builders, dataset card etc. without issues",
"Thank you ! I think we can merge now",
"@lhoestq does this mean that the pile could have streaming support in the future? Afaik streaming doesnt support zstandard compressed type",
"> @lhoestq does this mean that the pile could have streaming support in the future? Afaik streaming doesnt support zstandard compressed type\r\n\r\njust for reference, i tried to stream one of the `.zst` files from [the pile](https://the-eye.eu/public/AI/pile/) using\r\n\r\n```python\r\ndata_files = [\"https://the-eye.eu/public/AI/pile/train/00.jsonl.zst\"]\r\nstreamed_dataset = load_dataset('json', split='train', data_files=data_files, streaming=True)\r\n```\r\n\r\nand got the following error:\r\n\r\n```\r\nUsing custom data configuration default-4e71acadc389c254\r\n---------------------------------------------------------------------------\r\nNotImplementedError Traceback (most recent call last)\r\n/tmp/ipykernel_1187680/10848115.py in <module>\r\n 1 data_files = [\"https://the-eye.eu/public/AI/pile/train/00.jsonl.zst\"]\r\n 2 \r\n----> 3 streamed_dataset = load_dataset('json', split='train', data_files=data_files, streaming=True)\r\n 4 \r\n\r\n~/miniconda3/envs/hf/lib/python3.8/site-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, script_version, use_auth_token, task, streaming, **config_kwargs)\r\n 835 # this extends the open and os.path.join functions for data streaming\r\n 836 extend_module_for_streaming(builder_instance.__module__, use_auth_token=use_auth_token)\r\n--> 837 return builder_instance.as_streaming_dataset(\r\n 838 split=split,\r\n 839 use_auth_token=use_auth_token,\r\n\r\n~/miniconda3/envs/hf/lib/python3.8/site-packages/datasets/builder.py in as_streaming_dataset(self, split, base_path, use_auth_token)\r\n 922 data_dir=self.config.data_dir,\r\n 923 )\r\n--> 924 splits_generators = {sg.name: sg for sg in self._split_generators(dl_manager)}\r\n 925 # By default, return all splits\r\n 926 if split is None:\r\n\r\n~/miniconda3/envs/hf/lib/python3.8/site-packages/datasets/packaged_modules/json/json.py in _split_generators(self, dl_manager)\r\n 50 if not self.config.data_files:\r\n 51 raise ValueError(f\"At least one data file must be specified, but got data_files={self.config.data_files}\")\r\n---> 52 data_files = dl_manager.download_and_extract(self.config.data_files)\r\n 53 if isinstance(data_files, (str, list, tuple)):\r\n 54 files = data_files\r\n\r\n~/miniconda3/envs/hf/lib/python3.8/site-packages/datasets/utils/streaming_download_manager.py in download_and_extract(self, url_or_urls)\r\n 140 \r\n 141 def download_and_extract(self, url_or_urls):\r\n--> 142 return self.extract(self.download(url_or_urls))\r\n\r\n~/miniconda3/envs/hf/lib/python3.8/site-packages/datasets/utils/streaming_download_manager.py in extract(self, path_or_paths)\r\n 115 \r\n 116 def extract(self, path_or_paths):\r\n--> 117 urlpaths = map_nested(self._extract, path_or_paths, map_tuple=True)\r\n 118 return urlpaths\r\n 119 \r\n\r\n~/miniconda3/envs/hf/lib/python3.8/site-packages/datasets/utils/py_utils.py in map_nested(function, data_struct, dict_only, map_list, map_tuple, map_numpy, num_proc, types)\r\n 202 num_proc = 1\r\n 203 if num_proc <= 1 or len(iterable) <= num_proc:\r\n--> 204 mapped = [\r\n 205 _single_map_nested((function, obj, types, None, True))\r\n 206 for obj in utils.tqdm(iterable, disable=disable_tqdm)\r\n\r\n~/miniconda3/envs/hf/lib/python3.8/site-packages/datasets/utils/py_utils.py in <listcomp>(.0)\r\n 203 if num_proc <= 1 or len(iterable) <= num_proc:\r\n 204 mapped = [\r\n--> 205 _single_map_nested((function, obj, types, None, True))\r\n 206 for 
obj in utils.tqdm(iterable, disable=disable_tqdm)\r\n 207 ]\r\n\r\n~/miniconda3/envs/hf/lib/python3.8/site-packages/datasets/utils/py_utils.py in _single_map_nested(args)\r\n 141 # Singleton first to spare some computation\r\n 142 if not isinstance(data_struct, dict) and not isinstance(data_struct, types):\r\n--> 143 return function(data_struct)\r\n 144 \r\n 145 # Reduce logging to keep things readable in multiprocessing with tqdm\r\n\r\n~/miniconda3/envs/hf/lib/python3.8/site-packages/datasets/utils/streaming_download_manager.py in _extract(self, urlpath)\r\n 119 \r\n 120 def _extract(self, urlpath):\r\n--> 121 protocol = self._get_extraction_protocol(urlpath)\r\n 122 if protocol is None:\r\n 123 # no extraction\r\n\r\n~/miniconda3/envs/hf/lib/python3.8/site-packages/datasets/utils/streaming_download_manager.py in _get_extraction_protocol(self, urlpath)\r\n 137 elif path.endswith(\".zip\"):\r\n 138 return \"zip\"\r\n--> 139 raise NotImplementedError(f\"Extraction protocol for file at {urlpath} is not implemented yet\")\r\n 140 \r\n 141 def download_and_extract(self, url_or_urls):\r\n\r\nNotImplementedError: Extraction protocol for file at https://the-eye.eu/public/AI/pile/train/00.jsonl.zst is not implemented yet\r\n```\r\n\r\ni'm not sure whether @Shashi456 is referring to a fundamental limitation with \"streaming\" zstandard compression files or simply that we need to support the protocol in the streaming api of `datasets`\r\n\r\n",
"@lewtun our streaming mode patches the Python `open` function. I could have a look tomorrow if it is easily implementable for this case.",
"@lewtun, I have tested and yes, it is easily implementable. I've created a draft Pull Request with an implementation proposal: #2786.",
"thanks a lot @albertvillanova - now i can stream the pile :)"
] | 1,625,170,954,000 | 1,628,693,184,000 | 1,625,482,227,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2578",
"html_url": "https://github.com/huggingface/datasets/pull/2578",
"diff_url": "https://github.com/huggingface/datasets/pull/2578.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2578.patch",
"merged_at": 1625482227000
} | Close #2572.
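For context, a minimal sketch of reading such a file with the `zstandard` package, which this support builds on (the file name is hypothetical):
```python
import io
import json

import zstandard as zstd

# Stream-decompress a .jsonl.zst file line by line without
# materializing the whole decompressed payload in memory.
with open("data.jsonl.zst", "rb") as fh:
    reader = zstd.ZstdDecompressor().stream_reader(fh)
    for line in io.TextIOWrapper(reader, encoding="utf-8"):
        record = json.loads(line)
```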
cc: @thomwolf | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2578/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2578/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2576 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2576/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2576/comments | https://api.github.com/repos/huggingface/datasets/issues/2576/events | https://github.com/huggingface/datasets/pull/2576 | 934,986,761 | MDExOlB1bGxSZXF1ZXN0NjgxOTc5MTA1 | 2,576 | Add mC4 | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,625,154,685,000 | 1,625,237,456,000 | 1,625,237,455,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2576",
"html_url": "https://github.com/huggingface/datasets/pull/2576",
"diff_url": "https://github.com/huggingface/datasets/pull/2576.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2576.patch",
"merged_at": 1625237455000
} | AllenAI is now hosting the processed C4 and mC4 datasets in this repo: https://huggingface.co/datasets/allenai/c4
Thanks a lot to them!
In this PR I added the mC4 dataset builder. It supports 108 languages.
You can load it with
```python
from datasets import load_dataset
en_mc4 = load_dataset("mc4", "en")
fr_mc4 = load_dataset("mc4", "fr")
en_and_fr_mc4 = load_dataset("mc4", languages=["en", "fr"])
```
It also supports streaming, if you don't want to download hundreds of GB of data:
```python
en_mc4 = load_dataset("mc4", "en", streaming=True)
```
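A streamed split can then be iterated lazily, for example (assuming no split was selected above, so the result is indexed by split name; the `text` column is part of the C4/mC4 schema):
```python
first = next(iter(en_mc4["train"]))
print(first["text"][:100])  # nothing is downloaded up front
```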
Regarding the dataset_infos.json, I will add them once I have them.
Also, we can work on the dataset card that will be at https://huggingface.co/datasets/mc4.
For now I just added a link to https://huggingface.co/datasets/allenai/c4 as well as a few sections. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2576/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 1,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2576/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2575 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2575/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2575/comments | https://api.github.com/repos/huggingface/datasets/issues/2575/events | https://github.com/huggingface/datasets/pull/2575 | 934,876,496 | MDExOlB1bGxSZXF1ZXN0NjgxODg0OTgy | 2,575 | Add C4 | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,625,147,888,000 | 1,625,237,423,000 | 1,625,237,423,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2575",
"html_url": "https://github.com/huggingface/datasets/pull/2575",
"diff_url": "https://github.com/huggingface/datasets/pull/2575.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2575.patch",
"merged_at": 1625237423000
} | The old code for the C4 dataset generated it with Apache Beam, as in TensorFlow Datasets.
However, AllenAI is now hosting the processed C4 dataset in this repo: https://huggingface.co/datasets/allenai/c4
Thanks a lot to them for their amazing work!
In this PR I changed the script to download and prepare the data directly from this repo.
It has 4 variants: en, en.noblocklist, en.noclean, realnewslike
You can load it with
```python
from datasets import load_dataset
c4 = load_dataset("c4", "en")
```
It also supports streaming, if you don't want to download hundreds of GB of data:
```python
c4 = load_dataset("c4", "en", streaming=True)
```
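The other configurations listed above load the same way; for instance, a quick peek at the realnewslike variant:
```python
realnews = load_dataset("c4", "realnewslike", streaming=True)
print(next(iter(realnews["train"])))  # inspect one example without downloading everything
```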
Regarding the dataset_infos.json, I haven't added the infos for en.noclean. I will add them once I have them.
Also, we can work on the dataset card at https://huggingface.co/datasets/c4.
For now I just added a link to https://huggingface.co/datasets/allenai/c4 as well as a few sections. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2575/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2575/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2574 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2574/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2574/comments | https://api.github.com/repos/huggingface/datasets/issues/2574/events | https://github.com/huggingface/datasets/pull/2574 | 934,632,378 | MDExOlB1bGxSZXF1ZXN0NjgxNjczMzYy | 2,574 | Add streaming in load a dataset docs | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,625,131,973,000 | 1,625,148,742,000 | 1,625,148,741,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2574",
"html_url": "https://github.com/huggingface/datasets/pull/2574",
"diff_url": "https://github.com/huggingface/datasets/pull/2574.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2574.patch",
"merged_at": 1625148741000
} | Mention dataset streaming on the "loading a dataset" page of the documentation | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2574/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2574/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2573 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2573/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2573/comments | https://api.github.com/repos/huggingface/datasets/issues/2573/events | https://github.com/huggingface/datasets/issues/2573 | 934,584,745 | MDU6SXNzdWU5MzQ1ODQ3NDU= | 2,573 | Finding right block-size with JSON loading difficult for user | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | null | [] | null | [
"This was actually a second error arising from a too small block-size in the json reader.\r\n\r\nFinding the right block size is difficult for the layman user"
] | 1,625,129,315,000 | 1,625,166,653,000 | null | MEMBER | null | null | null | As reported by @thomwolf, while loading a JSON Lines file with the "json" loading script, he gets:
> json.decoder.JSONDecodeError: Extra data: line 2 column 1 (char 383)
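For context, the "json" script parses files in fixed-size blocks via pyarrow, and a block boundary that falls inside a single JSON document can truncate it and trigger this kind of error. A minimal sketch of the underlying reader with a larger block size (the file name is hypothetical):
```python
import pyarrow.json as paj

# A bigger block size makes it less likely that one JSON document
# is split across two read blocks.
read_options = paj.ReadOptions(block_size=1 << 25)  # 32 MiB
table = paj.read_json("data.jsonl", read_options=read_options)
```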
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2573/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2573/timeline | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/2572 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2572/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2572/comments | https://api.github.com/repos/huggingface/datasets/issues/2572/events | https://github.com/huggingface/datasets/issues/2572 | 934,573,767 | MDU6SXNzdWU5MzQ1NzM3Njc= | 2,572 | Support Zstandard compressed files | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
}
] | closed | false | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
}
] | null | [] | 1,625,128,624,000 | 1,625,482,227,000 | 1,625,482,227,000 | MEMBER | null | null | null | Add support for Zstandard compressed files: https://facebook.github.io/zstd/ | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2572/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2572/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2571 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2571/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2571/comments | https://api.github.com/repos/huggingface/datasets/issues/2571/events | https://github.com/huggingface/datasets/pull/2571 | 933,791,018 | MDExOlB1bGxSZXF1ZXN0NjgwOTQ2NzQ1 | 2,571 | Filter expected warning log from transformers | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"I think the failing test has nothing to do with my PR..."
] | 1,625,064,499,000 | 1,625,198,897,000 | 1,625,198,897,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2571",
"html_url": "https://github.com/huggingface/datasets/pull/2571",
"diff_url": "https://github.com/huggingface/datasets/pull/2571.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2571.patch",
"merged_at": 1625198896000
} | Close #2569. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2571/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2571/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2570 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2570/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2570/comments | https://api.github.com/repos/huggingface/datasets/issues/2570/events | https://github.com/huggingface/datasets/pull/2570 | 933,402,521 | MDExOlB1bGxSZXF1ZXN0NjgwNjEzNzc0 | 2,570 | Minor fix docs format for bertscore | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,625,038,932,000 | 1,625,067,061,000 | 1,625,067,061,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2570",
"html_url": "https://github.com/huggingface/datasets/pull/2570",
"diff_url": "https://github.com/huggingface/datasets/pull/2570.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2570.patch",
"merged_at": 1625067061000
} | Minor fix docs format for bertscore:
- link to README
- format of KWARGS_DESCRIPTION | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2570/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2570/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2569 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2569/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2569/comments | https://api.github.com/repos/huggingface/datasets/issues/2569/events | https://github.com/huggingface/datasets/issues/2569 | 933,015,797 | MDU6SXNzdWU5MzMwMTU3OTc= | 2,569 | Weights of model checkpoint not initialized for RobertaModel for Bertscore | {
"login": "suzyahyah",
"id": 2980993,
"node_id": "MDQ6VXNlcjI5ODA5OTM=",
"avatar_url": "https://avatars.githubusercontent.com/u/2980993?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/suzyahyah",
"html_url": "https://github.com/suzyahyah",
"followers_url": "https://api.github.com/users/suzyahyah/followers",
"following_url": "https://api.github.com/users/suzyahyah/following{/other_user}",
"gists_url": "https://api.github.com/users/suzyahyah/gists{/gist_id}",
"starred_url": "https://api.github.com/users/suzyahyah/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/suzyahyah/subscriptions",
"organizations_url": "https://api.github.com/users/suzyahyah/orgs",
"repos_url": "https://api.github.com/users/suzyahyah/repos",
"events_url": "https://api.github.com/users/suzyahyah/events{/privacy}",
"received_events_url": "https://api.github.com/users/suzyahyah/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Hi @suzyahyah, thanks for reporting.\r\n\r\nThe message you get is indeed not an error message, but a warning coming from Hugging Face `transformers`. The complete warning message is:\r\n```\r\nSome weights of the model checkpoint at roberta-large were not used when initializing RobertaModel: ['lm_head.decoder.weight', 'lm_head.dense.weight', 'lm_head.dense.bias', 'lm_head.layer_norm.bias', 'lm_head.bias', 'lm_head.layer_norm.weight']\r\n- This IS expected if you are initializing RobertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\r\n- This IS NOT expected if you are initializing RobertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\r\n```\r\n\r\nIn this case, this behavior IS expected and you can safely ignore the warning message.\r\n\r\nThe reason is that you are just using RoBERTa to get the contextual embeddings of the input sentences/tokens, thus leaving away its head layer, whose weights are ignored.\r\n\r\nFeel free to reopen this issue if you need further explanations.",
"Hi @suzyahyah, I have created a Pull Request to filter out that warning message in this specific case, since the behavior is as expected and the warning message can only cause confusion for users (as in your case)."
] | 1,624,992,923,000 | 1,625,123,339,000 | 1,625,038,549,000 | NONE | null | null | null | When applying bertscore out of the box,
```Some weights of the model checkpoint at roberta-large were not used when initializing RobertaModel: ['lm_head.decoder.weight', 'lm_head.bias', 'lm_head.dense.bias', 'lm_head.layer_norm.bias', 'lm_head.dense.weight', 'lm_head.layer_norm.weight']```
Following the typical usage from https://huggingface.co/docs/datasets/loading_metrics.html
```python
from datasets import load_metric
metric = load_metric('bertscore')
# Example of typical usage
for batch in dataset:
inputs, references = batch
predictions = model(inputs)
metric.add_batch(predictions=predictions, references=references)
score = metric.compute(lang="en")
#score = metric.compute(model_type="roberta-large") # gives the same error
```
I am concerned about this because my usage shouldn't require any further fine-tuning, and most people would expect to use BertScore out of the box. I realised the huggingface code is a wrapper around https://github.com/Tiiiger/bert_score, but that repo relies on the model code and weights from the huggingface repo anyway.
## Environment info
- `datasets` version: 1.7.0
- Platform: Linux-5.4.0-1041-aws-x86_64-with-glibc2.27
- Python version: 3.9.5
- PyArrow version: 3.0.0
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2569/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2569/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2568 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2568/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2568/comments | https://api.github.com/repos/huggingface/datasets/issues/2568/events | https://github.com/huggingface/datasets/pull/2568 | 932,934,795 | MDExOlB1bGxSZXF1ZXN0NjgwMjE5MDU2 | 2,568 | Add interleave_datasets for map-style datasets | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,624,987,164,000 | 1,625,132,014,000 | 1,625,132,013,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2568",
"html_url": "https://github.com/huggingface/datasets/pull/2568",
"diff_url": "https://github.com/huggingface/datasets/pull/2568.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2568.patch",
"merged_at": 1625132012000
} | ### Add interleave_datasets for map-style datasets
Add support for map-style datasets (i.e. `Dataset` objects) in `interleave_datasets`.
Previously, it only supported iterable datasets (i.e. `IterableDataset` objects).
### Implementation details
It works by concatenating the datasets and then re-ordering the indices to make the new dataset.
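For illustration, a minimal sketch of the resulting behavior with the default alternating strategy:
```python
from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
d2 = Dataset.from_dict({"text": ["x", "y", "z"]})

mixed = interleave_datasets([d1, d2])  # alternate between the two map-style datasets
print(mixed["text"])  # ['a', 'x', 'b', 'y', 'c', 'z']
```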
### TODO
- [x] tests
- [x] docs
Close #2563 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2568/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2568/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2567 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2567/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2567/comments | https://api.github.com/repos/huggingface/datasets/issues/2567/events | https://github.com/huggingface/datasets/pull/2567 | 932,933,536 | MDExOlB1bGxSZXF1ZXN0NjgwMjE3OTY3 | 2,567 | Add ASR task and new languages to resources | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,624,987,081,000 | 1,625,132,543,000 | 1,625,132,529,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2567",
"html_url": "https://github.com/huggingface/datasets/pull/2567",
"diff_url": "https://github.com/huggingface/datasets/pull/2567.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2567.patch",
"merged_at": 1625132529000
} | This PR adds a new `automatic-speech-recognition` task to the list of supported tasks in `tasks.json` and also includes a few new languages missing from `common_voice`.
Note: I used the [Papers with Code list](https://www.paperswithcode.com/area/speech/speech-recognition) as inspiration for the ASR subtasks | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2567/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2567/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2566 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2566/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2566/comments | https://api.github.com/repos/huggingface/datasets/issues/2566/events | https://github.com/huggingface/datasets/pull/2566 | 932,804,725 | MDExOlB1bGxSZXF1ZXN0NjgwMTA2NzM0 | 2,566 | fix Dataset.map when num_procs > num rows | {
"login": "connor-mccarthy",
"id": 55268212,
"node_id": "MDQ6VXNlcjU1MjY4MjEy",
"avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/connor-mccarthy",
"html_url": "https://github.com/connor-mccarthy",
"followers_url": "https://api.github.com/users/connor-mccarthy/followers",
"following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}",
"gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}",
"starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions",
"organizations_url": "https://api.github.com/users/connor-mccarthy/orgs",
"repos_url": "https://api.github.com/users/connor-mccarthy/repos",
"events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}",
"received_events_url": "https://api.github.com/users/connor-mccarthy/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,624,979,227,000 | 1,625,130,673,000 | 1,625,130,673,000 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2566",
"html_url": "https://github.com/huggingface/datasets/pull/2566",
"diff_url": "https://github.com/huggingface/datasets/pull/2566.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2566.patch",
"merged_at": 1625130673000
} | closes #2470
## Testing notes
To run updated tests:
```sh
pytest tests/test_arrow_dataset.py -k "BaseDatasetTest and test_map_multiprocessing" -s
```
With Python code (to view the warning):
```python
from datasets import Dataset
dataset = Dataset.from_dict({"x": ["sample"]})
print(len(dataset))
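# the dataset has a single row, so num_proc=10 exceeds len(dataset) and should trigger the warning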
dataset.map(lambda x: x, num_proc=10)
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2566/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2566/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2565 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2565/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2565/comments | https://api.github.com/repos/huggingface/datasets/issues/2565/events | https://github.com/huggingface/datasets/pull/2565 | 932,445,439 | MDExOlB1bGxSZXF1ZXN0Njc5Nzg3NTI4 | 2,565 | Inject templates for ASR datasets | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"Wait until #2567 is merged so we can benefit from the tagger :)",
"thanks for the feedback @lhoestq! i've added the new language codes and this PR should be ready for a merge :)"
] | 1,624,960,921,000 | 1,625,495,186,000 | 1,625,495,186,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2565",
"html_url": "https://github.com/huggingface/datasets/pull/2565",
"diff_url": "https://github.com/huggingface/datasets/pull/2565.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2565.patch",
"merged_at": 1625495186000
} | This PR adds ASR templates for 5 of the most common speech datasets on the Hub, where "common" is defined by the number of models trained on them.
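For context, injecting a template boils down to attaching something like the following to a dataset's info (class and column names are assumptions based on the task-template API of the time; treat this as a sketch, not the exact diff):
```python
from datasets.tasks import AutomaticSpeechRecognition

# one template per dataset, pointing at the audio and transcript columns
task_templates = [
    AutomaticSpeechRecognition(
        audio_file_path_column="path",    # assumed column name
        transcription_column="sentence",  # assumed column name
    )
]
```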
I also fixed a bunch of the tags in the READMEs. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2565/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2565/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2564 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2564/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2564/comments | https://api.github.com/repos/huggingface/datasets/issues/2564/events | https://github.com/huggingface/datasets/issues/2564 | 932,389,639 | MDU6SXNzdWU5MzIzODk2Mzk= | 2,564 | concatenate_datasets for iterable datasets | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | open | false | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"It is probably worth noting here that the [documentation](https://huggingface.co/docs/datasets/process#concatenate) is misleading (indicating that it does work for IterableDatasets):\r\n\r\n> You can also mix several datasets together by taking alternating examples from each one to create a new dataset. This is known as interleaving, and you can use it with [interleave_datasets()](https://huggingface.co/docs/datasets/v2.2.1/en/package_reference/main_classes#datasets.interleave_datasets). **Both [interleave_datasets()](https://huggingface.co/docs/datasets/v2.2.1/en/package_reference/main_classes#datasets.interleave_datasets) and [concatenate_datasets()](https://huggingface.co/docs/datasets/v2.2.1/en/package_reference/main_classes#datasets.concatenate_datasets) will work with regular [Dataset](https://huggingface.co/docs/datasets/v2.2.1/en/package_reference/main_classes#datasets.Dataset) and [IterableDataset](https://huggingface.co/docs/datasets/v2.2.1/en/package_reference/main_classes#datasets.IterableDataset) objects**. Refer to the [Stream](https://huggingface.co/docs/datasets/stream#interleave) section for an example of how itโs used. ",
"Thanks for the heads up, I'll fix that"
] | 1,624,957,181,000 | 1,652,717,997,000 | null | MEMBER | null | null | null | Currently `concatenate_datasets` only works for map-style `Dataset`.
It would be nice to have it work for `IterableDataset` objects as well: it would simply chain the iterables of the iterable datasets.
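A minimal sketch of the idea with plain Python iterables (not the actual `datasets` internals):
```python
from itertools import chain

def concatenate_iterable_datasets(iterable_datasets):
    # iterating an IterableDataset yields examples, so chaining the datasets
    # one after the other yields the concatenation
    yield from chain.from_iterable(iterable_datasets)
```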
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2564/reactions",
"total_count": 3,
"+1": 3,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2564/timeline | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/2563 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2563/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2563/comments | https://api.github.com/repos/huggingface/datasets/issues/2563/events | https://github.com/huggingface/datasets/issues/2563 | 932,387,639 | MDU6SXNzdWU5MzIzODc2Mzk= | 2,563 | interleave_datasets for map-style datasets | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
}
] | null | [] | 1,624,957,044,000 | 1,625,132,013,000 | 1,625,132,013,000 | MEMBER | null | null | null | Currently the `interleave_datasets` functions only works for `IterableDataset`.
Let's make it work for map-style `Dataset` objects as well. It would work the same way: either alternate between the datasets in order, or pick from them at random given probabilities specified by the user (see the sketch below).
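A rough sketch of the round-robin case (not the actual implementation; the probability-based variant would sample the next dataset index instead of cycling):
```python
from datasets import Dataset

def interleave_map_style(dsets):
    # assumes all datasets share the same columns
    n = min(len(d) for d in dsets)  # stop once the smallest dataset is exhausted
    rows = [d[i] for i in range(n) for d in dsets]  # alternate between datasets in order
    return Dataset.from_dict({key: [row[key] for row in rows] for key in rows[0]})
```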
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2563/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2563/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2562 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2562/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2562/comments | https://api.github.com/repos/huggingface/datasets/issues/2562/events | https://github.com/huggingface/datasets/pull/2562 | 932,333,436 | MDExOlB1bGxSZXF1ZXN0Njc5NjkyMjQ2 | 2,562 | Minor fix in loading metrics docs | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,624,953,311,000 | 1,624,987,282,000 | 1,624,987,282,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2562",
"html_url": "https://github.com/huggingface/datasets/pull/2562",
"diff_url": "https://github.com/huggingface/datasets/pull/2562.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2562.patch",
"merged_at": 1624987282000
} | Make some minor fixes in "Loading metrics" docs. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2562/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2562/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2561 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2561/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2561/comments | https://api.github.com/repos/huggingface/datasets/issues/2561/events | https://github.com/huggingface/datasets/issues/2561 | 932,321,725 | MDU6SXNzdWU5MzIzMjE3MjU= | 2,561 | Existing cache for local dataset builder file updates is ignored with `ignore_verifications=True` | {
"login": "apsdehal",
"id": 3616806,
"node_id": "MDQ6VXNlcjM2MTY4MDY=",
"avatar_url": "https://avatars.githubusercontent.com/u/3616806?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/apsdehal",
"html_url": "https://github.com/apsdehal",
"followers_url": "https://api.github.com/users/apsdehal/followers",
"following_url": "https://api.github.com/users/apsdehal/following{/other_user}",
"gists_url": "https://api.github.com/users/apsdehal/gists{/gist_id}",
"starred_url": "https://api.github.com/users/apsdehal/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/apsdehal/subscriptions",
"organizations_url": "https://api.github.com/users/apsdehal/orgs",
"repos_url": "https://api.github.com/users/apsdehal/repos",
"events_url": "https://api.github.com/users/apsdehal/events{/privacy}",
"received_events_url": "https://api.github.com/users/apsdehal/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | null | [] | null | [
"Hi ! I just tried to reproduce what you said:\r\n- create a local builder class\r\n- use `load_dataset`\r\n- update the builder class code\r\n- use `load_dataset` again (with or without `ignore_verifications=True`)\r\nAnd it creates a new cache, as expected.\r\n\r\nWhat modifications did you do to your builder's code ?",
"Hi @lhoestq. Thanks for your reply. I just did minor modifications for which it should not regenerate cache (for e.g. Adding a print statement). Overall, regardless of cache miss, there should be an explicit option to allow reuse of existing cache if author knows cache shouldn't be affected.",
"The cache is based on the hash of the dataset builder's code, so changing the code makes it recompute the cache.\r\n\r\nYou could still rename the cache directory of your previous computation to the new expected cache directory if you want to avoid having to recompute it and if you're sure that it would generate the exact same result.\r\n\r\nThe verifications are data integrity verifications: it checks the checksums of the downloaded files, as well as the size of the generated splits.",
"Hi @apsdehal,\r\n\r\nIf you decide to follow @lhoestq's suggestion to rename the cache directory of your previous computation to the new expected cache directory, you can do the following to get the name of the new expected cache directory once #2500 is merged:\r\n```python\r\nfrom datasets import load_dataset_builder\r\ndataset_builder = load_dataset_builder(\"path/to/your/dataset\")\r\nprint(dataset_builder.cache_dir)\r\n```\r\n\r\nThis way, you don't have to recompute the hash of the dataset script yourself each time you modify the script."
] | 1,624,952,583,000 | 1,625,057,724,000 | null | MEMBER | null | null | null | ## Describe the bug
If I have a local file defining a dataset builder class and I load it using `load_dataset`, the existing cache is ignored whenever the file is updated, even with `ignore_verifications=True`. This slows down debugging and cache generation for very large datasets.
## Steps to reproduce the bug
- Create a local dataset builder class
- load the local builder class file using `load_dataset` and let the cache build
- update the file's content
- The cache is rebuilt (see the sketch below).
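A minimal sketch of the flow (the script path is hypothetical):
```python
from datasets import load_dataset

ds = load_dataset("./my_dataset/my_dataset.py")  # first call builds the cache

# ...make a trivial edit to the script (e.g. add a print statement), then:
ds = load_dataset("./my_dataset/my_dataset.py", ignore_verifications=True)
# a new cache is computed even though the generated data is identical
```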
## Expected results
With `ignore_verifications=True`, `load_dataset` should pick up the existing cache.
## Actual results
Creates new cache.
## Environment info
<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: 1.8.0
- Platform: Linux-5.4.0-52-generic-x86_64-with-debian-bullseye-sid
- Python version: 3.7.7
- PyArrow version: 3.0.0
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2561/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2561/timeline | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/2560 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2560/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2560/comments | https://api.github.com/repos/huggingface/datasets/issues/2560/events | https://github.com/huggingface/datasets/pull/2560 | 932,143,634 | MDExOlB1bGxSZXF1ZXN0Njc5NTMyODk4 | 2,560 | fix Dataset.map when num_procs > num rows | {
"login": "connor-mccarthy",
"id": 55268212,
"node_id": "MDQ6VXNlcjU1MjY4MjEy",
"avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/connor-mccarthy",
"html_url": "https://github.com/connor-mccarthy",
"followers_url": "https://api.github.com/users/connor-mccarthy/followers",
"following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}",
"gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}",
"starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions",
"organizations_url": "https://api.github.com/users/connor-mccarthy/orgs",
"repos_url": "https://api.github.com/users/connor-mccarthy/repos",
"events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}",
"received_events_url": "https://api.github.com/users/connor-mccarthy/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"Hi ! Thanks for fixing this :)\r\n\r\nLooks like you have tons of changes due to code formatting.\r\nWe're using `black` for this, with a custom line length. To run our code formatting, you just need to run\r\n```\r\nmake style\r\n```\r\n\r\nThen for the windows error in the CI, I'm looking into it. It's probably just a file that isn't properly closed",
"CI is all green now ! Thanks :)\r\n\r\nThere are still many code formatting changes in your PR - probably due to the first commit you did.\r\nTo avoid conflicts with future PRs it would be nice to only have the changes related to the `num_proc` warning, and not have all those code formatting changes,\r\n\r\nCould you try remove those code formatting changes ?\r\n\r\nIf it's easier for you, you can make a new branch from `master` if needed",
"Thanks, @lhoestq! Apologies for the half-baked commits yesterday! I wasnโt able to step back in to resolve those CI issues until this morning.\r\n\r\nAlso, Iโm surprised that `make style` isnโt resolving the formatting changes. Iโm a bit stumped on that, so Iโm going to re-apply on a new branch and open a PR as you suggested."
] | 1,624,933,451,000 | 1,624,978,818,000 | 1,624,978,411,000 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2560",
"html_url": "https://github.com/huggingface/datasets/pull/2560",
"diff_url": "https://github.com/huggingface/datasets/pull/2560.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2560.patch",
"merged_at": null
} | closes #2470
## Testing notes
To run updated tests:
```sh
pytest tests/test_arrow_dataset.py -k "BaseDatasetTest and test_map_multiprocessing" -s
```
With Python code (to view the warning):
```python
from datasets import Dataset
dataset = Dataset.from_dict({"x": ["sample"]})
print(len(dataset))
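# the dataset has a single row, so num_proc=10 exceeds len(dataset) and should trigger the warning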
dataset.map(lambda x: x, num_proc=10)
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2560/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2560/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2559 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2559/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2559/comments | https://api.github.com/repos/huggingface/datasets/issues/2559/events | https://github.com/huggingface/datasets/issues/2559 | 931,849,724 | MDU6SXNzdWU5MzE4NDk3MjQ= | 2,559 | Memory usage consistently increases when processing a dataset with `.map` | {
"login": "apsdehal",
"id": 3616806,
"node_id": "MDQ6VXNlcjM2MTY4MDY=",
"avatar_url": "https://avatars.githubusercontent.com/u/3616806?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/apsdehal",
"html_url": "https://github.com/apsdehal",
"followers_url": "https://api.github.com/users/apsdehal/followers",
"following_url": "https://api.github.com/users/apsdehal/following{/other_user}",
"gists_url": "https://api.github.com/users/apsdehal/gists{/gist_id}",
"starred_url": "https://api.github.com/users/apsdehal/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/apsdehal/subscriptions",
"organizations_url": "https://api.github.com/users/apsdehal/orgs",
"repos_url": "https://api.github.com/users/apsdehal/repos",
"events_url": "https://api.github.com/users/apsdehal/events{/privacy}",
"received_events_url": "https://api.github.com/users/apsdehal/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | null | [] | null | [
"Hi ! Can you share the function you pass to `map` ?\r\nI know you mentioned it would be hard to share some code but this would really help to understand what happened"
] | 1,624,905,118,000 | 1,624,956,180,000 | null | MEMBER | null | null | null | ## Describe the bug
I have an HF dataset with image paths stored in it, and I am trying to load the images at those paths using `.map` with `num_proc=80`. I am noticing that the memory usage consistently keeps increasing over time. I tried using `DEFAULT_WRITER_BATCH_SIZE=10` in the builder to decrease the Arrow writer's batch size, but that doesn't seem to help.
## Steps to reproduce the bug
Providing the code as-is would be hard; I can provide an MVP if that helps. For illustration, the processing looks roughly like the sketch below.
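Purely illustrative sketch (the real function and column names are not shown in this issue):
```python
def load_image_bytes(example):
    # "image_path" and "image" are hypothetical column names
    with open(example["image_path"], "rb") as f:
        example["image"] = f.read()
    return example

ds = ds.map(load_image_bytes, num_proc=80)
```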
## Expected results
Memory usage should stabilize some time after processing starts.
## Actual results
Memory usage keeps on increasing.
## Environment info
- `datasets` version: 1.8.0
- Platform: Linux-5.4.0-52-generic-x86_64-with-debian-bullseye-sid
- Python version: 3.7.7
- PyArrow version: 3.0.0 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2559/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2559/timeline | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/2558 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2558/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2558/comments | https://api.github.com/repos/huggingface/datasets/issues/2558/events | https://github.com/huggingface/datasets/pull/2558 | 931,736,647 | MDExOlB1bGxSZXF1ZXN0Njc5MTg0Njk1 | 2,558 | Update: WebNLG - update checksums | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,624,896,997,000 | 1,624,900,997,000 | 1,624,900,996,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2558",
"html_url": "https://github.com/huggingface/datasets/pull/2558",
"diff_url": "https://github.com/huggingface/datasets/pull/2558.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2558.patch",
"merged_at": 1624900996000
} | The master branch changed so I computed the new checksums.
I also pinned a specific revision so that it doesn't happen again in the future.
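For illustration, pinning means pointing the download URL at a fixed commit instead of the moving `master` branch (the placeholder below is not the actual hash used in the PR):
```python
_URL = "https://gitlab.com/shimorina/webnlg-dataset/-/archive/<commit_sha>/webnlg-dataset-<commit_sha>.zip"
```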
Fix https://github.com/huggingface/datasets/issues/2553 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2558/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2558/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2557 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2557/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2557/comments | https://api.github.com/repos/huggingface/datasets/issues/2557/events | https://github.com/huggingface/datasets/pull/2557 | 931,633,823 | MDExOlB1bGxSZXF1ZXN0Njc5MDk4ODg3 | 2,557 | Fix `fever` keys | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,624,890,422,000 | 1,624,896,690,000 | 1,624,896,689,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2557",
"html_url": "https://github.com/huggingface/datasets/pull/2557",
"diff_url": "https://github.com/huggingface/datasets/pull/2557.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2557.patch",
"merged_at": 1624896689000
The keys had duplicates since they were reset to 0 after each file; I fixed it by also taking the file index into account.
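A sketch of the kind of change (hypothetical generator code, not the exact diff; `files` and `read_rows` are placeholders):
```python
for file_idx, path in enumerate(files):
    for row_idx, row in enumerate(read_rows(path)):
        yield f"{file_idx}_{row_idx}", row  # unique across files, unlike row_idx alone
```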
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2557/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2557/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2556 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2556/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2556/comments | https://api.github.com/repos/huggingface/datasets/issues/2556/events | https://github.com/huggingface/datasets/issues/2556 | 931,595,872 | MDU6SXNzdWU5MzE1OTU4NzI= | 2,556 | Better DuplicateKeysError error to help the user debug the issue | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
},
{
"id": 1935892877,
"node_id": "MDU6TGFiZWwxOTM1ODkyODc3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue",
"name": "good first issue",
"color": "7057ff",
"default": true,
"description": "Good for newcomers"
}
] | open | false | {
"login": "VijayKalmath",
"id": 20517962,
"node_id": "MDQ6VXNlcjIwNTE3OTYy",
"avatar_url": "https://avatars.githubusercontent.com/u/20517962?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/VijayKalmath",
"html_url": "https://github.com/VijayKalmath",
"followers_url": "https://api.github.com/users/VijayKalmath/followers",
"following_url": "https://api.github.com/users/VijayKalmath/following{/other_user}",
"gists_url": "https://api.github.com/users/VijayKalmath/gists{/gist_id}",
"starred_url": "https://api.github.com/users/VijayKalmath/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/VijayKalmath/subscriptions",
"organizations_url": "https://api.github.com/users/VijayKalmath/orgs",
"repos_url": "https://api.github.com/users/VijayKalmath/repos",
"events_url": "https://api.github.com/users/VijayKalmath/events{/privacy}",
"received_events_url": "https://api.github.com/users/VijayKalmath/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "VijayKalmath",
"id": 20517962,
"node_id": "MDQ6VXNlcjIwNTE3OTYy",
"avatar_url": "https://avatars.githubusercontent.com/u/20517962?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/VijayKalmath",
"html_url": "https://github.com/VijayKalmath",
"followers_url": "https://api.github.com/users/VijayKalmath/followers",
"following_url": "https://api.github.com/users/VijayKalmath/following{/other_user}",
"gists_url": "https://api.github.com/users/VijayKalmath/gists{/gist_id}",
"starred_url": "https://api.github.com/users/VijayKalmath/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/VijayKalmath/subscriptions",
"organizations_url": "https://api.github.com/users/VijayKalmath/orgs",
"repos_url": "https://api.github.com/users/VijayKalmath/repos",
"events_url": "https://api.github.com/users/VijayKalmath/events{/privacy}",
"received_events_url": "https://api.github.com/users/VijayKalmath/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"excuse me, my `datasets` version is `2.2.2`, but I also just see the error info like \r\n```\r\nDuplicatedKeysError: FAILURE TO GENERATE DATASET !\r\nFound duplicate Key: 0\r\nKeys should be unique and deterministic in nature\r\n```",
"Hi ! for which dataset do you have this error ?\r\n\r\nAlso note that this issue is just about improving the error message, which is not very friendly x)",
"@lhoestq I would like to take a hit at improving the error message. Will open a draft PR and will reach out to you for review\r\n",
"> DuplicateKeysError: both 42th and 1337th examples have the same keys `48`.\r\n\r\n@lhoestq when you mention 42th and 1337th in the above case , are these values the examples' \"id\" or are they the examples' index ? ",
"Hi ! Thanks @VijayKalmath :)\r\n\r\nIn the general case, examples don't have an \"id\" field, so I think it should correspond to the index",
"@lhoestq , I have opened a draft PR for this Issue. \r\n\r\nI wanted to check with you if there is a way to get `<path/to/the/dataset/script>` currently or do I need to add extra code to find that. \r\n\r\nIf I need to find the script , I can assume that the generator function will always be in `datasets/{dataset_name}/{dataset_name}.py`. ",
"Thanks !\r\n\r\n> I wanted to check with you if there is a way to get <path/to/the/dataset/script> currently or do I need to add extra code to find that.\r\n\r\nYou don't have access to this info inside the ArrowWriter unfortunately. This info is available in builder.py in the DatasetBuilder code that uses the ArrowWriter though, maybe a try-catch there can do the job"
] | 1,624,888,257,000 | 1,655,990,972,000 | null | MEMBER | null | null | null | As mentioned in https://github.com/huggingface/datasets/issues/2552 it would be nice to improve the error message when a dataset fails to build because there are duplicate example keys.
The current one is
```python
datasets.keyhash.DuplicatedKeysError: FAILURE TO GENERATE DATASET !
Found duplicate Key: 48
Keys should be unique and deterministic in nature
```
and we could have something that guides the user toward debugging the issue:
```python
DuplicateKeysError: both 42th and 1337th examples have the same keys `48`.
Please fix the dataset script at <path/to/the/dataset/script>
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2556/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2556/timeline | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/2555 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2555/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2555/comments | https://api.github.com/repos/huggingface/datasets/issues/2555/events | https://github.com/huggingface/datasets/pull/2555 | 931,585,485 | MDExOlB1bGxSZXF1ZXN0Njc5MDU4ODM3 | 2,555 | Fix code_search_net keys | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"Fix #2552."
] | 1,624,887,623,000 | 1,630,571,083,000 | 1,624,889,435,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2555",
"html_url": "https://github.com/huggingface/datasets/pull/2555",
"diff_url": "https://github.com/huggingface/datasets/pull/2555.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2555.patch",
"merged_at": 1624889435000
} | There were duplicate keys in the `code_search_net` dataset, as reported in https://github.com/huggingface/datasets/issues/2552
I fixed the keys (each key was the sum of the file index and the row index, which caused collisions).
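To see why a sum of indices collides (illustration only):
```python
# file 0, row 48  ->  0 + 48 == 48
# file 1, row 47  ->  1 + 47 == 48   <- duplicate key!
# combining the indices instead, e.g. f"{file_idx}_{row_idx}", stays unique
```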
Fix #2552. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2555/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2555/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2554 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2554/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2554/comments | https://api.github.com/repos/huggingface/datasets/issues/2554/events | https://github.com/huggingface/datasets/issues/2554 | 931,453,855 | MDU6SXNzdWU5MzE0NTM4NTU= | 2,554 | Multilabel metrics not supported | {
"login": "GuillemGSubies",
"id": 37592763,
"node_id": "MDQ6VXNlcjM3NTkyNzYz",
"avatar_url": "https://avatars.githubusercontent.com/u/37592763?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/GuillemGSubies",
"html_url": "https://github.com/GuillemGSubies",
"followers_url": "https://api.github.com/users/GuillemGSubies/followers",
"following_url": "https://api.github.com/users/GuillemGSubies/following{/other_user}",
"gists_url": "https://api.github.com/users/GuillemGSubies/gists{/gist_id}",
"starred_url": "https://api.github.com/users/GuillemGSubies/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/GuillemGSubies/subscriptions",
"organizations_url": "https://api.github.com/users/GuillemGSubies/orgs",
"repos_url": "https://api.github.com/users/GuillemGSubies/repos",
"events_url": "https://api.github.com/users/GuillemGSubies/events{/privacy}",
"received_events_url": "https://api.github.com/users/GuillemGSubies/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"Hi @GuillemGSubies, thanks for reporting.\r\n\r\nI have made a PR to fix this issue and allow metrics to be computed also for multilabel classification problems.",
"Looks nice, thank you very much! ๐ ",
"Sorry for reopening but I just noticed that the `_compute` method for the F1 metric is still not good enough for multilabel problems:\r\n\r\nhttps://github.com/huggingface/datasets/blob/92a3ee549705aa0a107c9fa5caf463b3b3da2616/metrics/f1/f1.py#L115\r\n\r\nSomehow we should be able to change the parameter `average` at least",
"@GuillemGSubies, the parameter `average` passed to `_compute` is then passed to `f1_score`. This is right."
] | 1,624,878,586,000 | 1,634,128,153,000 | 1,625,733,615,000 | NONE | null | null | null | When I try to use a metric like F1 macro I get the following error:
```
TypeError: int() argument must be a string, a bytes-like object or a number, not 'list'
```
There is an explicit casting here:
https://github.com/huggingface/datasets/blob/fc79f61cbbcfa0e8c68b28c0a8257f17e768a075/src/datasets/features.py#L274
And it looks like this is because, here,
https://github.com/huggingface/datasets/blob/fc79f61cbbcfa0e8c68b28c0a8257f17e768a075/metrics/f1/f1.py#L88
the features can only be integers, so we cannot use that F1 for multilabel. Instead, if I create the following F1 (ints replaced with sequences of ints), it works:
```python
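# imports assumed by this snippet (a full metric script would also define
# _DESCRIPTION, _CITATION and _KWARGS_DESCRIPTION):
import datasets
from sklearn.metrics import f1_score
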
class F1(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32")),
"references": datasets.Sequence(datasets.Value("int32")),
}
),
reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
)
def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
return {
"f1": f1_score(
references,
predictions,
labels=labels,
pos_label=pos_label,
average=average,
sample_weight=sample_weight,
),
}
```
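A usage sketch, assuming the class above is saved as a local metric script (the path is hypothetical):
```python
from datasets import load_metric

metric = load_metric("./f1_multilabel.py")
metric.add_batch(
    predictions=[[1, 0, 1], [0, 1, 1]],
    references=[[1, 0, 0], [0, 1, 1]],
)
print(metric.compute(average="macro"))
```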
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2554/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2554/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2553 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2553/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2553/comments | https://api.github.com/repos/huggingface/datasets/issues/2553/events | https://github.com/huggingface/datasets/issues/2553 | 931,365,926 | MDU6SXNzdWU5MzEzNjU5MjY= | 2,553 | load_dataset("web_nlg") NonMatchingChecksumError | {
"login": "alxthm",
"id": 33730312,
"node_id": "MDQ6VXNlcjMzNzMwMzEy",
"avatar_url": "https://avatars.githubusercontent.com/u/33730312?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/alxthm",
"html_url": "https://github.com/alxthm",
"followers_url": "https://api.github.com/users/alxthm/followers",
"following_url": "https://api.github.com/users/alxthm/following{/other_user}",
"gists_url": "https://api.github.com/users/alxthm/gists{/gist_id}",
"starred_url": "https://api.github.com/users/alxthm/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/alxthm/subscriptions",
"organizations_url": "https://api.github.com/users/alxthm/orgs",
"repos_url": "https://api.github.com/users/alxthm/repos",
"events_url": "https://api.github.com/users/alxthm/events{/privacy}",
"received_events_url": "https://api.github.com/users/alxthm/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Hi ! Thanks for reporting. This is due to the WebNLG repository that got updated today.\r\nI just pushed a fix at #2558 - this shouldn't happen anymore in the future.",
"This is fixed on `master` now :)\r\nWe'll do a new release soon !"
] | 1,624,872,406,000 | 1,624,901,019,000 | 1,624,900,996,000 | NONE | null | null | null | Hi! It seems the WebNLG dataset gives a NonMatchingChecksumError.
## Steps to reproduce the bug
```python
from datasets import load_dataset
dataset = load_dataset('web_nlg', name="release_v3.0_en", split="dev")
```
Gives
```
NonMatchingChecksumError: Checksums didn't match for dataset source files:
['https://gitlab.com/shimorina/webnlg-dataset/-/archive/master/webnlg-dataset-master.zip']
```
## Environment info
- `datasets` version: 1.8.0
- Platform: macOS-11.3.1-x86_64-i386-64bit
- Python version: 3.9.4
- PyArrow version: 3.0.0
Also tested on Linux, with python 3.6.8 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2553/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2553/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2552 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2552/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2552/comments | https://api.github.com/repos/huggingface/datasets/issues/2552/events | https://github.com/huggingface/datasets/issues/2552 | 931,354,687 | MDU6SXNzdWU5MzEzNTQ2ODc= | 2,552 | Keys should be unique error on code_search_net | {
"login": "thomwolf",
"id": 7353373,
"node_id": "MDQ6VXNlcjczNTMzNzM=",
"avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/thomwolf",
"html_url": "https://github.com/thomwolf",
"followers_url": "https://api.github.com/users/thomwolf/followers",
"following_url": "https://api.github.com/users/thomwolf/following{/other_user}",
"gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}",
"starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions",
"organizations_url": "https://api.github.com/users/thomwolf/orgs",
"repos_url": "https://api.github.com/users/thomwolf/repos",
"events_url": "https://api.github.com/users/thomwolf/events{/privacy}",
"received_events_url": "https://api.github.com/users/thomwolf/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"Two questions:\r\n- with `datasets-cli env` we don't have any information on the dataset script version used. Should we give access to this somehow? Either as a note in the Error message or as an argument with the name of the dataset to `datasets-cli env`?\r\n- I don't really understand why the id is duplicated in the code of `code_search_net`, how can I debug this actually?",
"Thanks for reporting. There was indeed an issue with the keys. The key was the addition of the file id and row id, which resulted in collisions. I just opened a PR to fix this at https://github.com/huggingface/datasets/pull/2555\r\n\r\nTo help users debug this kind of errors we could try to show a message like this\r\n```python\r\nDuplicateKeysError: both 42th and 1337th examples have the same keys `48`.\r\nPlease fix the dataset script at <path/to/the/dataset/script>\r\n```\r\n\r\nThis way users who what to look for if they want to debug this issue. I opened an issue to track this: https://github.com/huggingface/datasets/issues/2556",
"and are we sure there are not a lot of datasets which are now broken with this change?",
"Thanks to the dummy data, we know for sure that most of them work as expected.\r\n`code_search_net` wasn't caught because the dummy data only have one dummy data file while the dataset script can actually load several of them using `os.listdir`. Let me take a look at all the other datasets that use `os.listdir` to see if the keys are alright",
"I found one issue on `fever` (PR here: https://github.com/huggingface/datasets/pull/2557)\r\nAll the other ones seem fine :)",
"Hi! Got same error when loading other dataset:\r\n```python3\r\nload_dataset('wikicorpus', 'raw_en')\r\n```\r\n\r\ntb:\r\n```pytb\r\n---------------------------------------------------------------------------\r\nDuplicatedKeysError Traceback (most recent call last)\r\n/opt/conda/lib/python3.8/site-packages/datasets/builder.py in _prepare_split(self, split_generator)\r\n 1109 example = self.info.features.encode_example(record)\r\n-> 1110 writer.write(example, key)\r\n 1111 finally:\r\n\r\n/opt/conda/lib/python3.8/site-packages/datasets/arrow_writer.py in write(self, example, key, writer_batch_size)\r\n 341 if self._check_duplicates:\r\n--> 342 self.check_duplicate_keys()\r\n 343 # Re-intializing to empty list for next batch\r\n\r\n/opt/conda/lib/python3.8/site-packages/datasets/arrow_writer.py in check_duplicate_keys(self)\r\n 352 if hash in tmp_record:\r\n--> 353 raise DuplicatedKeysError(key)\r\n 354 else:\r\n\r\nDuplicatedKeysError: FAILURE TO GENERATE DATASET !\r\nFound duplicate Key: 519\r\nKeys should be unique and deterministic in nature\r\n```\r\n\r\nVersion: datasets==1.11.0",
"Fixed by #2555.",
"The wikicorpus issue has been fixed by https://github.com/huggingface/datasets/pull/2844\r\n\r\nWe'll do a new release of `datasets` soon :)"
] | 1,624,871,720,000 | 1,630,937,310,000 | 1,630,571,129,000 | MEMBER | null | null | null | ## Describe the bug
Loading `code_search_net` seems not possible at the moment.
## Steps to reproduce the bug
```python
>>> load_dataset('code_search_net')
Downloading: 8.50kB [00:00, 3.09MB/s]
Downloading: 19.1kB [00:00, 10.1MB/s]
No config specified, defaulting to: code_search_net/all
Downloading and preparing dataset code_search_net/all (download: 4.77 GiB, generated: 5.99 GiB, post-processed: Unknown size, total: 10.76 GiB) to /Users/thomwolf/.cache/huggingface/datasets/code_search_net/all/1.0.0/b3e8278faf5d67da1d06981efbeac3b76a2900693bd2239bbca7a4a3b0d6e52a...
Traceback (most recent call last):
File "/Users/thomwolf/Documents/GitHub/datasets/src/datasets/builder.py", line 1067, in _prepare_split
writer.write(example, key)
File "/Users/thomwolf/Documents/GitHub/datasets/src/datasets/arrow_writer.py", line 343, in write
self.check_duplicate_keys()
File "/Users/thomwolf/Documents/GitHub/datasets/src/datasets/arrow_writer.py", line 354, in check_duplicate_keys
raise DuplicatedKeysError(key)
datasets.keyhash.DuplicatedKeysError: FAILURE TO GENERATE DATASET !
Found duplicate Key: 48
Keys should be unique and deterministic in nature
```
## Environment info
- `datasets` version: 1.8.1.dev0
- Platform: macOS-10.15.7-x86_64-i386-64bit
- Python version: 3.8.5
- PyArrow version: 2.0.0
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2552/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2552/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2551 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2551/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2551/comments | https://api.github.com/repos/huggingface/datasets/issues/2551/events | https://github.com/huggingface/datasets/pull/2551 | 930,967,978 | MDExOlB1bGxSZXF1ZXN0Njc4NTQzMjg1 | 2,551 | Fix FileSystems documentation | {
"login": "connor-mccarthy",
"id": 55268212,
"node_id": "MDQ6VXNlcjU1MjY4MjEy",
"avatar_url": "https://avatars.githubusercontent.com/u/55268212?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/connor-mccarthy",
"html_url": "https://github.com/connor-mccarthy",
"followers_url": "https://api.github.com/users/connor-mccarthy/followers",
"following_url": "https://api.github.com/users/connor-mccarthy/following{/other_user}",
"gists_url": "https://api.github.com/users/connor-mccarthy/gists{/gist_id}",
"starred_url": "https://api.github.com/users/connor-mccarthy/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/connor-mccarthy/subscriptions",
"organizations_url": "https://api.github.com/users/connor-mccarthy/orgs",
"repos_url": "https://api.github.com/users/connor-mccarthy/repos",
"events_url": "https://api.github.com/users/connor-mccarthy/events{/privacy}",
"received_events_url": "https://api.github.com/users/connor-mccarthy/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,624,810,722,000 | 1,624,885,795,000 | 1,624,885,794,000 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2551",
"html_url": "https://github.com/huggingface/datasets/pull/2551",
"diff_url": "https://github.com/huggingface/datasets/pull/2551.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2551.patch",
"merged_at": 1624885794000
} | ### What this fixes:
This PR resolves several issues I discovered in the documentation on the `datasets.filesystems` module ([this page](https://huggingface.co/docs/datasets/filesystems.html)).
### What were the issues?
When I originally tried to run the code examples, I encountered several bugs caused by:
- out-of-date [botocore](https://github.com/boto/botocore) call signatures
- capitalization errors in the `S3FileSystem` class name (written as `S3Filesystem` in one place)
- call signature errors for the `S3FileSystem` class constructor (it uses the parameter `sessions` instead of `session` in some places; see [`s3fs`](https://s3fs.readthedocs.io/en/latest/api.html#s3fs.core.S3FileSystem) for where this constructor signature is defined). A corrected usage sketch is shown below.
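Here is that sketch (parameter and class names as documented by `s3fs` and `botocore`; the profile name is a placeholder):
```python
import botocore.session
from datasets.filesystems import S3FileSystem  # capital "S" in "System"

# "session", not "sessions"; "my_profile_name" is a hypothetical AWS profile
s3_session = botocore.session.Session(profile="my_profile_name")
fs = S3FileSystem(session=s3_session)
```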
### Testing/reviewing notes
Instructions for generating the documentation locally: [here](https://github.com/huggingface/datasets/tree/master/docs#generating-the-documentation). | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2551/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2551/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2550 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2550/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2550/comments | https://api.github.com/repos/huggingface/datasets/issues/2550/events | https://github.com/huggingface/datasets/issues/2550 | 930,951,287 | MDU6SXNzdWU5MzA5NTEyODc= | 2,550 | Allow for incremental cumulative metric updates in a distributed setup | {
"login": "eladsegal",
"id": 13485709,
"node_id": "MDQ6VXNlcjEzNDg1NzA5",
"avatar_url": "https://avatars.githubusercontent.com/u/13485709?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/eladsegal",
"html_url": "https://github.com/eladsegal",
"followers_url": "https://api.github.com/users/eladsegal/followers",
"following_url": "https://api.github.com/users/eladsegal/following{/other_user}",
"gists_url": "https://api.github.com/users/eladsegal/gists{/gist_id}",
"starred_url": "https://api.github.com/users/eladsegal/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/eladsegal/subscriptions",
"organizations_url": "https://api.github.com/users/eladsegal/orgs",
"repos_url": "https://api.github.com/users/eladsegal/repos",
"events_url": "https://api.github.com/users/eladsegal/events{/privacy}",
"received_events_url": "https://api.github.com/users/eladsegal/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
}
] | closed | false | null | [] | null | [] | 1,624,806,058,000 | 1,632,663,759,000 | 1,632,663,759,000 | CONTRIBUTOR | null | null | null | Currently, using a metric allows for one of the following:
- Per-example/per-batch metrics
- Cumulative metrics over the whole dataset
What I'd like is an efficient way to get cumulative metrics over the examples/batches added so far, in order to display them as part of the progress bar during training/evaluation.
Since most metrics are just an average of per-example metrics (are there any that aren't?), an efficient calculation can be done as follows:
`((score_cumulative * n_cumulative) + (score_new * n_new)) / (n_cumulative + n_new)`
where `n` and `score` refer to the number of examples and the metric score, `cumulative` refers to the cumulative metric, and `new` refers to the newly added examples.
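In code, the incremental update could look like this (a minimal illustrative sketch, not library code):
```python
# Fold a new batch's average metric into the running cumulative average.
def update_cumulative(score_cumulative, n_cumulative, score_new, n_new):
    n_total = n_cumulative + n_new
    score_total = (score_cumulative * n_cumulative + score_new * n_new) / n_total
    return score_total, n_total


# e.g. accuracy 0.80 over 100 examples, then 0.90 over 20 new examples
score, n = update_cumulative(0.80, 100, 0.90, 20)  # -> (0.8166..., 120)
```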
If you don't want to add this capability to the library, a simple change would let users do it themselves:
the incremental update is easy to implement in a single-process setup, but in a distributed one there is currently no way to get the correct `n_new`.
The solution for this is to return the number of examples used to compute the metrics in `.compute()`, by adding the following line here:
https://github.com/huggingface/datasets/blob/5a3221785311d0ce86c2785b765e86bd6997d516/src/datasets/metric.py#L402-L403
```
output["number_of_examples"] = len(predictions)
```
and also remove the log message here so it won't spam:
https://github.com/huggingface/datasets/blob/3db67f5ff6cbf807b129d2b4d1107af27623b608/src/datasets/metric.py#L411
If this change is ok with you, I'll open a pull request.
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2550/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2550/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2549 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2549/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2549/comments | https://api.github.com/repos/huggingface/datasets/issues/2549/events | https://github.com/huggingface/datasets/issues/2549 | 929,819,093 | MDU6SXNzdWU5Mjk4MTkwOTM= | 2,549 | Handling unlabeled datasets | {
"login": "nelson-liu",
"id": 7272031,
"node_id": "MDQ6VXNlcjcyNzIwMzE=",
"avatar_url": "https://avatars.githubusercontent.com/u/7272031?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/nelson-liu",
"html_url": "https://github.com/nelson-liu",
"followers_url": "https://api.github.com/users/nelson-liu/followers",
"following_url": "https://api.github.com/users/nelson-liu/following{/other_user}",
"gists_url": "https://api.github.com/users/nelson-liu/gists{/gist_id}",
"starred_url": "https://api.github.com/users/nelson-liu/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/nelson-liu/subscriptions",
"organizations_url": "https://api.github.com/users/nelson-liu/orgs",
"repos_url": "https://api.github.com/users/nelson-liu/repos",
"events_url": "https://api.github.com/users/nelson-liu/events{/privacy}",
"received_events_url": "https://api.github.com/users/nelson-liu/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
}
] | closed | false | null | [] | null | [
"Hi @nelson-liu,\r\n\r\nYou can pass the parameter `features` to `load_dataset`: https://huggingface.co/docs/datasets/_modules/datasets/load.html#load_dataset\r\n\r\nIf you look at the code of the MNLI script you referred in your question (https://github.com/huggingface/datasets/blob/master/datasets/multi_nli/multi_nli.py#L62-L77), you can see how the Features were originally specified. \r\n\r\nFeel free to use it as a template, customize it and pass it to `load_dataset` using the parameter `features`.",
"ah got it, thanks!"
] | 1,624,595,543,000 | 1,624,655,277,000 | 1,624,655,276,000 | NONE | null | null | null | Hi!
Is there a way for datasets to produce unlabeled instances (e.g., by making the `ClassLabel` nullable)?
For example, I want to use the MNLI dataset reader ( https://github.com/huggingface/datasets/blob/master/datasets/multi_nli/multi_nli.py ) on a file that doesn't have the `gold_label` field. I tried setting `"label": data.get("gold_label")`, but got the following error:
```
File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/load.py", line 748, in load_dataset
use_auth_token=use_auth_token,
File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/builder.py", line 575, in download_and_prepare
dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/builder.py", line 652, in _download_and_prepare
self._prepare_split(split_generator, **prepare_split_kwargs)
File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/builder.py", line 989, in _prepare_split
example = self.info.features.encode_example(record)
File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/features.py", line 953, in encode_example
return encode_nested_example(self, example)
File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/features.py", line 848, in encode_nested_example
k: encode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in utils.zip_dict(schema, obj)
File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/features.py", line 848, in <dictcomp>
k: encode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in utils.zip_dict(schema, obj)
File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/features.py", line 875, in encode_nested_example
return schema.encode_example(obj)
File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/features.py", line 653, in encode_example
if not -1 <= example_data < self.num_classes:
TypeError: '<=' not supported between instances of 'int' and 'NoneType'
```
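One possible workaround might be to pass an explicit `features` schema to `load_dataset` in which the label is a plain `Value` instead of a `ClassLabel`, since plain values accept `None`. An untested sketch (fields abbreviated; the real MNLI schema has more columns, so this exact call is illustrative only):
```python
from datasets import Features, Value, load_dataset

features = Features(
    {
        "premise": Value("string"),
        "hypothesis": Value("string"),
        "label": Value("string"),  # a plain Value tolerates missing labels
    }
)
dataset = load_dataset("multi_nli", features=features)
```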
What's the proper way to handle reading unlabeled datasets, especially for downstream usage with Transformers? | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2549/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2549/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2548 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2548/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2548/comments | https://api.github.com/repos/huggingface/datasets/issues/2548/events | https://github.com/huggingface/datasets/issues/2548 | 929,232,831 | MDU6SXNzdWU5MjkyMzI4MzE= | 2,548 | Field order issue in loading json | {
"login": "luyug",
"id": 55288513,
"node_id": "MDQ6VXNlcjU1Mjg4NTEz",
"avatar_url": "https://avatars.githubusercontent.com/u/55288513?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/luyug",
"html_url": "https://github.com/luyug",
"followers_url": "https://api.github.com/users/luyug/followers",
"following_url": "https://api.github.com/users/luyug/following{/other_user}",
"gists_url": "https://api.github.com/users/luyug/gists{/gist_id}",
"starred_url": "https://api.github.com/users/luyug/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/luyug/subscriptions",
"organizations_url": "https://api.github.com/users/luyug/orgs",
"repos_url": "https://api.github.com/users/luyug/repos",
"events_url": "https://api.github.com/users/luyug/events{/privacy}",
"received_events_url": "https://api.github.com/users/luyug/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | null | [] | null | [
"Hi @luyug, thanks for reporting.\r\n\r\nThe good news is that we fixed this issue only 9 days ago: #2507.\r\n\r\nThe patch is already in the master branch of our repository and it will be included in our next `datasets` release version 1.9.0.\r\n\r\nFeel free to reopen the issue if the problem persists."
] | 1,624,541,393,000 | 1,624,545,403,000 | 1,624,545,245,000 | NONE | null | null | null | ## Describe the bug
The `load_dataset` function expects columns in alphabetical order when loading JSON files.
A similar bug was previously reported for CSV in #623 and fixed in #684.
## Steps to reproduce the bug
For a JSON file `j.json`:
```
{"c":321, "a": 1, "b": 2}
```
Running the following:
```
import datasets

f = datasets.Features({'a': datasets.Value('int32'), 'b': datasets.Value('int32'), 'c': datasets.Value('int32')})
json_data = datasets.load_dataset('json', data_files='j.json', features=f)
```
## Expected results
A successful load.
## Actual results
```
File "pyarrow/table.pxi", line 1409, in pyarrow.lib.Table.cast
ValueError: Target schema's field names are not matching the table's field names: ['c', 'a', 'b'], ['a', 'b', 'c']
```
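Until this is fixed, a possible (untested) workaround suggested by the error message is to declare the features in the same order as the fields appear in the file:
```python
import datasets

# Hypothetical workaround: match the file's field order ('c', 'a', 'b')
f = datasets.Features(
    {
        "c": datasets.Value("int32"),
        "a": datasets.Value("int32"),
        "b": datasets.Value("int32"),
    }
)
json_data = datasets.load_dataset("json", data_files="j.json", features=f)
```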
## Environment info
- `datasets` version: 1.8.0
- Platform: Linux-3.10.0-957.1.3.el7.x86_64-x86_64-with-glibc2.10
- Python version: 3.8.8
- PyArrow version: 3.0.0
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2548/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2548/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2547 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2547/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2547/comments | https://api.github.com/repos/huggingface/datasets/issues/2547/events | https://github.com/huggingface/datasets/issues/2547 | 929,192,329 | MDU6SXNzdWU5MjkxOTIzMjk= | 2,547 | Dataset load_from_disk is too slow | {
"login": "alexvaca0",
"id": 35173563,
"node_id": "MDQ6VXNlcjM1MTczNTYz",
"avatar_url": "https://avatars.githubusercontent.com/u/35173563?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/alexvaca0",
"html_url": "https://github.com/alexvaca0",
"followers_url": "https://api.github.com/users/alexvaca0/followers",
"following_url": "https://api.github.com/users/alexvaca0/following{/other_user}",
"gists_url": "https://api.github.com/users/alexvaca0/gists{/gist_id}",
"starred_url": "https://api.github.com/users/alexvaca0/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/alexvaca0/subscriptions",
"organizations_url": "https://api.github.com/users/alexvaca0/orgs",
"repos_url": "https://api.github.com/users/alexvaca0/repos",
"events_url": "https://api.github.com/users/alexvaca0/events{/privacy}",
"received_events_url": "https://api.github.com/users/alexvaca0/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | open | false | null | [] | null | [
"Hi ! It looks like an issue with the virtual disk you are using.\r\n\r\nWe load datasets using memory mapping. In general it makes it possible to load very big files instantaneously since it doesn't have to read the file (it just assigns virtual memory to the file on disk).\r\nHowever there happens to be issues with virtual disks (for example on spot instances), for which memory mapping does a pass over the entire file, and this takes a while. We are discussing about this issue here: #2252 \r\n\r\nMemory mapping is something handled by the OS so we can't do much about it, though we're still trying to figure out what's causing this behavior exactly to see what we can do.",
"Okay, that's exactly my case, with spot instances... Therefore this isn't something we can change in any way to be able to load the dataset faster? I mean, what do you do internally at huggingface for being able to use spot instances with datasets efficiently?",
"There are no solutions yet unfortunately.\r\nWe're still trying to figure out a way to make the loading instantaneous on such disks, I'll keep you posted"
] | 1,624,538,744,000 | 1,624,632,998,000 | null | NONE | null | null | null | @lhoestq
## Describe the bug
It's not normal that I have to wait 7-8 hours for a dataset to be loaded from disk when there are no preprocessing steps; it is only being loaded with `load_from_disk`. I have 96 CPUs, but only 1 is used for this, which is inefficient. Moreover, its usage is at 1%... This is happening in the context of language model training, so I'm wasting $100 each time I have to load the dataset from disk again (for example, because the spot instance was stopped by AWS and I need to relaunch it).
## Steps to reproduce the bug
Just get OSCAR in Spanish (around 150 GB) and try to first save it to disk and then load the processed dataset. It doesn't depend on the task you're doing, only on the size of the text dataset.
## Expected results
I expect the dataset to be loaded in a reasonable time, using the whole machine. If the dataset were stored in multiple `.arrow` files and then loaded from those multiple files, multiprocessing could be used for loading, and much less time would be wasted.
## Environment info
<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: 1.8.0
- Platform: Ubuntu 18
- Python version: 3.8
I've seen you're planning to include a streaming mode for `load_dataset`, but that only saves downloading and processing time, which is not the problem for me; it cannot save the pure loading-from-disk time, so it is not a solution for my use case or for anyone who wants to use your library for training a language model. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2547/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2547/timeline | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/2546 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2546/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2546/comments | https://api.github.com/repos/huggingface/datasets/issues/2546/events | https://github.com/huggingface/datasets/pull/2546 | 929,091,689 | MDExOlB1bGxSZXF1ZXN0Njc2OTk2MjQ0 | 2,546 | Add license to the Cambridge English Write & Improve + LOCNESS dataset card | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,624,531,169,000 | 1,624,531,921,000 | 1,624,531,921,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2546",
"html_url": "https://github.com/huggingface/datasets/pull/2546",
"diff_url": "https://github.com/huggingface/datasets/pull/2546.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2546.patch",
"merged_at": 1624531921000
} | As noticed in https://github.com/huggingface/datasets/pull/2539, the licensing information was missing for this dataset.
I added it, and I also filled in a few other empty sections. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2546/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2546/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2545 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2545/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2545/comments | https://api.github.com/repos/huggingface/datasets/issues/2545/events | https://github.com/huggingface/datasets/pull/2545 | 929,016,580 | MDExOlB1bGxSZXF1ZXN0Njc2OTMxOTYw | 2,545 | Fix DuplicatedKeysError in drop dataset | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,624,525,839,000 | 1,624,546,628,000 | 1,624,546,628,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2545",
"html_url": "https://github.com/huggingface/datasets/pull/2545",
"diff_url": "https://github.com/huggingface/datasets/pull/2545.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2545.patch",
"merged_at": 1624546628000
} | Close #2542.
cc: @VictorSanh. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2545/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2545/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2544 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2544/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2544/comments | https://api.github.com/repos/huggingface/datasets/issues/2544/events | https://github.com/huggingface/datasets/pull/2544 | 928,900,827 | MDExOlB1bGxSZXF1ZXN0Njc2ODM1MjYz | 2,544 | Fix logging levels | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,624,516,896,000 | 1,624,628,419,000 | 1,624,628,419,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2544",
"html_url": "https://github.com/huggingface/datasets/pull/2544",
"diff_url": "https://github.com/huggingface/datasets/pull/2544.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2544.patch",
"merged_at": 1624628419000
} | Sometimes default `datasets` logging can be too verbose. One approach could be reducing some logging levels, from info to debug, or from warning to info.
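For reference, users can already tune the verbosity themselves; a sketch:
```python
from datasets.utils.logging import set_verbosity_warning

set_verbosity_warning()  # or set_verbosity_error() for even less noise
```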
Close #2543.
cc: @stas00 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2544/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2544/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2543 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2543/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2543/comments | https://api.github.com/repos/huggingface/datasets/issues/2543/events | https://github.com/huggingface/datasets/issues/2543 | 928,571,915 | MDU6SXNzdWU5Mjg1NzE5MTU= | 2,543 | switching some low-level log.info's to log.debug? | {
"login": "stas00",
"id": 10676103,
"node_id": "MDQ6VXNlcjEwNjc2MTAz",
"avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/stas00",
"html_url": "https://github.com/stas00",
"followers_url": "https://api.github.com/users/stas00/followers",
"following_url": "https://api.github.com/users/stas00/following{/other_user}",
"gists_url": "https://api.github.com/users/stas00/gists{/gist_id}",
"starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/stas00/subscriptions",
"organizations_url": "https://api.github.com/users/stas00/orgs",
"repos_url": "https://api.github.com/users/stas00/repos",
"events_url": "https://api.github.com/users/stas00/events{/privacy}",
"received_events_url": "https://api.github.com/users/stas00/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
}
] | closed | false | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"Hi @stas00, thanks for pointing out this issue with logging.\r\n\r\nI agree that `datasets` can sometimes be too verbose... I can create a PR and we could discuss there the choice of the log levels for different parts of the code."
] | 1,624,476,415,000 | 1,624,628,419,000 | 1,624,628,419,000 | MEMBER | null | null | null | In https://github.com/huggingface/transformers/pull/12276 we are now changing the examples to have `datasets` on the same log level as `transformers`, so that one setting can do a consistent logging across all involved components.
The trouble is that now we get a ton of these:
```
06/23/2021 12:15:31 - INFO - datasets.utils.filelock - Lock 139627640431136 acquired on /home/stas/.cache/huggingface/metrics/sacrebleu/default/default_experiment-1-0.arrow.lock
06/23/2021 12:15:31 - INFO - datasets.arrow_writer - Done writing 50 examples in 12280 bytes /home/stas/.cache/huggingface/metrics/sacrebleu/default/default_experiment-1-0.arrow.
06/23/2021 12:15:31 - INFO - datasets.arrow_dataset - Set __getitem__(key) output type to python objects for no columns (when key is int or slice) and don't output other (un-formatted) columns.
06/23/2021 12:15:31 - INFO - datasets.utils.filelock - Lock 139627640431136 released on /home/stas/.cache/huggingface/metrics/sacrebleu/default/default_experiment-1-0.arrow.lock
```
May I suggest making these `log.debug`, as they are not informative to the user?
More examples: these are not informative and provide too much detail:
```
06/23/2021 12:14:26 - INFO - datasets.load - Checking /home/stas/.cache/huggingface/datasets/downloads/459933f1fe47711fad2f6ff8110014ff189120b45ad159ef5b8e90ea43a174fa.e23e7d1259a8c6274a82a42a8936dd1b87225302c6dc9b7261beb3bc2daac640.py for additional imports.
06/23/2021 12:14:27 - INFO - datasets.builder - Constructing Dataset for split train, validation, test, from /home/stas/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/0d9fb3e814712c785176ad8cdb9f465fbe6479000ee6546725db30ad8a8b5f8a
```
While these are:
```
06/23/2021 12:14:27 - INFO - datasets.info - Loading Dataset Infos from /home/stas/.cache/huggingface/modules/datasets_modules/datasets/wmt16/0d9fb3e814712c785176ad8cdb9f465fbe6479000ee6546725db30ad8a8b5f8a
06/23/2021 12:14:27 - WARNING - datasets.builder - Reusing dataset wmt16 (/home/stas/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/0d9fb3e814712c785176ad8cdb9f465fbe6479000ee6546725db30ad8a8b5f8a)
```
I also realize that the `transformers` examples don't have to use `info` for `datasets`; they can keep the default `warning` level so the logging stays less noisy.
But I think the log levels are currently slightly misused and skewed by one level. Many `warning`s would be better as `info`s, and most `info`s as `debug`s.
e.g.:
```
06/23/2021 12:14:27 - WARNING - datasets.builder - Reusing dataset wmt16 (/home/stas/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/0d9fb3e814712c785176ad8cdb9f465fbe6479000ee6546725db30ad8a8b5f8a)
```
Why is this a warning? It is informing me that the cache is being used; there is nothing to be worried about. I'd have it as `info`.
Warnings are typically something bordering on an error, or the first thing to check when things don't work as expected.
Infrequent info messages are there to inform about the different stages or important events.
Everything else is debug.
At least, that's the way I understand things.
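To illustrate the proposed mapping with plain stdlib logging (illustrative messages only):
```python
import logging

logger = logging.getLogger("datasets.example")  # hypothetical module logger

logger.debug("Lock 139627640431136 acquired on ...lock")  # low-level bookkeeping
logger.info("Reusing dataset wmt16 (/path/to/cache)")     # notable but expected event
logger.warning("Checksum mismatch, re-downloading file")  # worth the user's attention
```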
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2543/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2543/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2542 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2542/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2542/comments | https://api.github.com/repos/huggingface/datasets/issues/2542/events | https://github.com/huggingface/datasets/issues/2542 | 928,540,382 | MDU6SXNzdWU5Mjg1NDAzODI= | 2,542 | `datasets.keyhash.DuplicatedKeysError` for `drop` and `adversarial_qa/adversarialQA` | {
"login": "VictorSanh",
"id": 16107619,
"node_id": "MDQ6VXNlcjE2MTA3NjE5",
"avatar_url": "https://avatars.githubusercontent.com/u/16107619?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/VictorSanh",
"html_url": "https://github.com/VictorSanh",
"followers_url": "https://api.github.com/users/VictorSanh/followers",
"following_url": "https://api.github.com/users/VictorSanh/following{/other_user}",
"gists_url": "https://api.github.com/users/VictorSanh/gists{/gist_id}",
"starred_url": "https://api.github.com/users/VictorSanh/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/VictorSanh/subscriptions",
"organizations_url": "https://api.github.com/users/VictorSanh/orgs",
"repos_url": "https://api.github.com/users/VictorSanh/repos",
"events_url": "https://api.github.com/users/VictorSanh/events{/privacy}",
"received_events_url": "https://api.github.com/users/VictorSanh/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892857,
"node_id": "MDU6TGFiZWwxOTM1ODkyODU3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/bug",
"name": "bug",
"color": "d73a4a",
"default": true,
"description": "Something isn't working"
}
] | closed | false | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"very much related: https://github.com/huggingface/datasets/pull/2333",
"Hi @VictorSanh, thank you for reporting this issue with duplicated keys.\r\n\r\n- The issue with \"adversarial_qa\" was fixed 23 days ago: #2433. Current version of `datasets` (1.8.0) includes the patch.\r\n- I am investigating the issue with `drop`. I'll ping you to keep you informed.",
"Hi @VictorSanh, the issue is already fixed and merged into master branch and will be included in our next release version 1.9.0.",
"thank you!"
] | 1,624,473,676,000 | 1,624,657,805,000 | 1,624,546,628,000 | MEMBER | null | null | null | ## Describe the bug
Failure to generate the datasets (`drop` and the `adversarialQA` subset of `adversarial_qa`) because of duplicate keys.
## Steps to reproduce the bug
```python
from datasets import load_dataset
load_dataset("drop")
load_dataset("adversarial_qa", "adversarialQA")
```
## Expected results
The example keys should be unique.
## Actual results
```bash
>>> load_dataset("drop")
Using custom data configuration default
Downloading and preparing dataset drop/default (download: 7.92 MiB, generated: 111.88 MiB, post-processed: Unknown size, total: 119.80 MiB) to /home/hf/.cache/huggingface/datasets/drop/default/0.1.0/7a94f1e2bb26c4b5c75f89857c06982967d7416e5af935a9374b9bccf5068026...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/load.py", line 751, in load_dataset
use_auth_token=use_auth_token,
File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/builder.py", line 575, in download_and_prepare
dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/builder.py", line 652, in _download_and_prepare
self._prepare_split(split_generator, **prepare_split_kwargs)
File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/builder.py", line 992, in _prepare_split
num_examples, num_bytes = writer.finalize()
File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/arrow_writer.py", line 409, in finalize
self.check_duplicate_keys()
File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/arrow_writer.py", line 349, in check_duplicate_keys
raise DuplicatedKeysError(key)
datasets.keyhash.DuplicatedKeysError: FAILURE TO GENERATE DATASET !
Found duplicate Key: 28553293-d719-441b-8f00-ce3dc6df5398
Keys should be unique and deterministic in nature
```
## Environment info
<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: 1.7.0
- Platform: Linux-5.4.0-1044-gcp-x86_64-with-Ubuntu-18.04-bionic
- Python version: 3.7.10
- PyArrow version: 3.0.0
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2542/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2542/timeline | null | completed | false |
https://api.github.com/repos/huggingface/datasets/issues/2541 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2541/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2541/comments | https://api.github.com/repos/huggingface/datasets/issues/2541/events | https://github.com/huggingface/datasets/pull/2541 | 928,529,078 | MDExOlB1bGxSZXF1ZXN0Njc2NTIwNDgx | 2,541 | update discofuse link cc @ekQ | {
"login": "VictorSanh",
"id": 16107619,
"node_id": "MDQ6VXNlcjE2MTA3NjE5",
"avatar_url": "https://avatars.githubusercontent.com/u/16107619?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/VictorSanh",
"html_url": "https://github.com/VictorSanh",
"followers_url": "https://api.github.com/users/VictorSanh/followers",
"following_url": "https://api.github.com/users/VictorSanh/following{/other_user}",
"gists_url": "https://api.github.com/users/VictorSanh/gists{/gist_id}",
"starred_url": "https://api.github.com/users/VictorSanh/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/VictorSanh/subscriptions",
"organizations_url": "https://api.github.com/users/VictorSanh/orgs",
"repos_url": "https://api.github.com/users/VictorSanh/repos",
"events_url": "https://api.github.com/users/VictorSanh/events{/privacy}",
"received_events_url": "https://api.github.com/users/VictorSanh/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"The CI is failing because the dataset tags for `discofuse` are missing. I'm merging this PR since this is unrelated to this PR, but feel free to open another PR to add the tags here if you have some time:\r\n\r\nhttps://github.com/huggingface/datasets/blob/19408f9fab85c79b966085574cd2da3b90959179/datasets/discofuse/README.md#L1-L5\r\n\r\nThe missing tags are:\r\n```\r\n'annotations_creators', 'language_creators', 'licenses', 'multilinguality', 'pretty_name', 'size_categories', 'source_datasets', 'task_categories', and 'task_ids'\r\n```\r\nThanks again !"
] | 1,624,472,698,000 | 1,624,890,891,000 | 1,624,890,890,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2541",
"html_url": "https://github.com/huggingface/datasets/pull/2541",
"diff_url": "https://github.com/huggingface/datasets/pull/2541.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2541.patch",
"merged_at": 1624890890000
} | Updating the discofuse link: https://github.com/google-research-datasets/discofuse/commit/fd4b120cb3dd19a417e7f3b5432010b574b5eeee | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2541/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2541/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2540 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2540/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2540/comments | https://api.github.com/repos/huggingface/datasets/issues/2540/events | https://github.com/huggingface/datasets/pull/2540 | 928,433,892 | MDExOlB1bGxSZXF1ZXN0Njc2NDM5NTM1 | 2,540 | Remove task templates if required features are removed during `Dataset.map` | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,624,465,225,000 | 1,624,545,675,000 | 1,624,541,643,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2540",
"html_url": "https://github.com/huggingface/datasets/pull/2540",
"diff_url": "https://github.com/huggingface/datasets/pull/2540.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2540.patch",
"merged_at": 1624541643000
} | This PR fixes a bug reported by @craffel where removing a dataset's columns during `Dataset.map` triggered a `KeyError` because the `TextClassification` template tried to access the removed columns during `DatasetInfo.__post_init__`:
```python
from datasets import load_dataset
# `yelp_polarity` comes with a `TextClassification` template
ds = load_dataset("yelp_polarity", split="test")
ds
# Dataset({
# features: ['text', 'label'],
# num_rows: 38000
# })
# Triggers KeyError: 'label' - oh noes!
ds.map(lambda x: {"inputs": 0}, remove_columns=ds.column_names)
```
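A minimal sketch of the idea behind the fix (not the exact patch; it assumes each template exposes the dataset columns it requires via `column_mapping`):
```python
# Hypothetical: keep only the templates whose required columns still exist.
def prune_task_templates(task_templates, column_names):
    columns = set(column_names)
    return [
        template
        for template in (task_templates or [])
        if set(template.column_mapping.keys()) <= columns
    ]
```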
I wrote a unit test to make sure I could reproduce the error and then patched a fix. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2540/reactions",
"total_count": 2,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 1,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2540/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2539 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2539/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2539/comments | https://api.github.com/repos/huggingface/datasets/issues/2539/events | https://github.com/huggingface/datasets/pull/2539 | 927,952,429 | MDExOlB1bGxSZXF1ZXN0Njc2MDI5MDY5 | 2,539 | remove wi_locness dataset due to licensing issues | {
"login": "aseifert",
"id": 4944799,
"node_id": "MDQ6VXNlcjQ5NDQ3OTk=",
"avatar_url": "https://avatars.githubusercontent.com/u/4944799?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/aseifert",
"html_url": "https://github.com/aseifert",
"followers_url": "https://api.github.com/users/aseifert/followers",
"following_url": "https://api.github.com/users/aseifert/following{/other_user}",
"gists_url": "https://api.github.com/users/aseifert/gists{/gist_id}",
"starred_url": "https://api.github.com/users/aseifert/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/aseifert/subscriptions",
"organizations_url": "https://api.github.com/users/aseifert/orgs",
"repos_url": "https://api.github.com/users/aseifert/repos",
"events_url": "https://api.github.com/users/aseifert/events{/privacy}",
"received_events_url": "https://api.github.com/users/aseifert/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"Hi ! I'm sorry to hear that.\r\nThough we are not redistributing the dataset, we just provide a python script that downloads and process the dataset from its original source hosted at https://www.cl.cam.ac.uk\r\n\r\nTherefore I'm not sure what's the issue with licensing. What do you mean exactly ?",
"I think that the main issue is that the licesenses of the data are not made clear in the huggingface hub โย other people wrongly assumed that the data was license-free, which resulted in commercial use, which is against the licenses.\r\nIs it possible to add the licenses from the original download to huggingface? that would help clear any confusion (licenses can be found here: https://www.cl.cam.ac.uk/research/nl/bea2019st/data/wi+locness_v2.1.bea19.tar.gz)",
"Thanks for the clarification @SimonHFL \r\nYou're completely right, we need to show the licenses.\r\nI just added them here: https://huggingface.co/datasets/wi_locness#licensing-information",
"Hi guys, I'm one of the authors of this dataset. \r\n\r\nTo clarify, we're happy for you to keep the data in the repo on 2 conditions:\r\n1. You don't host the data yourself.\r\n2. You make it clear that anyone who downloads the data via HuggingFace should read and abide by the license. \r\n\r\nI think you've now met these conditions, so we're all good, but I just wanted to make it clear in case there are any issues in the future. Thanks again to @aseifert for bringing this to our attention! :)",
"Thanks for your message @chrisjbryant :)\r\nI'm closing this PR then.\r\n\r\nAnd thanks for reporting @aseifert"
] | 1,624,433,732,000 | 1,624,632,762,000 | 1,624,632,762,000 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2539",
"html_url": "https://github.com/huggingface/datasets/pull/2539",
"diff_url": "https://github.com/huggingface/datasets/pull/2539.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2539.patch",
"merged_at": null
 } | It was brought to my attention that this dataset's license is not only missing, but also prohibits redistribution. I contacted the original author to apologize for this oversight and asked if we could still use it, but unfortunately we can't, and the author kindly asked us to take down this dataset. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2539/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2539/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2538 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2538/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2538/comments | https://api.github.com/repos/huggingface/datasets/issues/2538/events | https://github.com/huggingface/datasets/issues/2538 | 927,940,691 | MDU6SXNzdWU5Mjc5NDA2OTE= | 2,538 | Loading partial dataset when debugging | {
"login": "reachtarunhere",
"id": 9061913,
"node_id": "MDQ6VXNlcjkwNjE5MTM=",
"avatar_url": "https://avatars.githubusercontent.com/u/9061913?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/reachtarunhere",
"html_url": "https://github.com/reachtarunhere",
"followers_url": "https://api.github.com/users/reachtarunhere/followers",
"following_url": "https://api.github.com/users/reachtarunhere/following{/other_user}",
"gists_url": "https://api.github.com/users/reachtarunhere/gists{/gist_id}",
"starred_url": "https://api.github.com/users/reachtarunhere/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/reachtarunhere/subscriptions",
"organizations_url": "https://api.github.com/users/reachtarunhere/orgs",
"repos_url": "https://api.github.com/users/reachtarunhere/repos",
"events_url": "https://api.github.com/users/reachtarunhere/events{/privacy}",
"received_events_url": "https://api.github.com/users/reachtarunhere/received_events",
"type": "User",
"site_admin": false
} | [] | open | false | null | [] | null | [
"Hi ! `load_dataset` downloads the full dataset once and caches it, so that subsequent calls to `load_dataset` just reloads the dataset from your disk.\r\nThen when you specify a `split` in `load_dataset`, it will just load the requested split from the disk. If your specified split is a sliced split (e.g. `\"train[:10]\"`), then it will load the 10 first rows of the train split that you have on disk.\r\n\r\nTherefore, as long as you don't delete your cache, all your calls to `load_dataset` will be very fast. Except the first call that downloads the dataset of course ^^",
"Thatโs a use case for the new streaming feature, no?",
"Hi @reachtarunhere.\r\n\r\nBesides the above insights provided by @lhoestq and @thomwolf, there is also a Dataset feature in progress (I plan to finish it this week): #2249, which will allow you, when calling `load_dataset`, to pass the option to download/preprocess/cache only some specific split(s), which will definitely speed up your workflow.\r\n\r\nIf this feature is interesting for you, I can ping you once it will be merged into the master branch.",
"Thanks all for responding.\r\n\r\nHey @albertvillanova \r\n\r\nThanks. Yes, I would be interested.\r\n\r\n@lhoestq I think even if a small split is specified it loads up the full dataset from the disk (please correct me if this is not the case). Because it does seem to be slow to me even on subsequent calls. There is no repeated downloading so it seems that the cache is working.\r\n\r\nI am not aware of the streaming feature @thomwolf mentioned. So I might need to read up on it.",
"@reshinthadithyan I use the .select function to have a fraction of indices."
] | 1,624,432,792,000 | 1,627,567,833,000 | null | NONE | null | null | null | I am using PyTorch Lightning along with datasets (thanks for so many datasets already prepared and the great splits).
Every time I execute `load_dataset` for the IMDB dataset, it takes some time even if I specify a split involving very few samples. I guess this is due to hashing, as per the other issues.
Is there a way to load only part of the dataset with `load_dataset`? This would really speed up my workflow.
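For reference, two existing ways to work with a small slice (note that the full dataset is still downloaded and prepared once, which is the slow part being discussed here):
```python
from datasets import load_dataset

tiny = load_dataset("imdb", split="train[:100]")  # sliced split

# or, after loading the full split:
full = load_dataset("imdb", split="train")
tiny = full.select(range(100))
```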
Something like a debug mode would really help. Thanks! | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2538/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2538/timeline | null | null | false |
https://api.github.com/repos/huggingface/datasets/issues/2537 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2537/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2537/comments | https://api.github.com/repos/huggingface/datasets/issues/2537/events | https://github.com/huggingface/datasets/pull/2537 | 927,472,659 | MDExOlB1bGxSZXF1ZXN0Njc1NjI1OTY3 | 2,537 | Add Parquet loader + from_parquet and to_parquet | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"`pyarrow` 1.0.0 doesn't support some types in parquet, we'll have to bump its minimum version.\r\n\r\nAlso I still need to add dummy data to test the parquet builder.",
"I had to bump the minimum pyarrow version to 3.0.0 to properly support parquet.\r\n\r\nEverything is ready for review now :)\r\nI reused pretty much the same tests we had for CSV",
"Done !\r\nNow we're still allowing pyarrow>=1.0.0, but when users want to use parquet features they're asked to update to pyarrow>=3.0.0"
] | 1,624,382,903,000 | 1,625,070,663,000 | 1,625,070,658,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2537",
"html_url": "https://github.com/huggingface/datasets/pull/2537",
"diff_url": "https://github.com/huggingface/datasets/pull/2537.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2537.patch",
"merged_at": 1625070658000
} | Continuation of #2247
I added a "parquet" dataset builder, as well as the methods `Dataset.from_parquet` and `Dataset.to_parquet`.
As usual, the data are converted to Arrow in a batched way to avoid loading everything into memory. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2537/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2537/timeline | null | null | true |
https://api.github.com/repos/huggingface/datasets/issues/2536 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2536/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2536/comments | https://api.github.com/repos/huggingface/datasets/issues/2536/events | https://github.com/huggingface/datasets/issues/2536 | 927,338,639 | MDU6SXNzdWU5MjczMzg2Mzk= | 2,536 | Use `Audio` features for `AutomaticSpeechRecognition` task template | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
}
] | closed | false | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
}
] | null | [
"I'm just retaking and working on #2324. ๐ ",
"Resolved via https://github.com/huggingface/datasets/pull/4006."
] | 1,624,374,441,000 | 1,654,103,896,000 | 1,654,103,896,000 | MEMBER | null | null | null | In #2533 we added a task template for speech recognition that relies on the file paths to the audio files. As pointed out by @SBrandeis, this is brittle because it doesn't port easily across different operating systems.
The solution is to use dedicated `Audio` features when casting the dataset. These features are not yet available in `datasets`, but should be included in the `AutomaticSpeechRecognition` template once they are. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2536/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2536/timeline | null | completed | false |
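To make the proposal above concrete, here is a sketch of what casting a path column to a dedicated `Audio` feature looks like with the feature that was eventually added; the clip paths are hypothetical and would need to exist on disk before a row is actually decoded.

```python
from datasets import Audio, Dataset

# Toy dataset whose "audio" column holds raw file paths (hypothetical files).
ds = Dataset.from_dict({
    "audio": ["clips/clip1.wav", "clips/clip2.wav"],
    "transcription": ["hello world", "goodbye"],
})

# Casting to the Audio feature makes decoding lazy and OS-independent,
# instead of baking brittle path strings into the task template.
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))

# Accessing a row would decode the file into an array plus sampling rate:
# ds[0]["audio"]  # -> {"path": ..., "array": ..., "sampling_rate": 16000}
```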
https://api.github.com/repos/huggingface/datasets/issues/2535 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2535/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2535/comments | https://api.github.com/repos/huggingface/datasets/issues/2535/events | https://github.com/huggingface/datasets/pull/2535 | 927,334,349 | MDExOlB1bGxSZXF1ZXN0Njc1NTA3MTAw | 2,535 | Improve Features docs | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [] | 1,624,374,207,000 | 1,624,455,643,000 | 1,624,455,643,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2535",
"html_url": "https://github.com/huggingface/datasets/pull/2535",
"diff_url": "https://github.com/huggingface/datasets/pull/2535.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2535.patch",
"merged_at": 1624455643000
} | - Fix rendering and cross-references in Features docs
- Add docstrings to Features methods | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2535/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2535/timeline | null | null | true |
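For context on what the improved `Features` docs cover, a minimal declaration of the kind documented there is sketched below; the column and label names are illustrative only.

```python
from datasets import ClassLabel, Features, Value

# Column names map to feature types; ClassLabel carries the label vocabulary.
features = Features({
    "text": Value("string"),
    "label": ClassLabel(names=["negative", "positive"]),
})

print(features["label"].names)            # ['negative', 'positive']
print(features["label"].str2int("positive"))  # 1
```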
https://api.github.com/repos/huggingface/datasets/issues/2534 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/2534/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/2534/comments | https://api.github.com/repos/huggingface/datasets/issues/2534/events | https://github.com/huggingface/datasets/pull/2534 | 927,201,435 | MDExOlB1bGxSZXF1ZXN0Njc1MzkzODg0 | 2,534 | Sync with transformers disabling NOTSET | {
"login": "albertvillanova",
"id": 8515462,
"node_id": "MDQ6VXNlcjg1MTU0NjI=",
"avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/albertvillanova",
"html_url": "https://github.com/albertvillanova",
"followers_url": "https://api.github.com/users/albertvillanova/followers",
"following_url": "https://api.github.com/users/albertvillanova/following{/other_user}",
"gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}",
"starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions",
"organizations_url": "https://api.github.com/users/albertvillanova/orgs",
"repos_url": "https://api.github.com/users/albertvillanova/repos",
"events_url": "https://api.github.com/users/albertvillanova/events{/privacy}",
"received_events_url": "https://api.github.com/users/albertvillanova/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | [
"Nice thanks ! I think there are other places with\r\n```python\r\nnot_verbose = bool(logger.getEffectiveLevel() > WARNING)\r\n```\r\n\r\nCould you replace them as well ?",
"Sure @lhoestq! I was not sure if this change should only be circumscribed to `http_get`..."
] | 1,624,366,461,000 | 1,624,545,767,000 | 1,624,545,767,000 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/2534",
"html_url": "https://github.com/huggingface/datasets/pull/2534",
"diff_url": "https://github.com/huggingface/datasets/pull/2534.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/2534.patch",
"merged_at": 1624545767000
} | Close #2528. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/2534/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/2534/timeline | null | null | true |
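To illustrate the discussion in the record above, here is a sketch contrasting the quoted pattern with a NOTSET-safe check. The exact replacement made in the PR may differ; `get_verbosity` is assumed to be importable from `datasets.utils.logging`.

```python
import logging

from datasets.utils.logging import get_verbosity

# Old pattern quoted in the review comments: when the library logger's level
# is NOTSET (0), getEffectiveLevel() resolves through parent loggers, so the
# check silently depends on the host application's root logging setup.
logger = logging.getLogger("datasets")
not_verbose = bool(logger.getEffectiveLevel() > logging.WARNING)

# Sketch of the synced approach: get_verbosity() returns an explicit level,
# never NOTSET, so the comparison stays stable across environments.
not_verbose = get_verbosity() > logging.WARNING
```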