| column | dtype | min | max |
|---|---|---|---|
| datasetId | string (length) | 5 | 121 |
| author | string (length) | 2 | 42 |
| last_modified | unknown | n/a | n/a |
| downloads | int64 | 0 | 2.59M |
| likes | int64 | 0 | 6.32k |
| tags | sequence (length) | 1 | 7.92k |
| task_categories | sequence (length) | 0 | 40 |
| createdAt | unknown | n/a | n/a |
| card | string (length) | 19 | 1.01M |
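Each record below lists these fields in the order given above (datasetId, author, last_modified, downloads, likes, tags, task_categories, createdAt, card), with the `card` field holding the raw README YAML front matter and body. A minimal sketch of how a dump with this schema could be loaded and queried with the `datasets` library; the repository id here is a placeholder for illustration, not the actual source of these rows:

```python
from datasets import load_dataset  # pip install datasets

# Placeholder repo id: substitute the dataset that actually hosts this metadata dump.
ds = load_dataset("your-org/dataset-cards-dump", split="train")

# Columns mirror the schema above: datasetId, author, last_modified,
# downloads, likes, tags, task_categories, createdAt, card.
df = ds.to_pandas()

# Example queries: the most-downloaded entries, and records tagged as Parquet-formatted.
top = df.sort_values("downloads", ascending=False).head(10)[["datasetId", "downloads", "likes"]]
parquet_only = df[df["tags"].apply(lambda t: "format:parquet" in list(t))]

print(top)
print(f"{len(parquet_only)} of {len(df)} records carry the format:parquet tag")
```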
DopeorNope/only_gsm8k_v2
DopeorNope
"2024-11-20T10:29:49Z"
2
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T10:29:46Z"
--- dataset_info: features: - name: instruction dtype: string - name: output dtype: string - name: input dtype: string splits: - name: train num_bytes: 3993094 num_examples: 7473 - name: validation num_bytes: 3993094 num_examples: 7473 download_size: 4616658 dataset_size: 7986188 configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* ---
paolordls/crosslg-contaminated-benchmark-en-sm-3-r1024
paolordls
"2024-11-20T12:21:32Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T12:21:31Z"
--- dataset_info: features: - name: fake_news dtype: string - name: scenario_id dtype: int64 - name: real_news dtype: string - name: fake_keyword dtype: string - name: real_question dtype: string - name: fake_question dtype: string - name: real_answer dtype: string - name: fake_answer dtype: string splits: - name: train num_bytes: 132600 num_examples: 20 download_size: 122257 dataset_size: 132600 configs: - config_name: default data_files: - split: train path: data/train-* ---
0xayman/Qwen2.5-0.5B-fc-v1-json-results
0xayman
"2024-11-20T15:13:41Z"
2
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T15:13:37Z"
--- dataset_info: features: - name: query dtype: string - name: id dtype: int64 - name: answers dtype: string - name: tools dtype: string - name: prompt dtype: string - name: input dtype: string - name: prediction dtype: string splits: - name: train num_bytes: 338643853 num_examples: 60000 download_size: 101264911 dataset_size: 338643853 configs: - config_name: default data_files: - split: train path: data/train-* ---
mrlyle/img-nov-20
mrlyle
"2024-11-20T18:14:29Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:image", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T18:14:28Z"
--- dataset_info: features: - name: image dtype: image - name: label dtype: class_label: names: '0': test '1': train splits: - name: train num_bytes: 20637.0 num_examples: 4 - name: test num_bytes: 22886.0 num_examples: 3 download_size: 51062 dataset_size: 43523.0 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* ---
clearclarencs/finetuning_demo
clearclarencs
"2024-11-20T20:42:07Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T20:42:04Z"
--- dataset_info: features: - name: prompt dtype: string splits: - name: train num_bytes: 491836 num_examples: 520 download_size: 148992 dataset_size: 491836 configs: - config_name: default data_files: - split: train path: data/train-* ---
Mitake/cluster1_cat1
Mitake
"2024-11-20T21:40:24Z"
2
0
[ "license:mit", "size_categories:10K<n<100K", "format:csv", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T21:34:22Z"
--- license: mit ---
Mitake/cluster1_cat3
Mitake
"2024-11-20T21:41:08Z"
2
0
[ "license:mit", "size_categories:10K<n<100K", "format:csv", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T21:34:52Z"
--- license: mit ---
allenai/tulu-3-sft-reused-on-policy-8b
allenai
"2024-11-21T16:53:06Z"
2
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T22:32:01Z"
--- dataset_info: features: - name: id dtype: string - name: prompt dtype: string - name: chosen list: - name: content dtype: string - name: role dtype: string - name: rejected list: - name: content dtype: string - name: role dtype: string splits: - name: train num_bytes: 108701467 num_examples: 19444 download_size: 59809485 dataset_size: 108701467 configs: - config_name: default data_files: - split: train path: data/train-* --- <img src="https://huggingface.co/datasets/allenai/blog-images/resolve/main/tulu-3/Tulu3-logo.png" alt="Tulu3 banner" width="400" style="margin-left:'auto' margin-right:'auto' display:'block'"/> # Llama 3.1 Tulu 3 SFT reused (on-policy 8b) *Note that this collection is licensed under ODC-BY-1.0 license; different licenses apply to subsets of the data. Some portions of the dataset are non-commercial. We present the mixture as a research artifact.* This preference dataset is part of our Tulu 3 preference mixture: it contains prompts from our [SFT mixture](https://huggingface.co/datasets/allenai/tulu-3-sft-mixture) and it contains 19,444 generation pairs (some of which on-policy from: https://huggingface.co/allenai/Llama-3.1-Tulu-3-8B) obtained using the following models: - [Mistral 7B Instruct v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) (Apache 2.0) - [Mistral Nemo Instruct 2407](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407) (Apache 2.0) - [Tulu 2 7B](https://huggingface.co/allenai/tulu-2-7b) (Ai2 ImpACT Low Risk License) - [Tulu 2 13B](https://huggingface.co/allenai/tulu-2-13b) (Ai2 ImpACT Low Risk License) - [Yi-34B-Chat](https://huggingface.co/01-ai/Yi-34B-Chat) (Apache 2.0) - [Yi-6B-Chat](https://huggingface.co/01-ai/Yi-6B-Chat) (Apache 2.0) - [MPT 30B Chat](https://huggingface.co/mosaicml/mpt-30b-chat) (CC-BY-SA-4.0) - [MPT 7B 8k Chat](https://huggingface.co/mosaicml/mpt-7b-8k-chat) (CC-BY-SA-4.0) - [Google Gemma 2 27B it](https://huggingface.co/google/gemma-2-27b-it) (Gemma is provided under and subject to the Gemma Terms of Use found at [ai.google.dev/gemma/terms](https://ai.google.dev/gemma/terms)) - [Google Gemma 2 9B it](https://huggingface.co/google/gemma-2-9b-it) (Gemma is provided under and subject to the Gemma Terms of Use found at [ai.google.dev/gemma/terms](https://ai.google.dev/gemma/terms)) - [InternLM2.5 20B](https://huggingface.co/internlm/internlm2_5-20b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [InternLM2.5 7B](https://huggingface.co/internlm/internlm2_5-7b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [InternLM2.5 1.8B](https://huggingface.co/internlm/internlm2_5-1_8b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [Falcon 7B](https://huggingface.co/tiiuae/falcon-7b-instruct) (Apache 2.0) - [Qwen2.5 72B Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct) (Qwen is licensed under the Qwen LICENSE AGREEMENT, Copyright (c) Alibaba Cloud. All Rights Reserved.) 
- [Qwen2.5 32B Instruct](https://huggingface.co/Qwen/Qwen2.5-32B-Instruct) (Apache 2.0) - [Qwen2.5 14B Instruct](https://huggingface.co/Qwen/Qwen2.5-14B-Instruct) (Apache 2.0) - [Qwen2.5 7B Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) (Apache 2.0) - [Llama 3.1 8B Instruct ](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) (this dataset was partially "Built with Llama" and is thus subject to the Llama 3.1 License) - [Llama 3.1 70B Instruct](https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct) (this dataset was partially "Built with Llama" and is thus subject to the Llama 3.1 License) - [Llama 3 8B Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B) (this dataset was partially "Built with Meta Llama 3" and is thus subject to the Llama 3 License) - [GPT-4 Turbo](https://openai.com/index/new-models-and-developer-products-announced-at-devday/) and [GPT-4o](https://openai.com/index/hello-gpt-4o/) (Outputs produced by GPT-4 are subject to OpenAI's [terms of use](https://openai.com/policies/row-terms-of-use)) - [Claude 3.5 Sonnet](https://www.anthropic.com/news/claude-3-5-sonnet) (Outputs produced by Claude are subject to Anthropic [terms of service](https://www.anthropic.com/legal/commercial-terms) and [usage policy](https://www.anthropic.com/legal/aup)) ## License This dataset is licensed under ODC-BY. It is intended for research and educational use in accordance with Ai2's [Responsible Use Guidelines](https://allenai.org/responsible-use). This dataset includes output data generated from third party models that are subject to separate terms governing their use.
allenai/tulu-3-wildchat-if-on-policy-8b
allenai
"2024-11-21T16:52:21Z"
2
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T22:32:06Z"
--- dataset_info: features: - name: id dtype: string - name: prompt dtype: string - name: chosen list: - name: content dtype: string - name: role dtype: string - name: rejected list: - name: content dtype: string - name: role dtype: string splits: - name: train num_bytes: 90414403 num_examples: 10792 download_size: 46225965 dataset_size: 90414403 configs: - config_name: default data_files: - split: train path: data/train-* --- <img src="https://huggingface.co/datasets/allenai/blog-images/resolve/main/tulu-3/Tulu3-logo.png" alt="Tulu3 banner" width="400" style="margin-left:'auto' margin-right:'auto' display:'block'"/> # Llama 3.1 Tulu 3 Wildchat IF (on-policy 8b) *Note that this collection is licensed under ODC-BY-1.0 license; different licenses apply to subsets of the data. Some portions of the dataset are non-commercial. We present the mixture as a research artifact.* This preference dataset is part of our Tulu 3 preference mixture: it contains prompts from [WildChat](allenai/WildChat-1M), which include constraints, and it contains 10,792 generation pairs (some of which on-policy from allenai/Llama-3.1-Tulu-3-8B) obtained using the following models: - [Mistral 7B Instruct v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) (Apache 2.0) - [Mistral Nemo Instruct 2407](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407) (Apache 2.0) - [Tulu 2 7B](https://huggingface.co/allenai/tulu-2-7b) (Ai2 ImpACT Low Risk License) - [Tulu 2 13B](https://huggingface.co/allenai/tulu-2-13b) (Ai2 ImpACT Low Risk License) - [Yi-34B-Chat](https://huggingface.co/01-ai/Yi-34B-Chat) (Apache 2.0) - [Yi-6B-Chat](https://huggingface.co/01-ai/Yi-6B-Chat) (Apache 2.0) - [MPT 30B Chat](https://huggingface.co/mosaicml/mpt-30b-chat) (CC-BY-SA-4.0) - [MPT 7B 8k Chat](https://huggingface.co/mosaicml/mpt-7b-8k-chat) (CC-BY-SA-4.0) - [Google Gemma 2 27B it](https://huggingface.co/google/gemma-2-27b-it) (Gemma is provided under and subject to the Gemma Terms of Use found at [ai.google.dev/gemma/terms](https://ai.google.dev/gemma/terms)) - [Google Gemma 2 9B it](https://huggingface.co/google/gemma-2-9b-it) (Gemma is provided under and subject to the Gemma Terms of Use found at [ai.google.dev/gemma/terms](https://ai.google.dev/gemma/terms)) - [InternLM2.5 20B](https://huggingface.co/internlm/internlm2_5-20b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [InternLM2.5 7B](https://huggingface.co/internlm/internlm2_5-7b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [InternLM2.5 1.8B](https://huggingface.co/internlm/internlm2_5-1_8b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [Falcon 7B](https://huggingface.co/tiiuae/falcon-7b-instruct) (Apache 2.0) - [Qwen2.5 72B Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct) (Qwen is licensed under the Qwen LICENSE AGREEMENT, Copyright (c) Alibaba Cloud. All Rights Reserved.) 
- [Qwen2.5 32B Instruct](https://huggingface.co/Qwen/Qwen2.5-32B-Instruct) (Apache 2.0) - [Qwen2.5 14B Instruct](https://huggingface.co/Qwen/Qwen2.5-14B-Instruct) (Apache 2.0) - [Qwen2.5 7B Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) (Apache 2.0) - [Llama 3.1 8B Instruct ](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) (this dataset was partially "Built with Llama" and is thus subject to the Llama 3.1 License) - [Llama 3.1 70B Instruct](https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct) (this dataset was partially "Built with Llama" and is thus subject to the Llama 3.1 License) - [Llama 3 8B Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B) (this dataset was partially "Built with Meta Llama 3" and is thus subject to the Llama 3 License) - [GPT-4 Turbo](https://openai.com/index/new-models-and-developer-products-announced-at-devday/) and [GPT-4o](https://openai.com/index/hello-gpt-4o/) (Outputs produced by GPT-4 are subject to OpenAI's [terms of use](https://openai.com/policies/row-terms-of-use)) - [Claude 3.5 Sonnet](https://www.anthropic.com/news/claude-3-5-sonnet) (Outputs produced by Claude are subject to Anthropic [terms of service](https://www.anthropic.com/legal/commercial-terms) and [usage policy](https://www.anthropic.com/legal/aup)) ## Completion Generation Approach: Given a set of prompts, we generated the completions and preferences using a synthetic pipeline that combines both on-policy and off-policy data, and obtained the preference annotations on four different aspects using the Ultrafeedback template and an LLM judge. The code for the synthetic generation pipeline is found in the scripts/synth_pref directory of [open-instruct](https://github.com/allenai/open-instruct/) ## License This dataset is licensed under ODC-BY. It is intended for research and educational use in accordance with Ai2's [Responsible Use Guidelines](https://allenai.org/responsible-use). This dataset includes output data generated from third party models that are subject to separate terms governing their use.
allenai/tulu-3-sft-reused-on-policy-70b
allenai
"2024-11-21T16:47:54Z"
2
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T22:47:51Z"
--- dataset_info: features: - name: id dtype: string - name: prompt dtype: string - name: chosen list: - name: content dtype: string - name: role dtype: string - name: rejected list: - name: content dtype: string - name: role dtype: string splits: - name: train num_bytes: 108105727 num_examples: 19453 download_size: 59589549 dataset_size: 108105727 configs: - config_name: default data_files: - split: train path: data/train-* --- <img src="https://huggingface.co/datasets/allenai/blog-images/resolve/main/tulu-3/Tulu3-logo.png" alt="Tulu3 banner" width="400" style="margin-left:'auto' margin-right:'auto' display:'block'"/> # Llama 3.1 Tulu 3 SFT reused (on-policy 70b) *Note that this collection is licensed under ODC-BY-1.0 license; different licenses apply to subsets of the data. Some portions of the dataset are non-commercial. We present the mixture as a research artifact.* This preference dataset is part of our Tulu 3 preference mixture: it contains prompts from [Tulu-3-SFT](https://huggingface.co/datasets/allenai/tulu-3-sft-mixture), which include constraints, and it contains 19,444 generation pairs (some of which on-policy from allenai/Llama-3.1-Tulu-3-70B) obtained using the following models: - [Mistral 7B Instruct v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) (Apache 2.0) - [Mistral Nemo Instruct 2407](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407) (Apache 2.0) - [Tulu 2 7B](https://huggingface.co/allenai/tulu-2-7b) (Ai2 ImpACT Low Risk License) - [Tulu 2 13B](https://huggingface.co/allenai/tulu-2-13b) (Ai2 ImpACT Low Risk License) - [Yi-34B-Chat](https://huggingface.co/01-ai/Yi-34B-Chat) (Apache 2.0) - [Yi-6B-Chat](https://huggingface.co/01-ai/Yi-6B-Chat) (Apache 2.0) - [MPT 30B Chat](https://huggingface.co/mosaicml/mpt-30b-chat) (CC-BY-SA-4.0) - [MPT 7B 8k Chat](https://huggingface.co/mosaicml/mpt-7b-8k-chat) (CC-BY-SA-4.0) - [Google Gemma 2 27B it](https://huggingface.co/google/gemma-2-27b-it) (Gemma is provided under and subject to the Gemma Terms of Use found at [ai.google.dev/gemma/terms](https://ai.google.dev/gemma/terms)) - [Google Gemma 2 9B it](https://huggingface.co/google/gemma-2-9b-it) (Gemma is provided under and subject to the Gemma Terms of Use found at [ai.google.dev/gemma/terms](https://ai.google.dev/gemma/terms)) - [InternLM2.5 20B](https://huggingface.co/internlm/internlm2_5-20b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [InternLM2.5 7B](https://huggingface.co/internlm/internlm2_5-7b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [InternLM2.5 1.8B](https://huggingface.co/internlm/internlm2_5-1_8b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [Falcon 7B](https://huggingface.co/tiiuae/falcon-7b-instruct) (Apache 2.0) - [Qwen2.5 72B Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct) (Qwen is licensed under the Qwen LICENSE AGREEMENT, Copyright (c) Alibaba Cloud. All Rights Reserved.) 
- [Qwen2.5 32B Instruct](https://huggingface.co/Qwen/Qwen2.5-32B-Instruct) (Apache 2.0) - [Qwen2.5 14B Instruct](https://huggingface.co/Qwen/Qwen2.5-14B-Instruct) (Apache 2.0) - [Qwen2.5 7B Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) (Apache 2.0) - [Llama 3.1 8B Instruct ](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) (this dataset was partially "Built with Llama" and is thus subject to the Llama 3.1 License) - [Llama 3.1 70B Instruct](https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct) (this dataset was partially "Built with Llama" and is thus subject to the Llama 3.1 License) - [Llama 3 8B Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B) (this dataset was partially "Built with Meta Llama 3" and is thus subject to the Llama 3 License) - [GPT-4 Turbo](https://openai.com/index/new-models-and-developer-products-announced-at-devday/) and [GPT-4o](https://openai.com/index/hello-gpt-4o/) (Outputs produced by GPT-4 are subject to OpenAI's [terms of use](https://openai.com/policies/row-terms-of-use)) - [Claude 3.5 Sonnet](https://www.anthropic.com/news/claude-3-5-sonnet) (Outputs produced by Claude are subject to Anthropic [terms of service](https://www.anthropic.com/legal/commercial-terms) and [usage policy](https://www.anthropic.com/legal/aup)) ## Completion Generation Approach: Given a set of prompts, we generated the completions and preferences using a synthetic pipeline that combines both on-policy and off-policy data, and obtained the preference annotations on four different aspects using the Ultrafeedback template and an LLM judge. The code for the synthetic generation pipeline is found in the scripts/synth_pref directory of [open-instruct](https://github.com/allenai/open-instruct/) ## License This dataset is licensed under ODC-BY. It is intended for research and educational use in accordance with Ai2's [Responsible Use Guidelines](https://allenai.org/responsible-use). This dataset includes output data generated from third party models that are subject to separate terms governing their use.
allenai/tulu-3-wildchat-if-on-policy-70b
allenai
"2024-11-21T16:47:07Z"
2
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T22:47:58Z"
--- dataset_info: features: - name: id dtype: string - name: prompt dtype: string - name: chosen list: - name: content dtype: string - name: role dtype: string - name: rejected list: - name: content dtype: string - name: role dtype: string splits: - name: train num_bytes: 90351793 num_examples: 10793 download_size: 46153976 dataset_size: 90351793 configs: - config_name: default data_files: - split: train path: data/train-* --- <img src="https://huggingface.co/datasets/allenai/blog-images/resolve/main/tulu-3/Tulu3-logo.png" alt="Tulu3 banner" width="400" style="margin-left:'auto' margin-right:'auto' display:'block'"/> # Llama 3.1 Tulu 3 Wildchat IF (on-policy 70b) *Note that this collection is licensed under ODC-BY-1.0 license; different licenses apply to subsets of the data. Some portions of the dataset are non-commercial. We present the mixture as a research artifact.* This preference dataset is part of our Tulu 3 preference mixture: it contains prompts from [WildChat](allenai/WildChat-1M), which include constraints, and it contains 10,792 generation pairs (some of which on-policy from allenai/Llama-3.1-Tulu-3-70B) obtained using the following models: - [Mistral 7B Instruct v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) (Apache 2.0) - [Mistral Nemo Instruct 2407](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407) (Apache 2.0) - [Tulu 2 7B](https://huggingface.co/allenai/tulu-2-7b) (Ai2 ImpACT Low Risk License) - [Tulu 2 13B](https://huggingface.co/allenai/tulu-2-13b) (Ai2 ImpACT Low Risk License) - [Yi-34B-Chat](https://huggingface.co/01-ai/Yi-34B-Chat) (Apache 2.0) - [Yi-6B-Chat](https://huggingface.co/01-ai/Yi-6B-Chat) (Apache 2.0) - [MPT 30B Chat](https://huggingface.co/mosaicml/mpt-30b-chat) (CC-BY-SA-4.0) - [MPT 7B 8k Chat](https://huggingface.co/mosaicml/mpt-7b-8k-chat) (CC-BY-SA-4.0) - [Google Gemma 2 27B it](https://huggingface.co/google/gemma-2-27b-it) (Gemma is provided under and subject to the Gemma Terms of Use found at [ai.google.dev/gemma/terms](https://ai.google.dev/gemma/terms)) - [Google Gemma 2 9B it](https://huggingface.co/google/gemma-2-9b-it) (Gemma is provided under and subject to the Gemma Terms of Use found at [ai.google.dev/gemma/terms](https://ai.google.dev/gemma/terms)) - [InternLM2.5 20B](https://huggingface.co/internlm/internlm2_5-20b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [InternLM2.5 7B](https://huggingface.co/internlm/internlm2_5-7b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [InternLM2.5 1.8B](https://huggingface.co/internlm/internlm2_5-1_8b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [Falcon 7B](https://huggingface.co/tiiuae/falcon-7b-instruct) (Apache 2.0) - [Qwen2.5 72B Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct) (Qwen is licensed under the Qwen LICENSE AGREEMENT, Copyright (c) Alibaba Cloud. All Rights Reserved.) 
- [Qwen2.5 32B Instruct](https://huggingface.co/Qwen/Qwen2.5-32B-Instruct) (Apache 2.0) - [Qwen2.5 14B Instruct](https://huggingface.co/Qwen/Qwen2.5-14B-Instruct) (Apache 2.0) - [Qwen2.5 7B Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) (Apache 2.0) - [Llama 3.1 8B Instruct ](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) (this dataset was partially "Built with Llama" and is thus subject to the Llama 3.1 License) - [Llama 3.1 70B Instruct](https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct) (this dataset was partially "Built with Llama" and is thus subject to the Llama 3.1 License) - [Llama 3 8B Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B) (this dataset was partially "Built with Meta Llama 3" and is thus subject to the Llama 3 License) - [GPT-4 Turbo](https://openai.com/index/new-models-and-developer-products-announced-at-devday/) and [GPT-4o](https://openai.com/index/hello-gpt-4o/) (Outputs produced by GPT-4 are subject to OpenAI's [terms of use](https://openai.com/policies/row-terms-of-use)) - [Claude 3.5 Sonnet](https://www.anthropic.com/news/claude-3-5-sonnet) (Outputs produced by Claude are subject to Anthropic [terms of service](https://www.anthropic.com/legal/commercial-terms) and [usage policy](https://www.anthropic.com/legal/aup)) ## Completion Generation Approach: Given a set of prompts, we generated the completions and preferences using a synthetic pipeline that combines both on-policy and off-policy data, and obtained the preference annotations on four different aspects using the Ultrafeedback template and an LLM judge. The code for the synthetic generation pipeline is found in the scripts/synth_pref directory of [open-instruct](https://github.com/allenai/open-instruct/) ## License This dataset is licensed under ODC-BY. It is intended for research and educational use in accordance with Ai2's [Responsible Use Guidelines](https://allenai.org/responsible-use). This dataset includes output data generated from third party models that are subject to separate terms governing their use.
allenai/tulu-3-wildchat-unused
allenai
"2024-11-21T16:43:34Z"
2
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T22:48:51Z"
--- dataset_info: features: - name: id dtype: string - name: prompt dtype: string - name: chosen list: - name: content dtype: string - name: role dtype: string - name: rejected list: - name: content dtype: string - name: role dtype: string splits: - name: train num_bytes: 682190855 num_examples: 82783 download_size: 386432288 dataset_size: 682190855 configs: - config_name: default data_files: - split: train path: data/train-* --- <img src="https://huggingface.co/datasets/allenai/blog-images/resolve/main/tulu-3/Tulu3-logo.png" alt="Tulu3 banner" width="400" style="margin-left:'auto' margin-right:'auto' display:'block'"/> # Llama 3.1 Tulu 3 Wildchat unused *Note that this collection is licensed under ODC-BY-1.0 license; different licenses apply to subsets of the data. Some portions of the dataset are non-commercial. We present the mixture as a research artifact.* This preference dataset is part of our Tulu 3 preference mixture: it contains prompts from [WildChat](allenai/WildChat-1M) and it contains 82,783 generation pairs obtained using the following models: - [Mistral 7B Instruct v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) (Apache 2.0) - [Mistral Nemo Instruct 2407](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407) (Apache 2.0) - [Tulu 2 7B](https://huggingface.co/allenai/tulu-2-7b) (Ai2 ImpACT Low Risk License) - [Tulu 2 13B](https://huggingface.co/allenai/tulu-2-13b) (Ai2 ImpACT Low Risk License) - [Yi-34B-Chat](https://huggingface.co/01-ai/Yi-34B-Chat) (Apache 2.0) - [Yi-6B-Chat](https://huggingface.co/01-ai/Yi-6B-Chat) (Apache 2.0) - [MPT 30B Chat](https://huggingface.co/mosaicml/mpt-30b-chat) (CC-BY-SA-4.0) - [MPT 7B 8k Chat](https://huggingface.co/mosaicml/mpt-7b-8k-chat) (CC-BY-SA-4.0) - [Google Gemma 2 27B it](https://huggingface.co/google/gemma-2-27b-it) (Gemma is provided under and subject to the Gemma Terms of Use found at [ai.google.dev/gemma/terms](https://ai.google.dev/gemma/terms)) - [Google Gemma 2 9B it](https://huggingface.co/google/gemma-2-9b-it) (Gemma is provided under and subject to the Gemma Terms of Use found at [ai.google.dev/gemma/terms](https://ai.google.dev/gemma/terms)) - [InternLM2.5 20B](https://huggingface.co/internlm/internlm2_5-20b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [InternLM2.5 7B](https://huggingface.co/internlm/internlm2_5-7b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [InternLM2.5 1.8B](https://huggingface.co/internlm/internlm2_5-1_8b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [Falcon 7B](https://huggingface.co/tiiuae/falcon-7b-instruct) (Apache 2.0) - [Qwen2.5 72B Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct) (Qwen is licensed under the Qwen LICENSE AGREEMENT, Copyright (c) Alibaba Cloud. All Rights Reserved.) 
- [Qwen2.5 32B Instruct](https://huggingface.co/Qwen/Qwen2.5-32B-Instruct) (Apache 2.0) - [Qwen2.5 14B Instruct](https://huggingface.co/Qwen/Qwen2.5-14B-Instruct) (Apache 2.0) - [Qwen2.5 7B Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) (Apache 2.0) - [Llama 3.1 8B Instruct ](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) (this dataset was partially "Built with Llama" and is thus subject to the Llama 3.1 License) - [Llama 3.1 70B Instruct](https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct) (this dataset was partially "Built with Llama" and is thus subject to the Llama 3.1 License) - [Llama 3 8B Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B) (this dataset was partially "Built with Meta Llama 3" and is thus subject to the Llama 3 License) - [GPT-4 Turbo](https://openai.com/index/new-models-and-developer-products-announced-at-devday/) and [GPT-4o](https://openai.com/index/hello-gpt-4o/) (Outputs produced by GPT-4 are subject to OpenAI's [terms of use](https://openai.com/policies/row-terms-of-use)) - [Claude 3.5 Sonnet](https://www.anthropic.com/news/claude-3-5-sonnet) (Outputs produced by Claude are subject to Anthropic [terms of service](https://www.anthropic.com/legal/commercial-terms) and [usage policy](https://www.anthropic.com/legal/aup)) ## Completion Generation Approach: Given a set of prompts, we generated the completions and preferences using a synthetic pipeline that combines both on-policy and off-policy data, and obtained the preference annotations on four different aspects using the Ultrafeedback template and an LLM judge. The code for the synthetic generation pipeline is found in the scripts/synth_pref directory of [open-instruct](https://github.com/allenai/open-instruct/) ## License This dataset is licensed under ODC-BY. It is intended for research and educational use in accordance with Ai2's [Responsible Use Guidelines](https://allenai.org/responsible-use). This dataset includes output data generated from third party models that are subject to separate terms governing their use.
allenai/tulu-3-wildchat-reused-on-policy-70b
allenai
"2024-11-21T16:43:05Z"
2
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T22:49:10Z"
--- dataset_info: features: - name: id dtype: string - name: prompt dtype: string - name: chosen list: - name: content dtype: string - name: role dtype: string - name: rejected list: - name: content dtype: string - name: role dtype: string splits: - name: train num_bytes: 141772436 num_examples: 17242 download_size: 84326528 dataset_size: 141772436 configs: - config_name: default data_files: - split: train path: data/train-* --- <img src="https://huggingface.co/datasets/allenai/blog-images/resolve/main/tulu-3/Tulu3-logo.png" alt="Tulu3 banner" width="400" style="margin-left:'auto' margin-right:'auto' display:'block'"/> # Llama 3.1 Tulu 3 Wildchat reused (on-policy 70B) *Note that this collection is licensed under ODC-BY-1.0 license; different licenses apply to subsets of the data. Some portions of the dataset are non-commercial. We present the mixture as a research artifact.* This preference dataset is part of our Tulu 3 preference mixture: it contains prompts from [WildChat](allenai/WildChat-1M) and it contains 17,207 generation pairs (some of which on-policy completions from https://huggingface.co/allenai/Llama-3.1-Tulu-3-70B) obtained using the following models: - [Mistral 7B Instruct v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) (Apache 2.0) - [Mistral Nemo Instruct 2407](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407) (Apache 2.0) - [Tulu 2 7B](https://huggingface.co/allenai/tulu-2-7b) (Ai2 ImpACT Low Risk License) - [Tulu 2 13B](https://huggingface.co/allenai/tulu-2-13b) (Ai2 ImpACT Low Risk License) - [Yi-34B-Chat](https://huggingface.co/01-ai/Yi-34B-Chat) (Apache 2.0) - [Yi-6B-Chat](https://huggingface.co/01-ai/Yi-6B-Chat) (Apache 2.0) - [MPT 30B Chat](https://huggingface.co/mosaicml/mpt-30b-chat) (CC-BY-SA-4.0) - [MPT 7B 8k Chat](https://huggingface.co/mosaicml/mpt-7b-8k-chat) (CC-BY-SA-4.0) - [Google Gemma 2 27B it](https://huggingface.co/google/gemma-2-27b-it) (Gemma is provided under and subject to the Gemma Terms of Use found at [ai.google.dev/gemma/terms](https://ai.google.dev/gemma/terms)) - [Google Gemma 2 9B it](https://huggingface.co/google/gemma-2-9b-it) (Gemma is provided under and subject to the Gemma Terms of Use found at [ai.google.dev/gemma/terms](https://ai.google.dev/gemma/terms)) - [InternLM2.5 20B](https://huggingface.co/internlm/internlm2_5-20b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [InternLM2.5 7B](https://huggingface.co/internlm/internlm2_5-7b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [InternLM2.5 1.8B](https://huggingface.co/internlm/internlm2_5-1_8b-chat) (InternLM weights are fully open for academic research and also allow free commercial usage. A commercial license can be obtained as instructed in the model card.) - [Falcon 7B](https://huggingface.co/tiiuae/falcon-7b-instruct) (Apache 2.0) - [Qwen2.5 72B Instruct](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct) (Qwen is licensed under the Qwen LICENSE AGREEMENT, Copyright (c) Alibaba Cloud. All Rights Reserved.) 
- [Qwen2.5 32B Instruct](https://huggingface.co/Qwen/Qwen2.5-32B-Instruct) (Apache 2.0) - [Qwen2.5 14B Instruct](https://huggingface.co/Qwen/Qwen2.5-14B-Instruct) (Apache 2.0) - [Qwen2.5 7B Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) (Apache 2.0) - [Llama 3.1 8B Instruct ](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) (this dataset was partially "Built with Llama" and is thus subject to the Llama 3.1 License) - [Llama 3.1 70B Instruct](https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct) (this dataset was partially "Built with Llama" and is thus subject to the Llama 3.1 License) - [Llama 3 8B Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B) (this dataset was partially "Built with Meta Llama 3" and is thus subject to the Llama 3 License) - [GPT-4 Turbo](https://openai.com/index/new-models-and-developer-products-announced-at-devday/) and [GPT-4o](https://openai.com/index/hello-gpt-4o/) (Outputs produced by GPT-4 are subject to OpenAI's [terms of use](https://openai.com/policies/row-terms-of-use)) - [Claude 3.5 Sonnet](https://www.anthropic.com/news/claude-3-5-sonnet) (Outputs produced by Claude are subject to Anthropic [terms of service](https://www.anthropic.com/legal/commercial-terms) and [usage policy](https://www.anthropic.com/legal/aup)) ## Completion Generation Approach: Given a set of prompts, we generated the completions and preferences using a synthetic pipeline that combines both on-policy and off-policy data, and obtained the preference annotations on four different aspects using the Ultrafeedback template and an LLM judge. The code for the synthetic generation pipeline is found in the scripts/synth_pref directory of [open-instruct](https://github.com/allenai/open-instruct/) ## License This dataset is licensed under ODC-BY. It is intended for research and educational use in accordance with Ai2's [Responsible Use Guidelines](https://allenai.org/responsible-use). This dataset includes output data generated from third party models that are subject to separate terms governing their use.
Doub7e/SDv2_512-Count-seedmining-rectified
Doub7e
"2024-11-20T22:53:18Z"
2
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:image", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T22:52:20Z"
--- dataset_info: features: - name: image dtype: image - name: prompt dtype: string splits: - name: train num_bytes: 1056605943.0 num_examples: 2400 download_size: 1056599258 dataset_size: 1056605943.0 configs: - config_name: default data_files: - split: train path: data/train-* ---
nona-ghazizadeh/CoT
nona-ghazizadeh
"2024-11-21T01:48:54Z"
2
0
[ "size_categories:10K<n<100K", "format:json", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T00:48:22Z"
--- pretty_name: j --- CoT dataset
KKACHI-HUB/TEST
KKACHI-HUB
"2024-11-21T01:33:15Z"
2
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T01:32:58Z"
--- dataset_info: features: - name: original dtype: string - name: refined dtype: string - name: evaluation dtype: string splits: - name: train num_bytes: 332143078 num_examples: 36576 download_size: 162240496 dataset_size: 332143078 configs: - config_name: default data_files: - split: train path: data/train-* ---
hyen99-03/argue_finetuningset
hyen99-03
"2024-11-21T02:11:14Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T02:11:03Z"
--- dataset_info: features: - name: instruction dtype: string - name: output dtype: string - name: input dtype: string splits: - name: train num_bytes: 1050213 num_examples: 620 download_size: 191761 dataset_size: 1050213 configs: - config_name: default data_files: - split: train path: data/train-* ---
rPucs/Us
rPucs
"2024-11-21T20:03:03Z"
2
0
[ "license:mit", "size_categories:n<1K", "format:imagefolder", "modality:image", "modality:text", "library:datasets", "library:mlcroissant", "region:us" ]
null
"2024-11-21T02:14:25Z"
--- license: mit ---
Yuanxin-Liu/Iter3_generation
Yuanxin-Liu
"2024-11-21T02:14:54Z"
2
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T02:14:51Z"
--- dataset_info: features: - name: prompt dtype: string - name: question dtype: string - name: responses sequence: string - name: answer dtype: string - name: pick dtype: bool - name: rewards sequence: int64 splits: - name: train num_bytes: 154594176.28231227 num_examples: 13809 download_size: 42573142 dataset_size: 154594176.28231227 configs: - config_name: default data_files: - split: train path: data/train-* ---
sirjoy/so100_test
sirjoy
"2024-11-22T01:15:06Z"
2
0
[ "task_categories:robotics", "region:us", "LeRobot", "so100", "tutorial" ]
[ "robotics" ]
"2024-11-21T02:18:08Z"
--- task_categories: - robotics tags: - LeRobot - so100 - tutorial --- This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
r1v3r/snarkOS
r1v3r
"2024-11-21T02:26:07Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T02:26:03Z"
--- dataset_info: features: - name: repo dtype: string - name: pull_number dtype: int64 - name: instance_id dtype: string - name: issue_numbers sequence: string - name: base_commit dtype: string - name: patch dtype: string - name: test_patch dtype: string - name: problem_statement dtype: string - name: hints_text dtype: string - name: created_at dtype: string - name: version dtype: string splits: - name: train num_bytes: 344157 num_examples: 9 download_size: 131492 dataset_size: 344157 configs: - config_name: default data_files: - split: train path: data/train-* ---
r1v3r/bottlerocket
r1v3r
"2024-11-21T02:26:46Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T02:26:42Z"
--- dataset_info: features: - name: repo dtype: string - name: pull_number dtype: int64 - name: instance_id dtype: string - name: issue_numbers sequence: string - name: base_commit dtype: string - name: patch dtype: string - name: test_patch dtype: string - name: problem_statement dtype: string - name: hints_text dtype: string - name: created_at dtype: string splits: - name: train num_bytes: 132729 num_examples: 4 download_size: 56995 dataset_size: 132729 configs: - config_name: default data_files: - split: train path: data/train-* ---
Nabin1995/yol-en-words-sentences
Nabin1995
"2024-11-21T02:57:18Z"
2
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T02:57:14Z"
--- dataset_info: features: - name: instruction dtype: string - name: input dtype: string - name: output dtype: string splits: - name: train num_bytes: 661643 num_examples: 7901 download_size: 199565 dataset_size: 661643 configs: - config_name: default data_files: - split: train path: data/train-* ---
1231czx/test_ver2_rebuttal_af_rm_bon64_01
1231czx
"2024-11-21T03:18:53Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T03:18:52Z"
--- dataset_info: features: - name: instruction dtype: string - name: output dtype: string - name: generator dtype: string splits: - name: train num_bytes: 1289534 num_examples: 805 download_size: 770382 dataset_size: 1289534 configs: - config_name: default data_files: - split: train path: data/train-* ---
1231czx/test_ver2_rebuttal_af_rrm_bon64_01
1231czx
"2024-11-21T03:18:55Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T03:18:54Z"
--- dataset_info: features: - name: instruction dtype: string - name: output dtype: string - name: generator dtype: string splits: - name: train num_bytes: 1474951 num_examples: 805 download_size: 874853 dataset_size: 1474951 configs: - config_name: default data_files: - split: train path: data/train-* ---
1231czx/test_ver2_rebuttal_af_rm_bon64_02
1231czx
"2024-11-21T03:18:57Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T03:18:56Z"
--- dataset_info: features: - name: instruction dtype: string - name: output dtype: string - name: generator dtype: string splits: - name: train num_bytes: 1288750 num_examples: 805 download_size: 768186 dataset_size: 1288750 configs: - config_name: default data_files: - split: train path: data/train-* ---
1231czx/test_ver2_rebuttal_af_rrm_bon64_02
1231czx
"2024-11-21T03:18:59Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T03:18:58Z"
--- dataset_info: features: - name: instruction dtype: string - name: output dtype: string - name: generator dtype: string splits: - name: train num_bytes: 1480763 num_examples: 805 download_size: 887060 dataset_size: 1480763 configs: - config_name: default data_files: - split: train path: data/train-* ---
1231czx/test_ver2_rebuttal_af_rm_bon64_05
1231czx
"2024-11-21T03:19:01Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T03:19:00Z"
--- dataset_info: features: - name: instruction dtype: string - name: output dtype: string - name: generator dtype: string splits: - name: train num_bytes: 1279264 num_examples: 805 download_size: 764873 dataset_size: 1279264 configs: - config_name: default data_files: - split: train path: data/train-* ---
1231czx/test_ver2_rebuttal_af_rrm_bon64_05
1231czx
"2024-11-21T03:19:03Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T03:19:02Z"
--- dataset_info: features: - name: instruction dtype: string - name: output dtype: string - name: generator dtype: string splits: - name: train num_bytes: 1486565 num_examples: 805 download_size: 887873 dataset_size: 1486565 configs: - config_name: default data_files: - split: train path: data/train-* ---
1231czx/test_ver2_rebuttal_af_rm_bon8_005
1231czx
"2024-11-21T03:19:07Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T03:19:06Z"
--- dataset_info: features: - name: instruction dtype: string - name: output dtype: string - name: generator dtype: string splits: - name: train num_bytes: 1349555 num_examples: 805 download_size: 807037 dataset_size: 1349555 configs: - config_name: default data_files: - split: train path: data/train-* ---
1231czx/test_ver2_rebuttal_af_rrm_bon8_01
1231czx
"2024-11-21T03:19:09Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T03:19:08Z"
--- dataset_info: features: - name: instruction dtype: string - name: output dtype: string - name: generator dtype: string splits: - name: train num_bytes: 1475080 num_examples: 805 download_size: 872281 dataset_size: 1475080 configs: - config_name: default data_files: - split: train path: data/train-* ---
1231czx/test_ver2_rebuttal_af_rm_bon8_01
1231czx
"2024-11-21T03:19:10Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T03:19:09Z"
--- dataset_info: features: - name: instruction dtype: string - name: output dtype: string - name: generator dtype: string splits: - name: train num_bytes: 1355607 num_examples: 805 download_size: 811942 dataset_size: 1355607 configs: - config_name: default data_files: - split: train path: data/train-* ---
juliadollis/mistral_toxic_hatespeech
juliadollis
"2024-11-21T12:02:38Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T03:37:43Z"
--- dataset_info: features: - name: nome_arquivo dtype: string - name: file_id dtype: string - name: user_id dtype: int64 - name: subforum_id dtype: int64 - name: num_contexts dtype: int64 - name: label dtype: string - name: text dtype: string - name: is_toxic dtype: int64 - name: predicted_is_toxic dtype: int64 - name: __index_level_0__ dtype: int64 splits: - name: train num_bytes: 739 num_examples: 3 download_size: 6922 dataset_size: 739 configs: - config_name: default data_files: - split: train path: data/train-* ---
self-generate/ds_chat_original_cn_rl_oj_debug_iter0-pos-binarized-reflection-scored
self-generate
"2024-11-21T03:57:46Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T03:57:45Z"
--- dataset_info: features: - name: prompt dtype: string - name: chosen dtype: string - name: rejected dtype: string - name: rejected_traceback dtype: string - name: test dtype: string - name: reflection_generate_0 dtype: string - name: reflection_generate_0_score dtype: int64 - name: reflection_traceback_0 dtype: string - name: reflection_generate_1 dtype: string - name: reflection_generate_1_score dtype: int64 - name: reflection_traceback_1 dtype: string - name: reflection_generate_2 dtype: string - name: reflection_generate_2_score dtype: int64 - name: reflection_traceback_2 dtype: string - name: reflection_generate_3 dtype: string - name: reflection_generate_3_score dtype: int64 - name: reflection_traceback_3 dtype: string - name: average_reflection_score dtype: float64 - name: chosen_average_reflection_score dtype: float64 splits: - name: train num_bytes: 27466 num_examples: 3 download_size: 78925 dataset_size: 27466 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "ds_chat_original_cn_rl_oj_debug_iter0-pos-binarized-reflection-scored" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
DeL-TaiseiOzaki/magpie-gemma-2-9b-it
DeL-TaiseiOzaki
"2024-11-21T04:01:22Z"
2
0
[ "license:gemma", "size_categories:10K<n<100K", "format:json", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T04:00:38Z"
--- license: gemma ---
self-generate/ds_coder_original_cn_rl_oj_debug_iter0-pos-binarized-reflection-scored
self-generate
"2024-11-21T04:12:39Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T04:12:38Z"
--- dataset_info: features: - name: prompt dtype: string - name: chosen dtype: string - name: rejected dtype: string - name: rejected_traceback dtype: string - name: test dtype: string - name: reflection_generate_0 dtype: string - name: reflection_generate_0_score dtype: int64 - name: reflection_traceback_0 dtype: string - name: reflection_generate_1 dtype: string - name: reflection_generate_1_score dtype: int64 - name: reflection_traceback_1 dtype: string - name: reflection_generate_2 dtype: string - name: reflection_generate_2_score dtype: int64 - name: reflection_traceback_2 dtype: string - name: reflection_generate_3 dtype: string - name: reflection_generate_3_score dtype: int64 - name: reflection_traceback_3 dtype: string - name: average_reflection_score dtype: float64 - name: chosen_average_reflection_score dtype: float64 splits: - name: train num_bytes: 149315 num_examples: 10 download_size: 127276 dataset_size: 149315 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "ds_coder_original_cn_rl_oj_debug_iter0-pos-binarized-reflection-scored" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
omnineura/supriya102
omnineura
"2024-11-21T04:40:25Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T04:40:04Z"
--- dataset_info: features: - name: context dtype: string - name: question dtype: string - name: answers dtype: string splits: - name: train num_bytes: 76279 num_examples: 102 download_size: 22504 dataset_size: 76279 configs: - config_name: default data_files: - split: train path: data/train-* ---
swaghjal/Codebridge_2
swaghjal
"2024-11-21T04:43:46Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T04:43:21Z"
--- dataset_info: features: - name: python dtype: string - name: r dtype: string - name: usecase dtype: string - name: status dtype: string - name: __index_level_0__ dtype: int64 splits: - name: icl num_bytes: 76469 num_examples: 40 - name: test num_bytes: 1124501 num_examples: 650 download_size: 377123 dataset_size: 1200970 configs: - config_name: default data_files: - split: icl path: data/icl-* - split: test path: data/test-* --- # Dataset Card for "Codebridge_2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
derricka59/Model-trainer-v1
derricka59
"2024-11-21T05:16:58Z"
2
0
[ "license:apache-2.0", "region:us" ]
null
"2024-11-21T05:16:58Z"
--- license: apache-2.0 ---
omnineura/supriyaaug
omnineura
"2024-11-21T05:19:36Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T05:19:36Z"
--- dataset_info: features: - name: sepalLength dtype: float64 - name: sepalWidth dtype: float64 - name: petalLength dtype: float64 - name: petalWidth dtype: float64 - name: species dtype: string splits: - name: train num_bytes: 6650 num_examples: 150 download_size: 3899 dataset_size: 6650 configs: - config_name: default data_files: - split: train path: data/train-* ---
DevCar/dataset_internado_rotatorio_v3
DevCar
"2024-11-21T05:23:42Z"
2
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T05:23:39Z"
--- dataset_info: features: - name: area dtype: string - name: nombre_pdf dtype: string - name: contenido dtype: string splits: - name: train num_bytes: 24461099.503796507 num_examples: 14223 - name: test num_bytes: 2719046.496203493 num_examples: 1581 download_size: 16914146 dataset_size: 27180146.0 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* ---
derricka59/New_training-data-set
derricka59
"2024-11-21T05:25:20Z"
2
0
[ "license:apache-2.0", "region:us" ]
null
"2024-11-21T05:24:17Z"
--- license: apache-2.0 --- git clone https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct
SHASWATSINGH3101/key_info_bail_para_prompts_TEST_V1
SHASWATSINGH3101
"2024-11-21T05:40:59Z"
2
0
[ "license:cc-by-nc-nd-4.0", "size_categories:n<1K", "format:json", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "region:us" ]
null
"2024-11-21T05:40:27Z"
--- license: cc-by-nc-nd-4.0 ---
violetxi/NUMINA-V1-Clean-Blocks-3400_4600-577_600
violetxi
"2024-11-22T00:47:07Z"
2
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T06:20:38Z"
--- dataset_info: features: - name: problem dtype: string - name: is_correct dtype: bool - name: target_answer dtype: string - name: solution dtype: string - name: solution_steps dtype: string - name: attempts dtype: string - name: model_answer dtype: string splits: - name: train num_bytes: 1337149391 num_examples: 102544 download_size: 130178292 dataset_size: 1337149391 configs: - config_name: default data_files: - split: train path: data/train-* ---
Sujithanumala/oic_dataset
Sujithanumala
"2024-11-21T06:23:20Z"
2
0
[ "size_categories:10K<n<100K", "format:parquet", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T06:23:08Z"
--- dataset_info: features: - name: input_ids sequence: int32 - name: attention_mask sequence: int8 splits: - name: train num_bytes: 87001272.0 num_examples: 33879 - name: test num_bytes: 37287360.0 num_examples: 14520 download_size: 25057481 dataset_size: 124288632.0 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* ---
oakwood/mori_supdiff
oakwood
"2024-11-21T06:25:00Z"
2
0
[ "task_categories:robotics", "region:us", "LeRobot", "tutorial" ]
[ "robotics" ]
"2024-11-21T06:24:40Z"
--- task_categories: - robotics tags: - LeRobot - tutorial --- This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
DonaldLee/bedkids-nft
DonaldLee
"2024-11-21T11:39:01Z"
2
0
[ "license:apache-2.0", "region:us" ]
null
"2024-11-21T06:34:09Z"
--- license: apache-2.0 ---
joycewu/common_voice_16_1_zh_TW_pseudo_labelled_large_v2_concat
joycewu
"2024-11-21T08:22:09Z"
2
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:audio", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T06:39:13Z"
--- dataset_info: config_name: zh-TW features: - name: path dtype: string - name: audio dtype: audio: sampling_rate: 16000 - name: sentence dtype: string - name: condition_on_prev sequence: int64 - name: whisper_transcript dtype: string splits: - name: train num_bytes: 746186952.0 num_examples: 857 - name: validation num_bytes: 538995300.0 num_examples: 617 - name: test num_bytes: 594574487.0 num_examples: 684 download_size: 1682750460 dataset_size: 1879756739.0 configs: - config_name: zh-TW data_files: - split: train path: zh-TW/train-* - split: validation path: zh-TW/validation-* - split: test path: zh-TW/test-* ---
mjjang/custom_drug_dataset
mjjang
"2024-11-21T06:42:51Z"
2
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T06:42:45Z"
--- dataset_info: features: - name: patient_id dtype: int64 - name: drugName dtype: string - name: condition dtype: string - name: review dtype: string - name: rating dtype: float64 - name: date dtype: string - name: usefulCount dtype: int64 - name: review_length dtype: int64 splits: - name: train num_bytes: 65975578 num_examples: 110811 - name: validation num_bytes: 16422578 num_examples: 27703 - name: test num_bytes: 27430466 num_examples: 46108 download_size: 63886981 dataset_size: 109828622 configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* ---
lihaonan/multible
lihaonan
"2024-11-21T06:45:34Z"
2
0
[ "license:apache-2.0", "region:us" ]
null
"2024-11-21T06:45:34Z"
--- license: apache-2.0 ---
ahmedheakl/ar_lvis_instruct
ahmedheakl
"2024-11-21T06:53:08Z"
2
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:image", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T06:46:13Z"
--- dataset_info: features: - name: id dtype: string - name: image_path dtype: string - name: conversations list: - name: from dtype: string - name: value dtype: string - name: image dtype: image splits: - name: train num_bytes: 8215424279 num_examples: 50000 download_size: 8142669176 dataset_size: 8215424279 configs: - config_name: default data_files: - split: train path: data/train-* ---
dayeonglim/custom_drug_dataset
dayeonglim
"2024-11-21T06:48:33Z"
2
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T06:48:26Z"
--- dataset_info: features: - name: patient_id dtype: int64 - name: drugName dtype: string - name: condition dtype: string - name: review dtype: string - name: rating dtype: float64 - name: date dtype: string - name: usefulCount dtype: int64 - name: review_length dtype: int64 splits: - name: train num_bytes: 65975578 num_examples: 110811 - name: validation num_bytes: 16422578 num_examples: 27703 - name: test num_bytes: 27430466 num_examples: 46108 download_size: 63886981 dataset_size: 109828622 configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* ---
GitBag/llama3-ultrafeedback-reasoning-ReRe-armo-tokenized
GitBag
"2024-11-21T08:04:32Z"
2
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T06:49:30Z"
--- dataset_info: features: - name: prompt dtype: string - name: augmented_prompt dtype: string - name: shared_thought sequence: string - name: chosen_ts sequence: string - name: chosen_ts_reward dtype: float64 - name: reject_ts sequence: string - name: reject_ts_reward dtype: float64 - name: augmented_prompt_llama dtype: string - name: augmented_prompt_llama_token sequence: int64 - name: chosen_ts_llama dtype: string - name: chosen_ts_llama_token sequence: int64 - name: reject_ts_llama dtype: string - name: reject_ts_llama_token sequence: int64 splits: - name: train num_bytes: 10033865614.582733 num_examples: 228254 - name: test num_bytes: 43959210.417266436 num_examples: 1000 download_size: 1378535869 dataset_size: 10077824825.0 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* ---
junnystateofmind/conversational_ai
junnystateofmind
"2024-11-21T06:51:42Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T06:51:40Z"
--- dataset_info: features: - name: trajectory list: - name: content dtype: string - name: role dtype: string - name: sampled_len_from_5 dtype: int64 - name: sampled_h_from_sampled_len dtype: int64 - name: trajectory_sampled_h_from_sampled_len list: - name: content dtype: string - name: role dtype: string splits: - name: train num_bytes: 305004 num_examples: 10 download_size: 56891 dataset_size: 305004 configs: - config_name: default data_files: - split: train path: data/train-* ---
minimaster/fine_tuned
minimaster
"2024-11-21T07:13:10Z"
2
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T07:13:07Z"
--- dataset_info: features: - name: question dtype: string - name: answer dtype: string - name: url dtype: string - name: Transformed dtype: string splits: - name: train num_bytes: 7036678 num_examples: 10535 download_size: 2502832 dataset_size: 7036678 configs: - config_name: default data_files: - split: train path: data/train-* ---
balamurali18/impaired-data
balamurali18
"2024-11-21T07:18:06Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:audio", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T07:15:34Z"
--- dataset_info: features: - name: audio dtype: audio - name: text dtype: string - name: start_time dtype: string - name: end_time dtype: string - name: duration dtype: float64 splits: - name: train num_bytes: 12010236.0 num_examples: 5 download_size: 12009568 dataset_size: 12010236.0 configs: - config_name: default data_files: - split: train path: data/train-* ---
hellomomiji/PairRM-dataset
hellomomiji
"2024-11-22T00:42:33Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T07:32:40Z"
--- dataset_info: features: - name: prompt dtype: string - name: rejected dtype: string - name: chosen dtype: string splits: - name: train num_bytes: 75496 num_examples: 50 download_size: 46488 dataset_size: 75496 configs: - config_name: default data_files: - split: train path: data/train-* ---
gusornu/github-issues
gusornu
"2024-11-21T07:33:20Z"
2
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T07:33:15Z"
--- dataset_info: features: - name: url dtype: string - name: repository_url dtype: string - name: labels_url dtype: string - name: comments_url dtype: string - name: events_url dtype: string - name: html_url dtype: string - name: id dtype: int64 - name: node_id dtype: string - name: number dtype: int64 - name: title dtype: string - name: user struct: - name: login dtype: string - name: id dtype: int64 - name: node_id dtype: string - name: avatar_url dtype: string - name: gravatar_id dtype: string - name: url dtype: string - name: html_url dtype: string - name: followers_url dtype: string - name: following_url dtype: string - name: gists_url dtype: string - name: starred_url dtype: string - name: subscriptions_url dtype: string - name: organizations_url dtype: string - name: repos_url dtype: string - name: events_url dtype: string - name: received_events_url dtype: string - name: type dtype: string - name: user_view_type dtype: string - name: site_admin dtype: bool - name: labels list: - name: id dtype: int64 - name: node_id dtype: string - name: url dtype: string - name: name dtype: string - name: color dtype: string - name: default dtype: bool - name: description dtype: string - name: state dtype: string - name: locked dtype: bool - name: assignee struct: - name: login dtype: string - name: id dtype: int64 - name: node_id dtype: string - name: avatar_url dtype: string - name: gravatar_id dtype: string - name: url dtype: string - name: html_url dtype: string - name: followers_url dtype: string - name: following_url dtype: string - name: gists_url dtype: string - name: starred_url dtype: string - name: subscriptions_url dtype: string - name: organizations_url dtype: string - name: repos_url dtype: string - name: events_url dtype: string - name: received_events_url dtype: string - name: type dtype: string - name: user_view_type dtype: string - name: site_admin dtype: bool - name: assignees list: - name: login dtype: string - name: id dtype: int64 - name: node_id dtype: string - name: avatar_url dtype: string - name: gravatar_id dtype: string - name: url dtype: string - name: html_url dtype: string - name: followers_url dtype: string - name: following_url dtype: string - name: gists_url dtype: string - name: starred_url dtype: string - name: subscriptions_url dtype: string - name: organizations_url dtype: string - name: repos_url dtype: string - name: events_url dtype: string - name: received_events_url dtype: string - name: type dtype: string - name: user_view_type dtype: string - name: site_admin dtype: bool - name: milestone struct: - name: url dtype: string - name: html_url dtype: string - name: labels_url dtype: string - name: id dtype: int64 - name: node_id dtype: string - name: number dtype: int64 - name: title dtype: string - name: description dtype: string - name: creator struct: - name: login dtype: string - name: id dtype: int64 - name: node_id dtype: string - name: avatar_url dtype: string - name: gravatar_id dtype: string - name: url dtype: string - name: html_url dtype: string - name: followers_url dtype: string - name: following_url dtype: string - name: gists_url dtype: string - name: starred_url dtype: string - name: subscriptions_url dtype: string - name: organizations_url dtype: string - name: repos_url dtype: string - name: events_url dtype: string - name: received_events_url dtype: string - name: type dtype: string - name: user_view_type dtype: string - name: site_admin dtype: bool - name: open_issues dtype: int64 - name: closed_issues dtype: int64 - name: state dtype: 
string - name: created_at dtype: timestamp[s] - name: updated_at dtype: timestamp[s] - name: due_on dtype: 'null' - name: closed_at dtype: 'null' - name: comments sequence: string - name: created_at dtype: timestamp[s] - name: updated_at dtype: timestamp[s] - name: closed_at dtype: timestamp[s] - name: author_association dtype: string - name: active_lock_reason dtype: 'null' - name: draft dtype: bool - name: pull_request struct: - name: url dtype: string - name: html_url dtype: string - name: diff_url dtype: string - name: patch_url dtype: string - name: merged_at dtype: timestamp[s] - name: body dtype: string - name: closed_by struct: - name: login dtype: string - name: id dtype: int64 - name: node_id dtype: string - name: avatar_url dtype: string - name: gravatar_id dtype: string - name: url dtype: string - name: html_url dtype: string - name: followers_url dtype: string - name: following_url dtype: string - name: gists_url dtype: string - name: starred_url dtype: string - name: subscriptions_url dtype: string - name: organizations_url dtype: string - name: repos_url dtype: string - name: events_url dtype: string - name: received_events_url dtype: string - name: type dtype: string - name: user_view_type dtype: string - name: site_admin dtype: bool - name: reactions struct: - name: url dtype: string - name: total_count dtype: int64 - name: '+1' dtype: int64 - name: '-1' dtype: int64 - name: laugh dtype: int64 - name: hooray dtype: int64 - name: confused dtype: int64 - name: heart dtype: int64 - name: rocket dtype: int64 - name: eyes dtype: int64 - name: timeline_url dtype: string - name: performed_via_github_app dtype: 'null' - name: state_reason dtype: string - name: is_pull_request dtype: bool splits: - name: train num_bytes: 8299878 num_examples: 1000 download_size: 2244132 dataset_size: 8299878 configs: - config_name: default data_files: - split: train path: data/train-* ---
magneum/pixelhue-captioned-dataset
magneum
"2024-11-21T07:35:14Z"
2
0
[ "license:apache-2.0", "region:us" ]
null
"2024-11-21T07:35:14Z"
--- license: apache-2.0 ---
dsfsi/zasca-sum
dsfsi
"2024-11-21T13:10:58Z"
2
0
[ "license:cc-by-sa-4.0", "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "doi:10.57967/hf/3565", "region:us" ]
null
"2024-11-21T07:41:08Z"
--- license: cc-by-sa-4.0 dataset_info: - config_name: with_summaries features: - name: id dtype: string - name: type dtype: string - name: year dtype: string - name: input dtype: string - name: output dtype: string splits: - name: train num_bytes: 57867100 num_examples: 1521 - name: validation num_bytes: 10985252 num_examples: 299 - name: test num_bytes: 11291457 num_examples: 298 download_size: 42168935 dataset_size: 80143809 - config_name: without_summaries features: - name: id dtype: string - name: type dtype: string - name: year dtype: string - name: input dtype: string splits: - name: all_data num_bytes: 55925930 num_examples: 2053 download_size: 29653319 dataset_size: 55925930 configs: - config_name: with_summaries data_files: - split: train path: with_summaries/train-* - split: validation path: with_summaries/validation-* - split: test path: with_summaries/test-* - config_name: without_summaries data_files: - split: all_data path: without_summaries/all_data-* ---
yoki123/small_MIA
yoki123
"2024-11-21T07:58:25Z"
2
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T07:49:54Z"
--- dataset_info: features: - name: text dtype: string - name: split dtype: string - name: label dtype: int64 splits: - name: new_Math num_bytes: 313124 num_examples: 100 - name: new_News num_bytes: 302819 num_examples: 100 - name: new_Wiki num_bytes: 80460 num_examples: 100 - name: new_ArXiv num_bytes: 125433 num_examples: 100 - name: new_Twitter num_bytes: 30628 num_examples: 100 - name: Pile_CC num_bytes: 525742 num_examples: 100 - name: Pile_Wiki num_bytes: 503993 num_examples: 100 - name: Pile_ArXiv num_bytes: 4912893 num_examples: 100 - name: Pile_Math num_bytes: 821900 num_examples: 100 - name: Pile_Github num_bytes: 857031 num_examples: 100 - name: full num_bytes: 8474023 num_examples: 1000 download_size: 8185046 dataset_size: 16948046 configs: - config_name: default data_files: - split: new_Math path: data/new_Math-* - split: new_News path: data/new_News-* - split: new_Wiki path: data/new_Wiki-* - split: new_ArXiv path: data/new_ArXiv-* - split: new_Twitter path: data/new_Twitter-* - split: Pile_CC path: data/Pile_CC-* - split: Pile_Wiki path: data/Pile_Wiki-* - split: Pile_ArXiv path: data/Pile_ArXiv-* - split: Pile_Math path: data/Pile_Math-* - split: Pile_Github path: data/Pile_Github-* - split: full path: data/full-* ---
Marcusxx/CngFSt10sec
Marcusxx
"2024-11-21T07:51:55Z"
2
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:audio", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T07:50:52Z"
--- dataset_info: features: - name: audio dtype: audio: sampling_rate: 16000 - name: transcripts dtype: string splits: - name: train num_bytes: 598099904.5376791 num_examples: 1934 - name: test num_bytes: 74221544.33416046 num_examples: 242 - name: valid num_bytes: 74699082.33416046 num_examples: 242 download_size: 665620175 dataset_size: 747020531.206 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* - split: valid path: data/valid-* ---
limcheekin/llmtwin
limcheekin
"2024-11-21T08:28:36Z"
2
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-21T08:28:31Z"
--- dataset_info: features: - name: instruction dtype: string - name: output dtype: string splits: - name: train num_bytes: 134606 num_examples: 288 - name: test num_bytes: 15279 num_examples: 32 download_size: 63359 dataset_size: 149885 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* ---
amgadhasan/arabic_tweets_dialects
amgadhasan
"2024-05-24T21:06:15Z"
1
0
[ "task_categories:text-classification", "language:ar", "license:mit", "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[ "text-classification" ]
"2024-05-24T21:04:32Z"
--- dataset_info: features: - name: text dtype: string - name: dialect dtype: string splits: - name: train num_bytes: 24122795 num_examples: 147725 download_size: 13293546 dataset_size: 24122795 configs: - config_name: default data_files: - split: train path: data/train-* license: mit task_categories: - text-classification language: - ar pretty_name: Arabic Dialectical Tweets size_categories: - 100K<n<1M ---
UBC-NLP/AfroLingu-MT
UBC-NLP
"2024-08-13T19:36:25Z"
1
1
[ "language:aar", "language:ach", "language:afr", "language:aka", "language:amh", "language:bam", "language:bas", "language:bem", "language:btg", "language:eng", "language:ewe", "language:fon", "language:fra", "language:hau", "language:ibo", "language:kbp", "language:lgg", "language:lug", "language:mlg", "language:nyn", "language:orm", "language:som", "language:sot", "language:swa", "language:tir", "language:yor", "language:teo", "language:gez", "language:wal", "language:fan", "language:kau", "language:kin", "language:kon", "language:lin", "language:nya", "language:pcm", "language:ssw", "language:tsn", "language:tso", "language:twi", "language:wol", "language:xho", "language:zul", "language:nnb", "language:swc", "language:ara", "size_categories:100K<n<1M", "format:json", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us", "African MT", "Toucan", "Machine translation", "UBC", "DLNLP" ]
null
"2024-08-12T22:53:16Z"
--- language: - aar - ach - afr - aka - amh - bam - bas - bem - btg - eng - ewe - fon - fra - hau - ibo - kbp - lgg - lug - mlg - nyn - orm - som - sot - swa - tir - yor - teo - gez - wal - fan - kau - kin - kon - lin - nya - pcm - ssw - tsn - tso - twi - wol - xho - zul - nnb - swc - ara pipeline_tag: text-generation tags: - African MT - Toucan - Machine translation - UBC - DLNLP extra_gated_fields: First Name: text Last Name: text Country: country Affiliation: text Job title: type: select options: - Student - Research Graduate - AI researcher - AI developer/engineer - Reporter - Other I agree to use this model for non-commercial use ONLY: checkbox I agree to cite the Toucan paper: checkbox geo: ip_location By clicking Submit below I accept the terms of the license: checkbox extra_gated_button_content: Submit --- <div style='text-align: justify;'> This is the repository accompanying our ACL 2024 paper [Toucan: Many-to-Many Translation for 150 African Language Pairs](https://aclanthology.org/2024.findings-acl.781/). We address a notable gap in Natural Language Processing (NLP) by introducing a collection of resources designed to improve Machine Translation (MT) for low-resource languages, with a specific focus on African languages. First, We introduce two language models (LMs), Cheetah-1.2B and Cheetah-3.7B, with 1.2 billion and 3.7 billion parameters respectively. Next, we finetune the aforementioned models to create Toucan, an Afrocentric machine translation model designed to support 156 African language pairs. To evaluate Toucan, we carefully develop an extensive machine translation benchmark, dubbed AfroLingu-MT, tailored for evaluating machine translation. Toucan significantly outperforms other models, showcasing its remarkable performance on MT for African languages. Finally, we train a new model, spBLEU_1K, to enhance translation evaluation metrics, covering 1K languages, including 614 African languages. This work aims to advance the field of NLP, fostering cross-cultural understanding and knowledge exchange, particularly in regions with limited language resources such as Africa. </div> ## AfroLingu-MT Benchmark Our collection comprises data from a total of 43 datasets, encompassing 84 unique language pairs derived from 46 different languages. We also develop a new manually translated dataset useful for evaluation in the government domain. In all, the data cover 43 African languages from five language families domiciled in 29 African countries. We also include Arabic, English, and French, since these are widely spoken in Africa. 
- For more details about the AfroLingu-MT benchmark, visit the [**Toucan GitHub repository**](https://github.com/UBC-NLP/Toucan) ### Supported languages Below are the supported languages: ``` lang_names={ "aar": "Afar", "ach": "Acholi", "afr": "Afrikaans", "aka": "Akan", "amh": "Amharic", "bam": "Bambara", "bas": "Basaa", "bem": "Bemba", "btg": "Bete Gagnoa", "eng": "English", "ewe": "Ewe", "fon": "Fon", "fra": "French", "hau": "Hausa", "ibo": "Igbo", "kbp": "Kabiye", "lgg": "Lugbara", "lug": "Luganda", "mlg": "Malagasy", "nyn": "Nyakore", "orm": "Oromo", "som": "Somali", "sot": "Sesotho", "swa": "Swahili", "tir": "Tigrinya", "yor": "Yoruba", "teo": "Ateso", "gez": "Geez", "wal": "Wolaytta", "fan": "Fang", "kau": "Kanuri", "kin": "Kinyawanda", "kon": "Kongo", "lin": "Lingala", "nya": "Chichewa", "pcm": "Nigerian Pidgin", "ssw": "Siswati", "tsn": "Setswana", "tso": "Tsonga", "twi": "Twi", "wol": "Wolof", "xho": "Xhosa", "zul": "Zulu", "nnb": "Nande", "swc": "Swahili Congo", "ara": "Arabic" } ``` ### Loading the dataset ``` python from datasets import load_dataset afrolingu_mt = load_dataset("UBC-NLP/AfroLingu-MT") print(afrolingu_mt) ``` Output: ``` DatasetDict({ train: Dataset({ features: ['langcode', 'instruction', 'input', 'output'], num_rows: 586261 }) validation: Dataset({ features: ['langcode', 'instruction', 'input', 'output'], num_rows: 7437 }) test: Dataset({ features: ['langcode', 'instruction', 'input', 'output'], num_rows: 26875 }) }) ``` ## Citation If you use the AfroLingu-MT benchmark for your scientific publication, or if you find the resources in this repository useful, please cite our papers as follows (to be updated): **Toucan's Paper** ``` @inproceedings{adebara-etal-2024-cheetah, title = "Cheetah: Natural Language Generation for 517 {A}frican Languages", author = "Adebara, Ife and Elmadany, AbdelRahim and Abdul-Mageed, Muhammad", editor = "Ku, Lun-Wei and Martins, Andre and Srikumar, Vivek", booktitle = "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", month = aug, year = "2024", address = "Bangkok, Thailand and virtual meeting", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2024.acl-long.691", pages = "12798--12823", } ```
davit312/Armenian-speech-hy
davit312
"2024-10-26T18:26:22Z"
1
0
[ "license:gpl-2.0", "region:us" ]
null
"2024-09-13T19:20:24Z"
--- license: gpl-2.0 --- greek-myths :: total length 404.67 mins -> 6 hrs 44 mins
LLM-EDA/BuggyVerilog
LLM-EDA
"2024-10-05T11:03:00Z"
1
0
[ "task_categories:text-generation", "language:en", "license:apache-2.0", "region:us" ]
[ "text-generation" ]
"2024-10-05T10:57:59Z"
--- license: apache-2.0 task_categories: - text-generation language: - en --- For usage, please take a look at https://github.com/CatIIIIIIII/VeriDebug.
xingjunm/CC1M-Adv
xingjunm
"2024-10-16T03:08:25Z"
1
0
[ "license:mit", "modality:image", "region:us" ]
null
"2024-10-15T05:12:05Z"
--- license: mit ---
nexaai2b/perry_lora_function_call_training_data
nexaai2b
"2024-10-18T19:06:55Z"
1
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-10-18T18:21:30Z"
--- dataset_info: features: - name: query dtype: string - name: function dtype: string splits: - name: train num_bytes: 253506.0 num_examples: 3008 download_size: 75680 dataset_size: 253506.0 configs: - config_name: default data_files: - split: train path: data/train-* ---
llm-jp/llava-instruct-ja
llm-jp
"2024-11-19T10:35:55Z"
1
0
[ "task_categories:visual-question-answering", "language:ja", "size_categories:100K<n<1M", "format:json", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[ "visual-question-answering" ]
"2024-10-22T06:43:09Z"
--- language: - ja task_categories: - visual-question-answering size_categories: - 100K<n<1M --- ## Dataset Card for llava_instruct_ja ### Dataset details This is the Japanese version of [LLaVA-Instruct](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K), which contains 156K samples. We used `gpt-4o-mini-2024-07-18` to generate data via the Azure OpenAI API. ### License Creative Commons Attribution 4.0 License; and it should abide by [the OpenAI terms of use](https://openai.com/policies/terms-of-use)
allenai/tulu-3-sft-personas-instruction-following
allenai
"2024-11-21T15:57:21Z"
1
0
[ "task_categories:text-generation", "language:en", "license:odc-by", "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "arxiv:2311.07911", "arxiv:2406.20094", "region:us" ]
[ "text-generation" ]
"2024-10-30T20:27:20Z"
--- dataset_info: features: - name: id dtype: string - name: prompt dtype: string - name: messages list: - name: content dtype: string - name: role dtype: string - name: constraints sequence: string splits: - name: train num_bytes: 70397173 num_examples: 29980 download_size: 39171921 dataset_size: 70397173 configs: - config_name: default data_files: - split: train path: data/train-* task_categories: - text-generation language: - en license: odc-by size_categories: - 10K<n<100K --- <img src="https://huggingface.co/datasets/allenai/blog-images/resolve/main/tulu-3/Tulu3-logo.png" alt="Tulu3 banner" width="400" style="margin-left:'auto' margin-right:'auto' display:'block'"/> ### Dataset Descriptions This dataset contains **29980** examples and is synthetically created to enhance model's capabilities to follow instructions precisely and to satisfy user constraints. The constraints are borrowed from the taxonomy in [IFEval dataset](https://arxiv.org/abs/2311.07911). To generate diverse instructions, we expand the methodology in [Ge et al., 2024](https://arxiv.org/pdf/2406.20094) by using personas. More details and exact prompts used to construct the dataset can be found in our [paper](). - **Curated by:** Allen Institute for AI - **Paper:** [TBD]() - **Repository:** [TBD]() - **Language(s) (NLP):** English - **License:** ODC-BY - **Point of Contact:** [Faeze Brahman](mailto:faezeb@allenai.org) ### Loading ```python from datasets import load_dataset dataset = load_dataset("allenai/tulu-3-sft-personas-instruction-following")["train"] ``` ### Dataset Structure Each example in the dataset contains the standard instruction-tuning data points as follow: - id (str): a unique identifier - prompt (str): the verifiable instruction which involves satisfying 1 to 3 constraints - messages (list): message format used for supervised fine-tuning (this contains user prompt and assistant response) - constraints (list of str): a list of verifiable constraints that need to be satisfied by the assistant response
llm-jp/llava-instruct-v1_5-en-subset-358k
llm-jp
"2024-11-19T11:02:16Z"
1
0
[ "task_categories:visual-question-answering", "language:en", "size_categories:100K<n<1M", "region:us" ]
[ "visual-question-answering" ]
"2024-11-06T04:44:38Z"
--- language: - en task_categories: - visual-question-answering size_categories: - 100K<n<1M --- ## Dataset Card for llava-instruct-v1_5-en-subset-358k ### Dataset details This dataset is a subset of the [LLaVA-1.5 Instruction Data](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/llava_v1_5_mix665k.json), which was used to train [llm-jp-3-vila-14b](https://huggingface.co/llm-jp/llm-jp-3-vila-14b). This dataset includes the following datasets. | Dataset | Images | |:---|---:| |LLaVA | 158K | |[VQAv2](https://visualqa.org/) | 53K | |[GQA](https://cs.stanford.edu/people/dorarad/gqa/index.html) | 46K | |[OCRVQA](https://ocr-vqa.github.io/) | 80K | |[TextVQA](https://textvqa.org/dataset/) | 22K | ### License Creative Commons Attribution 4.0 License; and it should abide by [the OpenAI terms of use](https://openai.com/policies/terms-of-use)
Anvilogic/CE-Typosquat-Training-Dataset
Anvilogic
"2024-11-08T18:06:56Z"
1
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-07T10:43:16Z"
--- dataset_info: features: - name: potential_typosquat dtype: string - name: legitimate dtype: string - name: label dtype: bool splits: - name: train num_bytes: 1408438 num_examples: 38000 - name: test num_bytes: 75004 num_examples: 2000 download_size: 639859 dataset_size: 1483442 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* --- # Typosquat Dataset ## Dataset Summary This dataset is intended for typosquatting detection within a domain corpus. It contains 40,000 labeled pairs, categorized as either typosquatted or non-typosquatted. The data is divided into training and test splits, each maintaining a balanced distribution of positive and negative examples. ## Supported Tasks and Leaderboards **CE training**: The primary task is binary classification, specifically detecting typosquatting domains. The dataset can be used to train a cross-encoder or other model types for binary classification. ## Languages The dataset is multilingual, reflecting the diversity of domain names. ## Dataset Structure ### Data Instances Each data instance in the dataset consists of two domains and a label indicating if the second domain is a typosquatted version of the first. An example from the training set: ```json { "domain": "example.com", "sim_domain": "exarnple.com", "label": 1 } ``` **domain**: A string representing the legitimate domain. **sim_domain**: A string representing a potentially typosquatted domain. **label**: An integer (0 or 1) where 1 indicates a typosquatted domain and 0 indicates no typosquatting. ### Data Splits The dataset is divided as follows: | Split | Number of Instances |Positive|Negative| |----------|---------------------|--------|--------| | Train | 38000 | 50% | 50% | | Test | 2000 | 50% | 50% | ## Dataset Creation ### Data Generation The domain pairs were generated using [ail-typo-squatting](https://github.com/typosquatter/ail-typo-squatting) Data processing includes balancing positive and negative samples to ensure even representation. ### Dataset usage This dataset was developed to facilitate large-scale typosquatting detection for cybersecurity applications. It supports training and evaluating binary classifiers designed to identify domains that may have been intentionally misspelled for malicious purposes.
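A minimal loading sketch for this card, assuming only the `datasets` library and the column/split names listed above; the pairing logic is illustrative and not part of the original card:

```python
from datasets import load_dataset

# Column and split names as listed in the dataset_info above.
ds = load_dataset("Anvilogic/CE-Typosquat-Training-Dataset", split="train")

# Build (candidate, legitimate) text pairs with binary labels -- the usual
# input format for a cross-encoder style binary classifier.
pairs = [(row["potential_typosquat"], row["legitimate"]) for row in ds]
labels = [int(row["label"]) for row in ds]

print(pairs[0], labels[0])
```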
Anvilogic/T5-Typosquat-Training-Dataset
Anvilogic
"2024-11-11T10:07:24Z"
1
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-08T15:05:47Z"
--- dataset_info: features: - name: text dtype: string - name: response dtype: string - name: label dtype: class_label: names: '0': 'false' '1': 'true' splits: - name: train num_bytes: 3860467.2 num_examples: 40000 - name: test num_bytes: 965116.8 num_examples: 10000 download_size: 1390903 dataset_size: 4825584.0 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* --- # Typosquat Dataset ## Dataset Summary This dataset is intended for typosquatting detection within a domain corpus. It contains 50,000 labeled pairs, categorized as either typosquatted or non-typosquatted. The data is divided into training and test splits, each maintaining a balanced distribution of positive and negative examples. ## Supported Tasks and Leaderboards **T5 training**: The primary task is binary classification, specifically detecting typosquatting domains. To do so we define a new task in the T5 format and we prompt the model with both domains. The dataset can be used to train a cross-encoder or other model types for binary classification. ## Languages The dataset is multilingual, reflecting the diversity of domain names. ## Dataset Structure ### Data Instances Each data instance in the dataset consists of two domains and a label indicating if the second domain is a typosquatted version of the first. An example from the training set: ```json {'text': 'Is the first domain a typosquat of the second: lonlonsoft.com stiltsoft.net', 'response': 'false', 'label': 0} ``` **text**: A prompt string comprised of the task definition as well as the pair of candidate domain and legitimate domain. **response**: A string representing the expected answer from the model. **label**: An integer (0 or 1) where 1 indicates a typosquatted domain and 0 indicates no typosquatting. ### Data Splits The dataset is divided as follows: | Split | Number of Instances |Positive|Negative| |----------|---------------------|--------|--------| | Train | 40000 | 50% | 50% | | Test | 10000 | 50% | 50% | ## Dataset Creation ### Data Generation The domain pairs were generated using [ail-typo-squatting](https://github.com/typosquatter/ail-typo-squatting) Data processing includes balancing positive and negative samples to ensure even representation. ### Dataset usage This dataset was developed to facilitate large-scale typosquatting detection for cybersecurity applications. It supports training and evaluating binary classifiers designed to identify domains that may have been intentionally misspelled for malicious purposes.
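A hedged preprocessing sketch for the T5-style prompt/response format described above; the checkpoint name `t5-small` is an illustrative assumption, not one named by the card:

```python
from datasets import load_dataset
from transformers import AutoTokenizer

# Split and column names as listed in the dataset_info above.
ds = load_dataset("Anvilogic/T5-Typosquat-Training-Dataset", split="train")

# Illustrative base checkpoint; the card does not specify one.
tokenizer = AutoTokenizer.from_pretrained("t5-small")

def preprocess(example):
    # The prompt already embeds the task definition plus both domains.
    model_inputs = tokenizer(example["text"], truncation=True)
    # The target is the literal string "true" or "false".
    labels = tokenizer(text_target=example["response"], truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

tokenized = ds.map(preprocess)
print(tokenized[0]["labels"])
```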
Anvilogic/Embedder-Typosquat-Training-Dataset
Anvilogic
"2024-11-08T18:07:00Z"
1
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-08T17:46:34Z"
--- dataset_info: features: - name: anchor dtype: string - name: positive dtype: string splits: - name: train num_bytes: 1499094 num_examples: 43447 - name: test num_bytes: 377209 num_examples: 10881 download_size: 584371 dataset_size: 1876303 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* --- # Typosquat Embedding Dataset ## Dataset Summary This dataset is designed for training embedding models to recognize typosquatting within a domain corpus. It consists of pairs of legitimate and typosquatted domains for use in similarity learning, enabling models to identify subtle domain alterations. The dataset is formatted for embedding-based training, specifically useful for contrastive learning techniques or other tasks where domain similarity is a key factor. ## Supported Tasks and Leaderboards **Embedding Training**: The primary task supported by this dataset is contrastive learning to create embeddings for typosquatting detection. The dataset can be used to train a similarity model, such as a dual-encoder, where each instance is a pair of legitimate and potentially typosquatted domains. ## Languages This dataset includes a multilingual set of domains, reflecting the diversity of internet domains globally. ## Dataset Structure ### Data Instances Each instance in the dataset consists of two domains: - **anchor**: The legitimate domain. - **positive**: A version of the domain with minor alterations that may represent typosquatting. An example from the dataset is as follows: ```json { "anchor": "e-volution.ai", "positive": "e-volutiọn.ai" } ``` The anchor and positive columns are both strings representing domains. The "positive" domain is a variation created by intentional typosquatting techniques (e.g., homoglyphs or character substitution). ### Data Splits The dataset is structured to be used for embedding model training and evaluation: - Split: Train Number of Instances: 43,447 - Split: Test Number of Instances: 10,881 ## Dataset Creation ### Data Generation The domain pairs were generated using [ail-typo-squatting](https://github.com/typosquatter/ail-typo-squatting) Data processing includes balancing positive and negative samples to ensure even representation. ### Dataset Usage This dataset is suitable for cybersecurity applications focusing on typosquatting detection. It can be used to train and evaluate embedding-based models designed to identify domains that may have been manipulated for malicious purposes, supporting efforts in online safety and domain monitoring.
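A contrastive-training sketch under stated assumptions: the anchor/positive columns come from the card above, while the base model `all-MiniLM-L6-v2` and the use of `MultipleNegativesRankingLoss` are illustrative choices, not prescribed by the card:

```python
from datasets import load_dataset
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

# Column and split names as listed in the dataset_info above.
ds = load_dataset("Anvilogic/Embedder-Typosquat-Training-Dataset", split="train")
examples = [InputExample(texts=[row["anchor"], row["positive"]]) for row in ds]

# Illustrative base encoder; any sentence embedding model could be swapped in.
model = SentenceTransformer("all-MiniLM-L6-v2")
loader = DataLoader(examples, shuffle=True, batch_size=64)
loss = losses.MultipleNegativesRankingLoss(model)  # in-batch negatives

model.fit(train_objectives=[(loader, loss)], epochs=1, warmup_steps=100)
```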
e1010101/tongue-images-nosplit
e1010101
"2024-11-13T08:18:10Z"
1
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:image", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-13T08:16:56Z"
--- dataset_info: features: - name: image dtype: image - name: pixel_values sequence: sequence: sequence: float32 splits: - name: train num_bytes: 1934094019.002 num_examples: 1066 download_size: 465770207 dataset_size: 1934094019.002 configs: - config_name: default data_files: - split: train path: data/train-* ---
e1010101/tongue-images-nosplit-segmented
e1010101
"2024-11-13T08:36:25Z"
1
0
[ "size_categories:n<1K", "format:parquet", "modality:image", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-13T08:35:16Z"
--- dataset_info: features: - name: image dtype: image - name: pixel_values sequence: sequence: sequence: float32 splits: - name: train num_bytes: 1764742614.0 num_examples: 894 download_size: 350336236 dataset_size: 1764742614.0 configs: - config_name: default data_files: - split: train path: data/train-* ---
e1010101/tongue-images-nosplit-cropped
e1010101
"2024-11-14T04:13:37Z"
1
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:image", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-14T04:12:55Z"
--- dataset_info: features: - name: image dtype: image - name: pixel_values sequence: sequence: sequence: float32 splits: - name: train num_bytes: 1875419490.17 num_examples: 1054 download_size: 230186487 dataset_size: 1875419490.17 configs: - config_name: default data_files: - split: train path: data/train-* ---
adityasinghce92/finance-qa-10k-dataset
adityasinghce92
"2024-11-19T10:27:34Z"
1
0
[ "language:en", "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-17T14:07:13Z"
--- language: - en dataset_info: features: - name: text dtype: string - name: __index_level_0__ dtype: int64 splits: - name: train num_bytes: 3279666 num_examples: 6997 download_size: 1302876 dataset_size: 3279666 configs: - config_name: default data_files: - split: train path: data/train-* ---
FrancophonIA/train-o-matic
FrancophonIA
"2024-11-21T13:42:41Z"
1
0
[ "language:de", "language:en", "language:es", "language:fr", "language:it", "language:zh", "region:us" ]
null
"2024-11-17T15:10:19Z"
--- language: - de - en - es - fr - it - zh multilingulality: - multilingual viewer: false --- > [!NOTE] > Dataset origin: https://live.european-language-grid.eu/catalogue/corpus/5110/ ## Description We present Train-O-Matic, a language-independent method for generating millions of sense-annotated training instances for virtually all meanings of words in a language's vocabulary. For more info see http://trainomatic.org/trainomatic ## Citation ``` Pasini, (2021). Train-O-Matic Large. Version 1. [Dataset (Text corpus)]. Source: European Language Grid. https://live.european-language-grid.eu/catalogue/corpus/5110 ```
FrancophonIA/E3C-Corpus-2.0.0
FrancophonIA
"2024-11-21T13:41:11Z"
1
0
[ "language:de", "language:it", "language:es", "language:eu", "language:fr", "region:us" ]
null
"2024-11-17T15:10:26Z"
--- language: - de - it - es - eu - fr multilingulality: - multilingual viewer: false --- > [!NOTE] > Dataset origin: https://live.european-language-grid.eu/catalogue/corpus/7618/ ## Description E3C is a freely available multilingual corpus (English, French, Italian, Spanish, and Basque) of semantically annotated clinical narratives to allow for the linguistic analysis, benchmarking, and training of information extraction systems. It consists of two types of annotations: (i) clinical entities (e.g., pathologies), (ii) temporal information and factuality (e.g., events). Researchers can use the benchmark training and test splits of our corpus to develop and test their own models. ## Citation ``` Minard, Anne-Lyse; Zanoli, Roberto; Altuna, Begoña; Speranza, Manuela; Magnini, Bernardo; Lavelli, Alberto (2021, August 09). European Clinical Case Corpus. Version 2.0.0. Bruno Kessler Foundation. [Dataset (Text corpus)]. https://doi.org/10.57771/dey2-g751 ```
allenai/RLVR-GSM-MATH-IF-Mixed-Constraints
allenai
"2024-11-21T15:50:02Z"
1
0
[ "license:odc-by", "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-18T19:03:54Z"
--- dataset_info: features: - name: messages list: - name: content dtype: string - name: role dtype: string - name: ground_truth dtype: string - name: dataset dtype: string - name: constraint_type dtype: string - name: constraint dtype: string splits: - name: train num_bytes: 58788096 num_examples: 29946 download_size: 16533143 dataset_size: 58788096 configs: - config_name: default data_files: - split: train path: data/train-* license: odc-by --- <img src="https://huggingface.co/datasets/allenai/blog-images/resolve/main/tulu-3/Tulu3-logo.png" alt="Tulu3 banner" width="400" style="margin-left:'auto' margin-right:'auto' display:'block'"/> # GSM/MATH/IF Data - RLVR Formatted *Note that this collection is licensed under ODC-BY-1.0 license; different licenses apply to subsets of the data.* This dataset contains data formatted for use with [open-instruct](https://github.com/allenai/open-instruct) - specifically reinforcement learning with verifiable rewards. It was used to train the final Tulu 3 models with RL, and contains the following subsets: - **GSM8k** (7,473 samples): The [GSM8k train set](https://huggingface.co/datasets/openai/gsm8k) formatted for use with RLVR and open-instruct. MIT License. - **MATH** (7,500 samples): The [MATH train set](https://github.com/hendrycks/math) formatted for use with RLVR and open-instruct. MIT License. - **IF Prompts** (14,973 samples): Prompts with verifiable constraints generated by sampling from the [Tulu 2 SFT mixture](https://huggingface.co/datasets/allenai/tulu-v2-sft-mixture) and randomly adding constraints from [IFEval](https://github.com/Rohan2002/IFEval). ODC-BY license. Part of the Tulu 3 release, for which you can see models [here](https://huggingface.co/collections/allenai/tulu-3-models-673b8e0dc3512e30e7dc54f5) and datasets [here](https://huggingface.co/collections/allenai/tulu-3-datasets-673b8df14442393f7213f372). ## Dataset Structure Each example in the dataset contains the standard instruction-tuning data points as follow: - messages (list): inputs used to prompt the model (after chat template formatting). - ground_truth (str): the answer for the given sample. - dataset (str): For GSM8k and MATH, the answer to the question. For IF prompts, the arguments to be passed to the verifying function, as a json blob. - constraint_type (str): the constraint present in the prompt. - constraint (str): the constraint described in plain english.
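A minimal inspection sketch, assuming only the `datasets` library and the feature names listed in the card above:

```python
from datasets import load_dataset

# Feature names (messages, ground_truth, dataset, constraint_type, constraint)
# and the single "train" split as listed in the dataset_info above.
rlvr = load_dataset("allenai/RLVR-GSM-MATH-IF-Mixed-Constraints", split="train")

sample = rlvr[0]
print(sample["dataset"], sample["constraint_type"])
print(sample["messages"][0]["content"])  # the prompt shown to the model
print(sample["ground_truth"])            # what the verifier checks against
```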
CodeDPO/codedpo_20241119
CodeDPO
"2024-11-19T05:48:58Z"
1
0
[ "size_categories:100K<n<1M", "format:parquet", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-19T05:47:19Z"
--- dataset_info: features: - name: question dtype: binary - name: tests dtype: binary - name: inferences dtype: binary splits: - name: train num_bytes: 10376274064 num_examples: 110966 download_size: 2322814305 dataset_size: 10376274064 configs: - config_name: default data_files: - split: train path: data/train-* ---
PROCIT-SANDBOX/training_dataset_ner_0.1
PROCIT-SANDBOX
"2024-11-19T12:59:54Z"
1
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-19T12:59:50Z"
--- dataset_info: features: - name: id dtype: string - name: tokens sequence: string - name: pos_tags sequence: class_label: names: '0': '"' '1': '''''' '2': '#' '3': $ '4': ( '5': ) '6': ',' '7': . '8': ':' '9': '``' '10': CC '11': CD '12': DT '13': EX '14': FW '15': IN '16': JJ '17': JJR '18': JJS '19': LS '20': MD '21': NN '22': NNP '23': NNPS '24': NNS '25': NN|SYM '26': PDT '27': POS '28': PRP '29': PRP$ '30': RB '31': RBR '32': RBS '33': RP '34': SYM '35': TO '36': UH '37': VB '38': VBD '39': VBG '40': VBN '41': VBP '42': VBZ '43': WDT '44': WP '45': WP$ '46': WRB - name: chunk_tags sequence: class_label: names: '0': O '1': B-ADJP '2': I-ADJP '3': B-ADVP '4': I-ADVP '5': B-CONJP '6': I-CONJP '7': B-INTJ '8': I-INTJ '9': B-LST '10': I-LST '11': B-NP '12': I-NP '13': B-PP '14': I-PP '15': B-PRT '16': I-PRT '17': B-SBAR '18': I-SBAR '19': B-UCP '20': I-UCP '21': B-VP '22': I-VP - name: ner_tags sequence: class_label: names: '0': O '1': B-PER '2': I-PER '3': B-ORG '4': I-ORG '5': B-LOC '6': I-LOC '7': B-MISC '8': I-MISC splits: - name: train num_bytes: 28318448 num_examples: 157374 - name: validation num_bytes: 4406785 num_examples: 21167 - name: test num_bytes: 4256672 num_examples: 21373 download_size: 5810373 dataset_size: 36981905 configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* ---
FrancophonIA/IWSLT_2016
FrancophonIA
"2024-11-21T13:43:59Z"
1
0
[ "task_categories:translation", "language:en", "language:de", "language:fr", "region:us" ]
[ "translation" ]
"2024-11-19T21:13:24Z"
--- language: - en - de - fr multilingulality: - multilingual task_categories: - translation viewer: false --- > [!NOTE] > Dataset origin: https://live.european-language-grid.eu/catalogue/corpus/709/ ## Description The human evaluation (HE) dataset created for English to German (EnDe) and English to French (EnFr) MT tasks was a subset of one of the official test sets of the IWSLT 2016 evaluation campaign. The resulting HE sets are composed of 600 segments for both EnDe and EnFr, each corresponding to around 10,000 words. Human evaluation was based on Post-Editing, i.e. the manual correction of the MT system output, which was carried out by professional translators. Nine and five primary runs submitted to the evaluation campaign were post-edited for the two tasks, respectively. Data are publicly available through the WIT3 website wit3.fbk.eu. 600 segments for both EnDe and EnFr (10K tokens each). Respectively, 9 and 5 different automatic translations post-edited by professional translators (for Analysis of MT quality and Quality Estimation components). ## Citation ``` IWSLT 2016 Human Post-Editing data (2020). Version 1.0.0 (automatically assigned). [Dataset (Text and Text corpus)]. Source: European Language Grid. https://live.european-language-grid.eu/catalogue/corpus/709 ```
tobyye/lllama3-tt
tobyye
"2024-11-20T03:56:40Z"
1
0
[ "license:apache-2.0", "region:us" ]
null
"2024-11-20T03:56:40Z"
--- license: apache-2.0 ---
self-generate/ds_chat_pos_reflct_adamw_iter1_sppo_hard_new_cn_mining_oj_iter1-full_response_traceback
self-generate
"2024-11-20T05:25:05Z"
1
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T05:25:03Z"
--- dataset_info: features: - name: prompt dtype: string - name: test dtype: string - name: tag dtype: string - name: chosen list: - name: content dtype: string - name: role dtype: string - name: rejected list: - name: content dtype: string - name: role dtype: string - name: text_prompt dtype: string - name: text_chosen dtype: string - name: text_rejected dtype: string - name: generate_0 dtype: string - name: generate_0_score dtype: int64 - name: traceback_0 dtype: string - name: generate_1 dtype: string - name: generate_1_score dtype: int64 - name: traceback_1 dtype: string - name: generate_2 dtype: string - name: generate_2_score dtype: int64 - name: traceback_2 dtype: string - name: generate_3 dtype: string - name: generate_3_score dtype: int64 - name: traceback_3 dtype: string - name: generate_4 dtype: string - name: generate_4_score dtype: int64 - name: traceback_4 dtype: string - name: generate_5 dtype: string - name: generate_5_score dtype: int64 - name: traceback_5 dtype: string - name: generate_6 dtype: string - name: generate_6_score dtype: int64 - name: traceback_6 dtype: string - name: generate_7 dtype: string - name: generate_7_score dtype: int64 - name: traceback_7 dtype: string - name: probability sequence: sequence: float64 - name: rm_scores sequence: int64 splits: - name: train num_bytes: 35805537 num_examples: 3305 download_size: 13697138 dataset_size: 35805537 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "ds_chat_pos_reflct_adamw_iter1_sppo_hard_new_cn_mining_oj_iter1-full_response_traceback" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
CodeDPO/qwen_coder_train_20241120
CodeDPO
"2024-11-20T05:29:16Z"
1
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T05:29:03Z"
--- dataset_info: features: - name: program_1 dtype: string - name: program_2 dtype: string - name: winner dtype: int64 - name: accuracy_1 dtype: float64 - name: accuracy_2 dtype: float64 - name: accuracy_difference dtype: float64 - name: model_1 dtype: string - name: model_2 dtype: string - name: prompt dtype: string - name: tests sequence: string splits: - name: train num_bytes: 2476888565 num_examples: 490333 download_size: 124834643 dataset_size: 2476888565 configs: - config_name: default data_files: - split: train path: data/train-* ---
RyanYr/reflect_om2_265k
RyanYr
"2024-11-20T05:46:51Z"
1
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T05:46:46Z"
--- dataset_info: features: - name: problem dtype: string - name: generated_solution dtype: string - name: answer dtype: string - name: problem_source dtype: string splits: - name: train num_bytes: 319808930 num_examples: 264558 download_size: 152790290 dataset_size: 319808930 configs: - config_name: default data_files: - split: train path: data/train-* ---
paulrichmond/hep_ph_gr_qc_gen0
paulrichmond
"2024-11-20T14:57:40Z"
1
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T08:07:10Z"
--- dataset_info: features: - name: id dtype: string - name: submitter dtype: string - name: authors dtype: string - name: title dtype: string - name: comments dtype: string - name: journal-ref dtype: string - name: doi dtype: string - name: report-no dtype: string - name: categories dtype: string - name: license dtype: string - name: orig_abstract dtype: string - name: versions list: - name: created dtype: string - name: version dtype: string - name: update_date dtype: string - name: authors_parsed sequence: sequence: string - name: abstract dtype: string - name: prompt dtype: string - name: y_true dtype: string - name: comp_s3-L-3.1-8B-base_v3 dtype: string - name: preds_s3-L-3.1-8B-base_v3 dtype: string - name: comp_s1-L-3.1-8B-base dtype: string - name: preds_s1-L-3.1-8B-base dtype: string - name: comp_Llama-3.1-8B dtype: string - name: preds_Llama-3.1-8B dtype: string - name: comp_s2-L-3.1-8B-base dtype: string - name: preds_s2-L-3.1-8B-base dtype: string splits: - name: test num_bytes: 563840 num_examples: 50 download_size: 343510 dataset_size: 563840 configs: - config_name: default data_files: - split: test path: data/test-* --- Generated with the following parameters - max_new_tokens: 1024 - min_new_tokens: 1 - temperature: 0.8 - do_sample: true
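A hedged sketch of reproducing these generation settings with the `transformers` `generate` API; the checkpoint path is an illustrative assumption, since the card only lists model shorthands such as Llama-3.1-8B:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Illustrative checkpoint; the card does not pin an exact hub path.
name = "meta-llama/Llama-3.1-8B"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForCausalLM.from_pretrained(name, torch_dtype=torch.bfloat16)

inputs = tokenizer("<paper abstract prompt here>", return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=1024,
    min_new_tokens=1,
    temperature=0.8,
    do_sample=True,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```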
paulrichmond/astro_gen0
paulrichmond
"2024-11-20T14:55:27Z"
1
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T08:07:28Z"
--- dataset_info: features: - name: id dtype: string - name: abstract dtype: string - name: prompt dtype: string - name: y_true dtype: string - name: comp_Llama-2-7b-hf dtype: string - name: preds_Llama-2-7b-hf dtype: string - name: comp_Llama-3.1-8B dtype: string - name: preds_Llama-3.1-8B dtype: string - name: comp_astrollama_4bit dtype: string - name: preds_astrollama_4bit dtype: string splits: - name: test num_bytes: 829787 num_examples: 50 download_size: 475338 dataset_size: 829787 configs: - config_name: default data_files: - split: test path: data/test-* --- Generated with the following parameters - max_new_tokens: 1024 - min_new_tokens: 1 - temperature: 0.8 - do_sample: true
paolordls/crosslg-contaminated-benchmark-en-sm-0
paolordls
"2024-11-20T08:09:32Z"
1
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T08:09:29Z"
--- dataset_info: features: - name: fake_news dtype: string - name: scenario_id dtype: int64 - name: real_news dtype: string - name: fake_keyword dtype: string - name: real_question dtype: string - name: fake_question dtype: string - name: real_answer dtype: string - name: fake_answer dtype: string splits: - name: train num_bytes: 132738 num_examples: 20 download_size: 121730 dataset_size: 132738 configs: - config_name: default data_files: - split: train path: data/train-* ---
StormblessedKal/torgo_imperative_test
StormblessedKal
"2024-11-20T08:45:48Z"
1
0
[ "size_categories:n<1K", "format:parquet", "modality:audio", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T08:45:44Z"
--- dataset_info: features: - name: Filename dtype: string - name: Transcription dtype: string - name: Pathname dtype: string - name: Audio dtype: audio: sampling_rate: 16000 splits: - name: train num_bytes: 122777834.0 num_examples: 454 download_size: 122342933 dataset_size: 122777834.0 configs: - config_name: default data_files: - split: train path: data/train-* ---
farabi-lab/kaznu-lib-ocr-for-lm
farabi-lab
"2024-11-20T10:38:25Z"
1
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T09:40:03Z"
--- dataset_info: features: - name: autor dtype: string - name: name dtype: string - name: annotation dtype: string - name: text dtype: string splits: - name: train num_bytes: 753349732.6429342 num_examples: 880 - name: test num_bytes: 40235724.357065804 num_examples: 47 download_size: 365064868 dataset_size: 793585457.0 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* ---
ADT1999/my-dataset1
ADT1999
"2024-11-20T14:38:49Z"
1
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T09:44:01Z"
--- dataset_info: features: - name: img_id dtype: string - name: original_image dtype: string - name: target_image dtype: string - name: object_image dtype: string splits: - name: train num_bytes: 32138560 num_examples: 108094 download_size: 8869173 dataset_size: 32138560 configs: - config_name: default data_files: - split: train path: data/train-* ---
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_14dc0aaf-e839-47cb-96ad-c2b6475a77d2
argilla-internal-testing
"2024-11-20T09:59:20Z"
1
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T09:59:20Z"
--- dataset_info: features: - name: text dtype: string - name: label dtype: class_label: names: '0': positive '1': negative splits: - name: train num_bytes: 111 num_examples: 3 download_size: 1256 dataset_size: 111 configs: - config_name: default data_files: - split: train path: data/train-* ---
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_f7a83e55-184b-486d-85de-3b53b850bcff
argilla-internal-testing
"2024-11-20T09:59:21Z"
1
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T09:59:20Z"
--- dataset_info: features: - name: text dtype: string - name: label dtype: class_label: names: '0': positive '1': negative splits: - name: train num_bytes: 111 num_examples: 3 download_size: 1256 dataset_size: 111 configs: - config_name: default data_files: - split: train path: data/train-* ---
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_c2771d14-21b6-40b0-8b79-3cd7e0633366
argilla-internal-testing
"2024-11-20T09:59:52Z"
1
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T09:59:51Z"
--- dataset_info: features: - name: text dtype: string - name: label dtype: class_label: names: '0': positive '1': negative splits: - name: train num_bytes: 111 num_examples: 3 download_size: 1256 dataset_size: 111 configs: - config_name: default data_files: - split: train path: data/train-* ---
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_25284129-8241-4a65-aa2c-f3f228110bee
argilla-internal-testing
"2024-11-20T09:59:57Z"
1
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T09:59:55Z"
--- dataset_info: features: - name: text dtype: string - name: label dtype: class_label: names: '0': positive '1': negative splits: - name: train num_bytes: 111 num_examples: 3 download_size: 1256 dataset_size: 111 configs: - config_name: default data_files: - split: train path: data/train-* ---
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_0f75dd4a-8057-4a8d-859e-65681521193e
argilla-internal-testing
"2024-11-20T10:00:52Z"
1
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
null
"2024-11-20T10:00:52Z"
--- dataset_info: features: - name: text dtype: string - name: label dtype: class_label: names: '0': positive '1': negative splits: - name: train num_bytes: 111 num_examples: 3 download_size: 1256 dataset_size: 111 configs: - config_name: default data_files: - split: train path: data/train-* ---