Tasks: Image-to-Text
Sub-tasks: image-captioning
Languages: English
Size: 10M<n<100M
Delete legacy JSON metadata #1
opened by albertvillanova (HF staff)
- dataset_infos.json +0 -1
dataset_infos.json
DELETED
@@ -1 +0,0 @@
-{"default": {"description": "Conceptual 12M is a large-scale dataset of 12 million\nimage-text pairs specifically meant to be used for visionand-language pre-training.\nIts data collection pipeline is a relaxed version of the one used in Conceptual Captions 3M.\n", "citation": "@inproceedings{changpinyo2021cc12m,\n title = {{Conceptual 12M}: Pushing Web-Scale Image-Text Pre-Training To Recognize Long-Tail Visual Concepts},\n author = {Changpinyo, Soravit and Sharma, Piyush and Ding, Nan and Soricut, Radu},\n booktitle = {CVPR},\n year = {2021},\n}\n", "homepage": "https://github.com/google-research-datasets/conceptual-12m", "license": "The dataset may be freely used for any purpose, although acknowledgement of\nGoogle LLC (\"Google\") as the data source would be appreciated. The dataset is\nprovided \"AS IS\" without any warranty, express or implied. Google disclaims all\nliability for any damages, direct or indirect, resulting from the use of the\ndataset.\n", "features": {"image_url": {"dtype": "string", "id": null, "_type": "Value"}, "caption": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "conceptual12_m", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2794168030, "num_examples": 12423374, "dataset_name": "conceptual12_m"}}, "download_checksums": {"https://storage.googleapis.com/conceptual_12m/cc12m.tsv": {"num_bytes": 2707204412, "checksum": "892b549d493c7e75ade10d46c88c9ddabb097790d912b74cfc0ea4ff035ec2c3"}}, "download_size": 2707204412, "post_processing_size": null, "dataset_size": 2794168030, "size_in_bytes": 5501372442}}