upload the next config
README.md
CHANGED
@@ -267,6 +267,46 @@ dataset_info:
     num_examples: 513
   download_size: 571604579
   dataset_size: 656147207.632
+- config_name: ranking_1
+  features:
+  - name: image
+    dtype: image
+  - name: contest_number
+    dtype: int32
+  - name: image_location
+    dtype: string
+  - name: image_description
+    dtype: string
+  - name: image_uncanny_description
+    dtype: string
+  - name: entities
+    sequence: string
+  - name: questions
+    sequence: string
+  - name: caption_choices
+    sequence: string
+  - name: from_description
+    dtype: string
+  - name: winner_source
+    dtype: string
+  - name: label
+    dtype: string
+  - name: n_tokens_label
+    dtype: int32
+  - name: instance_id
+    dtype: string
+  splits:
+  - name: train
+    num_bytes: 580099188.9
+    num_examples: 9450
+  - name: validation
+    num_bytes: 35332200.0
+    num_examples: 534
+  - name: test
+    num_bytes: 32624105.0
+    num_examples: 507
+  download_size: 546559254
+  dataset_size: 648055493.9
 - config_name: ranking_from_pixels
   features:
   - name: image
@@ -352,6 +392,14 @@ configs:
     path: ranking/validation-*
   - split: test
     path: ranking/test-*
+- config_name: ranking_1
+  data_files:
+  - split: train
+    path: ranking_1/train-*
+  - split: validation
+    path: ranking_1/validation-*
+  - split: test
+    path: ranking_1/test-*
 - config_name: ranking_from_pixels
   data_files:
   - split: train
dataset_infos.json
CHANGED
@@ -656,94 +656,71 @@
     "license": "",
     "features": {
       "image": {
-        "decode": true,
-        "id": null,
         "_type": "Image"
       },
       "contest_number": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       },
       "image_location": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "image_description": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "image_uncanny_description": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "entities": {
         "feature": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
-        "length": -1,
-        "id": null,
         "_type": "Sequence"
       },
       "questions": {
         "feature": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
-        "length": -1,
-        "id": null,
         "_type": "Sequence"
       },
       "caption_choices": {
         "feature": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
-        "length": -1,
-        "id": null,
         "_type": "Sequence"
       },
       "from_description": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "winner_source": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "label": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "n_tokens_label": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       },
       "instance_id": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
     "builder_name": "newyorker_caption_contest",
+    "dataset_name": "newyorker_caption_contest",
     "config_name": "ranking_1",
     "version": {
       "version_str": "1.0.0",
-      "description": null,
       "major": 1,
       "minor": 0,
       "patch": 0
@@ -751,37 +728,26 @@
     "splits": {
       "train": {
         "name": "train",
-        "num_bytes":
+        "num_bytes": 580099188.9,
         "num_examples": 9450,
-        "dataset_name":
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes":
+        "num_bytes": 35332200.0,
         "num_examples": 534,
-        "dataset_name":
+        "dataset_name": null
       },
       "test": {
         "name": "test",
-        "num_bytes":
+        "num_bytes": 32624105.0,
         "num_examples": 507,
-        "dataset_name":
+        "dataset_name": null
-      }
-    },
-    "download_checksums": {
-      "https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/huggingface_hub/v1.0/ranking_1.zip": {
-        "num_bytes": 2415555,
-        "checksum": "06858a8a18bdc6e5787ab4621f4d7b17939081cfb6240ee4dd896c678cc79ca1"
-      },
-      "https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/all_contest_images.zip": {
-        "num_bytes": 39523209,
-        "checksum": "741527b4ef7198d16cee42ae74eacbe239bcc7377f8b86811c27d627fdc77748"
-      }
       }
     },
-    "download_size":
-    "
-    "
-    "size_in_bytes": 53359718
+    "download_size": 546559254,
+    "dataset_size": 648055493.9,
+    "size_in_bytes": 1194614747.9
   },
   "ranking_from_pixels_1": {
     "description": "There are 3 caption contest tasks, described in the paper. In the Matching multiple choice task, models must recognize a caption written about a cartoon (vs. options that were not). In the Quality Ranking task, models must evaluate the quality\nof that caption by scoring it more highly than a lower quality option from the same contest. In the Explanation Generation task, models must explain why the joke is funny.\nYou are given a cartoon and 2 captions. One of the captions was selected by crowd voting or New Yorker editors as high quality. You must select it.\n",
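The size fields written for ranking_1 are internally consistent: dataset_size equals the sum of the three split num_bytes values (580099188.9 + 35332200.0 + 32624105.0 = 648055493.9), and size_in_bytes equals dataset_size plus download_size (648055493.9 + 546559254 = 1194614747.9). A short check, assuming the updated dataset_infos.json is available locally under the filename committed here:

# Sanity-check sketch for the sizes in dataset_infos.json (assumes the file is in the working directory).
import json

with open("dataset_infos.json") as f:
    info = json.load(f)["ranking_1"]

split_bytes = sum(s["num_bytes"] for s in info["splits"].values())
assert abs(split_bytes - info["dataset_size"]) < 1.0
assert abs(info["dataset_size"] + info["download_size"] - info["size_in_bytes"]) < 1.0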
ranking_1/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:345d5fd088d843f8369651fc8f0d75868aede6d3bbb4ec27ec6a7092968310b8
+size 23168879
ranking_1/train-00000-of-00002.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b390cf3a105a8be60685810ad13d01ac1f32da460398f2a76aa4255ccc62d841
+size 250330467
ranking_1/train-00001-of-00002.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:daf1835575698150c7f0ee473b618b037e1b33863832be5eaafcf24e9e1390ad
+size 246514223
ranking_1/validation-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb676b625490a8a14be6a113929d40561aa46a68f7009bc095afcd6980c465d4
+size 26545685
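The four git-lfs pointers above are the parquet shards that the ranking_1/... globs in the README's data_files section resolve to. To inspect one shard directly, a hedged sketch (the repo id is again an assumption) would be:

# Sketch: download and read one ranking_1 shard without the datasets builder.
import pandas as pd
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="jmhessel/newyorker_caption_contest",  # assumed repo id, not stated in this diff
    repo_type="dataset",
    filename="ranking_1/validation-00000-of-00001.parquet",
)
df = pd.read_parquet(path)
print(len(df), df.columns.tolist())  # 534 rows expected per the split metadata above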