Dataset: web_nlg
Sub-tasks: rdf-to-text
Commit d0454a3 by albertvillanova (HF staff)
Parent: ba4405a

Delete legacy JSON metadata (#4)


- Delete legacy JSON metadata (749f9d74da2589d8701f70b38d2c34f56b499eb5)

Files changed (1):
  1. dataset_infos.json (+0 -1)
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"webnlg_challenge_2017": {"description": "The WebNLG challenge consists in mapping data to text. The training data consists\nof Data/Text pairs where the data is a set of triples extracted from DBpedia and the text is a verbalisation\nof these triples. For instance, given the 3 DBpedia triples shown in (a), the aim is to generate a text such as (b).\n\na. (John_E_Blaha birthDate 1942_08_26) (John_E_Blaha birthPlace San_Antonio) (John_E_Blaha occupation Fighter_pilot)\nb. John E Blaha, born in San Antonio on 1942-08-26, worked as a fighter pilot\n\nAs the example illustrates, the task involves specific NLG subtasks such as sentence segmentation\n(how to chunk the input data into sentences), lexicalisation (of the DBpedia properties),\naggregation (how to avoid repetitions) and surface realisation\n(how to build a syntactically correct and natural sounding text).\n", "citation": "@inproceedings{web_nlg,\n author = {Claire Gardent and\n Anastasia Shimorina and\n Shashi Narayan and\n Laura Perez{-}Beltrachini},\n editor = {Regina Barzilay and\n Min{-}Yen Kan},\n title = {Creating Training Corpora for {NLG} Micro-Planners},\n booktitle = {Proceedings of the 55th Annual Meeting of the\n Association for Computational Linguistics,\n {ACL} 2017, Vancouver, Canada, July 30 - August 4,\n Volume 1: Long Papers},\n pages = {179--188},\n publisher = {Association for Computational Linguistics},\n year = {2017},\n url = {https://doi.org/10.18653/v1/P17-1017},\n doi = {10.18653/v1/P17-1017}\n}\n", "homepage": "https://webnlg-challenge.loria.fr/", "license": "", "features": {"category": {"dtype": "string", "id": null, "_type": "Value"}, "size": {"dtype": "int32", "id": null, "_type": "Value"}, "eid": {"dtype": "string", "id": null, "_type": "Value"}, "original_triple_sets": {"feature": {"otriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "modified_triple_sets": {"feature": {"mtriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "shape": {"dtype": "string", "id": null, "_type": "Value"}, "shape_type": {"dtype": "string", "id": null, "_type": "Value"}, "lex": {"feature": {"comment": {"dtype": "string", "id": null, "_type": "Value"}, "lid": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "lang": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "test_category": {"dtype": "string", "id": null, "_type": "Value"}, "dbpedia_links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "web_nlg", "config_name": "webnlg_challenge_2017", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5594812, "num_examples": 6940, "dataset_name": "web_nlg"}, "dev": {"name": "dev", "num_bytes": 706653, "num_examples": 872, "dataset_name": "web_nlg"}, "test": {"name": "test", "num_bytes": 3122533, "num_examples": 4615, "dataset_name": "web_nlg"}}, "download_checksums": 
{"https://gitlab.com/shimorina/webnlg-dataset/-/archive/587fa698bec705efbefe72a235a6019c2b9b8b6c/webnlg-dataset-587fa698bec705efbefe72a235a6019c2b9b8b6c.zip": {"num_bytes": 25499351, "checksum": "d6837063d6ef2a2e05418c1511f989c85fd9fffd21f5bd04bc5b34886f24c94e"}}, "download_size": 25499351, "post_processing_size": null, "dataset_size": 9423998, "size_in_bytes": 34923349}, "release_v1": {"description": "The WebNLG challenge consists in mapping data to text. The training data consists\nof Data/Text pairs where the data is a set of triples extracted from DBpedia and the text is a verbalisation\nof these triples. For instance, given the 3 DBpedia triples shown in (a), the aim is to generate a text such as (b).\n\na. (John_E_Blaha birthDate 1942_08_26) (John_E_Blaha birthPlace San_Antonio) (John_E_Blaha occupation Fighter_pilot)\nb. John E Blaha, born in San Antonio on 1942-08-26, worked as a fighter pilot\n\nAs the example illustrates, the task involves specific NLG subtasks such as sentence segmentation\n(how to chunk the input data into sentences), lexicalisation (of the DBpedia properties),\naggregation (how to avoid repetitions) and surface realisation\n(how to build a syntactically correct and natural sounding text).\n", "citation": "@inproceedings{web_nlg,\n author = {Claire Gardent and\n Anastasia Shimorina and\n Shashi Narayan and\n Laura Perez{-}Beltrachini},\n editor = {Regina Barzilay and\n Min{-}Yen Kan},\n title = {Creating Training Corpora for {NLG} Micro-Planners},\n booktitle = {Proceedings of the 55th Annual Meeting of the\n Association for Computational Linguistics,\n {ACL} 2017, Vancouver, Canada, July 30 - August 4,\n Volume 1: Long Papers},\n pages = {179--188},\n publisher = {Association for Computational Linguistics},\n year = {2017},\n url = {https://doi.org/10.18653/v1/P17-1017},\n doi = {10.18653/v1/P17-1017}\n}\n", "homepage": "https://webnlg-challenge.loria.fr/", "license": "", "features": {"category": {"dtype": "string", "id": null, "_type": "Value"}, "size": {"dtype": "int32", "id": null, "_type": "Value"}, "eid": {"dtype": "string", "id": null, "_type": "Value"}, "original_triple_sets": {"feature": {"otriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "modified_triple_sets": {"feature": {"mtriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "shape": {"dtype": "string", "id": null, "_type": "Value"}, "shape_type": {"dtype": "string", "id": null, "_type": "Value"}, "lex": {"feature": {"comment": {"dtype": "string", "id": null, "_type": "Value"}, "lid": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "lang": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "test_category": {"dtype": "string", "id": null, "_type": "Value"}, "dbpedia_links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "web_nlg", "config_name": "release_v1", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"full": {"name": 
"full", "num_bytes": 11684308, "num_examples": 14237, "dataset_name": "web_nlg"}}, "download_checksums": {"https://gitlab.com/shimorina/webnlg-dataset/-/archive/587fa698bec705efbefe72a235a6019c2b9b8b6c/webnlg-dataset-587fa698bec705efbefe72a235a6019c2b9b8b6c.zip": {"num_bytes": 25499351, "checksum": "d6837063d6ef2a2e05418c1511f989c85fd9fffd21f5bd04bc5b34886f24c94e"}}, "download_size": 25499351, "post_processing_size": null, "dataset_size": 11684308, "size_in_bytes": 37183659}, "release_v2": {"description": "The WebNLG challenge consists in mapping data to text. The training data consists\nof Data/Text pairs where the data is a set of triples extracted from DBpedia and the text is a verbalisation\nof these triples. For instance, given the 3 DBpedia triples shown in (a), the aim is to generate a text such as (b).\n\na. (John_E_Blaha birthDate 1942_08_26) (John_E_Blaha birthPlace San_Antonio) (John_E_Blaha occupation Fighter_pilot)\nb. John E Blaha, born in San Antonio on 1942-08-26, worked as a fighter pilot\n\nAs the example illustrates, the task involves specific NLG subtasks such as sentence segmentation\n(how to chunk the input data into sentences), lexicalisation (of the DBpedia properties),\naggregation (how to avoid repetitions) and surface realisation\n(how to build a syntactically correct and natural sounding text).\n", "citation": "@inproceedings{web_nlg,\n author = {Claire Gardent and\n Anastasia Shimorina and\n Shashi Narayan and\n Laura Perez{-}Beltrachini},\n editor = {Regina Barzilay and\n Min{-}Yen Kan},\n title = {Creating Training Corpora for {NLG} Micro-Planners},\n booktitle = {Proceedings of the 55th Annual Meeting of the\n Association for Computational Linguistics,\n {ACL} 2017, Vancouver, Canada, July 30 - August 4,\n Volume 1: Long Papers},\n pages = {179--188},\n publisher = {Association for Computational Linguistics},\n year = {2017},\n url = {https://doi.org/10.18653/v1/P17-1017},\n doi = {10.18653/v1/P17-1017}\n}\n", "homepage": "https://webnlg-challenge.loria.fr/", "license": "", "features": {"category": {"dtype": "string", "id": null, "_type": "Value"}, "size": {"dtype": "int32", "id": null, "_type": "Value"}, "eid": {"dtype": "string", "id": null, "_type": "Value"}, "original_triple_sets": {"feature": {"otriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "modified_triple_sets": {"feature": {"mtriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "shape": {"dtype": "string", "id": null, "_type": "Value"}, "shape_type": {"dtype": "string", "id": null, "_type": "Value"}, "lex": {"feature": {"comment": {"dtype": "string", "id": null, "_type": "Value"}, "lid": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "lang": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "test_category": {"dtype": "string", "id": null, "_type": "Value"}, "dbpedia_links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "web_nlg", "config_name": "release_v2", "version": 
{"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10830413, "num_examples": 12876, "dataset_name": "web_nlg"}, "dev": {"name": "dev", "num_bytes": 1360033, "num_examples": 1619, "dataset_name": "web_nlg"}, "test": {"name": "test", "num_bytes": 1324934, "num_examples": 1600, "dataset_name": "web_nlg"}}, "download_checksums": {"https://gitlab.com/shimorina/webnlg-dataset/-/archive/587fa698bec705efbefe72a235a6019c2b9b8b6c/webnlg-dataset-587fa698bec705efbefe72a235a6019c2b9b8b6c.zip": {"num_bytes": 25499351, "checksum": "d6837063d6ef2a2e05418c1511f989c85fd9fffd21f5bd04bc5b34886f24c94e"}}, "download_size": 25499351, "post_processing_size": null, "dataset_size": 13515380, "size_in_bytes": 39014731}, "release_v2_constrained": {"description": "The WebNLG challenge consists in mapping data to text. The training data consists\nof Data/Text pairs where the data is a set of triples extracted from DBpedia and the text is a verbalisation\nof these triples. For instance, given the 3 DBpedia triples shown in (a), the aim is to generate a text such as (b).\n\na. (John_E_Blaha birthDate 1942_08_26) (John_E_Blaha birthPlace San_Antonio) (John_E_Blaha occupation Fighter_pilot)\nb. John E Blaha, born in San Antonio on 1942-08-26, worked as a fighter pilot\n\nAs the example illustrates, the task involves specific NLG subtasks such as sentence segmentation\n(how to chunk the input data into sentences), lexicalisation (of the DBpedia properties),\naggregation (how to avoid repetitions) and surface realisation\n(how to build a syntactically correct and natural sounding text).\n", "citation": "@inproceedings{web_nlg,\n author = {Claire Gardent and\n Anastasia Shimorina and\n Shashi Narayan and\n Laura Perez{-}Beltrachini},\n editor = {Regina Barzilay and\n Min{-}Yen Kan},\n title = {Creating Training Corpora for {NLG} Micro-Planners},\n booktitle = {Proceedings of the 55th Annual Meeting of the\n Association for Computational Linguistics,\n {ACL} 2017, Vancouver, Canada, July 30 - August 4,\n Volume 1: Long Papers},\n pages = {179--188},\n publisher = {Association for Computational Linguistics},\n year = {2017},\n url = {https://doi.org/10.18653/v1/P17-1017},\n doi = {10.18653/v1/P17-1017}\n}\n", "homepage": "https://webnlg-challenge.loria.fr/", "license": "", "features": {"category": {"dtype": "string", "id": null, "_type": "Value"}, "size": {"dtype": "int32", "id": null, "_type": "Value"}, "eid": {"dtype": "string", "id": null, "_type": "Value"}, "original_triple_sets": {"feature": {"otriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "modified_triple_sets": {"feature": {"mtriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "shape": {"dtype": "string", "id": null, "_type": "Value"}, "shape_type": {"dtype": "string", "id": null, "_type": "Value"}, "lex": {"feature": {"comment": {"dtype": "string", "id": null, "_type": "Value"}, "lid": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "lang": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "test_category": {"dtype": "string", "id": null, "_type": "Value"}, "dbpedia_links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, 
"length": -1, "id": null, "_type": "Sequence"}, "links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "web_nlg", "config_name": "release_v2_constrained", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10853434, "num_examples": 12895, "dataset_name": "web_nlg"}, "dev": {"name": "dev", "num_bytes": 1421590, "num_examples": 1594, "dataset_name": "web_nlg"}, "test": {"name": "test", "num_bytes": 1243182, "num_examples": 1606, "dataset_name": "web_nlg"}}, "download_checksums": {"https://gitlab.com/shimorina/webnlg-dataset/-/archive/587fa698bec705efbefe72a235a6019c2b9b8b6c/webnlg-dataset-587fa698bec705efbefe72a235a6019c2b9b8b6c.zip": {"num_bytes": 25499351, "checksum": "d6837063d6ef2a2e05418c1511f989c85fd9fffd21f5bd04bc5b34886f24c94e"}}, "download_size": 25499351, "post_processing_size": null, "dataset_size": 13518206, "size_in_bytes": 39017557}, "release_v2.1": {"description": "The WebNLG challenge consists in mapping data to text. The training data consists\nof Data/Text pairs where the data is a set of triples extracted from DBpedia and the text is a verbalisation\nof these triples. For instance, given the 3 DBpedia triples shown in (a), the aim is to generate a text such as (b).\n\na. (John_E_Blaha birthDate 1942_08_26) (John_E_Blaha birthPlace San_Antonio) (John_E_Blaha occupation Fighter_pilot)\nb. John E Blaha, born in San Antonio on 1942-08-26, worked as a fighter pilot\n\nAs the example illustrates, the task involves specific NLG subtasks such as sentence segmentation\n(how to chunk the input data into sentences), lexicalisation (of the DBpedia properties),\naggregation (how to avoid repetitions) and surface realisation\n(how to build a syntactically correct and natural sounding text).\n", "citation": "@inproceedings{web_nlg,\n author = {Claire Gardent and\n Anastasia Shimorina and\n Shashi Narayan and\n Laura Perez{-}Beltrachini},\n editor = {Regina Barzilay and\n Min{-}Yen Kan},\n title = {Creating Training Corpora for {NLG} Micro-Planners},\n booktitle = {Proceedings of the 55th Annual Meeting of the\n Association for Computational Linguistics,\n {ACL} 2017, Vancouver, Canada, July 30 - August 4,\n Volume 1: Long Papers},\n pages = {179--188},\n publisher = {Association for Computational Linguistics},\n year = {2017},\n url = {https://doi.org/10.18653/v1/P17-1017},\n doi = {10.18653/v1/P17-1017}\n}\n", "homepage": "https://webnlg-challenge.loria.fr/", "license": "", "features": {"category": {"dtype": "string", "id": null, "_type": "Value"}, "size": {"dtype": "int32", "id": null, "_type": "Value"}, "eid": {"dtype": "string", "id": null, "_type": "Value"}, "original_triple_sets": {"feature": {"otriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "modified_triple_sets": {"feature": {"mtriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "shape": {"dtype": "string", "id": null, "_type": "Value"}, "shape_type": {"dtype": "string", "id": null, "_type": "Value"}, "lex": {"feature": {"comment": {"dtype": "string", "id": null, "_type": "Value"}, "lid": {"dtype": "string", "id": null, "_type": "Value"}, 
"text": {"dtype": "string", "id": null, "_type": "Value"}, "lang": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "test_category": {"dtype": "string", "id": null, "_type": "Value"}, "dbpedia_links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "web_nlg", "config_name": "release_v2.1", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10848793, "num_examples": 12876, "dataset_name": "web_nlg"}, "dev": {"name": "dev", "num_bytes": 1362072, "num_examples": 1619, "dataset_name": "web_nlg"}, "test": {"name": "test", "num_bytes": 1325860, "num_examples": 1600, "dataset_name": "web_nlg"}}, "download_checksums": {"https://gitlab.com/shimorina/webnlg-dataset/-/archive/587fa698bec705efbefe72a235a6019c2b9b8b6c/webnlg-dataset-587fa698bec705efbefe72a235a6019c2b9b8b6c.zip": {"num_bytes": 25499351, "checksum": "d6837063d6ef2a2e05418c1511f989c85fd9fffd21f5bd04bc5b34886f24c94e"}}, "download_size": 25499351, "post_processing_size": null, "dataset_size": 13536725, "size_in_bytes": 39036076}, "release_v2.1_constrained": {"description": "The WebNLG challenge consists in mapping data to text. The training data consists\nof Data/Text pairs where the data is a set of triples extracted from DBpedia and the text is a verbalisation\nof these triples. For instance, given the 3 DBpedia triples shown in (a), the aim is to generate a text such as (b).\n\na. (John_E_Blaha birthDate 1942_08_26) (John_E_Blaha birthPlace San_Antonio) (John_E_Blaha occupation Fighter_pilot)\nb. 
John E Blaha, born in San Antonio on 1942-08-26, worked as a fighter pilot\n\nAs the example illustrates, the task involves specific NLG subtasks such as sentence segmentation\n(how to chunk the input data into sentences), lexicalisation (of the DBpedia properties),\naggregation (how to avoid repetitions) and surface realisation\n(how to build a syntactically correct and natural sounding text).\n", "citation": "@inproceedings{web_nlg,\n author = {Claire Gardent and\n Anastasia Shimorina and\n Shashi Narayan and\n Laura Perez{-}Beltrachini},\n editor = {Regina Barzilay and\n Min{-}Yen Kan},\n title = {Creating Training Corpora for {NLG} Micro-Planners},\n booktitle = {Proceedings of the 55th Annual Meeting of the\n Association for Computational Linguistics,\n {ACL} 2017, Vancouver, Canada, July 30 - August 4,\n Volume 1: Long Papers},\n pages = {179--188},\n publisher = {Association for Computational Linguistics},\n year = {2017},\n url = {https://doi.org/10.18653/v1/P17-1017},\n doi = {10.18653/v1/P17-1017}\n}\n", "homepage": "https://webnlg-challenge.loria.fr/", "license": "", "features": {"category": {"dtype": "string", "id": null, "_type": "Value"}, "size": {"dtype": "int32", "id": null, "_type": "Value"}, "eid": {"dtype": "string", "id": null, "_type": "Value"}, "original_triple_sets": {"feature": {"otriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "modified_triple_sets": {"feature": {"mtriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "shape": {"dtype": "string", "id": null, "_type": "Value"}, "shape_type": {"dtype": "string", "id": null, "_type": "Value"}, "lex": {"feature": {"comment": {"dtype": "string", "id": null, "_type": "Value"}, "lid": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "lang": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "test_category": {"dtype": "string", "id": null, "_type": "Value"}, "dbpedia_links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "web_nlg", "config_name": "release_v2.1_constrained", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 11040016, "num_examples": 12895, "dataset_name": "web_nlg"}, "dev": {"name": "dev", "num_bytes": 1284044, "num_examples": 1594, "dataset_name": "web_nlg"}, "test": {"name": "test", "num_bytes": 1212665, "num_examples": 1606, "dataset_name": "web_nlg"}}, "download_checksums": {"https://gitlab.com/shimorina/webnlg-dataset/-/archive/587fa698bec705efbefe72a235a6019c2b9b8b6c/webnlg-dataset-587fa698bec705efbefe72a235a6019c2b9b8b6c.zip": {"num_bytes": 25499351, "checksum": "d6837063d6ef2a2e05418c1511f989c85fd9fffd21f5bd04bc5b34886f24c94e"}}, "download_size": 25499351, "post_processing_size": null, "dataset_size": 13536725, "size_in_bytes": 39036076}, "release_v3.0_en": {"description": "The WebNLG challenge consists in mapping data to text. 
The training data consists\nof Data/Text pairs where the data is a set of triples extracted from DBpedia and the text is a verbalisation\nof these triples. For instance, given the 3 DBpedia triples shown in (a), the aim is to generate a text such as (b).\n\na. (John_E_Blaha birthDate 1942_08_26) (John_E_Blaha birthPlace San_Antonio) (John_E_Blaha occupation Fighter_pilot)\nb. John E Blaha, born in San Antonio on 1942-08-26, worked as a fighter pilot\n\nAs the example illustrates, the task involves specific NLG subtasks such as sentence segmentation\n(how to chunk the input data into sentences), lexicalisation (of the DBpedia properties),\naggregation (how to avoid repetitions) and surface realisation\n(how to build a syntactically correct and natural sounding text).\n", "citation": "@inproceedings{web_nlg,\n author = {Claire Gardent and\n Anastasia Shimorina and\n Shashi Narayan and\n Laura Perez{-}Beltrachini},\n editor = {Regina Barzilay and\n Min{-}Yen Kan},\n title = {Creating Training Corpora for {NLG} Micro-Planners},\n booktitle = {Proceedings of the 55th Annual Meeting of the\n Association for Computational Linguistics,\n {ACL} 2017, Vancouver, Canada, July 30 - August 4,\n Volume 1: Long Papers},\n pages = {179--188},\n publisher = {Association for Computational Linguistics},\n year = {2017},\n url = {https://doi.org/10.18653/v1/P17-1017},\n doi = {10.18653/v1/P17-1017}\n}\n", "homepage": "https://webnlg-challenge.loria.fr/", "license": "", "features": {"category": {"dtype": "string", "id": null, "_type": "Value"}, "size": {"dtype": "int32", "id": null, "_type": "Value"}, "eid": {"dtype": "string", "id": null, "_type": "Value"}, "original_triple_sets": {"feature": {"otriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "modified_triple_sets": {"feature": {"mtriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "shape": {"dtype": "string", "id": null, "_type": "Value"}, "shape_type": {"dtype": "string", "id": null, "_type": "Value"}, "lex": {"feature": {"comment": {"dtype": "string", "id": null, "_type": "Value"}, "lid": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "lang": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "test_category": {"dtype": "string", "id": null, "_type": "Value"}, "dbpedia_links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "web_nlg", "config_name": "release_v3.0_en", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 11084860, "num_examples": 13211, "dataset_name": "web_nlg"}, "dev": {"name": "dev", "num_bytes": 1394243, "num_examples": 1667, "dataset_name": "web_nlg"}, "test": {"name": "test", "num_bytes": 4039282, "num_examples": 5713, "dataset_name": "web_nlg"}}, "download_checksums": {"https://gitlab.com/shimorina/webnlg-dataset/-/archive/587fa698bec705efbefe72a235a6019c2b9b8b6c/webnlg-dataset-587fa698bec705efbefe72a235a6019c2b9b8b6c.zip": 
{"num_bytes": 25499351, "checksum": "d6837063d6ef2a2e05418c1511f989c85fd9fffd21f5bd04bc5b34886f24c94e"}}, "download_size": 25499351, "post_processing_size": null, "dataset_size": 16518385, "size_in_bytes": 42017736}, "release_v3.0_ru": {"description": "The WebNLG challenge consists in mapping data to text. The training data consists\nof Data/Text pairs where the data is a set of triples extracted from DBpedia and the text is a verbalisation\nof these triples. For instance, given the 3 DBpedia triples shown in (a), the aim is to generate a text such as (b).\n\na. (John_E_Blaha birthDate 1942_08_26) (John_E_Blaha birthPlace San_Antonio) (John_E_Blaha occupation Fighter_pilot)\nb. John E Blaha, born in San Antonio on 1942-08-26, worked as a fighter pilot\n\nAs the example illustrates, the task involves specific NLG subtasks such as sentence segmentation\n(how to chunk the input data into sentences), lexicalisation (of the DBpedia properties),\naggregation (how to avoid repetitions) and surface realisation\n(how to build a syntactically correct and natural sounding text).\n", "citation": "@inproceedings{web_nlg,\n author = {Claire Gardent and\n Anastasia Shimorina and\n Shashi Narayan and\n Laura Perez{-}Beltrachini},\n editor = {Regina Barzilay and\n Min{-}Yen Kan},\n title = {Creating Training Corpora for {NLG} Micro-Planners},\n booktitle = {Proceedings of the 55th Annual Meeting of the\n Association for Computational Linguistics,\n {ACL} 2017, Vancouver, Canada, July 30 - August 4,\n Volume 1: Long Papers},\n pages = {179--188},\n publisher = {Association for Computational Linguistics},\n year = {2017},\n url = {https://doi.org/10.18653/v1/P17-1017},\n doi = {10.18653/v1/P17-1017}\n}\n", "homepage": "https://webnlg-challenge.loria.fr/", "license": "", "features": {"category": {"dtype": "string", "id": null, "_type": "Value"}, "size": {"dtype": "int32", "id": null, "_type": "Value"}, "eid": {"dtype": "string", "id": null, "_type": "Value"}, "original_triple_sets": {"feature": {"otriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "modified_triple_sets": {"feature": {"mtriple_set": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}, "shape": {"dtype": "string", "id": null, "_type": "Value"}, "shape_type": {"dtype": "string", "id": null, "_type": "Value"}, "lex": {"feature": {"comment": {"dtype": "string", "id": null, "_type": "Value"}, "lid": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "lang": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "test_category": {"dtype": "string", "id": null, "_type": "Value"}, "dbpedia_links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "links": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "web_nlg", "config_name": "release_v3.0_ru", "version": "0.0.0", "splits": {"train": {"name": "train", "num_bytes": 9550340, "num_examples": 5573, "dataset_name": "web_nlg"}, "dev": {"name": "dev", "num_bytes": 1314226, "num_examples": 790, "dataset_name": "web_nlg"}, "test": {"name": "test", "num_bytes": 3656501, 
"num_examples": 3410, "dataset_name": "web_nlg"}}, "download_checksums": {"https://gitlab.com/shimorina/webnlg-dataset/-/archive/587fa698bec705efbefe72a235a6019c2b9b8b6c/webnlg-dataset-587fa698bec705efbefe72a235a6019c2b9b8b6c.zip": {"num_bytes": 25499351, "checksum": "d6837063d6ef2a2e05418c1511f989c85fd9fffd21f5bd04bc5b34886f24c94e"}}, "download_size": 25499351, "post_processing_size": null, "dataset_size": 14521067, "size_in_bytes": 40020418}}