|
import json

import datasets
from datasets import DatasetBuilder, DownloadConfig, Features, Split, SplitGenerator, Value
|
|
|
class PersianPoetry(datasets.GeneratorBasedBuilder):
    """Hugging Face dataset builder for a collection of Persian poems.

    Reads a JSON-lines file where each line is an object with ``poet``,
    ``title`` and ``content`` fields, and exposes it as a single TRAIN split.

    NOTE: the base class is ``GeneratorBasedBuilder`` rather than the
    abstract ``DatasetBuilder`` — only the generator-based builder actually
    invokes ``_generate_examples`` to produce the dataset.
    """

    VERSION = "1.0.0"

    def _info(self):
        """Return the dataset metadata: description, feature schema, homepage, citation."""
        return datasets.DatasetInfo(
            description="This dataset contains a rich collection of Persian poems along with metadata about the poets and the verses.",
            features=Features({
                'poet': Value('string'),
                'title': Value('string'),
                # A poem is a sequence of content items; each item carries a
                # hemistich pair (verse0/verse1) and/or a standalone verse.
                'content': datasets.Sequence({
                    'hemistich': {
                        'verse0': Value('string'),
                        'verse1': Value('string')
                    },
                    'verse': {
                        'text': Value('string')
                    }
                })
            }),
            homepage="https://github.com/ganjoor/desktop/releases/tag/v2.81",
            citation="""Persian Poetry Dataset. Collected by Kakooch from the Ganjoor Project. Available at: https://huggingface.co/datasets/persian_poetry""",
        )

    def _split_generators(self, dl_manager):
        """Define the dataset splits (a single TRAIN split).

        ``dl_manager`` is unused because the data is read from a local file;
        nothing is downloaded.
        """
        # TODO: replace this placeholder with the real location of the data
        # (or fetch it via dl_manager.download_and_extract).
        local_path = "/path/to/your/dataset.jsonl"
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"filepath": local_path},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, example)`` pairs, one per line of the JSON-lines file.

        Args:
            filepath: Path to a UTF-8 encoded JSON-lines file where each line
                is an object with ``poet``, ``title`` and ``content`` keys.
        """
        with open(filepath, encoding="utf-8") as f:
            for id_, line in enumerate(f):
                data = json.loads(line.strip())
                yield id_, {
                    'poet': data['poet'],
                    'title': data['title'],
                    'content': data['content'],
                }
|
|
|
|