# Hugging Face file-viewer residue from the original upload page (kept for provenance):
# Salman95s's picture
# Upload 27 files
# cfc20e5 verified
# raw
# history blame
# 2.25 kB
import json
def parse_balochi_eng_file(file_path):
    """Parse a tab-separated Balochi->English file into a categorized dataset dict.

    Each non-empty line of the input is expected to be ``balochi<TAB>english``.
    Rows are bucketed into thematic categories by keyword matching on the
    Balochi text (first match wins, in the order: geography, feelings,
    technical, otherwise general). Blank or malformed lines (not exactly one
    tab) are skipped instead of raising, so a trailing newline or a stray row
    no longer crashes the parse.

    Args:
        file_path: Path to the UTF-8 encoded TSV source file.

    Returns:
        A dict with two keys: ``dataset_metadata`` (corpus-level metadata,
        including the total translation count) and ``translation_samples``
        (one ``{"context", "translations"}`` entry per category).
    """
    context_dict = {
        "Daily Vocabulary": [],
        "General Actions": [],
        "Geography and Locations": [],
        "Feelings and States": [],
        "Technical Terms": [],
    }
    # Keyword substrings used to categorize a row by its Balochi text.
    # NOTE(review): "Daily Vocabulary" has no keywords and stays empty here —
    # presumably filled by another pipeline step; confirm with the dataset owner.
    geography_keywords = ('ءَ', 'بابت', 'بُرز', 'درمُلک')
    feelings_keywords = ('بیرَگا', 'چِنت', 'گیشی')
    technical_keywords = ('سافٹ', 'ہارڈ')

    with open(file_path, 'r', encoding='utf-8') as file:
        for line in file:
            line = line.strip()
            if not line:
                continue  # skip blank lines (the original unpack raised ValueError here)
            parts = line.split('\t')
            if len(parts) != 2:
                continue  # skip malformed rows rather than crashing mid-file
            balochi, english = parts
            entry = {"balochi": balochi, "english": english}
            # Categorize translations based on keywords (order matters: first match wins)
            if any(kw in balochi for kw in geography_keywords):
                context_dict["Geography and Locations"].append(entry)
            elif any(kw in balochi for kw in feelings_keywords):
                context_dict["Feelings and States"].append(entry)
            elif any(kw in balochi for kw in technical_keywords):
                context_dict["Technical Terms"].append(entry)
            else:
                context_dict["General Actions"].append(entry)

    dataset_metadata = {
        "name": "Balochi to English Translation Corpus",
        "version": "1.0.0",
        "purpose": "Translation Dataset for Language Model Training",
        "languages": ["Balochi", "English"],
        "total_translations": sum(len(translations) for translations in context_dict.values()),
        "domains": ["general", "technical", "cultural", "literary", "historical"],
        "creation_date": "2024-12-14",
        "license": "CC-BY-SA 4.0",
    }
    dataset_structure = {
        "dataset_metadata": dataset_metadata,
        "translation_samples": [
            {"context": context, "translations": translations}
            for context, translations in context_dict.items()
        ],
    }
    return dataset_structure
# Example usage: parse the TSV corpus and write the categorized JSON dataset.
# Guarded so importing this module does not trigger file I/O.
if __name__ == "__main__":
    file_path = 'data_bal_en.tsv'
    dataset_json = parse_balochi_eng_file(file_path)

    # Save to a file; ensure_ascii=False keeps the Balochi script readable in the JSON.
    output_file_path = 'balochi_translation_corpus.json'
    with open(output_file_path, 'w', encoding='utf-8') as json_file:
        json.dump(dataset_json, json_file, ensure_ascii=False, indent=2)
    # The original ended with a bare `output_file_path` expression (notebook
    # residue, a no-op in a script); report the output path explicitly instead.
    print(output_file_path)