File size: 2,254 Bytes
cfc20e5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import json

def parse_balochi_eng_file(file_path):
    """Parse a tab-separated Balochi→English translation file into a categorized dataset.

    Each input line is expected to be ``<balochi>\\t<english>``. Lines that are
    blank or do not contain exactly one tab are skipped instead of raising
    (the original implementation crashed with ValueError on a trailing blank
    line or a malformed row).

    Args:
        file_path: Path to a UTF-8 encoded ``.tsv`` file.

    Returns:
        dict: ``{"dataset_metadata": {...}, "translation_samples": [...]}`` where
        each sample is ``{"context": <category>, "translations": [{"balochi": ..., "english": ...}]}``.

    Raises:
        OSError: If *file_path* cannot be opened.
    """
    context_dict = {
        # NOTE(review): "Daily Vocabulary" is never populated by the keyword
        # rules below — it is kept for schema stability, but confirm whether a
        # categorization rule is missing.
        "Daily Vocabulary": [],
        "General Actions": [],
        "Geography and Locations": [],
        "Feelings and States": [],
        "Technical Terms": []
    }

    with open(file_path, 'r', encoding='utf-8') as file:
        for line in file:
            fields = line.strip().split('\t')
            if len(fields) != 2:
                # Skip blank lines and rows without exactly two columns.
                continue
            balochi, english = fields
            # Categorize translations based on Balochi keyword heuristics.
            if 'ءَ' in balochi or 'بابت' in balochi or 'بُرز' in balochi or 'درمُلک' in balochi:
                context_dict["Geography and Locations"].append({"balochi": balochi, "english": english})
            elif 'بیرَگا' in balochi or 'چِنت' in balochi or 'گیشی' in balochi:
                context_dict["Feelings and States"].append({"balochi": balochi, "english": english})
            elif 'سافٹ' in balochi or 'ہارڈ' in balochi:
                context_dict["Technical Terms"].append({"balochi": balochi, "english": english})
            else:
                # Default bucket for anything no keyword rule matched.
                context_dict["General Actions"].append({"balochi": balochi, "english": english})

    dataset_metadata = {
        "name": "Balochi to English Translation Corpus",
        "version": "1.0.0",
        "purpose": "Translation Dataset for Language Model Training",
        "languages": ["Balochi", "English"],
        "total_translations": sum(len(translations) for translations in context_dict.values()),
        "domains": ["general", "technical", "cultural", "literary", "historical"],
        "creation_date": "2024-12-14",
        "license": "CC-BY-SA 4.0"
    }

    dataset_structure = {
        "dataset_metadata": dataset_metadata,
        "translation_samples": [
            {"context": context, "translations": translations} for context, translations in context_dict.items()
        ]
    }

    return dataset_structure

# Example usage: build the corpus from the raw TSV and persist it as JSON.
file_path = 'data_bal_en.tsv'
dataset_json = parse_balochi_eng_file(file_path)

# Write the structured corpus out as pretty-printed, non-ASCII-escaped JSON
# so the Balochi script remains human-readable in the output file.
output_file_path = 'balochi_translation_corpus.json'
with open(output_file_path, 'w', encoding='utf-8') as out_handle:
    json.dump(dataset_json, out_handle, indent=2, ensure_ascii=False)

# Bare expression: surfaces the output path when run in a notebook cell.
output_file_path