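"""Toy Balochi-to-English translation demo.

Flattens a nested multilingual JSON corpus into parallel sentences, builds
word-level vocabularies, trains a small RNN, and runs a sample translation.
Note: the model and training loop are intentionally minimal (single batch,
one predicted token per sentence) and are not a production pipeline.
"""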
import json
import re
from collections import defaultdict
import torch
import torch.nn as nn
import torch.optim as optim

def flatten_json_data(json_file_path):
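    """Flatten the nested JSON corpus into a list of parallel-sentence dicts.

    Each returned dict holds one aligned Balochi/English/Urdu/Persian sentence
    plus the context (or category) it came from.
    """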
    with open(json_file_path, 'r', encoding='utf-8') as f:
        data = json.load(f)

    flattened_data = []
    for dataset in data:
        for section in ["conversation_samples", "technical_terminology"]:
            if section in dataset:
                for category, samples in dataset[section].items():
                    for item in samples:
                        if "balochi" in item and "english" in item and "urdu" in item and "persian" in item: # Check if all keys are present
                            for balochi_sentence, english_sentence, urdu_sentence, persian_sentence in zip(item['balochi'], item['english'], item['urdu'], item['persian']):
                                flattened_data.append({
                                    "context": item.get('context', category),
                                    "balochi": balochi_sentence,
                                    "english": english_sentence,
                                    "urdu": urdu_sentence,
                                    "persian": persian_sentence
                                })
    return flattened_data

def create_vocab(tokenized_sentences):
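    """Build a word-to-index vocabulary; index 0 is <PAD> and index 1 is <UNK>."""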
    vocab = defaultdict(lambda: len(vocab))
    vocab['<PAD>'] = 0
    vocab['<UNK>'] = 1 # Add unknown token
    for sentence in tokenized_sentences:
        for word in sentence:
            vocab[word]  # defaultdict assigns the next free index on first access
    return dict(vocab)

def tokenize_text(text):
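    """Split text into word and punctuation tokens, including Arabic-script characters."""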
    return re.findall(r"[\w']+(?:ء)?|[\u0600-\u06FF]+|[؟،۔٬؛٪٫٬]+|\S", text)

class SimpleRNNTranslator(nn.Module):
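    """Embedding -> single RNN layer -> linear head over the last time step.

    The model emits one output token per input sequence, matching the
    simplified training objective used in train_model below.
    """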
    def __init__(self, input_size, output_size, hidden_size):
        super(SimpleRNNTranslator, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.rnn = nn.RNN(hidden_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        embedded = self.embedding(x)
        output, hidden = self.rnn(embedded)
        output = self.fc(output[:, -1, :])  # Use the last output in the sequence
        return output

def train_model(flattened_data, num_epochs=500, hidden_size=256, learning_rate=0.0005):
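    """Train the RNN on the whole dataset as a single batch.

    As a simplification, the loss compares the model's single output token
    against only the first word of each English target sentence.
    """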
    # Tokenize sentences
    balochi_sentences = [tokenize_text(entry['balochi']) for entry in flattened_data]
    english_sentences = [tokenize_text(entry['english']) for entry in flattened_data]

    # Create vocabularies
    balochi_vocab = create_vocab(balochi_sentences)
    english_vocab = create_vocab(english_sentences)

    input_size = len(balochi_vocab)
    output_size = len(english_vocab)

    # Prepare data
    max_balochi_len = max(len(sentence) for sentence in balochi_sentences)
    max_english_len = max(len(sentence) for sentence in english_sentences)

    def encode_sentences(sentences, vocab, max_len):
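        """Map words to indices (OOV -> <UNK>) and right-pad with <PAD> (0) to max_len."""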
        encoded = [
            [vocab.get(word, vocab['<UNK>']) for word in sentence] + [0] * (max_len - len(sentence))
            for sentence in sentences
        ]
        return torch.LongTensor(encoded)

    X = encode_sentences(balochi_sentences, balochi_vocab, max_balochi_len)
    Y = encode_sentences(english_sentences, english_vocab, max_english_len)

    # Initialize model, loss, and optimizer
    model = SimpleRNNTranslator(input_size, output_size, hidden_size)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Training loop
    for epoch in range(num_epochs):
        model.train()
        optimizer.zero_grad()
        outputs = model(X)  # Shape: [batch_size, output_size]

        # CrossEntropyLoss expects class indices; as a simplification, compare the
        # single predicted token against only the first word of each target sentence
        loss = criterion(outputs, Y[:, 0])
        loss.backward()
        optimizer.step()
        print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}")

    return model, balochi_vocab, english_vocab, max_balochi_len


def translate(model, balochi_sentence, balochi_vocab, english_vocab, max_balochi_len):
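    """Encode a Balochi sentence, run the model, and decode the predicted English word."""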
    tokenized_sentence = tokenize_text(balochi_sentence)
    encoded_sentence = [balochi_vocab.get(word, balochi_vocab['<UNK>']) for word in tokenized_sentence]  # Map OOV words to <UNK>
    encoded_sentence = encoded_sentence[:max_balochi_len]  # Keep the input within the length seen during training
    padded_sentence = encoded_sentence + [0] * (max_balochi_len - len(encoded_sentence))
    input_tensor = torch.LongTensor([padded_sentence])
    model.eval()
    with torch.no_grad():  # Inference only; no gradient tracking needed
        output = model(input_tensor)
    predicted_indices = torch.argmax(output, dim=1)
    reverse_english_vocab = {idx: word for word, idx in english_vocab.items()}
    predicted_words = [reverse_english_vocab.get(idx.item(), '<UNK>') for idx in predicted_indices] # Use <UNK> for OOV indices
    return " ".join(predicted_words)

# Example usage
if __name__ == "__main__":
    json_file_path = "mergedv1.json"  # Replace with your JSON file path
    flattened_data = flatten_json_data(json_file_path)

    # Check if data was loaded
    if not flattened_data:
        print("Error: No data loaded from JSON. Check file path and format.")
    else:
        model, balochi_vocab, english_vocab, max_balochi_len = train_model(flattened_data)

        balochi_input = "دنیا ءِ ات، پُر راز ءِ ات، ہر کسی ءِ دل ءِ موج ءِ ات"
        translated_sentence = translate(model, balochi_input, balochi_vocab, english_vocab, max_balochi_len)
        print(f"Balochi Input: {balochi_input}")
        print(f"Translated Output: {translated_sentence}")