import json
import re
from collections import defaultdict
import torch
import torch.nn as nn
import torch.optim as optim
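
# Minimal Balochi-to-English translation demo: flatten parallel JSON data, merge it
# with a tab-separated Balochi/English corpus, train a small word-level RNN, and
# translate one sample sentence at the end of the script.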
def flatten_json_data(json_file_path):
    with open(json_file_path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    flattened_data = []
    for dataset in data:
        for section in ["conversation_samples", "technical_terminology"]:
            if section in dataset:
                for category, samples in dataset[section].items():
                    for item in samples:
                        # Check that all four parallel languages are present
                        if all(key in item for key in ("balochi", "english", "urdu", "persian")):
                            for balochi_sentence, english_sentence, urdu_sentence, persian_sentence in zip(
                                item['balochi'], item['english'], item['urdu'], item['persian']
                            ):
                                flattened_data.append({
                                    "context": item.get('context', category),
                                    "balochi": balochi_sentence,
                                    "english": english_sentence,
                                    "urdu": urdu_sentence,
                                    "persian": persian_sentence
                                })
    return flattened_data
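
# Each line of the TSV corpus is expected to hold a single tab-separated
# "balochi<TAB>english" sentence pair.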
def parse_tsv_data(tsv_file_path):
    data = []
    with open(tsv_file_path, 'r', encoding='utf-8') as f:
        for line in f:
            parts = line.strip().split('\t')
            if len(parts) != 2:  # Skip blank or malformed lines instead of crashing
                continue
            balochi, english = parts
            data.append({"balochi": balochi, "english": english})
    return data
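
# Word-to-index vocabulary; index 0 is reserved for padding and 1 for unknown words.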
def create_vocab(tokenized_sentences):
    vocab = defaultdict(lambda: len(vocab))
    vocab['<PAD>'] = 0
    vocab['<UNK>'] = 1  # Add unknown token
    for sentence in tokenized_sentences:
        for word in sentence:
            vocab[word]  # Touching the key assigns it the next free index
    return dict(vocab)
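
# Tokenizer: word characters (optionally followed by hamza), Arabic-script runs,
# Perso-Arabic punctuation, or any other single non-space character.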
def tokenize_text(text):
    return re.findall(r"[\w']+(?:ء)?|[\u0600-\u06FF]+|[؟،۔٬؛٪٫٬]+|\S", text)
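
# A deliberately small baseline: embedding -> single vanilla RNN layer -> per-token
# linear projection over the target (English) vocabulary.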
class SimpleRNNTranslator(nn.Module):
    def __init__(self, input_size, output_size, hidden_size):
        super(SimpleRNNTranslator, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.rnn = nn.RNN(hidden_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        embedded = self.embedding(x)    # Shape: [batch_size, seq_len, hidden_size]
        output, _ = self.rnn(embedded)  # Shape: [batch_size, seq_len, hidden_size]
        output = self.fc(output)        # Shape: [batch_size, seq_len, output_size]
        return output
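
# Full-batch training: every sentence pair is encoded to a fixed length and the
# model learns a position-wise mapping from Balochi tokens to English tokens.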
def train_model(flattened_data, num_epochs=10, hidden_size=256, learning_rate=0.001):
    # Tokenize sentences
    balochi_sentences = [tokenize_text(entry['balochi']) for entry in flattened_data]
    english_sentences = [tokenize_text(entry['english']) for entry in flattened_data]

    # Create vocabularies
    balochi_vocab = create_vocab(balochi_sentences)
    english_vocab = create_vocab(english_sentences)
    input_size = len(balochi_vocab)
    output_size = len(english_vocab)

    # Use a unified max sequence length
    max_seq_len = max(
        max(len(sentence) for sentence in balochi_sentences),
        max(len(sentence) for sentence in english_sentences)
    )

    def encode_sentences(sentences, vocab, max_len):
        encoded = [
            [vocab.get(word, vocab['<UNK>']) for word in sentence] + [0] * (max_len - len(sentence))
            for sentence in sentences
        ]
        return torch.LongTensor(encoded)

    # Encode inputs and targets with the same max_seq_len
    X = encode_sentences(balochi_sentences, balochi_vocab, max_seq_len)  # Shape: [batch_size, max_seq_len]
    Y = encode_sentences(english_sentences, english_vocab, max_seq_len)  # Shape: [batch_size, max_seq_len]

    # Initialize model, loss, and optimizer
    model = SimpleRNNTranslator(input_size, output_size, hidden_size)
    criterion = nn.CrossEntropyLoss(ignore_index=0)  # Ignore padding token during loss computation
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Training loop
    for epoch in range(num_epochs):
        model.train()
        optimizer.zero_grad()

        # Forward pass
        outputs = model(X)                       # Shape: [batch_size, max_seq_len, output_size]
        outputs = outputs.view(-1, output_size)  # Flatten outputs for loss computation

        # Flatten target tensor
        target = Y.view(-1)  # Shape: [batch_size * max_seq_len]

        # Compute loss
        loss = criterion(outputs, target)

        # Backward pass and optimization
        loss.backward()
        optimizer.step()

        print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}")

    return model, balochi_vocab, english_vocab, max_seq_len
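
# Greedy decoding: run the model once over the padded input and take the argmax
# English token at every position.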
def translate(model, balochi_sentence, balochi_vocab, english_vocab, max_balochi_len, max_output_len=20):
    tokenized_sentence = tokenize_text(balochi_sentence)
    encoded_sentence = [balochi_vocab.get(word, balochi_vocab['<UNK>']) for word in tokenized_sentence]
    padded_sentence = encoded_sentence + [0] * (max_balochi_len - len(encoded_sentence))
    input_tensor = torch.LongTensor([padded_sentence])

    # Initialize decoding
    model.eval()
    with torch.no_grad():
        output_tensor = model(input_tensor)  # Shape: [batch_size, seq_len, output_size]
        predicted_indices = torch.argmax(output_tensor, dim=2)[0]  # Take the first (and only) batch

    reverse_english_vocab = {idx: word for word, idx in english_vocab.items()}
    predicted_words = [reverse_english_vocab.get(idx.item(), '<UNK>') for idx in predicted_indices]
    return " ".join(predicted_words[:max_output_len])

# Example usage
json_file_path = "mergedv1.json"   # Replace with your JSON file path
tsv_file_path = "data_bal_en.tsv"  # Replace with your TSV file path

# Load and combine data
flattened_json_data = flatten_json_data(json_file_path)
flattened_tsv_data = parse_tsv_data(tsv_file_path)
flattened_data = flattened_json_data + flattened_tsv_data

# Check if data was loaded
if not flattened_data:
    print("Error: No data loaded from JSON or TSV. Check file paths and formats.")
else:
    model, balochi_vocab, english_vocab, max_balochi_len = train_model(flattened_data)
    balochi_input = "تئو چے کنغ پسند کنئے؟"
    translated_sentence = translate(model, balochi_input, balochi_vocab, english_vocab, max_balochi_len)
    print(f"Balochi Input: {balochi_input}")
    print(f"Translated Output: {translated_sentence}")