import torch
import numpy as np
from nltk.translate.bleu_score import sentence_bleu
from transformers import BertTokenizer, BertModel
# Function to calculate the BLEU score between a reference and a candidate sentence
def calculate_bleu(reference, candidate):
    # sentence_bleu expects lists of tokens, so split on whitespace;
    # passing raw strings would score character-level n-grams instead
    return sentence_bleu([reference.split()], candidate.split())
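# Note: with the default 4-gram weights, sentence_bleu returns 0 whenever the
# candidate shares no 4-gram with the reference, which is common for short
# sentences. A smoothed variant (a sketch, not part of the original) would be:
#   from nltk.translate.bleu_score import SmoothingFunction
#   sentence_bleu([reference.split()], candidate.split(),
#                 smoothing_function=SmoothingFunction().method1)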
# Function to calculate a BERT-based similarity score (cosine similarity of
# [CLS] sentence embeddings; simpler than the token-level BERTScore metric)
def calculate_bert(reference, candidate):
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    model = BertModel.from_pretrained('bert-base-uncased')
    model.eval()
    reference_ids = tokenizer.encode(reference, add_special_tokens=True, max_length=512, truncation=True, return_tensors="pt")
    candidate_ids = tokenizer.encode(candidate, add_special_tokens=True, max_length=512, truncation=True, return_tensors="pt")
    with torch.no_grad():
        reference_outputs = model(reference_ids)
        candidate_outputs = model(candidate_ids)
    # Use the [CLS] token embedding (position 0 of the last hidden state) as a sentence vector
    reference_embeddings = reference_outputs[0][:, 0, :].numpy()
    candidate_embeddings = candidate_outputs[0][:, 0, :].numpy()
    cosine_similarity = np.dot(reference_embeddings, candidate_embeddings.T) / (
        np.linalg.norm(reference_embeddings) * np.linalg.norm(candidate_embeddings))
    return np.mean(cosine_similarity)
# Function to calculate minimum edit distance (Levenshtein distance) via dynamic programming
def min_edit_distance(reference, candidate):
    m = len(reference)
    n = len(candidate)
    # dp[i][j] = edit distance between the first i chars of reference and first j of candidate
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0:
                dp[i][j] = j  # empty reference: insert all j characters
            elif j == 0:
                dp[i][j] = i  # empty candidate: delete all i characters
            elif reference[i - 1] == candidate[j - 1]:
                dp[i][j] = dp[i - 1][j - 1]
            else:
                dp[i][j] = 1 + min(dp[i][j - 1],      # Insert
                                   dp[i - 1][j],      # Remove
                                   dp[i - 1][j - 1])  # Replace
    return dp[m][n]
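# Minimal usage sketch: the sample sentences below are illustrative placeholders,
# not data from the original file.
if __name__ == "__main__":
    reference = "the quick brown fox jumps over the lazy dog"
    candidate = "the quick brown fox leaps over the lazy dog"
    print("BLEU score:", calculate_bleu(reference, candidate))
    print("BERT similarity:", calculate_bert(reference, candidate))
    print("Min edit distance:", min_edit_distance(reference, candidate))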