import torch
import numpy as np
from nltk.translate.bleu_score import sentence_bleu
from transformers import BertTokenizer, BertModel


def calculate_bleu(reference, candidate):
    # sentence_bleu expects pre-tokenized input (lists of tokens); splitting
    # on whitespace keeps raw strings from being scored as character n-grams.
    return sentence_bleu([reference.split()], candidate.split())
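# Illustrative call (the sample sentences are invented, not from the original
# code):
#     calculate_bleu("the cat sat on the mat", "the cat sat on a mat")
# With NLTK's default 4-gram weights, short or largely dissimilar pairs can
# score at or near zero unless a SmoothingFunction is passed to sentence_bleu.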


def calculate_bert(reference, candidate):
    # Loading the tokenizer and model on every call is simple but slow;
    # hoist them out of the function if this is called repeatedly.
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    model = BertModel.from_pretrained('bert-base-uncased')

    # Encode each sentence, adding [CLS]/[SEP] and truncating to BERT's
    # 512-token limit.
    reference_ids = tokenizer.encode(reference, add_special_tokens=True, max_length=512, truncation=True, return_tensors="pt")
    candidate_ids = tokenizer.encode(candidate, add_special_tokens=True, max_length=512, truncation=True, return_tensors="pt")

    with torch.no_grad():
        reference_outputs = model(reference_ids)
        candidate_outputs = model(candidate_ids)

    # Use the [CLS] position of the last hidden state as a sentence embedding.
    reference_embeddings = reference_outputs[0][:, 0, :].numpy()
    candidate_embeddings = candidate_outputs[0][:, 0, :].numpy()

    # Cosine similarity between the two sentence embeddings.
    cosine_similarity = np.dot(reference_embeddings, candidate_embeddings.T) / (
        np.linalg.norm(reference_embeddings) * np.linalg.norm(candidate_embeddings)
    )
    return np.mean(cosine_similarity)
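# Illustrative call (invented sentences, not from the original code):
#     calculate_bert("a small dog sleeps", "a tiny dog is sleeping")
# returns the cosine similarity between the two [CLS] embeddings, a single
# value in [-1, 1] where higher means more similar.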


def min_edit_distance(reference, candidate):
    # Standard Levenshtein distance via dynamic programming: dp[i][j] is the
    # minimum number of insertions, deletions, and substitutions needed to
    # turn the first i elements of reference into the first j of candidate.
    m = len(reference)
    n = len(candidate)

    dp = [[0] * (n + 1) for _ in range(m + 1)]

    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0:
                # Empty reference: insert all j candidate elements.
                dp[i][j] = j
            elif j == 0:
                # Empty candidate: delete all i reference elements.
                dp[i][j] = i
            elif reference[i - 1] == candidate[j - 1]:
                # Matching elements cost nothing extra.
                dp[i][j] = dp[i - 1][j - 1]
            else:
                dp[i][j] = 1 + min(dp[i][j - 1],      # insertion
                                   dp[i - 1][j],      # deletion
                                   dp[i - 1][j - 1])  # substitution

    return dp[m][n]
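

# A minimal usage sketch (the example sentences are invented). Calling
# calculate_bert loads bert-base-uncased, downloading it on first use;
# min_edit_distance operates on whatever sequence it is given, so passing
# raw strings yields a character-level distance.
if __name__ == "__main__":
    ref = "the quick brown fox jumps over the lazy dog"
    cand = "a quick brown fox jumped over the lazy dog"

    print("BLEU:", calculate_bleu(ref, cand))
    print("BERT cosine similarity:", calculate_bert(ref, cand))
    print("Edit distance (characters):", min_edit_distance(ref, cand))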