import random
import re

from nltk.corpus import stopwords
from termcolor import colored


def random_sampling(original_sentence, paraphrased_sentences):
    """Highlight one uniformly random non-shared word per paraphrase in red
    and every word shared with the original sentence in green."""
    stop_words = set(stopwords.words('english'))
    original_sentence_lower = original_sentence.lower()
    paraphrased_sentences_lower = [s.lower() for s in paraphrased_sentences]
    paraphrased_sentences_no_stopwords = []

    # Strip punctuation and stopwords from each paraphrase.
    for sentence in paraphrased_sentences_lower:
        words = re.findall(r'\b\w+\b', sentence)
        filtered_sentence = ' '.join([word for word in words if word not in stop_words])
        paraphrased_sentences_no_stopwords.append(filtered_sentence)

    results = []
    for idx, sentence in enumerate(paraphrased_sentences_no_stopwords):
        # Words the paraphrase shares with the original sentence.
        common_words = set(original_sentence_lower.split()) & set(sentence.split())
        common_substrings = ', '.join(sorted(common_words))

        # Pick one non-shared word uniformly at random and mark it in red.
        words_to_replace = [word for word in sentence.split() if word not in common_words]
        if words_to_replace:
            word_to_mark = random.choice(words_to_replace)
            sentence = sentence.replace(word_to_mark, colored(word_to_mark, 'red'))

        # Mark the shared words in green (str.replace also matches substrings).
        for word in common_words:
            sentence = sentence.replace(word, colored(word, 'green'))

        results.append({
            f"Paraphrased Sentence {idx+1}": sentence,
            "Common Substrings": common_substrings
        })

    return results


def inverse_transform_sampling(original_sentence, paraphrased_sentences):
    """Pick one non-shared word per paraphrase by sampling from an explicit
    (currently uniform) probability distribution and highlight it in magenta;
    words shared with the original sentence are highlighted in green."""
    stop_words = set(stopwords.words('english'))
    original_sentence_lower = original_sentence.lower()
    paraphrased_sentences_lower = [s.lower() for s in paraphrased_sentences]
    paraphrased_sentences_no_stopwords = []

    # Strip punctuation and stopwords from each paraphrase.
    for sentence in paraphrased_sentences_lower:
        words = re.findall(r'\b\w+\b', sentence)
        filtered_sentence = ' '.join([word for word in words if word not in stop_words])
        paraphrased_sentences_no_stopwords.append(filtered_sentence)

    results = []
    for idx, sentence in enumerate(paraphrased_sentences_no_stopwords):
        common_words = set(original_sentence_lower.split()) & set(sentence.split())
        common_substrings = ', '.join(sorted(common_words))

        words_to_replace = [word for word in sentence.split() if word not in common_words]
        if words_to_replace:
            # Uniform weights, so this draw is equivalent to a uniform choice;
            # the weights are kept explicit so the distribution can be changed.
            probabilities = [1 / len(words_to_replace)] * len(words_to_replace)
            chosen_word = random.choices(words_to_replace, weights=probabilities)[0]
            sentence = sentence.replace(chosen_word, colored(chosen_word, 'magenta'))

        for word in common_words:
            sentence = sentence.replace(word, colored(word, 'green'))

        results.append({
            f"Paraphrased Sentence {idx+1}": sentence,
            "Common Substrings": common_substrings
        })

    return results


def contextual_sampling(original_sentence, paraphrased_sentences):
    """Pick one non-shared word per paraphrase, intended to be selected from
    the surrounding context, and highlight it in red; words shared with the
    original sentence are highlighted in green."""
    stop_words = set(stopwords.words('english'))
    original_sentence_lower = original_sentence.lower()
    paraphrased_sentences_lower = [s.lower() for s in paraphrased_sentences]
    paraphrased_sentences_no_stopwords = []

    # Strip punctuation and stopwords from each paraphrase.
    for sentence in paraphrased_sentences_lower:
        words = re.findall(r'\b\w+\b', sentence)
        filtered_sentence = ' '.join([word for word in words if word not in stop_words])
        paraphrased_sentences_no_stopwords.append(filtered_sentence)

    results = []
    for idx, sentence in enumerate(paraphrased_sentences_no_stopwords):
        common_words = set(original_sentence_lower.split()) & set(sentence.split())
        common_substrings = ', '.join(sorted(common_words))

        words_to_replace = [word for word in sentence.split() if word not in common_words]
        if words_to_replace:
            # The context string is built but not used yet; the selection
            # currently falls back to a uniform random choice.
            context = " ".join([word for word in sentence.split() if word not in common_words])
            chosen_word = random.choice(words_to_replace)
            sentence = sentence.replace(chosen_word, colored(chosen_word, 'red'))

        for word in common_words:
            sentence = sentence.replace(word, colored(word, 'green'))

        results.append({
            f"Paraphrased Sentence {idx+1}": sentence,
            "Common Substrings": common_substrings
        })

    return results


def exponential_minimum_sampling(original_sentence, paraphrased_sentences):
    """Pick one non-shared word per paraphrase with exponentially decaying
    weights (earlier candidates are more likely) and highlight it in red;
    words shared with the original sentence are highlighted in green."""
    stop_words = set(stopwords.words('english'))
    original_sentence_lower = original_sentence.lower()
    paraphrased_sentences_lower = [s.lower() for s in paraphrased_sentences]
    paraphrased_sentences_no_stopwords = []

    # Strip punctuation and stopwords from each paraphrase.
    for sentence in paraphrased_sentences_lower:
        words = re.findall(r'\b\w+\b', sentence)
        filtered_sentence = ' '.join([word for word in words if word not in stop_words])
        paraphrased_sentences_no_stopwords.append(filtered_sentence)

    results = []
    for idx, sentence in enumerate(paraphrased_sentences_no_stopwords):
        common_words = set(original_sentence_lower.split()) & set(sentence.split())
        common_substrings = ', '.join(sorted(common_words))

        words_to_replace = [word for word in sentence.split() if word not in common_words]
        if words_to_replace:
            # Weight the i-th candidate by 2^(-i): the first candidate is twice
            # as likely as the second, four times as likely as the third, etc.
            num_words = len(words_to_replace)
            probabilities = [2 ** (-i) for i in range(num_words)]
            chosen_word = random.choices(words_to_replace, weights=probabilities)[0]
            sentence = sentence.replace(chosen_word, colored(chosen_word, 'red'))

        for word in common_words:
            sentence = sentence.replace(word, colored(word, 'green'))

        results.append({
            f"Paraphrased Sentence {idx+1}": sentence,
            "Common Substrings": common_substrings
        })

    return results
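

# Illustrative usage sketch, assuming the NLTK stopwords corpus has already
# been downloaded (e.g. via nltk.download('stopwords')) and that stdout
# renders ANSI colors. The example sentences are hypothetical.
if __name__ == "__main__":
    original = "The quick brown fox jumps over the lazy dog"
    paraphrases = [
        "A fast brown fox leaps over a sleepy dog",
        "The speedy fox hops over the idle dog",
    ]
    for sampler in (random_sampling, inverse_transform_sampling,
                    contextual_sampling, exponential_minimum_sampling):
        print(sampler.__name__)
        for entry in sampler(original, paraphrases):
            for key, value in entry.items():
                print(f"  {key}: {value}")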