"""
Created on Fri Jun 30 08:47:31 2023

@author: fujidai
"""
|
import torch.nn.functional as F
from torch.utils.data import DataLoader

from sentence_transformers import InputExample, SentenceTransformer, losses, models
|
# Build a SentenceTransformer from a local XLM-RoBERTa checkpoint with mean pooling.
# max_seq_length=510 leaves room for the two special tokens within XLM-R's 512-token limit.
word_embedding_model = models.Transformer('/Users/fujidai/sinTED/xlm-roberta-base', max_seq_length=510)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())

model = SentenceTransformer(modules=[word_embedding_model, pooling_model], device='mps')
print(model)
|
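# Optional sanity check, a minimal sketch with illustrative sentences (not from
# the training data): encode a pair and inspect their cosine similarity.
# emb = model.encode(['This is a test.', 'これはテストです。'], convert_to_tensor=True)
# print(F.cosine_similarity(emb[0:1], emb[1:2]))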
|
# "up" data: one file of precomputed cosine-similarity scores plus three
# line-aligned sentence files (one sentence per line).
with open('/Users/fujidai/dataseigen/up/pseudo-pseudo-english_english_100000_cos-sim-karanasi_09-04.txt', 'r') as f:
    data = [float(line) for line in f.read().splitlines()]

with open('/Users/fujidai/dataseigen/up/pseudo-pseudo_en-ja-100000-karanasi_09-04.txt', 'r') as f:
    left_lines = f.read().splitlines()

with open('/Users/fujidai/dataseigen/up/pseudo-pseudo_ja-en-100000-karanasi_09-04.txt', 'r') as f:
    senter_lines = f.read().splitlines()

with open('/Users/fujidai/dataseigen/up/pseudo-japanese-sentence-100000-karanasi_09-04.txt', 'r') as f:
    right_lines = f.read().splitlines()
|
|
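# Added check (assumption: the four "up" files are strictly line-parallel);
# fail fast on any length mismatch before building training examples.
assert len(data) == len(left_lines) == len(senter_lines) == len(right_lines)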
|
# Each InputExample holds a (left, senter, right) sentence triplet; the label
# 1 - score is the regression target that MarginMSELoss fits below.
train_examples = []
for left, senter, right, score in zip(left_lines, senter_lines, right_lines, data):
    train_examples.append(InputExample(texts=[left, senter, right], label=1 - score))
|
|
# "down" data: same layout as the "up" files above.
with open('/Users/fujidai/dataseigen/down/pseudo-english_english_100000_cos-sim-karanasi_09-04.txt', 'r') as f:
    data2 = [float(line) for line in f.read().splitlines()]

with open('/Users/fujidai/dataseigen/down/pseudo-ja-en-100000-karanasi_09-04.txt', 'r') as f:
    left2_lines = f.read().splitlines()

with open('/Users/fujidai/dataseigen/down/pseudo-en-ja-100000-karanasi_09-04.txt', 'r') as f:
    senter2_lines = f.read().splitlines()

with open('/Users/fujidai/dataseigen/down/pseudo-english-sentence-100000-karanasi_09-04.txt', 'r') as f:
    right2_lines = f.read().splitlines()
|
|
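# Added check, mirroring the one for the "up" files.
assert len(data2) == len(left2_lines) == len(senter2_lines) == len(right2_lines)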
|
for left, senter, right, score in zip(left2_lines, senter2_lines, right2_lines, data2):
    train_examples.append(InputExample(texts=[left, senter, right], label=1 - score))
|
# Shuffle the combined "up" + "down" examples. MarginMSELoss regresses the
# model's predicted margin sim(anchor, pos) - sim(anchor, neg) onto each
# example's label, here scoring embedding pairs with cosine similarity
# instead of the default dot product.
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=8)
train_loss = losses.MarginMSELoss(model=model, similarity_fct=F.cosine_similarity)
|
|
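# Optional sketch (assumes a held-out dev set; dev_sents1, dev_sents2 and
# dev_scores are hypothetical lists): passing an evaluator to fit(), together
# with an output_path, would make save_best_model below actually keep the
# best-scoring checkpoint.
# from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
# evaluator = EmbeddingSimilarityEvaluator(dev_sents1, dev_sents2, dev_scores)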
|
model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=3,
    warmup_steps=1000,
    show_progress_bar=True,
    checkpoint_path='checkpoint_savename',
    checkpoint_save_steps=2300,
    save_best_model=True,  # note: needs an evaluator (and output_path) to have an effect
)
model.save("savename")
|
|
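# The saved model can later be reloaded for inference, e.g.:
# loaded = SentenceTransformer('savename', device='mps')
# embeddings = loaded.encode(['A sample sentence.'])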