from transformers import BertTokenizer
import torch


class TokenizerProcessor:
    def __init__(self, tokenizer_name='bert-base-uncased'):
        self.tokenizer = BertTokenizer.from_pretrained(tokenizer_name)

    # Alternative batch encoder, kept for reference (currently unused).
    # Note: batch_encode_plus takes the batch as its first positional argument;
    # a list of (text, text_pair) tuples encodes sentence pairs.
    # def tokenize_and_encode(self, input_texts, output_texts, max_length=100):
    #     return self.tokenizer.batch_encode_plus(
    #         list(zip(input_texts, output_texts)),
    #         padding='max_length',
    #         truncation=True,
    #         max_length=max_length,
    #         return_attention_mask=True,
    #         return_tensors='pt'
    #     )

    def encode(self, input_texts, output_texts, max_length=512):
        # Encode a batch of (input, output) sentence pairs jointly.
        # Passing two lists to the tokenizer pairs them element-wise;
        # encode_plus with a lone text_pair keyword does not batch correctly.
        return self.tokenizer(
            input_texts,
            output_texts,
            padding='max_length',
            truncation=True,  # truncate token sequences longer than max_length
            max_length=max_length,
            return_tensors='pt'
        )

    # Unused paraphrase-classification snippet, kept for reference
    # (tokenizer, model and sequence_* are not defined in this module):
    # paraphrase = tokenizer.encode_plus(sequence_0, sequence_2, return_tensors="pt")
    # not_paraphrase = tokenizer.encode_plus(sequence_0, sequence_1, return_tensors="pt")
    # paraphrase_classification_logits = model(**paraphrase)[0]
    # not_paraphrase_classification_logits = model(**not_paraphrase)[0]

    def custom_padding(self, input_ids_list, max_length=100, pad_token_id=0):
        # Pad each id sequence with pad_token_id up to max_length,
        # or truncate it if it is longer.
        padded_inputs = []
        for ids in input_ids_list:
            if len(ids) < max_length:
                padded_ids = ids + [pad_token_id] * (max_length - len(ids))
            else:
                padded_ids = ids[:max_length]
            padded_inputs.append(padded_ids)
        return padded_inputs

    def pad_and_truncate_pairs(self, input_texts, output_texts, max_length=512):
        # Bring the input and output sequences to the same fixed length.
        inputs = self.tokenizer(input_texts, padding=False, truncation=False, return_tensors=None)
        outputs = self.tokenizer(output_texts, padding=False, truncation=False, return_tensors=None)

        input_ids = self.custom_padding(inputs['input_ids'], max_length, self.tokenizer.pad_token_id)
        output_ids = self.custom_padding(outputs['input_ids'], max_length, self.tokenizer.pad_token_id)

        input_ids_tensor = torch.tensor(input_ids)
        output_ids_tensor = torch.tensor(output_ids)

        # Attention masks: 1 for real tokens, 0 for padding.
        input_attention_mask = (input_ids_tensor != self.tokenizer.pad_token_id).long()
        output_attention_mask = (output_ids_tensor != self.tokenizer.pad_token_id).long()

        return {
            'input_ids': input_ids_tensor,
            'input_attention_mask': input_attention_mask,
            'output_ids': output_ids_tensor,
            'output_attention_mask': output_attention_mask
        }
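

# A minimal usage sketch (an addition, not part of the original module): it
# shows the shapes returned by encode() and pad_and_truncate_pairs() for a
# hypothetical pair of example sentence lists.
if __name__ == '__main__':
    processor = TokenizerProcessor('bert-base-uncased')

    # Hypothetical input/output text pairs, for illustration only.
    sources = ["how are you today?", "the weather is nice"]
    targets = ["I am fine, thanks.", "yes, it is sunny"]

    # Joint encoding: each (source, target) pair becomes one sequence
    # of the form "[CLS] source [SEP] target [SEP]".
    pair_batch = processor.encode(sources, targets, max_length=32)
    print(pair_batch['input_ids'].shape)        # torch.Size([2, 32])
    print(pair_batch['attention_mask'].shape)   # torch.Size([2, 32])

    # Separate encoding: sources and targets are padded independently and
    # returned with their own attention masks.
    split_batch = processor.pad_and_truncate_pairs(sources, targets, max_length=32)
    print(split_batch['input_ids'].shape)       # torch.Size([2, 32])
    print(split_batch['output_ids'].shape)      # torch.Size([2, 32])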