yonkasoft committed
Commit
1552dd9
1 Parent(s): b8522d2

Upload 4 files

Files changed (4)
  1. dataPipeline.py +38 -0
  2. main.py +26 -0
  3. my_tokenize.py +170 -0
  4. yeni_tokenize.py +63 -0
dataPipeline.py ADDED
@@ -0,0 +1,38 @@
+ from my_tokenize import Database
+ from yeni_tokenize import TokenizerProcessor
+
+
+ class DataPipeline:
+     def __init__(self, tokenizer_name='bert-base-uncased', max_length=512):
+         self.tokenizer_processor = TokenizerProcessor(tokenizer_name)
+         self.max_length = max_length
+
+     def prepare_data(self):
+         input_texts = Database.get_input_texts()
+         output_texts = Database.get_output_texts()
+         encoded_data = self.tokenizer_processor.pad_and_truncate_pairs(input_texts, output_texts, self.max_length)
+         return encoded_data
+
+     def tokenize_texts(self, texts):
+         # tokenize() returns the token strings, which is what gets printed below
+         return [self.tokenizer_processor.tokenizer.tokenize(text) for text in texts]
+
+     def encode_texts(self, texts):
+         return [self.tokenizer_processor.encode(text, self.max_length) for text in texts]
+
+
+ # Guarded so importing this module (e.g. from main.py) does not run the demo
+ if __name__ == "__main__":
+     # Initialize the tokenizer
+     pipeline = DataPipeline(tokenizer_name='bert-base-cased', max_length=512)
+
+     # Fetch the input texts from MongoDB
+     input_texts = Database.get_input_texts()
+
+     # Tokenize the texts
+     tokenized_texts = pipeline.tokenize_texts(input_texts)
+     print("Tokenized Texts:")
+     for text, tokens in zip(input_texts, tokenized_texts):
+         print(f"Original Text: {text}")
+         print(f"Tokenized Text: {tokens}")
+
+     # Encode the texts
+     encoded_texts = pipeline.encode_texts(input_texts)
+     print("Encoded Texts:")
+     for text, encoded in zip(input_texts, encoded_texts):
+         print(f"Original Text: {text}")
+         print(f"Encoded Text: {encoded['input_ids'].squeeze().tolist()}")
main.py ADDED
@@ -0,0 +1,26 @@
+ from dataPipeline import DataPipeline
+ from my_tokenize import Database
+
+ # Initialize the tokenizer
+ tokenizer_name = 'bert-base-cased'
+ pipeline = DataPipeline(tokenizer_name=tokenizer_name, max_length=100)
+
+ # Fetch the input texts from MongoDB
+ # (get_input_texts() already returns plain strings, not documents)
+ input_texts = Database.get_input_texts()
+
+ # Tokenize the texts
+ tokenized_texts = pipeline.tokenize_texts(input_texts)
+ print("Tokenized Texts:")
+ for text, tokens in zip(input_texts, tokenized_texts):
+     print(f"Original Text: {text}")
+     print(f"Tokenized Text: {tokens}")
+
+ # Encode the texts
+ encoded_texts = pipeline.encode_texts(input_texts)
+ print("Encoded Texts:")
+ for text, encoded in zip(input_texts, encoded_texts):
+     print(f"Original Text: {text}")
+     print(f"Encoded Text: {encoded['input_ids'].squeeze().tolist()}")
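main.py assumes the 'train' collection already holds documents with Prompt and Response fields. For a quick local smoke test, a hypothetical seeding script (the two sample documents are invented; the database and collection names follow Database.get_mongodb()):

from pymongo import MongoClient

client = MongoClient('mongodb://localhost:27017/')
collection = client['yeniDatabase']['train']

# Illustrative documents only; real data would come from the actual dataset.
collection.insert_many([
    {"Prompt": "What is tokenization?", "Response": "Splitting text into tokens."},
    {"Prompt": "Define padding.", "Response": "Extending sequences to a fixed length."},
])
print(collection.count_documents({"Prompt": {"$exists": True}}))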
my_tokenize.py ADDED
@@ -0,0 +1,170 @@
+ from pymongo import MongoClient
+ from yeni_tokenize import TokenizerProcessor
+
+
+ class Database:
+     # MongoDB connection settings
+
+     @staticmethod
+     def get_collection(database_name='yeniDatabase', collection_name='test', host='localhost', port=27017):
+         """
+         MongoDB connection and collection selection
+         """
+         client = MongoClient(f'mongodb://{host}:{port}/')
+         db = client[database_name]
+         collection = db[collection_name]
+         return collection
+
+     @staticmethod
+     def get_mongodb():
+         # Returns the MongoDB connection details (URL, database name, collection name)
+         return 'mongodb://localhost:27017/', 'yeniDatabase', 'train'
+
+     @staticmethod
+     def get_input_texts():
+         # Get the MongoDB connection details
+         mongo_url, db_name, collection_name = Database.get_mongodb()
+         # Connect to MongoDB
+         client = MongoClient(mongo_url)
+         db = client[db_name]
+         collection = db[collection_name]
+         # Define the query
+         query = {"Prompt": {"$exists": True}}
+         # Run the query and fetch the documents
+         cursor = collection.find(query, {"Prompt": 1, "_id": 0})
+         # Convert the cursor to a plain list of input texts
+         input_texts_from_db = [doc['Prompt'] for doc in cursor]
+         return input_texts_from_db
+
+     @staticmethod
+     def get_output_texts():
+         # Get the MongoDB connection details
+         mongo_url, db_name, collection_name = Database.get_mongodb()
+         # Connect to MongoDB
+         client = MongoClient(mongo_url)
+         db = client[db_name]
+         collection = db[collection_name]
+         # Define the query
+         query = {"Response": {"$exists": True}}
+         # Run the query and fetch the documents
+         cursor = collection.find(query, {"Response": 1, "_id": 0})
+         # Convert the cursor to a plain list of output texts
+         output_texts_from_db = [doc['Response'] for doc in cursor]
+         return output_texts_from_db
+
+     @staticmethod
+     def get_average_prompt_token_length():
+         # Get the MongoDB connection details
+         mongo_url, db_name, collection_name = Database.get_mongodb()
+         # Connect to MongoDB
+         client = MongoClient(mongo_url)
+         db = client[db_name]
+         collection = db[collection_name]
+         # Fetch all documents, keeping only the 'Prompt_token_length' field
+         docs = collection.find({}, {'Prompt_token_length': 1})
+         # Sum and count the 'Prompt_token_length' values
+         total_length = 0
+         count = 0
+         for doc in docs:
+             if 'Prompt_token_length' in doc:
+                 total_length += doc['Prompt_token_length']
+                 count += 1
+         # Compute the average
+         average_length = total_length / count if count > 0 else 0
+         return int(average_length)
+
+
+ # Loading the tokenizer and model
+ # (kept for reference; the live TokenizerProcessor comes from yeni_tokenize)
+ """
+ class TokenizerProcessor:
+     def __init__(self, tokenizer_name='bert-base-uncased'):
+         self.tokenizer = BertTokenizer.from_pretrained(tokenizer_name)
+
+     def tokenize_and_encode(self, input_texts, output_texts, max_length=100):
+         encoded = self.tokenizer.batch_encode_plus(
+             text_pair=list(zip(input_texts, output_texts)),
+             padding='max_length',
+             truncation=True,
+             max_length=max_length,
+             return_attention_mask=True,
+             return_tensors='pt'
+         )
+         return encoded
+
+     paraphrase = tokenizer.encode_plus(sequence_0, sequence_2, return_tensors="pt")
+     not_paraphrase = tokenizer.encode_plus(sequence_0, sequence_1, return_tensors="pt")
+
+     paraphrase_classification_logits = model(**paraphrase)[0]
+     not_paraphrase_classification_logits = model(**not_paraphrase)[0]
+
+     def custom_padding(self, input_ids_list, max_length=100, pad_token_id=0):
+         padded_inputs = []
+         for ids in input_ids_list:
+             if len(ids) < max_length:
+                 padded_ids = ids + [pad_token_id] * (max_length - len(ids))
+             else:
+                 padded_ids = ids[:max_length]
+             padded_inputs.append(padded_ids)
+         return padded_inputs
+
+     def pad_and_truncate_pairs(self, input_texts, output_texts, max_length=100):
+         # Equalize the lengths of the input and output data
+         inputs = self.tokenizer(input_texts, padding=False, truncation=False, return_tensors=None)
+         outputs = self.tokenizer(output_texts, padding=False, truncation=False, return_tensors=None)
+
+         input_ids = self.custom_padding(inputs['input_ids'], max_length, self.tokenizer.pad_token_id)
+         output_ids = self.custom_padding(outputs['input_ids'], max_length, self.tokenizer.pad_token_id)
+
+         input_ids_tensor = torch.tensor(input_ids)
+         output_ids_tensor = torch.tensor(output_ids)
+
+         input_attention_mask = (input_ids_tensor != self.tokenizer.pad_token_id).long()
+         output_attention_mask = (output_ids_tensor != self.tokenizer.pad_token_id).long()
+
+         return {
+             'input_ids': input_ids_tensor,
+             'input_attention_mask': input_attention_mask,
+             'output_ids': output_ids_tensor,
+             'output_attention_mask': output_attention_mask
+         }
+ """
+ # TODO: sentences still need to be pulled one by one from the input and output data
+ # def tokenize_and_pad_sequences(sequence_1, sequence_2,)
+
+
+ class DataPipeline:
+     def __init__(self, tokenizer_name='bert-base-uncased', max_length=100):
+         self.tokenizer_processor = TokenizerProcessor(tokenizer_name)
+         self.max_length = max_length
+
+     def prepare_data(self):
+         input_texts = Database.get_input_texts()
+         output_texts = Database.get_output_texts()
+         encoded_data = self.tokenizer_processor.pad_and_truncate_pairs(input_texts, output_texts, self.max_length)
+         return encoded_data
+
+     def tokenize_texts(self, texts):
+         return [self.tokenizer_processor.tokenizer.tokenize(text) for text in texts]
+
+     def encode_texts(self, texts):
+         return [self.tokenizer_processor.encode(text, self.max_length) for text in texts]
+
+
+ # Example Usage
+ if __name__ == "__main__":
+     data_pipeline = DataPipeline()
+     encoded_data = data_pipeline.prepare_data()
+     print(encoded_data)
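The averaging loop in get_average_prompt_token_length() iterates client-side; the same number could be computed server-side with a MongoDB aggregation. A sketch under the same schema assumption (a numeric Prompt_token_length field), not part of the commit:

from pymongo import MongoClient

mongo_url, db_name, collection_name = 'mongodb://localhost:27017/', 'yeniDatabase', 'train'
collection = MongoClient(mongo_url)[db_name][collection_name]

# $match skips documents without the field; $avg does the sum/count on the server.
agg = [
    {"$match": {"Prompt_token_length": {"$exists": True}}},
    {"$group": {"_id": None, "avg_len": {"$avg": "$Prompt_token_length"}}},
]
result = list(collection.aggregate(agg))
average_length = int(result[0]["avg_len"]) if result else 0
print(average_length)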
yeni_tokenize.py ADDED
@@ -0,0 +1,63 @@
+ from transformers import BertTokenizer
+ import torch
+
+
+ class TokenizerProcessor:
+     def __init__(self, tokenizer_name='bert-base-uncased'):
+         self.tokenizer = BertTokenizer.from_pretrained(tokenizer_name)
+
+     """def tokenize_and_encode(self, input_texts, output_texts, max_length=100):
+         encoded = self.tokenizer.batch_encode_plus(
+             text_pair=list(zip(input_texts, output_texts)),
+             padding='max_length',
+             truncation=True,
+             max_length=max_length,
+             return_attention_mask=True,
+             return_tensors='pt'
+         )
+         return encoded"""
+
+     def encode(self, text, max_length=512):
+         # Encode a single text: truncation shortens the token sequence,
+         # padding extends it to max_length. The single-text signature matches
+         # how DataPipeline.encode_texts() calls this method.
+         return self.tokenizer.encode_plus(
+             text,
+             padding='max_length',
+             truncation=True,
+             max_length=max_length,
+             return_tensors='pt'
+         )
+
+     """paraphrase = tokenizer.encode_plus(sequence_0, sequence_2, return_tensors="pt")
+     not_paraphrase = tokenizer.encode_plus(sequence_0, sequence_1, return_tensors="pt")
+
+     paraphrase_classification_logits = model(**paraphrase)[0]
+     not_paraphrase_classification_logits = model(**not_paraphrase)[0]"""
+
+     def custom_padding(self, input_ids_list, max_length=100, pad_token_id=0):
+         # Pad (or truncate) each id sequence to exactly max_length
+         padded_inputs = []
+         for ids in input_ids_list:
+             if len(ids) < max_length:
+                 padded_ids = ids + [pad_token_id] * (max_length - len(ids))
+             else:
+                 padded_ids = ids[:max_length]
+             padded_inputs.append(padded_ids)
+         return padded_inputs
+
+     def pad_and_truncate_pairs(self, input_texts, output_texts, max_length=512):
+         # Equalize the lengths of the input and output data
+         inputs = self.tokenizer(input_texts, padding=False, truncation=False, return_tensors=None)
+         outputs = self.tokenizer(output_texts, padding=False, truncation=False, return_tensors=None)
+
+         input_ids = self.custom_padding(inputs['input_ids'], max_length, self.tokenizer.pad_token_id)
+         output_ids = self.custom_padding(outputs['input_ids'], max_length, self.tokenizer.pad_token_id)
+
+         input_ids_tensor = torch.tensor(input_ids)
+         output_ids_tensor = torch.tensor(output_ids)
+
+         input_attention_mask = (input_ids_tensor != self.tokenizer.pad_token_id).long()
+         output_attention_mask = (output_ids_tensor != self.tokenizer.pad_token_id).long()
+
+         return {
+             'input_ids': input_ids_tensor,
+             'input_attention_mask': input_attention_mask,
+             'output_ids': output_ids_tensor,
+             'output_attention_mask': output_attention_mask
+         }
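A small self-contained check of pad_and_truncate_pairs with toy sentences (the texts are illustrative): it shows the fixed (num_texts, max_length) shape and the attention masks derived from pad_token_id.

from yeni_tokenize import TokenizerProcessor

processor = TokenizerProcessor('bert-base-uncased')
batch = processor.pad_and_truncate_pairs(
    ["hello world", "a longer input sentence"],
    ["hi", "a longer output sentence"],
    max_length=16,
)
print(batch['input_ids'].shape)          # torch.Size([2, 16])
print(batch['input_attention_mask'][0])  # 1s over real tokens, 0s over padding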