yhavinga committed
Commit 22a96ca
1 Parent(s): 2d27366
src/translate_imdb_flax.py ADDED
@@ -0,0 +1,309 @@
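"""Translate the IMDB dataset to Dutch with a Flax seq2seq model.

Loads yhavinga/ul2-large-en-nl, translates every config/split of the imdb
dataset in fixed-size batches, falls back to sentence-bounded splitting for
reviews that overflow the 370-token window, and writes the Dutch text, the
original English text, and the label as JSON lines.
"""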
import functools
import json
import logging
import os
import pprint
from typing import Tuple

from datasets import get_dataset_config_names, load_dataset, get_dataset_split_names
import jax
import numpy as np
from flax import jax_utils
from flax.jax_utils import pad_shard_unpad
from transformers import AutoTokenizer, FlaxAutoModelForSeq2SeqLM
import pandas as pd

logger = logging.getLogger(__name__)

DATASET_NAME = "imdb"
OUTPUT_DIR = "./imdb_dutch"
MODEL_370 = "yhavinga/ul2-large-en-nl"
# BATCH_SIZE = 64
BATCH_SIZE = 32
# BATCH_SIZE = 2
MODEL_MAX_LENGTH = 370
MAX_WORDS = int(MODEL_MAX_LENGTH / 3)
END_MARKS = (".", "?", "!", '"', "'", "\n")
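# Note: MAX_WORDS caps each chunk produced by split_text at roughly a third of
# the 370-token window, a heuristic that presumably budgets about three
# tokens per word so a 123-word chunk still fits the model's input length.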
27
+
28
+
29
+ class FlaxModel:
30
+ def __init__(self, model_name: str, tokenizer_name: str, tokenizer_args={}):
31
+ """
32
+ Initializes the FlaxModel with the specified model and tokenizer names, as well as tokenizer arguments.
33
+ """
34
+ self.model = FlaxAutoModelForSeq2SeqLM.from_pretrained(
35
+ model_name, use_auth_token=True
36
+ )
37
+ self.model.params = self.model.to_fp32(self.model.params, mask=None)
38
+ self.tokenizer_args = {
39
+ # "model_max_length": self.model.config.max_length,
40
+ **tokenizer_args,
41
+ }
42
+ self.tokenizer = AutoTokenizer.from_pretrained(
43
+ tokenizer_name, use_auth_token=True, **self.tokenizer_args
44
+ )
45
+ # if not (
46
+ # self.model.config.max_length
47
+ # == self.tokenizer.model_max_length
48
+ # == self.tokenizer_args.get("model_max_length")
49
+ # ):
50
+ # print(
51
+ # f"Warning: model max length {self.model.config.max_length} != tokenizer max length {self.tokenizer.model_max_length} != tokenizer_args max length {tokenizer_args.get('model_max_length')}"
52
+ # )
53
+ # raise ValueError("Model and tokenizer max_length should be equal")
54
+
55
+ self.params = jax_utils.replicate(self.model.params)
56
+
57
+ kwargs = {
58
+ "max_length": self.tokenizer.model_max_length,
59
+ "length_penalty": 1.0,
60
+ "num_beams": 4,
61
+ "early_stopping": True,
62
+ }
63
+
64
+ def shard(xs):
65
+ local_device_count = jax.local_device_count()
66
+ return jax.tree_map(
67
+ lambda x: x.reshape((local_device_count, -1) + x.shape[1:]), xs
68
+ )
69
+
70
+ def generate_step(params, batch):
71
+ self.model.params = params
72
+ output_ids = self.model.generate(
73
+ batch["input_ids"], attention_mask=batch["attention_mask"], **kwargs
74
+ )
75
+ return output_ids.sequences
76
+
77
+ self.p_generate_step = jax.pmap(generate_step, "batch")
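        # Parameters are replicated across local devices and generate_step is
        # pmapped over the batch axis. translate_batch wraps the pmapped
        # function in pad_shard_unpad, which pads a batch to a multiple of the
        # device count, shards it, and unpads the result, so the local shard()
        # helper above is left unused.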

    @functools.lru_cache()
    def translate_batch(self, texts: Tuple[str]):
        overflowed = False
        texts = list(texts)
        if self.model.config.prefix:
            texts = [self.model.config.prefix + x for x in texts]
        texts = [x.replace("\n", "<n>").replace("<br />", "<n>") for x in texts]
        inputs = self.tokenizer(
            texts,
            max_length=self.tokenizer_args.get("model_max_length"),
            truncation=True,
            padding="max_length",
            return_tensors="np",
        )
        # If any example's last input position holds a non-pad token (assuming
        # pad token id 0, as in T5-style vocabularies), the input was
        # truncated: report overflow and return empty strings.
        if not np.array_equal(
            inputs.data["input_ids"][:, self.tokenizer.model_max_length - 1],
            np.zeros(BATCH_SIZE),
        ):
            overflowed = True
            return BATCH_SIZE * [""], overflowed

        batch = inputs.data
        print(f"Batch inputs shape is {batch['input_ids'].shape}")
        translated = pad_shard_unpad(self.p_generate_step)(self.params, batch)
        predictions = jax.device_get(
            translated.reshape(-1, self.tokenizer.model_max_length)
        )
        # Same check on the output side: a non-pad token in the final position
        # means generation ran into the maximum length.
        if not np.array_equal(
            predictions[:, self.tokenizer.model_max_length - 1],
            np.zeros(BATCH_SIZE),
        ):
            overflowed = True

        output = [
            self.tokenizer.decode(t, skip_special_tokens=False) for t in predictions
        ]
        # If there is <extra_id in the output, remove it and everything after it
        output = [
            x.replace("<pad>", "").replace("</s>", "").split("<extra_id")[0]
            for x in output
        ]
        output = [x.replace("<n>", "<br />").strip() for x in output]
        return output, overflowed
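    # Note: translate_batch is memoized with lru_cache, which is why callers
    # pass texts as a hashable Tuple[str] rather than a list.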


def split_text(text):
    text_parts = []
    current_part = ""

    def split_on_end_marks(text):
        sentences = []
        current_sentence = ""
        for char in text:
            if char in END_MARKS:
                sentences.append(current_sentence + char)
                current_sentence = ""
            else:
                current_sentence += char

        # Add the final sentence if it wasn't ended by an end of line mark
        if current_sentence:
            sentences.append(current_sentence)
        return sentences

    text_lines = split_on_end_marks(text)

    for line in text_lines:
        # If adding the line to the current part would not exceed MAX_WORDS words, add it to the current part
        if len((current_part + line).split()) <= MAX_WORDS:
            current_part += line
        # Otherwise, flush the current part and hard-split any single sentence
        # that is itself longer than MAX_WORDS words
        else:
            if len(current_part) > 0:
                text_parts.append(current_part)
            while len(line.split()) > MAX_WORDS:
                # print(f"Line {line} is longer than MAX_WORDS words")
                current_part = " ".join(line.split()[:MAX_WORDS])
                text_parts.append(current_part + " ")
                line = " ".join(line.split()[MAX_WORDS:])
            current_part = line
    # Add the final part to the list
    text_parts.append(current_part)
    text_parts[-1] = text_parts[-1].rstrip()
    return text_parts
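# Example: with MODEL_MAX_LENGTH = 370, MAX_WORDS is 123, so a 500-word review
# is cut at END_MARKS sentence boundaries into parts of at most 123 words, and
# any single sentence longer than that is hard-split on whitespace.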


def test_split_text():
    # Test with single line that is less than MAX_WORDS words
    text = " ".join([f"n{i}" for i in range(MAX_WORDS - 20)])
    a = list(text)
    a[150] = END_MARKS[0]
    text = "".join(a)
    text_parts = split_text(text)
    assert text_parts == [text]

    # Test with single line that is exactly MAX_WORDS words
    text = " ".join([f"n{i}" for i in range(MAX_WORDS)])
    a = list(text)
    a[10] = END_MARKS[0]
    text = "".join(a)
    text_parts = split_text(text)
    assert text_parts == [text]

    # Test with single line that is more than MAX_WORDS words
    text = " ".join([f"n{i}" for i in range(MAX_WORDS + 1)])
    a = list(text)
    a[150] = END_MARKS[0]
    text = "".join(a)
    text_parts = split_text(text)
    assert text_parts == [text[:151], text[151:]]

    # Test with multiple short lines, all well under MAX_WORDS words combined
    text = "\n".join([f"n{i}" for i in range(10)])
    text_parts = split_text(text)
    assert text_parts == [text]

    # Test with 500 words and end marks scattered through the text
    text = " ".join([f"n{i}" for i in range(500)])
    a = list(text)
    a[150] = END_MARKS[0]
    a[300] = END_MARKS[0]
    a[550] = END_MARKS[0]
    a[600] = END_MARKS[0]
    a[750] = END_MARKS[0]
    a[900] = END_MARKS[0]
    a[950] = END_MARKS[0]
    a[1000] = END_MARKS[0]
    text = "".join(a)
    text_parts = split_text(text)
    assert all(
        [len(x.split()) <= MAX_WORDS for x in text_parts]
    ), "Not all text parts are less than MAX_WORDS words"
    assert "".join(text_parts) == text, "Text parts concatenated != original text"


test_split_text()
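# The self-test above runs at import time, so the script aborts immediately if
# split_text ever violates the MAX_WORDS bound or loses text.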


def get_file_lines(filename):
    """
    Get the number of lines in a file, 0 if the file does not exist.
    """
    lines = 0
    if os.path.exists(filename):
        with open(filename, "r") as f:
            lines = len(f.readlines())
        print(f"{filename} already has {lines} lines")
    return lines
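# The line count supports resuming: main() restarts at the first incomplete
# batch and appends, instead of re-translating an already written split.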


SEP = "\n"
# SEP="<unk>"


def main():
    os.makedirs(OUTPUT_DIR, exist_ok=True)

    model_370 = FlaxModel(
        MODEL_370, MODEL_370, tokenizer_args={"model_max_length": MODEL_MAX_LENGTH}
    )

    for config in get_dataset_config_names(DATASET_NAME):
        print(f"Processing config {config}")
        ds = load_dataset(DATASET_NAME, config)
        # for split in ["validation"]:
        for split in get_dataset_split_names(DATASET_NAME, config):
            output_file = f"{OUTPUT_DIR}/{DATASET_NAME}_dutch_{config}-{split}.json"
            num_examples = len(ds[split])
            # fn = partial(encode_in_single_text, validation=(split == "validation"))
            # single_text_ds = ds[split].map(fn, num_proc=6).sort("length", reverse=True)
            # # fn = partial(batch_single_text_decode, validation=(split == "validation"))
            # # decoded_ds = single_text_ds.map(fn, num_proc=6)
            #
            lines = get_file_lines(output_file)
            start_batch_index = lines // BATCH_SIZE
            with open(output_file, mode="ab" if lines else "wb") as writer:
                for batch_index in range(start_batch_index, num_examples // BATCH_SIZE):
                    ds_split = ds[split]
                    batch = ds_split[
                        batch_index * BATCH_SIZE : (batch_index + 1) * BATCH_SIZE
                    ]
                    print(
                        f"Translating batch {batch_index} of {num_examples // BATCH_SIZE}"
                    )

                    translated, overflow = model_370.translate_batch(
                        tuple(batch["text"])
                    )
                    translated_batch = [{"text": x} for x in translated]
                    if overflow:
                        # The whole-review translation overflowed: split each
                        # text on sentence boundaries and translate part by part.
                        batch_text_splitted = [
                            split_text(text) for text in batch["text"]
                        ]
                        max_parts = max(
                            [len(text) for text in batch_text_splitted]
                        )
                        text_translated = [""] * BATCH_SIZE
                        for part_index in range(max_parts):
                            text_parts_i = [
                                text[part_index] if part_index < len(text) else ""
                                for text in batch_text_splitted
                            ]
                            (
                                text_part_translated,
                                overflow,
                            ) = model_370.translate_batch(tuple(text_parts_i))
                            if overflow:
                                print(
                                    f"This shouldn't happen, overflow on a split text: {text_parts_i}"
                                )
                            for bi in range(BATCH_SIZE):
                                text_translated[bi] += (
                                    " " + text_part_translated[bi]
                                    if text_parts_i[bi] != ""
                                    else ""
                                )
                        for bi in range(BATCH_SIZE):
                            translated_batch[bi]["text"] = text_translated[bi].strip()

                    # write each object in the batch as a separate line
                    for bi in range(BATCH_SIZE):
                        example = {
                            "text": translated_batch[bi]["text"],
                            "text_en": batch["text"][bi],
                            "label": batch["label"][bi],
                        }

                        pprint.pprint(example)
                        writer.write(json.dumps(example).encode("utf-8"))
                        writer.write("\n".encode("utf-8"))


if __name__ == "__main__":
    main()
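# Running the script translates every config/split of imdb (for the stock
# dataset: the plain_text config with train, test, and unsupervised splits)
# into e.g. ./imdb_dutch/imdb_dutch_plain_text-train.json, one JSON object
# per line with "text" (Dutch), "text_en" (English), and "label".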
test.jsonl.gz ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b1f7f0224277f89fe229c95c6437a5ee6e95db630734cbde6e549ece93d32205
size 26536475
train.jsonl.gz ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:432b57ce1ce49554be6dcd79da08900c0330fb094bd43182314d8baefe189852
size 27182787
unsupervised.jsonl.gz ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b14d7977619b67d07f4e4b7fdd7f2d3ea79717d865bec60983e9ff678c2565cf
size 54451678