nlparabic committed
Commit d433a35
1 Parent(s): 5cc8e24

Training in progress, epoch 1

added_tokens.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "<sep>": 64003,
+   "<|bos|>": 64000,
+   "<|unk|>": 64001,
+   "[PAD]": 64002
+ }
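
A minimal sketch (not part of this commit) of how these added-token ids can be checked once the checkpoint is downloaded; the repo id is inferred from the commit author and the output_dir in the log below, and may differ.

from transformers import AutoTokenizer

# Hypothetical repo id, inferred from the author and output_dir.
tok = AutoTokenizer.from_pretrained("nlparabic/res_nw_yem_aragpt2-base")

# The four entries above extend the 64000-token aragpt2-base vocabulary.
for token in ["<|bos|>", "<|unk|>", "[PAD]", "<sep>"]:
    print(token, tok.convert_tokens_to_ids(token))  # expect 64000..64003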
config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "_name_or_path": "aubmindlab/aragpt2-base",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 0,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 0,
+   "gradient_checkpointing": false,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 5,
+       "repetition_penalty": 3.0,
+       "top_p": 0.95
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.45.0.dev0",
+   "use_cache": true,
+   "vocab_size": 64004
+ }
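
For reference, a hedged sketch of consuming this config: vocab_size is 64004 rather than the base model's 64000 because of the four tokens in added_tokens.json, so the embedding matrix was resized before training. The repo id is an assumption, as above.

from transformers import AutoConfig, AutoModelForCausalLM

repo = "nlparabic/res_nw_yem_aragpt2-base"  # hypothetical repo id
config = AutoConfig.from_pretrained(repo)
assert config.vocab_size == 64004  # 64000 base tokens + 4 added tokens

# "architectures" names GPT2LMHeadModel, so the causal-LM auto class applies.
model = AutoModelForCausalLM.from_pretrained(repo)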
egy_training_log.txt ADDED
@@ -0,0 +1,280 @@
+ WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
+ INFO:__main__:Training/evaluation parameters TrainingArguments(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ auto_find_batch_size=False,
+ batch_eval_metrics=False,
+ bf16=False,
+ bf16_full_eval=False,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800,
+ debug=[],
+ deepspeed=None,
+ disable_tqdm=False,
+ dispatch_batches=None,
+ do_eval=True,
+ do_predict=False,
+ do_train=True,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_steps=None,
+ eval_strategy=IntervalStrategy.EPOCH,
+ eval_use_gather_object=False,
+ evaluation_strategy=epoch,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=False,
+ gradient_checkpointing_kwargs=None,
+ greater_is_better=False,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=None,
+ hub_private_repo=False,
+ hub_strategy=HubStrategy.EVERY_SAVE,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ jit_mode_eval=False,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=5e-05,
+ length_column_name=length,
+ load_best_model_at_end=True,
+ local_rank=0,
+ log_level=passive,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_yem_aragpt2-base/runs/Sep07_09-00-55_lmgpu-node-07,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=500,
+ logging_strategy=IntervalStrategy.EPOCH,
+ lr_scheduler_kwargs={},
+ lr_scheduler_type=SchedulerType.LINEAR,
+ max_grad_norm=1.0,
+ max_steps=-1,
+ metric_for_best_model=loss,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_train_epochs=20.0,
+ optim=OptimizerNames.ADAMW_TORCH,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=/home/iais_marenpielka/Bouthaina/res_nw_yem_aragpt2-base,
+ overwrite_output_dir=False,
+ past_index=-1,
+ per_device_eval_batch_size=8,
+ per_device_train_batch_size=8,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ remove_unused_columns=True,
+ report_to=[],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=None,
+ run_name=/home/iais_marenpielka/Bouthaina/res_nw_yem_aragpt2-base,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=500,
+ save_strategy=IntervalStrategy.EPOCH,
+ save_total_limit=None,
+ seed=42,
+ skip_memory_metrics=True,
+ split_batches=None,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torch_empty_cache_steps=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_mps_device=False,
+ warmup_ratio=0.0,
+ warmup_steps=500,
+ weight_decay=0.0,
+ )
+ INFO:datasets.builder:Using custom data configuration default-7d1e1bd6ffb527f0
+ INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
+ INFO:datasets.builder:Overwrite dataset info from restored data version if exists.
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.builder:Found cached dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ WARNING:__main__:Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: False
+ INFO:__main__:Training/evaluation parameters TrainingArguments(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ auto_find_batch_size=False,
+ batch_eval_metrics=False,
+ bf16=False,
+ bf16_full_eval=False,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800,
+ debug=[],
+ deepspeed=None,
+ disable_tqdm=False,
+ dispatch_batches=None,
+ do_eval=True,
+ do_predict=False,
+ do_train=True,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_steps=None,
+ eval_strategy=IntervalStrategy.EPOCH,
+ eval_use_gather_object=False,
+ evaluation_strategy=epoch,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=False,
+ gradient_checkpointing_kwargs=None,
+ greater_is_better=False,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=None,
+ hub_private_repo=False,
+ hub_strategy=HubStrategy.EVERY_SAVE,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ jit_mode_eval=False,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=5e-05,
+ length_column_name=length,
+ load_best_model_at_end=True,
+ local_rank=0,
+ log_level=passive,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=/home/iais_marenpielka/Bouthaina/res_nw_yem_aragpt2-base/runs/Sep07_21-03-25_lmgpu-node-07,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=500,
+ logging_strategy=IntervalStrategy.EPOCH,
+ lr_scheduler_kwargs={},
+ lr_scheduler_type=SchedulerType.LINEAR,
+ max_grad_norm=1.0,
+ max_steps=-1,
+ metric_for_best_model=loss,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_train_epochs=20.0,
+ optim=OptimizerNames.ADAMW_TORCH,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=/home/iais_marenpielka/Bouthaina/res_nw_yem_aragpt2-base,
+ overwrite_output_dir=False,
+ past_index=-1,
+ per_device_eval_batch_size=8,
+ per_device_train_batch_size=8,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ remove_unused_columns=True,
+ report_to=[],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=None,
+ run_name=/home/iais_marenpielka/Bouthaina/res_nw_yem_aragpt2-base,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=500,
+ save_strategy=IntervalStrategy.EPOCH,
+ save_total_limit=None,
+ seed=42,
+ skip_memory_metrics=True,
+ split_batches=None,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torch_empty_cache_steps=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_mps_device=False,
+ warmup_ratio=0.0,
+ warmup_steps=500,
+ weight_decay=0.0,
+ )
+ INFO:datasets.builder:Using custom data configuration default-7d1e1bd6ffb527f0
+ INFO:datasets.info:Loading Dataset Infos from /home/iais_marenpielka/Bouthaina/miniconda3/lib/python3.12/site-packages/datasets/packaged_modules/text
+ INFO:datasets.builder:Overwrite dataset info from restored data version if exists.
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.builder:Found cached dataset text (/home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101)
+ INFO:datasets.info:Loading Dataset info from /home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101
+ INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-a27d6b05e7aac9fa.arrow
+ INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-3d57f3aaa6815ded.arrow
+ WARNING:__main__:The tokenizer picked seems to have a very large `model_max_length` (1000000000000000019884624838656). Using block_size=1024 instead. You can change that default value by passing --block_size xxx.
+ INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-ec1d8f12ea0419b8.arrow
+ INFO:datasets.arrow_dataset:Caching processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-7d1e1bd6ffb527f0/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-a45fe2c42de400e2.arrow
+ WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
+ INFO:root:Epoch 1.0: Train Loss = None, Eval Loss = None
+ INFO:absl:Using default tokenizer.
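
The parameter dump appears twice because the job was launched twice (note the two logging_dir timestamps, Sep07_09-00-55 and Sep07_21-03-25). The dump maps one-to-one onto TrainingArguments; a sketch reproducing the salient non-default settings, with values copied from the log and the output path shortened for readability:

from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="res_nw_yem_aragpt2-base",
    do_train=True,
    do_eval=True,
    eval_strategy="epoch",      # evaluate, log, and save once per epoch
    logging_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="loss",
    greater_is_better=False,
    num_train_epochs=20.0,
    learning_rate=5e-05,
    warmup_steps=500,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    push_to_hub=True,
)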
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:372a8b72875fb9e39767401c2e76af335b9765465ab7d07648b70944fd7fd0bd
+ size 540004992
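
This is a Git LFS pointer, not the weights themselves; the ~540 MB size is consistent with a ~135M-parameter GPT-2 base model stored in float32. A sketch for verifying a downloaded copy against the pointer's checksum (the local path is an assumption):

import hashlib

expected = "372a8b72875fb9e39767401c2e76af335b9765465ab7d07648b70944fd7fd0bd"
h = hashlib.sha256()
with open("model.safetensors", "rb") as f:  # assumed local download path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == expected, "checksum mismatch"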
special_tokens_map.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "additional_special_tokens": [
+     {
+       "content": "<sep>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ],
+   "bos_token": {
+     "content": "<|bos|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|unk|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
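
A map like this is typically produced by registering the special tokens on the base tokenizer before fine-tuning; a hedged sketch of that step (the actual training script is not part of this commit):

from transformers import AutoTokenizer, AutoModelForCausalLM

tok = AutoTokenizer.from_pretrained("aubmindlab/aragpt2-base")
tok.add_special_tokens({
    "bos_token": "<|bos|>",
    "unk_token": "<|unk|>",
    "pad_token": "[PAD]",
    "additional_special_tokens": ["<sep>"],
})  # <|endoftext|> already exists in the base vocab as the eos token

model = AutoModelForCausalLM.from_pretrained("aubmindlab/aragpt2-base")
model.resize_token_embeddings(len(tok))  # 64000 -> 64004, matching config.json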
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,79 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "64000": {
+       "content": "<|bos|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "64001": {
+       "content": "<|unk|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "64002": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "64003": {
+       "content": "<sep>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<sep>"
+   ],
+   "bos_token": "<|bos|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|unk|>"
+ }
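
The huge model_max_length is the tokenizer's "no limit recorded" sentinel value; as the warning in egy_training_log.txt shows, the training script falls back to a 1024-token block size, matching n_positions in config.json. A sketch of that guard under those assumptions:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("aubmindlab/aragpt2-base")
# Cap at the model's context window when model_max_length is the sentinel.
block_size = min(tok.model_max_length, 1024)
print(block_size)  # 1024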
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b03e424f61e0821879bb084631c18b7ed49d4b82f2112e372a7368b73f712222
+ size 5240
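
training_args.bin is a pickled TrainingArguments object (hence only 5240 bytes); it can be inspected with torch.load, with the caveat that unpickling executes arbitrary code, so only do this for files you trust. Local path assumed:

import torch

# weights_only=False is needed because this is a pickled Python object,
# not a tensor state dict.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs)  # 20.0 per the log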
vocab.json ADDED
The diff for this file is too large to render. See raw diff