nlparabic committed on
Commit
a980a85
1 Parent(s): 539e95e

End of training

Browse files
README.md CHANGED
@@ -3,6 +3,9 @@ license: apache-2.0
3
  base_model: riotu-lab/ArabianGPT-01B
4
  tags:
5
  - generated_from_trainer
 
 
 
6
  model-index:
7
  - name: results
8
  results: []
@@ -14,6 +17,12 @@ should probably proofread and complete it, then remove this comment. -->
14
  # results
15
 
16
  This model is a fine-tuned version of [riotu-lab/ArabianGPT-01B](https://huggingface.co/riotu-lab/ArabianGPT-01B) on an unknown dataset.
 
 
 
 
 
 
17
 
18
  ## Model description
19
 
 
3
  base_model: riotu-lab/ArabianGPT-01B
4
  tags:
5
  - generated_from_trainer
6
+ metrics:
7
+ - bleu
8
+ - rouge
9
  model-index:
10
  - name: results
11
  results: []
 
17
  # results
18
 
19
  This model is a fine-tuned version of [riotu-lab/ArabianGPT-01B](https://huggingface.co/riotu-lab/ArabianGPT-01B) on an unknown dataset.
20
+ It achieves the following results on the evaluation set:
21
+ - Loss: 3.4630
22
+ - Bleu: 0.0984
23
+ - Rouge1: 0.3093
24
+ - Rouge2: 0.0718
25
+ - Rougel: 0.2296
26
 
27
  ## Model description
28
 
all_results.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 1.0,
3
+ "eval_bleu": 0.09842217214841122,
4
+ "eval_loss": 3.4630110263824463,
5
+ "eval_rouge1": 0.3093411104418377,
6
+ "eval_rouge2": 0.07177511381538038,
7
+ "eval_rougeL": 0.22957881226204224,
8
+ "eval_runtime": 27.1725,
9
+ "eval_samples": 847,
10
+ "eval_samples_per_second": 31.171,
11
+ "eval_steps_per_second": 3.901,
12
+ "perplexity": 31.912922646039927,
13
+ "total_flos": 1000225898496000.0,
14
+ "train_loss": 4.835940561324452,
15
+ "train_runtime": 121.5981,
16
+ "train_samples": 2552,
17
+ "train_samples_per_second": 20.987,
18
+ "train_steps_per_second": 2.623
19
+ }
egy_training_log.txt CHANGED
@@ -141,3 +141,5 @@ INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_maren
141
  INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-0637777c38512acf/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-4ddbb6e08bb37d3f.arrow
142
  WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
143
  WARNING:root:Epoch 1.0: No losses recorded yet.
 
 
 
141
  INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-0637777c38512acf/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-4ddbb6e08bb37d3f.arrow
142
  WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
143
  WARNING:root:Epoch 1.0: No losses recorded yet.
144
+ INFO:__main__:*** Evaluate ***
145
+ INFO:absl:Using default tokenizer.
eval_results.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 1.0,
3
+ "eval_bleu": 0.09842217214841122,
4
+ "eval_loss": 3.4630110263824463,
5
+ "eval_rouge1": 0.3093411104418377,
6
+ "eval_rouge2": 0.07177511381538038,
7
+ "eval_rougeL": 0.22957881226204224,
8
+ "eval_runtime": 27.1725,
9
+ "eval_samples": 847,
10
+ "eval_samples_per_second": 31.171,
11
+ "eval_steps_per_second": 3.901,
12
+ "perplexity": 31.912922646039927
13
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 1.0,
3
+ "total_flos": 1000225898496000.0,
4
+ "train_loss": 4.835940561324452,
5
+ "train_runtime": 121.5981,
6
+ "train_samples": 2552,
7
+ "train_samples_per_second": 20.987,
8
+ "train_steps_per_second": 2.623
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.0,
5
+ "eval_steps": 500,
6
+ "global_step": 319,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 1.0,
13
+ "step": 319,
14
+ "total_flos": 1000225898496000.0,
15
+ "train_loss": 4.835940561324452,
16
+ "train_runtime": 121.5981,
17
+ "train_samples_per_second": 20.987,
18
+ "train_steps_per_second": 2.623
19
+ }
20
+ ],
21
+ "logging_steps": 500,
22
+ "max_steps": 319,
23
+ "num_input_tokens_seen": 0,
24
+ "num_train_epochs": 1,
25
+ "save_steps": 500,
26
+ "stateful_callbacks": {
27
+ "EarlyStoppingCallback": {
28
+ "args": {
29
+ "early_stopping_patience": 3,
30
+ "early_stopping_threshold": 0.0
31
+ },
32
+ "attributes": {
33
+ "early_stopping_patience_counter": 0
34
+ }
35
+ },
36
+ "TrainerControl": {
37
+ "args": {
38
+ "should_epoch_stop": false,
39
+ "should_evaluate": false,
40
+ "should_log": false,
41
+ "should_save": true,
42
+ "should_training_stop": true
43
+ },
44
+ "attributes": {}
45
+ }
46
+ },
47
+ "total_flos": 1000225898496000.0,
48
+ "train_batch_size": 8,
49
+ "trial_name": null,
50
+ "trial_params": null
51
+ }