nlparabic committed on
Commit
65ef5b1
1 Parent(s): 8e4e771

End of training

Browse files
README.md CHANGED
@@ -3,6 +3,9 @@ license: apache-2.0
3
  base_model: riotu-lab/ArabianGPT-01B
4
  tags:
5
  - generated_from_trainer
 
 
 
6
  model-index:
7
  - name: results
8
  results: []
@@ -14,6 +17,12 @@ should probably proofread and complete it, then remove this comment. -->
14
  # results
15
 
16
  This model is a fine-tuned version of [riotu-lab/ArabianGPT-01B](https://huggingface.co/riotu-lab/ArabianGPT-01B) on an unknown dataset.
 
 
 
 
 
 
17
 
18
  ## Model description
19
 
 
3
  base_model: riotu-lab/ArabianGPT-01B
4
  tags:
5
  - generated_from_trainer
6
+ metrics:
7
+ - bleu
8
+ - rouge
9
  model-index:
10
  - name: results
11
  results: []
 
17
  # results
18
 
19
  This model is a fine-tuned version of [riotu-lab/ArabianGPT-01B](https://huggingface.co/riotu-lab/ArabianGPT-01B) on an unknown dataset.
20
+ It achieves the following results on the evaluation set:
21
+ - Loss: 3.4630
22
+ - Bleu: 0.0984
23
+ - Rouge1: 0.3093
24
+ - Rouge2: 0.0718
25
+ - Rougel: 0.2296
26
 
27
  ## Model description
28
 
all_results.json CHANGED
@@ -5,15 +5,15 @@
5
  "eval_rouge1": 0.3093411104418377,
6
  "eval_rouge2": 0.07177511381538038,
7
  "eval_rougeL": 0.22957881226204224,
8
- "eval_runtime": 27.1725,
9
  "eval_samples": 847,
10
- "eval_samples_per_second": 31.171,
11
- "eval_steps_per_second": 3.901,
12
  "perplexity": 31.912922646039927,
13
  "total_flos": 1000225898496000.0,
14
- "train_loss": 4.835940561324452,
15
- "train_runtime": 121.5981,
16
  "train_samples": 2552,
17
- "train_samples_per_second": 20.987,
18
- "train_steps_per_second": 2.623
19
  }
 
5
  "eval_rouge1": 0.3093411104418377,
6
  "eval_rouge2": 0.07177511381538038,
7
  "eval_rougeL": 0.22957881226204224,
8
+ "eval_runtime": 27.2297,
9
  "eval_samples": 847,
10
+ "eval_samples_per_second": 31.106,
11
+ "eval_steps_per_second": 3.893,
12
  "perplexity": 31.912922646039927,
13
  "total_flos": 1000225898496000.0,
14
+ "train_loss": 0.0,
15
+ "train_runtime": 0.0037,
16
  "train_samples": 2552,
17
+ "train_samples_per_second": 699005.016,
18
+ "train_steps_per_second": 87375.627
19
  }
egy_training_log.txt CHANGED
@@ -287,3 +287,6 @@ WARNING:__main__:The tokenizer picked seems to have a very large `model_max_leng
287
  INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-0637777c38512acf/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-607ae57e4b4160b3.arrow
288
  INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-0637777c38512acf/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-4ddbb6e08bb37d3f.arrow
289
  WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
 
 
 
 
287
  INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-0637777c38512acf/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-607ae57e4b4160b3.arrow
288
  INFO:datasets.arrow_dataset:Loading cached processed dataset at /home/iais_marenpielka/.cache/huggingface/datasets/text/default-0637777c38512acf/0.0.0/96636a050ef51804b84abbfd4f4ad440e01153c24b86293eb5c3b300a41f9101/cache-4ddbb6e08bb37d3f.arrow
289
  WARNING:accelerate.utils.other:Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
290
+ INFO:__main__:*** Evaluate ***
291
+ INFO:absl:Using default tokenizer.
292
+ WARNING:root:No losses were recorded, so the loss graph was not generated.
eval_results.json CHANGED
@@ -5,9 +5,9 @@
5
  "eval_rouge1": 0.3093411104418377,
6
  "eval_rouge2": 0.07177511381538038,
7
  "eval_rougeL": 0.22957881226204224,
8
- "eval_runtime": 27.1725,
9
  "eval_samples": 847,
10
- "eval_samples_per_second": 31.171,
11
- "eval_steps_per_second": 3.901,
12
  "perplexity": 31.912922646039927
13
  }
 
5
  "eval_rouge1": 0.3093411104418377,
6
  "eval_rouge2": 0.07177511381538038,
7
  "eval_rougeL": 0.22957881226204224,
8
+ "eval_runtime": 27.2297,
9
  "eval_samples": 847,
10
+ "eval_samples_per_second": 31.106,
11
+ "eval_steps_per_second": 3.893,
12
  "perplexity": 31.912922646039927
13
  }
train_results.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
  "epoch": 1.0,
3
  "total_flos": 1000225898496000.0,
4
- "train_loss": 4.835940561324452,
5
- "train_runtime": 121.5981,
6
  "train_samples": 2552,
7
- "train_samples_per_second": 20.987,
8
- "train_steps_per_second": 2.623
9
  }
 
1
  {
2
  "epoch": 1.0,
3
  "total_flos": 1000225898496000.0,
4
+ "train_loss": 0.0,
5
+ "train_runtime": 0.0037,
6
  "train_samples": 2552,
7
+ "train_samples_per_second": 699005.016,
8
+ "train_steps_per_second": 87375.627
9
  }
trainer_state.json CHANGED
@@ -12,10 +12,10 @@
12
  "epoch": 1.0,
13
  "step": 319,
14
  "total_flos": 1000225898496000.0,
15
- "train_loss": 4.835940561324452,
16
- "train_runtime": 121.5981,
17
- "train_samples_per_second": 20.987,
18
- "train_steps_per_second": 2.623
19
  }
20
  ],
21
  "logging_steps": 500,
 
12
  "epoch": 1.0,
13
  "step": 319,
14
  "total_flos": 1000225898496000.0,
15
+ "train_loss": 0.0,
16
+ "train_runtime": 0.0037,
17
+ "train_samples_per_second": 699005.016,
18
+ "train_steps_per_second": 87375.627
19
  }
20
  ],
21
  "logging_steps": 500,