sedrickkeh committed
Commit aa94051
1 Parent(s): 2f1f692

End of training

README.md CHANGED
@@ -16,7 +16,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # test_run_mini
 
-This model is a fine-tuned version of [meta-llama/Llama-3.2-1B](https://huggingface.co/meta-llama/Llama-3.2-1B) on an unknown dataset.
+This model is a fine-tuned version of [meta-llama/Llama-3.2-1B](https://huggingface.co/meta-llama/Llama-3.2-1B) on the llamafactory/alpaca_en dataset.
+It achieves the following results on the evaluation set:
+- Loss: 1.4380
 
 ## Model description
 
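For reference, a minimal sketch of loading the resulting checkpoint with the transformers library; the hub id "sedrickkeh/test_run_mini" is only a guess from the commit author and model name and is not confirmed by this commit:

from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "sedrickkeh/test_run_mini"  # hypothetical hub id; replace with the actual checkpoint path
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Alpaca-style instruction prompt, since the card says training used llamafactory/alpaca_en
inputs = tokenizer("Give three tips for staying healthy.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))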
all_results.json CHANGED
@@ -1,12 +1,12 @@
 {
     "epoch": 0.15384615384615385,
-    "eval_loss": 1.438125729560852,
-    "eval_runtime": 1.6325,
-    "eval_samples_per_second": 322.201,
-    "eval_steps_per_second": 5.513,
+    "eval_loss": 1.4380333423614502,
+    "eval_runtime": 1.6011,
+    "eval_samples_per_second": 328.523,
+    "eval_steps_per_second": 5.621,
     "total_flos": 305613766656.0,
-    "train_loss": 1.5372200012207031,
-    "train_runtime": 320.6446,
-    "train_samples_per_second": 4.79,
+    "train_loss": 1.537254015604655,
+    "train_runtime": 320.3148,
+    "train_samples_per_second": 4.795,
     "train_steps_per_second": 0.009
 }
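The throughput fields in all_results.json are derived values; a quick arithmetic check (numbers copied from the new version above) recovers the approximate evaluation sample and step counts:

eval_runtime = 1.6011                 # seconds, from all_results.json
eval_samples_per_second = 328.523
eval_steps_per_second = 5.621

print(round(eval_runtime * eval_samples_per_second))  # ~526 evaluation samples
print(round(eval_runtime * eval_steps_per_second))    # ~9 evaluation steps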
eval_results.json CHANGED
@@ -1,7 +1,7 @@
 {
     "epoch": 0.15384615384615385,
-    "eval_loss": 1.438125729560852,
-    "eval_runtime": 1.6325,
-    "eval_samples_per_second": 322.201,
-    "eval_steps_per_second": 5.513
+    "eval_loss": 1.4380333423614502,
+    "eval_runtime": 1.6011,
+    "eval_samples_per_second": 328.523,
+    "eval_steps_per_second": 5.621
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 0.15384615384615385,
     "total_flos": 305613766656.0,
-    "train_loss": 1.5372200012207031,
-    "train_runtime": 320.6446,
-    "train_samples_per_second": 4.79,
+    "train_loss": 1.537254015604655,
+    "train_runtime": 320.3148,
+    "train_samples_per_second": 4.795,
     "train_steps_per_second": 0.009
 }
trainer_state.json CHANGED
@@ -10,19 +10,19 @@
   "log_history": [
     {
       "epoch": 0.15384615384615385,
-      "eval_loss": 1.438125729560852,
-      "eval_runtime": 1.9143,
-      "eval_samples_per_second": 274.775,
-      "eval_steps_per_second": 4.701,
+      "eval_loss": 1.4380333423614502,
+      "eval_runtime": 1.8301,
+      "eval_samples_per_second": 287.418,
+      "eval_steps_per_second": 4.918,
       "step": 3
     },
     {
       "epoch": 0.15384615384615385,
       "step": 3,
       "total_flos": 305613766656.0,
-      "train_loss": 1.5372200012207031,
-      "train_runtime": 320.6446,
-      "train_samples_per_second": 4.79,
+      "train_loss": 1.537254015604655,
+      "train_runtime": 320.3148,
+      "train_samples_per_second": 4.795,
       "train_steps_per_second": 0.009
     }
   ],
training_eval_loss.png CHANGED
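training_eval_loss.png is a binary change, so no diff is shown. A small sketch (not part of this commit, matplotlib assumed) of how such a plot can be regenerated from the log_history in trainer_state.json:

import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the log entries that carry an evaluation loss, then plot loss by step.
evals = [e for e in state["log_history"] if "eval_loss" in e]
plt.plot([e["step"] for e in evals], [e["eval_loss"] for e in evals], marker="o")
plt.xlabel("step")
plt.ylabel("eval_loss")
plt.savefig("training_eval_loss.png")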