PEFT · Safetensors · llama · axolotl · Generated from Trainer
ClarenceDan committed (verified) · commit 444d1f6 · 1 parent: 230018e

Training in progress, step 10, checkpoint

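This commit advances the last-checkpoint/ directory written by the Trainer: the LoRA adapter weights, the optimizer/scheduler/RNG state, and trainer_state.json all change. A minimal sketch of loading the saved adapter with PEFT follows; the base model ID is a placeholder, since the diff does not record which base model was used:

```python
# Minimal sketch: load the LoRA adapter from this checkpoint on top of its
# base model. "BASE_MODEL_ID" is a placeholder; the base model is not
# recorded in this diff.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("BASE_MODEL_ID")
model = PeftModel.from_pretrained(base, "last-checkpoint")
```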
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:84495954457b772247356bd832294de5642b1452ac7076e4fca047ef16a87484
+oid sha256:ed5d87a5631978c1202a3b37ee482cbd0d0d2b96b5dac1266ee9d41ba15f1a86
 size 17425352
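Each binary file in this checkpoint is stored through Git LFS, so the diffs only swap the sha256 OID inside a three-line pointer file while the size stays fixed. A minimal sketch of checking a downloaded blob against such a pointer, with hypothetical paths:

```python
# Minimal sketch: verify a Git LFS blob against its three-line pointer
# ("version ..." / "oid sha256:..." / "size ..."). Paths are hypothetical.
import hashlib
import os

def parse_lfs_pointer(text: str) -> dict:
    """Split each 'key value' line of a Git LFS pointer file."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify(pointer_path: str, blob_path: str) -> bool:
    with open(pointer_path) as f:
        fields = parse_lfs_pointer(f.read())
    expected_oid = fields["oid"].removeprefix("sha256:")
    if os.path.getsize(blob_path) != int(fields["size"]):
        return False
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# verify("adapter_model.pointer", "last-checkpoint/adapter_model.safetensors")
```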
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1e9508200d7c2a2ae3ff1d4e556ddc11da4912b619b2bcdb9a401c049b743a56
+oid sha256:da8fc25ad7a879ea1de677e9d57eb937e3f029be0c2bea1722c223b208978c57
 size 10251668
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:97b21c7f33e6dd1250e43a1885dcf2862e9a21f89ea7c79b66b9571be9c48ccc
+oid sha256:d04f0603c6a3299a5e9bcb478c2e4a0188fe2937aaa82d8dd42cd9dea489a52f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9c8e6b04902f17ae368c3e6cfd97a31ad4de2f025d673daea8c033ce0e260946
+oid sha256:bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a
 size 1064
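The three .pt/.pth files above are what make a resume exact: optimizer moments, learning-rate schedule position and RNG streams are all restored alongside the adapter weights. A minimal sketch, assuming a Trainer configured to match the original run:

```python
# Minimal sketch: resume from this checkpoint so optimizer.pt, scheduler.pt
# and rng_state.pth are restored along with the adapter weights. "model" and
# "train_dataset" stand in for the original run's setup, which this diff
# does not show.
from transformers import Trainer, TrainingArguments

args = TrainingArguments(output_dir="outputs", max_steps=10)  # assumed settings
trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
trainer.train(resume_from_checkpoint="last-checkpoint")
```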
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.005989020129762102,
+  "epoch": 0.006654466810846781,
   "eval_steps": 3,
-  "global_step": 9,
+  "global_step": 10,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -94,6 +94,13 @@
       "eval_samples_per_second": 20.169,
       "eval_steps_per_second": 10.101,
       "step": 9
+    },
+    {
+      "epoch": 0.006654466810846781,
+      "grad_norm": 0.0852142721414566,
+      "learning_rate": 0.0002,
+      "loss": 1.3184,
+      "step": 10
     }
   ],
   "logging_steps": 1,
@@ -108,12 +115,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 109748488765440.0,
+  "total_flos": 117587666534400.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null