ardaspear committed (verified)
Commit: 55ecb3a
Parent: 11b8c97

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2eba3634c1ffd6e4ae0b701362ca12c1d63eebacdfef69ea70351fb0b4145497
+oid sha256:15534d3755a6a4e5e30be58ff2c4a2167b06163faa57b68008187ad1d7497fd7
 size 645975704
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:93bd76d64c3e6ea4b1ab0c962168ce61ab91d5d71ea831738cb1f07f5a3d7646
+oid sha256:6489f32e9d976acd503d0b7c43cb423d90e76a6dcb755e2bf8df2eb2ca94cc0c
 size 328468404
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5f2824a1e86e1f2ebb1954454acc2d65d06e39bdc1ec88097e15e38ec185b81b
+oid sha256:842160c84da2f20ba4769075d37abcad1cd79e1e28006529f9f33b1c2157a9d4
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a991012fbb3e0053bc41a0b782b67db27509c27d2801c8bf4a64c3687e55e582
+oid sha256:02d97e2af7e51ba89efcffc7f2a661ca0885e61db7f78987db5df30514ab62df
 size 1064
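
The four binary files above are tracked with Git LFS, so each diff only touches the small pointer file: the `oid sha256:` line identifies the new blob and `size` gives its byte count. As a minimal sketch (the local file path is an assumption), a downloaded blob can be checked against its pointer like this:

import hashlib
import os

def verify_lfs_pointer(file_path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded blob against the oid/size recorded in its LFS pointer."""
    if os.path.getsize(file_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(file_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# oid and size taken from the adapter_model.safetensors pointer in this commit;
# the local path is hypothetical.
ok = verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "15534d3755a6a4e5e30be58ff2c4a2167b06163faa57b68008187ad1d7497fd7",
    645975704,
)
print("checksum matches" if ok else "checksum mismatch")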
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0013993026808307193,
+  "epoch": 0.0018657369077742926,
   "eval_steps": 5,
-  "global_step": 15,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -74,6 +74,21 @@
       "eval_samples_per_second": 16.017,
       "eval_steps_per_second": 4.005,
       "step": 15
+    },
+    {
+      "epoch": 0.0016791632169968632,
+      "grad_norm": 2.642324209213257,
+      "learning_rate": 9.045084971874738e-05,
+      "loss": 1.1533,
+      "step": 18
+    },
+    {
+      "epoch": 0.0018657369077742926,
+      "eval_loss": 1.0050814151763916,
+      "eval_runtime": 1127.5321,
+      "eval_samples_per_second": 16.013,
+      "eval_steps_per_second": 4.003,
+      "step": 20
     }
   ],
   "logging_steps": 3,
@@ -93,7 +108,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.804021269528576e+16,
+  "total_flos": 2.4438711779328e+16,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null