dimasik87 committed
Commit b9532f9
1 Parent(s): 21de682

Training in progress, step 12, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e77f3e5d1289bba64d0fe9d2b1eb1b3bbc31b8fab083e6c9fc03003a927967cd
+ oid sha256:9b776cef4a8b110f29f03f1f2a505ab26816d15755d261cab864eced6635967f
  size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a58d0ffe0e12c31a8e225eb979c810b76c5a6019aa5dee8426d03333ec1a6d33
+ oid sha256:db7cd6507b3e6336497c085dcdbab5771e4bc873fa652e44ade43922d4c8c091
  size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:698a2a174d63a3686e64f723632c723119c5cfbfc1884cf988ecbf3863d7db38
+ oid sha256:d24904fc57fd2178e69b645251775fb3fd05ea113003861d10791be78a06f1a9
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:26a93b2a1f4b5368650119fe6e0d6eec6d19cda6badeba4d21943ab48964fa00
+ oid sha256:68888158764ed5e658b457a541f86335ea31432325308674d2962aa98e037fa4
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.021534320323014805,
+ "epoch": 0.03230148048452221,
  "eval_steps": 4,
- "global_step": 8,
+ "global_step": 12,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -87,6 +87,42 @@
  "eval_samples_per_second": 7.812,
  "eval_steps_per_second": 7.812,
  "step": 8
+ },
+ {
+ "epoch": 0.024226110363391656,
+ "grad_norm": 1.7297841310501099,
+ "learning_rate": 0.00018,
+ "loss": 0.3654,
+ "step": 9
+ },
+ {
+ "epoch": 0.026917900403768506,
+ "grad_norm": 1.6552913188934326,
+ "learning_rate": 0.0002,
+ "loss": 0.3738,
+ "step": 10
+ },
+ {
+ "epoch": 0.029609690444145357,
+ "grad_norm": 1.50761079788208,
+ "learning_rate": 0.0001996917333733128,
+ "loss": 0.4244,
+ "step": 11
+ },
+ {
+ "epoch": 0.03230148048452221,
+ "grad_norm": 1.431064248085022,
+ "learning_rate": 0.00019876883405951377,
+ "loss": 0.2788,
+ "step": 12
+ },
+ {
+ "epoch": 0.03230148048452221,
+ "eval_loss": 0.32530486583709717,
+ "eval_runtime": 10.0708,
+ "eval_samples_per_second": 7.844,
+ "eval_steps_per_second": 7.844,
+ "step": 12
  }
  ],
  "logging_steps": 1,
@@ -106,7 +142,7 @@
  "attributes": {}
  }
  },
- "total_flos": 3431228686467072.0,
+ "total_flos": 5007739164033024.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null