Silemo committed on
Commit
5daba2c
1 Parent(s): 5aae96c

Training in progress, step 100, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fe772448a15931bd7ce2be614b25b23070f28c6fd16f62d2ae67d4bf66284890
+oid sha256:fe3597c122a2e5567b331b531c1b0cf98c2606dfe5fc98bc664181d12b2b83cd
 size 966995080
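
Note: model.safetensors and the other checkpoint binaries below are tracked through Git LFS, so the repository only stores a small pointer file (spec version, sha256 oid, byte size) and this commit simply swaps the oid when the blob changes. A minimal sketch of checking a resolved blob against its pointer, assuming both are available locally (the paths are illustrative, not part of this repository):

```python
import hashlib
import re
from pathlib import Path

def parse_lfs_pointer(text: str) -> tuple[str, int]:
    """Pull the sha256 oid and byte size out of a Git LFS pointer file."""
    oid = re.search(r"oid sha256:([0-9a-f]{64})", text).group(1)
    size = int(re.search(r"^size (\d+)", text, re.MULTILINE).group(1))
    return oid, size

def sha256_of(path: Path) -> str:
    """Hash a large file in 1 MiB chunks to avoid loading it whole."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

def verify(pointer_path: Path, blob_path: Path) -> bool:
    """Compare a downloaded blob against the oid/size recorded in git."""
    oid, size = parse_lfs_pointer(pointer_path.read_text())
    return blob_path.stat().st_size == size and sha256_of(blob_path) == oid

# Illustrative paths: the pointer text as stored in git vs. the resolved file.
print(verify(Path("model.safetensors.pointer"), Path("model.safetensors")))
```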
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0d747a3260c537031fa55482065f18247b63193e87ebdf4c4cb51408e40715c4
+oid sha256:5feb1207165a2472ed0fc4a2352d29febeb73effc6fad6dbd900bc416ac92d15
 size 1925064044
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4b091d736184758344801816232d850355eef953b0eadc5bb3dae2822cac9058
+oid sha256:4fd91acef56149d62ef60f134f6bef8f3143b1426e8731dcb1c5449312d3ea8c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b35cf88239a26eb39ad927ac7f57df293b8692504e58626316d052661a916270
+oid sha256:4f9b9e6716d4845c0461a67674925be51cb5bd879f6bddb03b17fa941754a7de
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,29 +1,53 @@
 {
-  "best_metric": 36.87603993344426,
-  "best_model_checkpoint": "./whisper-it/checkpoint-10",
-  "epoch": 0.019083969465648856,
-  "eval_steps": 10,
-  "global_step": 10,
+  "best_metric": 176.42124237382143,
+  "best_model_checkpoint": "./whisper-it/checkpoint-100",
+  "epoch": 0.19083969465648856,
+  "eval_steps": 100,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.02,
-      "eval_loss": 3.8508095741271973,
-      "eval_runtime": 897.5106,
-      "eval_samples_per_second": 1.671,
-      "eval_steps_per_second": 0.209,
-      "eval_wer": 36.87603993344426,
-      "step": 10
+      "epoch": 0.05,
+      "learning_rate": 4.4e-07,
+      "loss": 3.4673,
+      "step": 25
+    },
+    {
+      "epoch": 0.1,
+      "learning_rate": 9.400000000000001e-07,
+      "loss": 2.7417,
+      "step": 50
+    },
+    {
+      "epoch": 0.14,
+      "learning_rate": 1.44e-06,
+      "loss": 2.0307,
+      "step": 75
+    },
+    {
+      "epoch": 0.19,
+      "learning_rate": 1.94e-06,
+      "loss": 1.2496,
+      "step": 100
+    },
+    {
+      "epoch": 0.19,
+      "eval_loss": 1.233077049255371,
+      "eval_runtime": 1969.1907,
+      "eval_samples_per_second": 0.762,
+      "eval_steps_per_second": 0.095,
+      "eval_wer": 176.42124237382143,
+      "step": 100
     }
   ],
   "logging_steps": 25,
   "max_steps": 4000,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 8,
-  "save_steps": 10,
-  "total_flos": 9.23473281024e+16,
+  "save_steps": 100,
+  "total_flos": 9.23473281024e+17,
   "trial_name": null,
   "trial_params": null
 }
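
Note: trainer_state.json is plain JSON written by the Hugging Face Trainer, so the training curve recorded in this checkpoint can be read straight out of log_history. A minimal sketch, assuming the checkpoint directory has been downloaded locally (the path is illustrative):

```python
import json
from pathlib import Path

state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

# Training-loss entries carry "loss"; evaluation entries carry "eval_*" keys.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_wer" in e]

for entry in train_log:
    print(f"step {entry['step']:>4}  loss {entry['loss']:.4f}  "
          f"lr {entry['learning_rate']:.2e}")
for entry in eval_log:
    print(f"step {entry['step']:>4}  eval_loss {entry['eval_loss']:.4f}  "
          f"WER {entry['eval_wer']:.2f}")

print("best checkpoint:", state["best_model_checkpoint"],
      "best metric:", state["best_metric"])
```

When this directory is passed to Trainer.train(resume_from_checkpoint=...), the surrounding files are what get restored: optimizer.pt (optimizer state), scheduler.pt (learning-rate scheduler), rng_state.pth (random-number generator state) and this state object, so training continues from step 100 instead of restarting.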
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3e922856806fa46a80e269c17d1925486bd1667519f7ae3201640b0263c445da
+oid sha256:0deaaf9b4dc785b95bcfecba732d9f20738cd6d99cb5f2d674c46008c563ff98
 size 4856
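
Note: unlike trainer_state.json, training_args.bin is a pickled training-arguments object rather than JSON. A minimal sketch for inspecting it, assuming the transformers version used for training and a recent torch are installed:

```python
import torch

# The Trainer pickles its (Seq2Seq)TrainingArguments into this file.
# torch >= 2.6 defaults to weights_only=True, which must be relaxed
# to unpickle arbitrary Python objects such as this one.
args = torch.load("last-checkpoint/training_args.bin", weights_only=False)

print(type(args).__name__)
print("learning_rate:", args.learning_rate)
print("max_steps:", args.max_steps)
print("save_steps:", args.save_steps)
print("eval_steps:", args.eval_steps)
```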