kevinoli committed on
Commit a9bada8
1 Parent(s): e3d20a2

Training in progress, step 2500, checkpoint

checkpoint-2500/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "openai/clip-vit-large-patch14-336",
+   "architectures": [
+     "CLIPModel"
+   ],
+   "initializer_factor": 1.0,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "clip",
+   "projection_dim": 768,
+   "text_config": {
+     "dropout": 0.0,
+     "hidden_size": 768,
+     "intermediate_size": 3072,
+     "model_type": "clip_text_model",
+     "num_attention_heads": 12,
+     "projection_dim": 768
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.45.0.dev0",
+   "vision_config": {
+     "dropout": 0.0,
+     "hidden_size": 1024,
+     "image_size": 336,
+     "intermediate_size": 4096,
+     "model_type": "clip_vision_model",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 24,
+     "patch_size": 14,
+     "projection_dim": 768
+   }
+ }
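For reference, a minimal sketch of loading this checkpoint with transformers; the local directory path is an assumption (the folder can equally be pulled straight from the Hub).

```python
from transformers import CLIPConfig, CLIPModel

# Minimal sketch, assuming this checkpoint folder has been downloaded to
# "./checkpoint-2500" (hypothetical local path).
config = CLIPConfig.from_pretrained("./checkpoint-2500")
print(config.projection_dim)            # 768
print(config.vision_config.image_size)  # 336
print(config.vision_config.patch_size)  # 14

# Weights are read from model.safetensors in the same directory.
model = CLIPModel.from_pretrained("./checkpoint-2500")
```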
checkpoint-2500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d0367660f83df2eb451c708665efa800a258086032b172a385e37af999f6b26
+ size 1711848436
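model.safetensors is stored as a Git LFS pointer (as are the .pt/.pth/.bin files below): the repo keeps only the oid and size, and the binary is fetched on download. A minimal sketch of verifying a downloaded copy against the SHA-256 recorded in the pointer above; the local path is an assumption.

```python
import hashlib

# Hypothetical local path to the downloaded weights file.
path = "checkpoint-2500/model.safetensors"
expected = "0d0367660f83df2eb451c708665efa800a258086032b172a385e37af999f6b26"

sha = hashlib.sha256()
with open(path, "rb") as f:
    # Hash in 1 MiB chunks so the ~1.7 GB file never sits fully in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert sha.hexdigest() == expected, "checksum mismatch"
```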
checkpoint-2500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5f0caf2931081fa8e54616238f2fd70bdf16c3deaad7acd29d7d7d4f030e4e0
+ size 3424043887
checkpoint-2500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd0cc501af0df1601264672dbfc6cf6040fc01ccafb00b41ccca22f726fbadf9
+ size 14503
checkpoint-2500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d37cfc061e64b87efb39d07e8af3cae3cffcb749fb440f18d9bcee47d88e8ad4
+ size 623
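optimizer.pt, rng_state.pth, and scheduler.pt carry the state the Trainer restores when resuming via resume_from_checkpoint; they are ordinary torch.save() payloads. A minimal sketch of inspecting them, with paths assumed local:

```python
import torch

# Minimal sketch, assuming a local copy of this checkpoint folder.
optimizer_state = torch.load("checkpoint-2500/optimizer.pt", map_location="cpu")
scheduler_state = torch.load("checkpoint-2500/scheduler.pt", map_location="cpu")

print(list(optimizer_state.keys()))  # typically ["state", "param_groups"]
print(scheduler_state)               # LR scheduler state_dict (last_epoch, etc.)
```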
checkpoint-2500/trainer_state.json ADDED
@@ -0,0 +1,108 @@
+ {
+   "best_metric": 2.0306718349456787,
+   "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e2l57-l/checkpoint-2500",
+   "epoch": 0.46057479734708917,
+   "eval_steps": 500,
+   "global_step": 2500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.09211495946941783,
+       "grad_norm": 0.00766215892508626,
+       "learning_rate": 4.769712601326456e-05,
+       "loss": 0.7207,
+       "step": 500
+     },
+     {
+       "epoch": 0.09211495946941783,
+       "eval_loss": 2.078455924987793,
+       "eval_runtime": 73.0449,
+       "eval_samples_per_second": 16.524,
+       "eval_steps_per_second": 2.067,
+       "step": 500
+     },
+     {
+       "epoch": 0.18422991893883567,
+       "grad_norm": 0.016227997839450836,
+       "learning_rate": 4.539425202652911e-05,
+       "loss": 0.702,
+       "step": 1000
+     },
+     {
+       "epoch": 0.18422991893883567,
+       "eval_loss": 2.0786073207855225,
+       "eval_runtime": 73.7466,
+       "eval_samples_per_second": 16.367,
+       "eval_steps_per_second": 2.048,
+       "step": 1000
+     },
+     {
+       "epoch": 0.2763448784082535,
+       "grad_norm": 3.326040029525757,
+       "learning_rate": 4.309137803979367e-05,
+       "loss": 0.7058,
+       "step": 1500
+     },
+     {
+       "epoch": 0.2763448784082535,
+       "eval_loss": 2.0656001567840576,
+       "eval_runtime": 75.4832,
+       "eval_samples_per_second": 15.99,
+       "eval_steps_per_second": 2.0,
+       "step": 1500
+     },
+     {
+       "epoch": 0.36845983787767134,
+       "grad_norm": 5.95470666885376,
+       "learning_rate": 4.078850405305822e-05,
+       "loss": 0.7117,
+       "step": 2000
+     },
+     {
+       "epoch": 0.36845983787767134,
+       "eval_loss": 2.113447427749634,
+       "eval_runtime": 75.7404,
+       "eval_samples_per_second": 15.936,
+       "eval_steps_per_second": 1.994,
+       "step": 2000
+     },
+     {
+       "epoch": 0.46057479734708917,
+       "grad_norm": 1.3789223432540894,
+       "learning_rate": 3.848563006632277e-05,
+       "loss": 0.7163,
+       "step": 2500
+     },
+     {
+       "epoch": 0.46057479734708917,
+       "eval_loss": 2.0306718349456787,
+       "eval_runtime": 75.7822,
+       "eval_samples_per_second": 15.927,
+       "eval_steps_per_second": 1.993,
+       "step": 2500
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 10856,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 2,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 900115394852520.0,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
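trainer_state.json is plain JSON, so the loss curve and the best checkpoint can be read without transformers at all; a minimal sketch (local path assumed):

```python
import json

# Assumed local path to the file shown above.
with open("checkpoint-2500/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])            # 2.0306718349456787
print(state["best_model_checkpoint"])  # ./output/.../checkpoint-2500

# Pull the eval-loss curve out of log_history.
eval_curve = [(e["step"], e["eval_loss"])
              for e in state["log_history"] if "eval_loss" in e]
print(eval_curve)  # [(500, 2.0784...), (1000, 2.0786...), ..., (2500, 2.0306...)]
```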
checkpoint-2500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:113e5581618b2f31a20dabc0bbd4943905ebe2c2323e7b369e736bac42477916
+ size 4847
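training_args.bin is a pickled TrainingArguments object written with torch.save. A minimal sketch of inspecting it; the path is an assumption, and weights_only=False is needed on recent PyTorch because the file is a pickled Python object rather than plain tensors (only do this for files you trust).

```python
import torch

# Requires transformers to be installed so the pickled TrainingArguments
# class can be resolved during unpickling.
args = torch.load("checkpoint-2500/training_args.bin", weights_only=False)

print(args.learning_rate)
print(args.per_device_train_batch_size)
print(args.num_train_epochs)  # 2, matching trainer_state.json above
```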