sayanbanerjee32 committed
Commit 52042cc
1 parent: a213efb

Upload folder using huggingface_hub

adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:613351b3a8c02478f800f003a20ef8fadadb5d382cdb35560002a0f156f3e305
+oid sha256:b7768ed6817eab44bd64beb4a55b5f83d5900d11c7032af064c30163ae66bc3b
 size 35669232
image_projector.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7d2adc8578709898d5b4766e9abf64f0e9c5682b8e148a3d2e8d6ea1d74536db
+oid sha256:ef844b1160bc63da61c82c8e3d0a67caa1cac0f55a6271f900c807ba90b561e1
 size 29379352
lora_weights.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6ee9ba600d94524b27e9bebc006259a4c8bef999b475150edd2b43416a2d7a3b
+oid sha256:e2681369bd3f8efb94cfc028761bf8fb0cf99cd9b64e81f43d012cd3c91aab56
 size 35697862
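
Note: the three weight files above are Git LFS pointers, so only the sha256 oid (and, when it differs, the byte size) changes in the diff. As a minimal sketch, assuming the LFS blob has already been pulled to a local path, the new oid can be checked against the actual file like this (the path is a hypothetical example):

import hashlib

# Hypothetical local path; point this at wherever the LFS blob was downloaded.
path = "adapter_model.safetensors"

sha = hashlib.sha256()
with open(path, "rb") as f:
    # Hash in 1 MiB chunks so large checkpoints do not need to fit in memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

# Should match the oid recorded in the pointer file after this commit:
# b7768ed6817eab44bd64beb4a55b5f83d5900d11c7032af064c30163ae66bc3b
print(sha.hexdigest())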
trainer_state.json CHANGED
@@ -1,6 +1,6 @@
 {
-  "epoch": 0.11649580615097857,
-  "global_step": 250,
+  "epoch": 0.12814538676607642,
+  "global_step": 275,
   "max_steps": 2146,
   "logging_steps": 25,
   "eval_steps": 50,
@@ -8,7 +8,7 @@
   "train_batch_size": 8,
   "num_train_epochs": 1,
   "num_input_tokens_seen": 0,
-  "total_flos": 1.1459011039494144e+17,
+  "total_flos": 1.2757378629471437e+17,
   "log_history": [
     {
       "loss": 3.9545,
@@ -119,6 +119,13 @@
       "eval_steps_per_second": 0.14,
       "epoch": 0.11649580615097857,
       "step": 250
+    },
+    {
+      "loss": 0.1632,
+      "grad_norm": 0.019430797547101974,
+      "learning_rate": 0.0004844640082858622,
+      "epoch": 0.12814538676607642,
+      "step": 275
     }
   ],
   "best_metric": null,