Commit d96a6ed by sayanbanerjee32 (parent: d9d76bb)

Upload folder using huggingface_hub

adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "q_proj",
+    "up_proj",
     "gate_proj",
-    "k_proj",
-    "o_proj",
     "v_proj",
-    "down_proj",
-    "q_proj",
-    "up_proj"
+    "o_proj",
+    "k_proj",
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:be9a734ddff672d875a96978942f8382ed8cf321eec9d2e2aac943fea12aa8fd
+oid sha256:6679575b37a284cbe6767241b31cae3f2c765126fc336fb0b2503ce4484c34ce
 size 35669232
image_projector.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:85b65304e44ea622380c1bb36fd4b777f63a46b138dd2c4a3f96f7043468de7c
+oid sha256:cdb8d104176b23b4a9c2d6b9a48d380970c5542e265f758efbff698bb64f6980
 size 29379352
lora_weights.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a5b826e516bfac0e8949764d862de53d3e24738a533c6265012035e4d708f1d8
+oid sha256:1328057d9358075ae550fac83757d4f59a4224ccf074337967b5e09b3918bbd3
 size 35697862
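
The three weight files above are stored with Git LFS, so each diff touches only the three-line pointer file: the spec version, the sha256 oid of the blob, and its byte size. The sizes are unchanged in every case, which is what you would expect when tensors of the same shape are re-saved with updated values. A small verification sketch, assuming the file has already been pulled from LFS (the helper name is hypothetical; the expected oid and size come from the new lora_weights.pt pointer):

import hashlib
import os

# Hypothetical helper: check a pulled LFS object against its pointer.
def verify_lfs_object(path, expected_oid, expected_size):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks to keep memory flat for large weights.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return (digest.hexdigest() == expected_oid
            and os.path.getsize(path) == expected_size)

print(verify_lfs_object(
    "lora_weights.pt",
    "1328057d9358075ae550fac83757d4f59a4224ccf074337967b5e09b3918bbd3",
    35697862,
))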
trainer_state.json CHANGED
@@ -1,14 +1,14 @@
 {
-  "epoch": 0.014814814814814815,
-  "global_step": 2,
+  "epoch": 0.037037037037037035,
+  "global_step": 5,
   "max_steps": 12,
   "logging_steps": 10,
   "eval_steps": 10,
-  "save_steps": 1,
+  "save_steps": 5,
   "train_batch_size": 8,
   "num_train_epochs": 1,
   "num_input_tokens_seen": 0,
-  "total_flos": 1147442980454400.0,
+  "total_flos": 2792230777749504.0,
   "log_history": [],
   "best_metric": null,
   "best_model_checkpoint": null,