nielsr (HF staff) committed
Commit 6c0cf6b
Parent: 3aec5d1

Upload InstructBlipForConditionalGeneration

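For reference, a checkpoint uploaded in this layout can be loaded directly with the InstructBLIP classes in transformers. A minimal loading sketch, assuming transformers >= 4.31, a hypothetical repo id (the actual repo id is not part of this commit view), and a local image file:

# Minimal sketch; the repo id and image path are placeholders, not taken from this commit.
import torch
from PIL import Image
from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration

repo_id = "Salesforce/instructblip-flan-t5-xl"  # assumed repo id, for illustration only

processor = InstructBlipProcessor.from_pretrained(repo_id)
model = InstructBlipForConditionalGeneration.from_pretrained(repo_id, torch_dtype=torch.float32)

image = Image.open("example.jpg")  # any local RGB image
inputs = processor(images=image, text="What is shown in this image?", return_tensors="pt")

generated_ids = model.generate(**inputs, max_new_tokens=30)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])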
config.json CHANGED
@@ -82,7 +82,7 @@
  "top_p": 1.0,
  "torch_dtype": null,
  "torchscript": false,
- "transformers_version": "4.30.0.dev0",
+ "transformers_version": "4.31.0.dev0",
  "typical_p": 1.0,
  "use_bfloat16": false,
  "vocab_size": 30523
@@ -193,7 +193,7 @@
  "top_p": 1.0,
  "torch_dtype": "float32",
  "torchscript": false,
- "transformers_version": "4.30.0.dev0",
+ "transformers_version": "4.31.0.dev0",
  "typical_p": 1.0,
  "use_bfloat16": false,
  "use_cache": true,
@@ -240,7 +240,7 @@
  "LABEL_0": 0,
  "LABEL_1": 1
  },
- "layer_norm_eps": 1e-05,
+ "layer_norm_eps": 1e-06,
  "length_penalty": 1.0,
  "max_length": 20,
  "min_length": 0,
@@ -259,7 +259,6 @@
  "patch_size": 14,
  "prefix": null,
  "problem_type": null,
- "projection_dim": 512,
  "pruned_heads": {},
  "qkv_bias": true,
  "remove_invalid_values": false,
@@ -278,7 +277,7 @@
  "top_p": 1.0,
  "torch_dtype": null,
  "torchscript": false,
- "transformers_version": "4.30.0.dev0",
+ "transformers_version": "4.31.0.dev0",
  "typical_p": 1.0,
  "use_bfloat16": false
  }
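The context lines around the layer_norm_eps and projection_dim changes (patch_size 14, qkv_bias) suggest these hunks sit in the vision sub-config of the composite InstructBLIP config; that placement is an inference from the diff, not stated in it. A quick way to check which values ended up in the published config is to load it and print the nested fields (repo id again assumed):

# Sketch for inspecting the nested config; the repo id is a placeholder.
from transformers import InstructBlipConfig

config = InstructBlipConfig.from_pretrained("Salesforce/instructblip-flan-t5-xl")

# InstructBlipConfig nests a vision config, a Q-Former config and a text config.
print(config.vision_config.layer_norm_eps)  # expected 1e-06 after this commit, if the hunk is in vision_config
print(config.qformer_config.vocab_size)     # 30523, matching the first hunk
print(config.text_config.model_type)        # T5-style language model, judging by the weight names below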
pytorch_model-00001-of-00002.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8756aca7708f1c9197f94c04ea2dc27a50040e4de85124e9366836013659fbe9
- size 9587341371
+ oid sha256:df1dfe95c6345c17661be74efd2a8891a692ee8f3c71c06debaebccba6e4354d
+ size 9981661575
pytorch_model-00002-of-00002.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e652fa687e40280479a7e29d28a7a3c7ec7e6a525c4880e604294c826c32c8bd
- size 6768240895
+ oid sha256:33324062656d33010bfc4a28be0ced982bbd53c9db705fcbabedd5a3f078aa6c
+ size 6110728493
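Both .bin entries are git-lfs pointer files: the repository itself stores only the spec version, the sha256 oid and the byte size, while the shard contents live in LFS storage. A small sketch for checking that a locally downloaded shard matches its updated pointer (the oid and size are copied from the new pointer above; the local path is assumed):

# Verify a downloaded shard against its git-lfs pointer (sha256 oid + size).
import hashlib
import os

def verify_shard(path: str, expected_sha256: str, expected_size: int) -> bool:
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256

ok = verify_shard(
    "pytorch_model-00001-of-00002.bin",
    "df1dfe95c6345c17661be74efd2a8891a692ee8f3c71c06debaebccba6e4354d",
    9981661575,
)
print("shard 1 matches its pointer:", ok)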
pytorch_model.bin.index.json CHANGED
@@ -1,33 +1,33 @@
  {
  "metadata": {
- "total_size": 16618265600
+ "total_size": 16091880448
  },
  "weight_map": {
- "language_model.decoder.block.0.layer.0.SelfAttention.k.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.0.layer.0.SelfAttention.o.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.0.layer.0.SelfAttention.q.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.0.layer.0.SelfAttention.v.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.0.layer.0.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.0.layer.1.EncDecAttention.k.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.0.layer.1.EncDecAttention.o.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.0.layer.1.EncDecAttention.q.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.0.layer.1.EncDecAttention.v.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.0.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.0.layer.2.DenseReluDense.wi_0.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.0.layer.2.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.0.layer.2.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.0.layer.2.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.1.layer.0.SelfAttention.k.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.1.layer.0.SelfAttention.o.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.1.layer.0.SelfAttention.q.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.1.layer.0.SelfAttention.v.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.1.layer.0.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.1.layer.1.EncDecAttention.k.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.1.layer.1.EncDecAttention.o.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.1.layer.1.EncDecAttention.q.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.1.layer.1.EncDecAttention.v.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.block.1.layer.1.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.decoder.block.0.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.0.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.0.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.0.layer.0.SelfAttention.v.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.0.layer.0.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.0.layer.1.EncDecAttention.k.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.0.layer.1.EncDecAttention.o.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.0.layer.1.EncDecAttention.q.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.0.layer.1.EncDecAttention.v.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.0.layer.1.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.0.layer.2.DenseReluDense.wi_0.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.0.layer.2.DenseReluDense.wi_1.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.0.layer.2.DenseReluDense.wo.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.0.layer.2.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.1.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.1.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.1.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.1.layer.0.SelfAttention.v.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.1.layer.0.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.1.layer.1.EncDecAttention.k.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.1.layer.1.EncDecAttention.o.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.1.layer.1.EncDecAttention.q.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.1.layer.1.EncDecAttention.v.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.decoder.block.1.layer.1.layer_norm.weight": "pytorch_model-00001-of-00002.bin",
  "language_model.decoder.block.1.layer.2.DenseReluDense.wi_0.weight": "pytorch_model-00002-of-00002.bin",
  "language_model.decoder.block.1.layer.2.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
  "language_model.decoder.block.1.layer.2.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
@@ -340,7 +340,7 @@
  "language_model.decoder.block.9.layer.2.DenseReluDense.wi_1.weight": "pytorch_model-00002-of-00002.bin",
  "language_model.decoder.block.9.layer.2.DenseReluDense.wo.weight": "pytorch_model-00002-of-00002.bin",
  "language_model.decoder.block.9.layer.2.layer_norm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.decoder.embed_tokens.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.decoder.embed_tokens.weight": "pytorch_model-00001-of-00002.bin",
  "language_model.decoder.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
  "language_model.encoder.block.0.layer.0.SelfAttention.k.weight": "pytorch_model-00001-of-00002.bin",
  "language_model.encoder.block.0.layer.0.SelfAttention.o.weight": "pytorch_model-00001-of-00002.bin",