Commit 6b60887 · Younes Belkada committed · Parent(s): 5d7ac41

add architecture + fix transformers version

Files changed (1): config.json (+4 -1)
config.json CHANGED
@@ -3,6 +3,9 @@
   "attention_dropout": 0.0,
   "attention_softmax_in_fp32": true,
   "bias_dropout_fusion": true,
+  "architectures": [
+    "BloomModel"
+  ],
   "bos_token_id": 1,
   "dtype": "float16",
   "eos_token_id": 2,
@@ -22,7 +25,7 @@
   "seq_length": 2048,
   "skip_bias_add": true,
   "skip_bias_add_qkv": false,
-  "transformers_version": "4.20.0.dev0",
+  "transformers_version": "4.20.0",
   "use_cache": true,
   "vocab_size": 250880
 }
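
Both changes matter for downstream loading: the "architectures" field records which transformers class produced the checkpoint, and pinning "transformers_version" to the released 4.20.0 (rather than a .dev0 build) documents a version that is expected to include Bloom support. A minimal sketch of reading these fields, assuming transformers >= 4.20.0 is installed and that the repository id is bigscience/bloom (it may be a different BLOOM variant; substitute the actual model id or a local path to the checkpoint directory containing this config.json):

# A minimal sketch, not the repository's own tooling.
import transformers
from transformers import AutoConfig

# Assumed model id; replace with the actual repo or a local path.
config = AutoConfig.from_pretrained("bigscience/bloom")
print(config.architectures)          # e.g. ["BloomModel"] after this commit
print(config.transformers_version)   # e.g. "4.20.0"

# The "architectures" entry names the model class, so it can be resolved
# from the config alone without hard-coding it in downstream code:
model_cls = getattr(transformers, config.architectures[0])
print(model_cls)                     # <class '...BloomModel'>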