lifeofcoding committed
Commit cfdd76b
1 Parent(s): eeb5311

Upload RWForCausalLM

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "tiiuae/falcon-7b",
+  "_name_or_path": "lifeofcoding/mastermax-7b",
   "alibi": false,
   "apply_residual_connection_post_layernorm": false,
   "architectures": [
@@ -7,12 +7,12 @@
   ],
   "attention_dropout": 0.0,
   "auto_map": {
-    "AutoConfig": "configuration_RW.RWConfig",
-    "AutoModel": "modelling_RW.RWModel",
-    "AutoModelForSequenceClassification": "modelling_RW.RWForSequenceClassification",
-    "AutoModelForTokenClassification": "modelling_RW.RWForTokenClassification",
-    "AutoModelForQuestionAnswering": "modelling_RW.RWForQuestionAnswering",
-    "AutoModelForCausalLM": "modelling_RW.RWForCausalLM"
+    "AutoConfig": "lifeofcoding/mastermax-7b--configuration_RW.RWConfig",
+    "AutoModel": "lifeofcoding/mastermax-7b--modelling_RW.RWModel",
+    "AutoModelForCausalLM": "lifeofcoding/mastermax-7b--modelling_RW.RWForCausalLM",
+    "AutoModelForQuestionAnswering": "lifeofcoding/mastermax-7b--modelling_RW.RWForQuestionAnswering",
+    "AutoModelForSequenceClassification": "lifeofcoding/mastermax-7b--modelling_RW.RWForSequenceClassification",
+    "AutoModelForTokenClassification": "lifeofcoding/mastermax-7b--modelling_RW.RWForTokenClassification"
   },
   "bias": false,
   "bos_token_id": 11,
@@ -26,8 +26,8 @@
   "n_head": 71,
   "n_layer": 32,
   "parallel_attn": true,
-  "torch_dtype": "float16",
-  "transformers_version": "4.30.2",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.31.0.dev0",
   "use_cache": true,
   "vocab_size": 65024
-}
+}
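
The auto_map entries now carry the `lifeofcoding/mastermax-7b--` repo prefix, so the Auto classes resolve the custom RW code from this repo rather than from local files. A minimal loading sketch under those assumptions (repo id and dtype taken from this diff; the rest is standard Transformers API):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "lifeofcoding/mastermax-7b"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches the new "torch_dtype" in config.json
    trust_remote_code=True,      # required: auto_map points at the repo's
                                 # configuration_RW.py / modelling_RW.py
)
```

Without `trust_remote_code=True`, Transformers refuses to execute the repo-hosted RW classes that auto_map references.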
generation_config.json CHANGED
@@ -1,6 +1,11 @@
 {
   "_from_model_config": true,
   "bos_token_id": 1,
-  "eos_token_id": 2,
-  "transformers_version": "4.30.2"
+  "eos_token_id": 11,
+  "max_new_tokens": 200,
+  "num_return_sequence": 1,
+  "pad_token_id": 11,
+  "temperature": 0.7,
+  "top_p": 0.7,
+  "transformers_version": "4.31.0.dev0"
 }
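
These values become the repo's default decoding settings: `generate()` picks them up automatically unless the caller overrides them. A brief sketch of how to inspect and use them (prompt text is illustrative; model/tokenizer as in the previous sketch):

```python
from transformers import GenerationConfig

# The stored defaults can be inspected directly from the Hub.
gen_config = GenerationConfig.from_pretrained("lifeofcoding/mastermax-7b")
print(gen_config.temperature, gen_config.top_p, gen_config.max_new_tokens)

# temperature/top_p only take effect when sampling; the config does not set
# do_sample, so it must be passed explicitly for these defaults to matter.
inputs = tokenizer("Hello, my name is", return_tensors="pt")
output_ids = model.generate(**inputs, do_sample=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```

Note that the stock Transformers parameter is spelled `num_return_sequences`; the singular `num_return_sequence` in this config appears to be a typo and is not a recognized generation flag, so `generate()` would ignore it.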
pytorch_model-00001-of-00002.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:184f632487fd0e34601aa202168bb3bc72d07dc0059310d18af400113c5aa2a9
+oid sha256:b7a8d542f34c17bd664bae4b021d9d84a8846e2c640dc1634475e5c108bdc391
 size 9951028257
pytorch_model-00002-of-00002.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dfb7053899f78fab813c94f34168e6ccafb843dc967cc566ba1641824398362c
+oid sha256:712981c74a1543300f6a29c704d36c95eff7454c4ab0bb4bc76434b02e82e26b
 size 3892483153
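
The .bin entries are Git LFS pointers: only the sha256 oid changed in each shard while the byte sizes stayed identical, i.e. the weights were re-uploaded with new contents of the same length. A small sketch for checking a downloaded shard against the pointer's oid (the local path is an assumption; adjust to wherever the shard was saved):

```python
import hashlib

# oid of the new first shard, taken from the LFS pointer above.
EXPECTED_OID = "b7a8d542f34c17bd664bae4b021d9d84a8846e2c640dc1634475e5c108bdc391"

h = hashlib.sha256()
with open("pytorch_model-00001-of-00002.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED_OID, "shard does not match the LFS pointer"
```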