hf-transformers-bot committed
Commit: ad5d05b
Parent: 5d655fb

Upload tiny models for XmodForCausalLM

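The files below make up a tiny X-MOD causal-LM checkpoint. As a minimal loading sketch (the repository id is a hypothetical placeholder, since the actual repo name is not part of this commit):

from transformers import AutoTokenizer, XmodForCausalLM

repo_id = "<namespace>/tiny-xmod-for-causal-lm"  # hypothetical placeholder, not the real repo name
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = XmodForCausalLM.from_pretrained(repo_id)
model.set_default_language("en_XX")  # optional here: config.json already sets default_language

inputs = tokenizer("Hello world", return_tensors="pt")
logits = model(**inputs).logits
print(logits.shape)  # (1, sequence_length, 250002)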
.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+ "adapter_layer_norm": false,
+ "adapter_reduction_factor": 2,
+ "adapter_reuse_layer_norm": true,
+ "architectures": [
+ "XmodForCausalLM"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "default_language": "en_XX",
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 32,
+ "initializer_range": 0.02,
+ "intermediate_size": 37,
+ "is_decoder": true,
+ "languages": [
+ "en_XX"
+ ],
+ "layer_norm_eps": 1e-12,
+ "ln_before_adapter": true,
+ "max_position_embeddings": 512,
+ "model_type": "xmod",
+ "num_attention_heads": 4,
+ "num_hidden_layers": 5,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "pre_norm": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.28.0.dev0",
+ "type_vocab_size": 16,
+ "use_cache": true,
+ "vocab_size": 250002
+ }
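The config describes a deliberately small X-MOD decoder (hidden_size 32, 5 layers, 4 heads) with the full 250002-token XLM-R vocabulary. As a sketch, the same architecture can be rebuilt from scratch with XmodConfig, giving a randomly initialized copy rather than the uploaded weights:

from transformers import XmodConfig, XmodForCausalLM

config = XmodConfig(
    vocab_size=250002,
    hidden_size=32,
    num_hidden_layers=5,
    num_attention_heads=4,
    intermediate_size=37,
    max_position_embeddings=512,
    type_vocab_size=16,
    languages=["en_XX"],
    default_language="en_XX",
    adapter_reduction_factor=2,
    is_decoder=True,
)
model = XmodForCausalLM(config)
n_params = sum(p.numel() for p in model.parameters())
print(n_params)  # roughly 8M, dominated by the 250002 x 32 embedding matrix (consistent with the ~33 MB pytorch_model.bin below)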
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 0,
+ "eos_token_id": 2,
+ "pad_token_id": 1,
+ "transformers_version": "4.28.0.dev0"
+ }
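generation_config.json only carries the special-token ids that model.generate() falls back to. Continuing the loading sketch above:

out = model.generate(**inputs, max_new_tokens=5, do_sample=False)  # greedy decoding with the defaults from generation_config.json
print(tokenizer.decode(out[0], skip_special_tokens=True))  # gibberish is expected from a random tiny model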
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:491e0ed93d5a1f8ae08a3a09a87217b9797ed872cf04a2cdaaf7882a3ae39136
+ size 33270025
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "bos_token": "<s>",
+ "cls_token": "<s>",
+ "eos_token": "</s>",
+ "mask_token": {
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "<pad>",
+ "sep_token": "</s>",
+ "unk_token": "<unk>"
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62c24cdc13d4c9952d63718d6c9fa4c287974249e16b7ade6d5a85e7bbb75626
+ size 17082660
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "<s>",
+ "eos_token": "</s>",
+ "mask_token": {
+ "__type": "AddedToken",
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ },
+ "model_max_length": 510,
+ "pad_token": "<pad>",
+ "sep_token": "</s>",
+ "sp_model_kwargs": {},
+ "special_tokens_map_file": null,
+ "tokenizer_class": "XLMRobertaTokenizer",
+ "unk_token": "<unk>"
+ }
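tokenizer_config.json points at the stock XLMRobertaTokenizer, so the SentencePiece model and special tokens above appear to be the standard XLM-R ones. A quick consistency check against the ids in config.json (same hypothetical repo id as in the sketch at the top):

from transformers import XLMRobertaTokenizer

tok = XLMRobertaTokenizer.from_pretrained(repo_id)  # repo_id is the placeholder defined earlier
print(tok.bos_token_id, tok.pad_token_id, tok.eos_token_id)  # expected 0, 1, 2, matching config.json
print(tok.model_max_length)  # 510, leaving room for <s> and </s> within max_position_embeddings=512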