VarunGumma committed
Commit ab4f7b6 · verified · 1 Parent(s): 415c09e

Upload config.json with huggingface_hub

Files changed (1)
  1. config.json +47 -0
config.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "activation_dropout": 0.0,
+   "activation_function": "gelu",
+   "architectures": [
+     "RotaryIndicTransForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "attn_implementation": "eager",
+   "bos_token_id": 0,
+   "decoder_attention_heads": 8,
+   "decoder_embed_dim": 512,
+   "decoder_ffn_dim": 2048,
+   "decoder_layerdrop": 0,
+   "decoder_layers": 18,
+   "decoder_normalize_before": true,
+   "decoder_start_token_id": 2,
+   "decoder_vocab_size": 122672,
+   "dropout": 0.2,
+   "encoder_attention_heads": 8,
+   "encoder_embed_dim": 512,
+   "encoder_ffn_dim": 2048,
+   "encoder_layerdrop": 0,
+   "encoder_layers": 18,
+   "encoder_normalize_before": true,
+   "encoder_vocab_size": 32322,
+   "eos_token_id": 2,
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "layernorm_embedding": true,
+   "model_type": "RotaryIndicTrans",
+   "num_hidden_layers": 18,
+   "pad_token_id": 1,
+   "rope_args": {
+     "theta": 10000
+   },
+   "scale_embedding": true,
+   "share_decoder_input_output_embed": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.44.0",
+   "use_cache": true,
+   "name_or_path": "VarunGumma/rotary-indictrans2-en-indic-dist-200M",
+   "auto_map": {
+     "AutoConfig": "configuration_rotary_indictrans.RotaryIndicTransConfig",
+     "AutoModelForSeq2SeqLM": "modeling_rotary_indictrans.RotaryIndicTransForConditionalGeneration"
+   },
+   "tokenizer_class": "IndicTransTokenizer"
+ }
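
Note: the "auto_map" entries point AutoConfig and AutoModelForSeq2SeqLM at custom modules shipped inside the repository (configuration_rotary_indictrans.py / modeling_rotary_indictrans.py), so loading this checkpoint through transformers requires trust_remote_code=True. Below is a minimal loading sketch, not part of this commit; it assumes the repo also ships the IndicTransTokenizer named under "tokenizer_class", and the sentence tagging and generation settings are illustrative only.

# Minimal sketch: load the custom RotaryIndicTrans model via its auto_map entries.
# Assumes the repo provides the custom config/model/tokenizer code referenced in
# config.json; generation settings and the example input are illustrative.
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

repo_id = "VarunGumma/rotary-indictrans2-en-indic-dist-200M"  # from "name_or_path" above

# trust_remote_code=True is needed because AutoConfig / AutoModelForSeq2SeqLM
# resolve to the custom classes listed under "auto_map".
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForSeq2SeqLM.from_pretrained(
    repo_id,
    trust_remote_code=True,
    torch_dtype=torch.float32,  # matches "torch_dtype" in the config
)
model.eval()

# Hypothetical usage: IndicTrans2-style inputs are typically prefixed with source
# and target language tags (e.g. "eng_Latn", "hin_Deva") before tokenization.
sentence = "eng_Latn hin_Deva This is a test sentence."
inputs = tokenizer(sentence, return_tensors="pt")
with torch.no_grad():
    generated = model.generate(**inputs, max_new_tokens=64, num_beams=5)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))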