{
  "_name_or_path": "sarvamai/shuka_v1",
  "architectures": [
    "ShukaModel"
  ],
  "audio_config": {
    "_name_or_path": "sarvam/saaras_0_11",
    "activation_dropout": 0.0,
    "activation_function": "gelu",
    "apply_spec_augment": true,
    "architectures": [
      "WhisperForConditionalGeneration"
    ],
    "attention_dropout": 0.0,
    "begin_suppress_tokens": [
      220,
      50257
    ],
    "bos_token_id": 50257,
    "d_model": 1280,
    "decoder_attention_heads": 20,
    "decoder_ffn_dim": 5120,
    "decoder_layerdrop": 0.0,
    "decoder_layers": 32,
    "decoder_start_token_id": 50258,
    "dropout": 0.0,
    "encoder_attention_heads": 20,
    "encoder_ffn_dim": 5120,
    "encoder_layerdrop": 0.0,
    "encoder_layers": 32,
    "eos_token_id": 50257,
    "forced_decoder_ids": [
      [
        1,
        50259
      ],
      [
        2,
        50359
      ],
      [
        3,
        50363
      ]
    ],
    "init_std": 0.02,
    "is_encoder_decoder": true,
    "max_length": 448,
    "max_source_positions": 1500,
    "max_target_positions": 448,
    "median_filter_width": 7,
    "model_type": "whisper",
    "num_hidden_layers": 32,
    "num_mel_bins": 80,
    "pad_token_id": 50257,
    "scale_embedding": false,
    "suppress_tokens": [
      1,
      2,
      7,
      8,
      9,
      10,
      14,
      25,
      26,
      27,
      28,
      29,
      31,
      58,
      59,
      60,
      61,
      62,
      63,
      90,
      91,
      92,
      93,
      359,
      503,
      522,
      542,
      873,
      893,
      902,
      918,
      922,
      931,
      1350,
      1853,
      1982,
      2460,
      2627,
      3246,
      3253,
      3268,
      3536,
      3846,
      3961,
      4183,
      4667,
      6585,
      6647,
      7273,
      9061,
      9383,
      10428,
      10929,
      11938,
      12033,
      12331,
      12562,
      13793,
      14157,
      14635,
      15265,
      15618,
      16553,
      16604,
      18362,
      18956,
      20075,
      21675,
      22520,
      26130,
      26161,
      26435,
      28279,
      29464,
      31650,
      32302,
      32470,
      36865,
      42863,
      47425,
      49870,
      50254,
      50258,
      50358,
      50359,
      50360,
      50361,
      50362
    ],
    "torch_dtype": "float32",
    "use_cache": true,
    "vocab_size": 51865
  },
  "audio_model_id": null,
  "auto_map": {
    "AutoConfig": "shuka_config.ShukaConfig",
    "AutoModel": "shuka_model.ShukaModel"
  },
  "custom_pipelines": {
    "shuka-pipeline": {
      "impl": "shuka_pipeline.ShukaPipeline",
      "pt": [
        "AutoModel"
      ],
      "tf": [],
      "type": "multimodal"
    }
  },
  "encoder_ds_factor": 160,
  "hidden_size": 4096,
  "ignore_index": -100,
  "initializer_range": 0.02,
  "model_type": "shuka",
  "norm_init": 0.4,
  "projector_act": "silu",
  "stack_factor": 8,
  "text_config": {
    "_name_or_path": "meta-llama/Meta-Llama-3-8B-Instruct",
    "architectures": [
      "LlamaForCausalLM"
    ],
    "bos_token_id": 128000,
    "eos_token_id": 128009,
    "intermediate_size": 14336,
    "max_position_embeddings": 8192,
    "model_type": "llama",
    "num_key_value_heads": 8,
    "rms_norm_eps": 1e-05,
    "rope_theta": 500000.0,
    "torch_dtype": "bfloat16",
    "vocab_size": 128256
  },
  "text_model_id": null,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.41.2",
  "vocab_size": 128256
}
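
For reference, this config registers custom model classes ("auto_map" → shuka_config.ShukaConfig / shuka_model.ShukaModel) and a custom pipeline ("custom_pipelines" → shuka_pipeline.ShukaPipeline), which means the model must be loaded with trust_remote_code=True so transformers can import those modules from the repo. Below is a minimal usage sketch, not the repo's official snippet: the audio file, system prompt, and the <|audio|> turn format are assumptions based on the "multimodal" pipeline type and the Whisper audio encoder declared above.

```python
# Minimal sketch: loading the custom "shuka-pipeline" declared in this config.
# trust_remote_code=True is required because auto_map / custom_pipelines point
# at Python files (shuka_config.py, shuka_model.py, shuka_pipeline.py) that
# live alongside config.json in the model repo.
import librosa
from transformers import pipeline

pipe = pipeline(
    model="sarvamai/shuka_v1",
    trust_remote_code=True,   # executes the repo's shuka_* modules
    device=0,
    torch_dtype="bfloat16",   # matches "torch_dtype" in this config
)

# The Whisper-style audio encoder (num_mel_bins: 80) expects 16 kHz input.
audio, sr = librosa.load("question.wav", sr=16000)  # placeholder file name

# Assumed turn format for the multimodal pipeline: the <|audio|> marker is
# where the projected audio embeddings are spliced into the Llama-3 prompt.
turns = [
    {"role": "system", "content": "Respond naturally and informatively."},
    {"role": "user", "content": "<|audio|>"},
]

out = pipe(
    {"audio": audio, "turns": turns, "sampling_rate": sr},
    max_new_tokens=256,
)
print(out)
```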