yujiepan committed
Commit a69055f
1 parent: 056ce4e

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,105 @@
+ ---
+ library_name: transformers
+ pipeline_tag: text-generation
+ inference: true
+ widget:
+ - text: Hello!
+   example_title: Hello world
+   group: Python
+ ---
+
+ This model is for debugging purposes. It is randomly initialized using the config from [mistralai/Mamba-Codestral-7B-v0.1](https://huggingface.co/mistralai/Mamba-Codestral-7B-v0.1) but with a much smaller size.
+
+ Code:
+ ```python
+ import os
+
+ import torch
+ from huggingface_hub import create_repo, upload_folder
+ from transformers import (
+     AutoModelForCausalLM,
+     AutoTokenizer,
+     GenerationConfig,
+     Mamba2Config,
+     pipeline,
+     set_seed,
+ )
+
+ model_id = "mistralai/Mamba-Codestral-7B-v0.1"
+ repo_id = "yujiepan/mamba2-codestral-v0.1-tiny-random"
+ save_path = f"/tmp/{repo_id}"
+
+ os.system(f'rm -rf {save_path}')
+
+ # Shrink the upstream config down to a tiny debugging size.
+ config = Mamba2Config.from_pretrained(model_id)
+ config.use_cache = True
+ config.num_hidden_layers = 2
+ config.num_heads = 8
+ config.head_dim = 4
+ config.hidden_size = 8
+ config.expand = 4
+ config.intermediate_size = 32
+ config.state_size = 8
+ config.n_groups = 2
+
+ # Mamba2 shape invariants.
+ assert config.intermediate_size == \
+     config.hidden_size * config.expand == config.num_heads * config.head_dim
+ assert config.num_heads // config.n_groups > 0
+ assert config.num_heads % 8 == 0
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+ tokenizer.save_pretrained(save_path)
+
+ model = AutoModelForCausalLM.from_config(
+     config,
+     torch_dtype=torch.bfloat16,
+     trust_remote_code=True,
+ )
+ model.generation_config = GenerationConfig.from_pretrained(
+     model_id,
+     trust_remote_code=True,
+ )
+
+ # Re-initialize every parameter with uniform random noise.
+ set_seed(42)
+ with torch.no_grad():
+     for name, p in sorted(model.named_parameters()):
+         print(name, p.shape)
+         torch.nn.init.uniform_(p, -0.5, 0.5)
+
+ model.save_pretrained(save_path)
+
+ # Smoke test: generate a few tokens from the random model.
+ pipe = pipeline(
+     "text-generation",
+     model=save_path,
+     device="cuda",
+     trust_remote_code=True,
+     max_new_tokens=20,
+ )
+ print(pipe("Hello World!"))
+
+ # Embed this script into the model card, then upload everything.
+ with open(__file__, 'r') as f:
+     codes = f.read()
+ with open(f'{save_path}/README.md', 'w') as f:
+     f.write(
+         f'''---
+ library_name: transformers
+ pipeline_tag: text-generation
+ inference: true
+ widget:
+ - text: Hello!
+   example_title: Hello world
+   group: Python
+ ---
+
+ This model is for debugging purposes. It is randomly initialized using the config from [{model_id}](https://huggingface.co/{model_id}) but with a much smaller size.
+
+ Code:
+ ```python
+ {codes}
+ ```'''
+     )
+
+ create_repo(repo_id, exist_ok=True)
+ upload_folder(repo_id=repo_id, folder_path=save_path, repo_type='model')
+
+ ```
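For anyone pulling the uploaded checkpoint directly from the Hub, a minimal smoke test might look like the sketch below (assuming a `transformers` version with `mamba2` support, v4.44 or later). The output is gibberish by design, since the weights are uniform random noise.

```python
import torch
from transformers import pipeline

# Load the tiny random checkpoint from the Hub and generate a few tokens.
pipe = pipeline(
    "text-generation",
    model="yujiepan/mamba2-codestral-v0.1-tiny-random",
    torch_dtype=torch.bfloat16,
    max_new_tokens=20,
)
print(pipe("Hello World!"))  # random tokens; the model is untrained
```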
config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "_name_or_path": "/raid/pablo/codestral-hf-good/",
+   "architectures": [
+     "Mamba2ForCausalLM"
+   ],
+   "bos_token_id": 0,
+   "chunk_size": 256,
+   "conv_kernel": 4,
+   "eos_token_id": 0,
+   "expand": 4,
+   "head_dim": 4,
+   "hidden_act": "silu",
+   "hidden_size": 8,
+   "initializer_range": 0.1,
+   "intermediate_size": 32,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "mamba2",
+   "n_groups": 2,
+   "norm_before_gate": true,
+   "num_heads": 8,
+   "num_hidden_layers": 2,
+   "pad_token_id": 0,
+   "rescale_prenorm_residual": false,
+   "residual_in_fp32": true,
+   "rms_norm": true,
+   "state_size": 8,
+   "tie_word_embeddings": false,
+   "time_step_floor": 0.0001,
+   "time_step_init_scheme": "random",
+   "time_step_limit": [
+     0.0,
+     Infinity
+   ],
+   "time_step_max": 0.1,
+   "time_step_min": 0.001,
+   "time_step_rank": 256,
+   "time_step_scale": 1.0,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.45.2",
+   "use_bias": false,
+   "use_cache": true,
+   "use_conv_bias": true,
+   "vocab_size": 32768
+ }
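The tiny sizes in this config satisfy the shape invariants asserted in the upload script; a quick sanity check, as a sketch using only the fields shown above:

```python
from transformers import Mamba2Config

config = Mamba2Config.from_pretrained("yujiepan/mamba2-codestral-v0.1-tiny-random")

# intermediate_size (32) = hidden_size (8) * expand (4) = num_heads (8) * head_dim (4)
assert config.intermediate_size == config.hidden_size * config.expand
assert config.intermediate_size == config.num_heads * config.head_dim
# The 8 heads split evenly across the 2 groups.
assert config.num_heads % config.n_groups == 0
```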
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "pad_token_id": 1,
+   "transformers_version": "4.45.2"
+ }
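Note that `eos_token_id` and `pad_token_id` here differ from the ids in `config.json`: they appear to come from the upstream repo via the `GenerationConfig.from_pretrained(model_id)` call in the script, which is harmless for a debugging model. A sketch to inspect what generation actually uses:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained(
    "yujiepan/mamba2-codestral-v0.1-tiny-random"
)
# Expect 0 / 2 / 1, inherited from the upstream model rather than config.json.
print(gen_config.bos_token_id, gen_config.eos_token_id, gen_config.pad_token_id)
```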
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bf16d21b31bed6b3d74761a73c2ee17897fba814c780b7c8f1c228eb8789c90
+ size 1056544
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
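These special tokens are copied over from the upstream tokenizer by `tokenizer.save_pretrained` in the script; a sketch to confirm they round-trip through `AutoTokenizer`:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "yujiepan/mamba2-codestral-v0.1-tiny-random"
)
# Expected: <s> </s> <unk>, matching the map above.
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)
```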
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59f95e28944c062244741268596badc900df86c7f5ded05088d2da22a7379e06
+ size 587583
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff