Tiny dummy models
A collection of randomly initialized tiny models for debugging/testing purposes.
This tiny model is intended for debugging. It is randomly initialized, using a configuration adapted from tiiuae/Falcon-H1-34B-Instruct.

Example usage:
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "yujiepan/falcon-h1-tiny-random"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
pipe = pipeline('text-generation', model=model,
                tokenizer=tokenizer, trust_remote_code=True)
print(pipe('Write an article about Artificial Intelligence.', max_new_tokens=32))
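Because the tokenizer is copied from the Instruct source model, it typically ships a chat template as well. The following is a minimal sketch (the message content is illustrative) of formatting a chat-style prompt before generation, reusing the tokenizer and pipeline from above:

# Sketch: format a chat-style prompt with the tokenizer's chat template, if one is present.
messages = [{"role": "user", "content": "Write an article about Artificial Intelligence."}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(pipe(prompt, max_new_tokens=32))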
Codegen script used to create this tiny model:

import json
from pathlib import Path

import accelerate
import torch
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    set_seed,
)

source_model_id = "tiiuae/Falcon-H1-34B-Instruct"
save_folder = "/tmp/yujiepan/falcon-h1-tiny-random"

# Reuse the source model's tokenizer as-is.
processor = AutoTokenizer.from_pretrained(source_model_id)
processor.save_pretrained(save_folder)

# Load the source config and rewrite auto_map entries to point back at the source repo.
with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)
for k, v in config_json.get('auto_map', {}).items():
    config_json['auto_map'][k] = f'{source_model_id}--{v}'

# Shrink the architecture so the resulting checkpoint stays tiny.
config_json['head_dim'] = 32
config_json['hidden_size'] = 8
config_json['intermediate_size'] = 64
config_json['num_attention_heads'] = 8
config_json['num_key_value_heads'] = 4
config_json['num_hidden_layers'] = 2
config_json['mamba_d_head'] = 32
config_json['mamba_n_heads'] = 8
config_json['mamba_d_state'] = 32
config_json['mamba_d_ssm'] = config_json['mamba_d_head'] * config_json['mamba_n_heads']  # 32 * 8 = 256
config_json['mamba_expand'] = config_json['mamba_d_ssm'] // config_json['hidden_size']  # 256 // 8 = 32
config_json['tie_word_embeddings'] = True

with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)
automap = config_json.get('auto_map', None)

# Instantiate the model from the shrunken config in bfloat16.
torch.set_default_dtype(torch.bfloat16)
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
torch.set_default_dtype(torch.float32)

# Copy the generation config from the source model if it has one.
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )

# Randomly re-initialize all parameters with a fixed seed for reproducibility.
set_seed(42)
model = model.cpu()
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape)
model.save_pretrained(save_folder)
print(model)

# save_pretrained may rewrite auto_map and copy remote-code .py files into the folder;
# restore the original auto_map and drop any copied files.
if automap:
    with open(f"{save_folder}/config.json", "r", encoding='utf-8') as f:
        config_json = json.load(f)
    config_json['auto_map'] = automap
    with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
        json.dump(config_json, f, indent=2)
for python_file in Path(save_folder).glob('*.py'):
    python_file.unlink()
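The script above only writes the artifacts to a local folder. As a minimal sketch of the follow-up step (the target repo id is illustrative), the folder can be published with huggingface_hub:

from huggingface_hub import create_repo, upload_folder

# Sketch: publish the locally generated checkpoint; the repo id is an example.
create_repo("yujiepan/falcon-h1-tiny-random", repo_type="model", exist_ok=True)
upload_folder(
    repo_id="yujiepan/falcon-h1-tiny-random",
    folder_path=save_folder,
    repo_type="model",
)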
Printed model architecture (the output of print(model)):

FalconH1ForCausalLM(
  (model): FalconH1Model(
    (embed_tokens): Embedding(261120, 8, padding_idx=0)
    (layers): ModuleList(
      (0-1): 2 x FalconH1DecoderLayer(
        (feed_forward): FalconH1MLP(
          (gate_proj): Linear(in_features=8, out_features=64, bias=False)
          (up_proj): Linear(in_features=8, out_features=64, bias=False)
          (down_proj): Linear(in_features=64, out_features=8, bias=False)
          (act_fn): SiLUActivation()
        )
        (mamba): FalconH1Mixer(
          (act): SiLUActivation()
          (conv1d): Conv1d(384, 384, kernel_size=(4,), stride=(1,), padding=(3,), groups=384)
          (in_proj): Linear(in_features=8, out_features=648, bias=False)
          (norm): FalconH1RMSNormGated()
          (out_proj): Linear(in_features=256, out_features=8, bias=False)
        )
        (self_attn): FalconH1Attention(
          (q_proj): Linear(in_features=8, out_features=256, bias=False)
          (k_proj): Linear(in_features=8, out_features=128, bias=False)
          (v_proj): Linear(in_features=8, out_features=128, bias=False)
          (o_proj): Linear(in_features=256, out_features=8, bias=False)
        )
        (input_layernorm): FalconH1RMSNorm((8,), eps=1e-05)
        (pre_ff_layernorm): FalconH1RMSNorm((8,), eps=1e-05)
      )
    )
    (final_layernorm): FalconH1RMSNorm((8,), eps=1e-05)
    (rotary_emb): FalconH1RotaryEmbedding()
  )
  (lm_head): Linear(in_features=8, out_features=261120, bias=False)
)
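As a quick sanity check on how small the checkpoint is, a minimal sketch that counts parameters, reusing the model object from either snippet above:

# Sketch: count the parameters of the tiny model.
total = sum(p.numel() for p in model.parameters())
print(f"total parameters: {total:,}")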
Base model: tiiuae/Falcon-H1-34B-Base