femiari committed on
Commit
ea144f2
1 Parent(s): 3ecf165

Upload folder using huggingface_hub

Files changed (2)
  1. README.md +68 -0
  2. config.json +54 -0
README.md ADDED
@@ -0,0 +1,68 @@

---
base_model:
- Qwen/Qwen-7B
- TideDra/Qwen-VL-Chat-DPO
license: apache-2.0
tags:
- moe
- frankenmoe
- merge
- mergekit
- lazymergekit
- Qwen/Qwen-7B
- TideDra/Qwen-VL-Chat-DPO
---

# QwenMoEAriel

QwenMoEAriel is a Mixture of Experts (MoE) made from the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
* [Qwen/Qwen-7B](https://huggingface.co/Qwen/Qwen-7B)
* [TideDra/Qwen-VL-Chat-DPO](https://huggingface.co/TideDra/Qwen-VL-Chat-DPO)

## 🧩 Configuration

```yaml
base_model: Qwen/Qwen-7B
gate_mode: cheap_embed
experts:
  - source_model: Qwen/Qwen-7B
    positive_prompts:
      - "chat"
      - "assistant"
      - "tell me"
      - "explain"
      - "I want"
  - source_model: TideDra/Qwen-VL-Chat-DPO
    positive_prompts:
      - "code"
      - "python"
      - "javascript"
      - "programming"
      - "algorithm"
shared_experts:
  - source_model: Qwen/Qwen-7B
```
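
With `gate_mode: cheap_embed`, mergekit-moe seeds each expert's router weights from the raw token embeddings of its `positive_prompts` rather than from full hidden states. The snippet below is only a rough, hypothetical sketch of that idea, not mergekit's actual implementation; the `prompt_vector` helper is invented for illustration, and the base model is loaded here solely to reach its input embedding table.

```python
# Hypothetical sketch of "cheap_embed"-style gate initialization:
# embed each expert's positive prompts with the base model's input
# embedding table and average them into one router row per expert.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

base = "Qwen/Qwen-7B"
tokenizer = AutoTokenizer.from_pretrained(base, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(base, trust_remote_code=True, torch_dtype=torch.float16)
embed = model.get_input_embeddings()

def prompt_vector(prompts):
    # Mean-pool the token embeddings of all prompts for one expert.
    vecs = []
    with torch.no_grad():
        for p in prompts:
            ids = tokenizer(p, return_tensors="pt").input_ids
            vecs.append(embed(ids).float().mean(dim=1))  # (1, hidden_size)
    return torch.cat(vecs).mean(dim=0)                   # (hidden_size,)

router_rows = torch.stack([
    prompt_vector(["chat", "assistant", "tell me", "explain", "I want"]),
    prompt_vector(["code", "python", "javascript", "programming", "algorithm"]),
])
print(router_rows.shape)  # (2, hidden_size): one gate row per expert
```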

## 💻 Usage

```python
# Install dependencies first (shell or notebook): pip install -qU transformers bitsandbytes accelerate

from transformers import AutoTokenizer
import transformers
import torch

model = "femiari/QwenMoEAriel"

# The Qwen tokenizer ships as custom code on the Hub, so trust_remote_code is required.
tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    trust_remote_code=True,
    model_kwargs={"torch_dtype": torch.float16, "load_in_4bit": True},
)

messages = [{"role": "user", "content": "Explain what a Mixture of Experts is in less than 100 words."}]
prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```
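
If your transformers version warns that the bare `load_in_4bit` flag is deprecated, passing an explicit `BitsAndBytesConfig` is the equivalent form. The following is a sketch of that variant, not part of the original card; it loads the model and tokenizer up front and then builds the same text-generation pipeline.

```python
# Hypothetical alternative: explicit 4-bit quantization via BitsAndBytesConfig.
import torch
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "femiari/QwenMoEAriel"
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)
pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer)
```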
config.json ADDED
@@ -0,0 +1,54 @@

{
  "_name_or_path": "Qwen/Qwen-7B",
  "architectures": [
    "MixtralForCausalLM"
  ],
  "attention_dropout": 0.0,
  "attn_dropout_prob": 0.0,
  "auto_map": {
    "AutoConfig": "Qwen/Qwen-7B--configuration_qwen.QWenConfig",
    "AutoModelForCausalLM": "Qwen/Qwen-7B--modeling_qwen.QWenLMHeadModel"
  },
  "bf16": false,
  "bos_token_id": null,
  "emb_dropout_prob": 0.0,
  "eos_token_id": null,
  "fp16": false,
  "fp32": false,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 22016,
  "kv_channels": 128,
  "layer_norm_epsilon": 1e-06,
  "max_position_embeddings": 32768,
  "model_type": "mixtral",
  "no_bias": true,
  "num_attention_heads": 32,
  "num_experts_per_tok": 2,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "num_local_experts": 2,
  "onnx_safe": null,
  "output_router_logits": false,
  "rms_norm_eps": 1e-06,
  "rope_theta": 10000.0,
  "rotary_emb_base": 10000,
  "rotary_pct": 1.0,
  "router_aux_loss_coef": 0.001,
  "router_jitter_noise": 0.0,
  "scale_attn_weights": true,
  "seq_length": 8192,
  "sliding_window": null,
  "softmax_in_fp32": false,
  "tie_word_embeddings": false,
  "tokenizer_class": "QWenTokenizer",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "use_cache_kernel": false,
  "use_cache_quantization": false,
  "use_dynamic_ntk": true,
  "use_flash_attn": "auto",
  "use_logn_attn": true,
  "vocab_size": 151936
}
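
The config mixes Mixtral-style MoE fields (`num_local_experts`, `num_experts_per_tok`, `router_aux_loss_coef`) with Qwen-specific ones (`kv_channels`, `use_dynamic_ntk`, `use_logn_attn`), as expected when a mergekit-moe export starts from Qwen-7B's configuration. The snippet below is an illustrative sketch, not part of the repository: it downloads the file from the Hub and prints the MoE-relevant fields.

```python
# Illustrative: download config.json from the Hub and print the MoE-related fields.
import json
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="femiari/QwenMoEAriel", filename="config.json")
with open(path) as f:
    cfg = json.load(f)

for key in ("architectures", "model_type", "num_local_experts",
            "num_experts_per_tok", "hidden_size", "num_hidden_layers"):
    print(f"{key}: {cfg[key]}")
```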