|
slices: |
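# Layer-slice merge list (the slices/sources/layer_range layout follows the mergekit
# schema, which is assumed here). Every entry below takes layers 0-3 of one source
# model and applies the same parameters: density 0.5, weight 0.1, normalize and
# int8_mask enabled, plus fixed generation settings (temperature 0.5, top_p 0.65,
# max_tokens, stream).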
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/5 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/5 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/5 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/5 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/5 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/5 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/5 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/5 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/5 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/5 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/5 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/5 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/5 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/5 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/4 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: chuanli11/Llama-3.2-3B-Instruct-uncensored |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Bllossom/llama-3.2-Korean-Bllossom-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B-Instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: Qwen/Qwen2.5-3B |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: stabilityai/stable-code-3b |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ibm-granite/granite-3b-code-base-2k |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ministral/Ministral-3b-instruct |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: ICEPVP8977/Uncensored_llama_3.2_3b_safetensors |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
- sources: |
|
- layer_range: [0, 3] |
|
model: lilmeaty/2 |
|
parameters: |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
normalize: true |
|
int8_mask: true |
|
density: 0.5 |
|
weight: 0.1 |
|
random_seed: 0 |
|
temperature: 0.5 |
|
top_p: 0.65 |
|
inference: true |
|
max_tokens: 999999999 |
|
stream: true |
|
  - sources:
      - layer_range: [0, 3]
        model: lilmeaty/4
        parameters:
          normalize: true
          int8_mask: true
          density: 0.5
          weight: 0.1
          random_seed: 0
          temperature: 0.5
          top_p: 0.65
          inference: true
          max_tokens: 999999999
          stream: true
  - sources:
      - layer_range: [0, 3]
        model: chuanli11/Llama-3.2-3B-Instruct-uncensored
        parameters:
          normalize: true
          int8_mask: true
          density: 0.5
          weight: 0.1
          random_seed: 0
          temperature: 0.5
          top_p: 0.65
          inference: true
          max_tokens: 999999999
          stream: true
  - sources:
      - layer_range: [0, 3]
        model: PJMixers-Dev/LLaMa-3.2-Instruct-JankMix-v0.1-SFT-3B
        parameters:
          normalize: true
          int8_mask: true
          density: 0.5
          weight: 0.1
          random_seed: 0
          temperature: 0.5
          top_p: 0.65
          inference: true
          max_tokens: 999999999
          stream: true
  - sources:
      - layer_range: [0, 3]
        model: Bllossom/llama-3.2-Korean-Bllossom-3B
        parameters:
          normalize: true
          int8_mask: true
          density: 0.5
          weight: 0.1
          random_seed: 0
          temperature: 0.5
          top_p: 0.65
          inference: true
          max_tokens: 999999999
          stream: true
  - sources:
      - layer_range: [0, 3]
        model: Qwen/Qwen2.5-3B-Instruct
        parameters:
          normalize: true
          int8_mask: true
          density: 0.5
          weight: 0.1
          random_seed: 0
          temperature: 0.5
          top_p: 0.65
          inference: true
          max_tokens: 999999999
          stream: true
  - sources:
      - layer_range: [0, 3]
        model: Qwen/Qwen2.5-3B
        parameters:
          normalize: true
          int8_mask: true
          density: 0.5
          weight: 0.1
          random_seed: 0
          temperature: 0.5
          top_p: 0.65
          inference: true
          max_tokens: 999999999
          stream: true
  - sources:
      - layer_range: [0, 3]
        model: stabilityai/stable-code-3b
        parameters:
          normalize: true
          int8_mask: true
          density: 0.5
          weight: 0.1
          random_seed: 0
          temperature: 0.5
          top_p: 0.65
          inference: true
          max_tokens: 999999999
          stream: true
  - sources:
      - layer_range: [0, 3]
        model: ibm-granite/granite-3b-code-base-2k
        parameters:
          normalize: true
          int8_mask: true
          density: 0.5
          weight: 0.1
          random_seed: 0
          temperature: 0.5
          top_p: 0.65
          inference: true
          max_tokens: 999999999
          stream: true
  - sources:
      - layer_range: [0, 3]
        model: ministral/Ministral-3b-instruct
        parameters:
          normalize: true
          int8_mask: true
          density: 0.5
          weight: 0.1
          random_seed: 0
          temperature: 0.5
          top_p: 0.65
          inference: true
          max_tokens: 999999999
          stream: true
parameters:
  normalize: true
  int8_mask: true
  density: 0.5
  weight: 0.1
  random_seed: 0
  temperature: 0.5
  top_p: 0.65
  inference: true
  max_tokens: 999999999
  stream: true
merge_method: passthrough
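# Usage sketch (assumes this file is a mergekit configuration; the file name
# config.yaml and the output path ./merged-model are placeholders, and the
# available flags depend on the installed mergekit version):
#
#   mergekit-yaml config.yaml ./merged-model --copy-tokenizer
#
# With merge_method: passthrough, the layer_range slices listed above are
# stacked in order rather than averaged; keys such as temperature, top_p,
# max_tokens, stream, and inference read as generation-time settings and are
# presumably ignored by the merge itself.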