merge_method: model_stock
base_model: EpistemeAI/Llama-3.2-3B-Agent007
dtype: float16
parameters:
  filter_wise: false
  weight: 1
  density: 0.42
  gamma: 0.03
models:
  - model: EpistemeAI/Llama-3.2-3B-Agent007
    layer_range: [0, 28]
  - model: Bllossom/llama-3.2-Korean-Bllossom-3B
    layer_range: [0, 28]
  - model: CarrotAI/Llama-3.2-Rabbit-Ko-3B-Instruct
    layer_range: [0, 28]
  - model: Saxo/Linkbricks-Llama3.2-Korean-cpt-3b
    layer_range: [0, 28]
  - model: RyanYr/llama32-3b-it_CoT-it_SFT
    layer_range: [0, 28]