# Qwen2.5-14B-BrocaV9 / mergekit_config.yml
merge_method: della_linear
base_model: CultriX/Qwen2.5-14B-Wernickev3
dtype: bfloat16
parameters:
  epsilon: 0.015   # Fine-grained drop-probability window for DELLA's magnitude-based pruning.
  lambda: 1.6      # Rescales the merged deltas; >1 emphasizes top-performing models.
  normalize: true  # Normalize model weights for stable parameter integration.
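# A rough sketch of how these knobs interact, per mergekit's DELLA method:
# each model's delta from the base is pruned by parameter magnitude, with
# per-parameter drop probabilities spread across a window of width epsilon
# around the base drop rate (1 - density); surviving deltas are rescaled by
# lambda before the weighted linear combination.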
adaptive_merge_parameters:
  task_weights:
    tinyArc: 1.75        # Logical reasoning.
    tinyHellaswag: 1.65  # Contextual predictions.
    tinyMMLU: 1.8        # Domain knowledge.
    tinyTruthfulQA: 2.0  # Prioritize truthful reasoning.
    tinyTruthfulQA_mc1: 1.85
    tinyWinogrande: 1.9  # Advanced reasoning and predictions.
    IFEval: 2.1          # Instruction-following and multitasking.
    BBH: 1.9             # Complex reasoning.
    MATH: 2.3            # Mathematical reasoning.
    GPQA: 2.0            # Factual QA.
    MUSR: 2.1            # Multi-step reasoning.
    MMLU-PRO: 1.95       # Domain multitask performance.
  smoothing_factor: 0.1  # Smooth blending across benchmarks.
gradient_clipping:
  CultriX/Qwen2.5-14B-Wernickev3: 0.9
  djuna/Q2.5-Veltha-14B-0.5: 0.92
  qingy2019/Qwen2.5-Math-14B-Instruct: 0.94
  CultriX/SeQwence-14Bv1: 0.88
  allknowingroger/QwenSlerp6-14B: 0.87
  CultriX/Qwenfinity-2.5-14B: 0.85
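# Note: adaptive_merge_parameters, task_weights, smoothing_factor, and
# gradient_clipping are not part of mergekit's documented della_linear
# schema; the per-model weight and density values below are the standard
# knobs that drive the merge.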
models:
  - model: CultriX/Qwen2.5-14B-Wernickev3
    parameters:
      weight: 0.25
      density: 0.72
  - model: djuna/Q2.5-Veltha-14B-0.5
    parameters:
      weight: 0.22
      density: 0.75
  - model: qingy2019/Qwen2.5-Math-14B-Instruct
    parameters:
      weight: 0.18
      density: 0.74
  - model: CultriX/SeQwence-14Bv1
    parameters:
      weight: 0.15
      density: 0.7
  - model: allknowingroger/QwenSlerp6-14B
    parameters:
      weight: 0.12
      density: 0.68
  - model: CultriX/Qwenfinity-2.5-14B
    parameters:
      weight: 0.15
      density: 0.7
tokenizer_source: CultriX/Qwen2.5-14B-Wernickev3
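
# Usage (a minimal sketch, assuming mergekit is installed and the output
# directory name simply mirrors this repo; adjust paths and flags as needed):
#
#   pip install mergekit
#   mergekit-yaml mergekit_config.yml ./Qwen2.5-14B-BrocaV9 --cuda --lazy-unpickle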