---
# Model-merge configuration (mergekit-style): SLERP-interpolate two
# Mistral-7B fine-tunes into a single merged model.
models:
  - model: NousResearch/Hermes-2-Pro-Mistral-7B
  - model: WizardLM/WizardMath-7B-V1.1

merge_method: slerp
base_model: NousResearch/Hermes-2-Pro-Mistral-7B
dtype: bfloat16

parameters:
  # Interpolation weight anchors across layer depth:
  # 0 = base_model weights, 1 = the other model; values in between blend.
  t: [0, 0.5, 1, 0.5, 0]

# NOTE(review): the train/quantization/output stanzas below are not part of
# the stock mergekit config schema — confirm the consuming tool reads them.
train:
  auto_train: true

quantization:
  enabled: true
  bits: 5
  method: "k-means"
  subquantizers: 256

output:
  format: gguf
  filename: entrenado.gguf