```yaml
merge_method: task_arithmetic
base_model: meta-llama/Llama-3.1-70B
models:
  - model: tokyotech-llm/Llama-3.1-Swallow-70B-v0.1
    parameters:
      weight: 1.0
  - model: meta-llama/Llama-3.3-70B-Instruct
    parameters:
      weight: 0.998
dtype: bfloat16
name: Llama-3.3-FakeSwallow-70B-Instruct-v0.1
```
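For reference, `task_arithmetic` computes a task vector for each listed model as its difference from `base_model`, then adds the weighted sum of those vectors back onto the base. The sketch below illustrates that per-tensor arithmetic with the weights from the config above; the tensors are small placeholders, not the actual 70B weights, and this is not mergekit's own implementation.

```python
import torch

# Placeholder tensors standing in for one weight matrix of each model.
base = torch.randn(4, 4, dtype=torch.bfloat16)        # meta-llama/Llama-3.1-70B
swallow = base + torch.randn(4, 4).to(torch.bfloat16) * 0.1   # Llama-3.1-Swallow-70B-v0.1
instruct = base + torch.randn(4, 4).to(torch.bfloat16) * 0.1  # Llama-3.3-70B-Instruct

# merged = base + sum_i weight_i * (model_i - base)
merged = base + 1.0 * (swallow - base) + 0.998 * (instruct - base)
print(merged.dtype, merged.shape)
```

With mergekit installed, a config like this is typically executed with something along the lines of `mergekit-yaml config.yml ./output-dir`; consult the mergekit documentation for the exact invocation and options.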