base_model: Locutusque/llama-3-neural-chat-v1-8b
dtype: bfloat16
merge_method: dare_ties
parameters:
  int8_mask: 1.0
  normalize: 0.0
slices:
- sources:
  - layer_range: [0, 4]
    model: cognitivecomputations/dolphin-2.9-llama3-8b
    parameters:
      density: 1.0
      weight: 0.6
  - layer_range: [0, 4]
    model: Weyaxi/Einstein-v6.1-Llama3-8B
    parameters:
      density: 0.6
      weight: 0.5
  - layer_range: [0, 4]
    model: Locutusque/llama-3-neural-chat-v1-8b
    parameters:
      density: 1.0
      weight: 0.5
- sources:
  - layer_range: [4, 8]
    model: cognitivecomputations/dolphin-2.9-llama3-8b
    parameters:
      density: 0.8
      weight: 0.1
  - layer_range: [4, 8]
    model: Weyaxi/Einstein-v6.1-Llama3-8B
    parameters:
      density: 1.0
      weight: 0.2
  - layer_range: [4, 8]
    model: Locutusque/llama-3-neural-chat-v1-8b
    parameters:
      density: 1.0
      weight: 0.7
- sources:
  - layer_range: [8, 12]
    model: cognitivecomputations/dolphin-2.9-llama3-8b
    parameters:
      density: 0.7
      weight: 0.1
  - layer_range: [8, 12]
    model: Weyaxi/Einstein-v6.1-Llama3-8B
    parameters:
      density: 0.7
      weight: 0.2
  - layer_range: [8, 12]
    model: Locutusque/llama-3-neural-chat-v1-8b
    parameters:
      density: 0.7
      weight: 0.6
- sources:
  - layer_range: [12, 16]
    model: cognitivecomputations/dolphin-2.9-llama3-8b
    parameters:
      density: 0.9
      weight: 0.2
  - layer_range: [12, 16]
    model: Weyaxi/Einstein-v6.1-Llama3-8B
    parameters:
      density: 0.6
      weight: 0.6
  - layer_range: [12, 16]
    model: Locutusque/llama-3-neural-chat-v1-8b
    parameters:
      density: 0.7
      weight: 0.3
- sources:
  - layer_range: [16, 20]
    model: cognitivecomputations/dolphin-2.9-llama3-8b
    parameters:
      density: 1.0
      weight: 0.2
  - layer_range: [16, 20]
    model: Weyaxi/Einstein-v6.1-Llama3-8B
    parameters:
      density: 1.0
      weight: 0.2
  - layer_range: [16, 20]
    model: Locutusque/llama-3-neural-chat-v1-8b
    parameters:
      density: 0.9
      weight: 0.4
- sources:
  - layer_range: [20, 24]
    model: cognitivecomputations/dolphin-2.9-llama3-8b
    parameters:
      density: 0.7
      weight: 0.2
  - layer_range: [20, 24]
    model: Weyaxi/Einstein-v6.1-Llama3-8B
    parameters:
      density: 0.9
      weight: 0.3
  - layer_range: [20, 24]
    model: Locutusque/llama-3-neural-chat-v1-8b
    parameters:
      density: 1.0
      weight: 0.4
- sources:
  - layer_range: [24, 28]
    model: cognitivecomputations/dolphin-2.9-llama3-8b
    parameters:
      density: 1.0
      weight: 0.4
  - layer_range: [24, 28]
    model: Weyaxi/Einstein-v6.1-Llama3-8B
    parameters:
      density: 0.8
      weight: 0.2
  - layer_range: [24, 28]
    model: Locutusque/llama-3-neural-chat-v1-8b
    parameters:
      density: 0.9
      weight: 0.4
- sources:
  - layer_range: [28, 32]
    model: cognitivecomputations/dolphin-2.9-llama3-8b
    parameters:
      density: 1.0
      weight: 0.3
  - layer_range: [28, 32]
    model: Weyaxi/Einstein-v6.1-Llama3-8B
    parameters:
      density: 0.9
      weight: 0.2
  - layer_range: [28, 32]
    model: Locutusque/llama-3-neural-chat-v1-8b
    parameters:
      density: 1.0
      weight: 0.3