Upload folder using huggingface_hub
- 3.5bpw/.gitattributes +35 -0
- 3.5bpw/README.md +291 -0
- 3.5bpw/Readme_en.md +283 -0
- 3.5bpw/config.json +38 -0
- 3.5bpw/generation_config.json +6 -0
- 3.5bpw/model.safetensors.index.json +370 -0
- 3.5bpw/output.safetensors +3 -0
- 3.5bpw/special_tokens_map.json +34 -0
- 3.5bpw/tokenizer.json +0 -0
- 3.5bpw/tokenizer_config.json +0 -0
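
As the commit title says, the folder was pushed with `huggingface_hub`. A minimal sketch of how such an upload is typically done; the repo id below is a hypothetical placeholder:

```python
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default
api.upload_folder(
    folder_path="3.5bpw",    # local folder with the quantized model (assumption)
    path_in_repo="3.5bpw",   # keep the same layout in the repo
    repo_id="your-name/Vikhr-Nemo-12B-Instruct-R-21-09-24-exl2",  # hypothetical
    commit_message="Upload folder using huggingface_hub",
)
```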
3.5bpw/.gitattributes
ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
3.5bpw/README.md
ADDED
@@ -0,0 +1,291 @@
---
license: apache-2.0
datasets:
- Vikhrmodels/GrandMaster-PRO-MAX
- Vikhrmodels/Grounded-RAG-RU-v2
language:
- en
- ru
base_model:
- mistralai/Mistral-Nemo-Instruct-2407
library_name: transformers
---
[Readme in English](Readme_en.md)

## Vikhr-Nemo-12B-Instruct-R-21-09-24

### Description

**Vikhr-Nemo** is our flagship unimodal LLM (Large Language Model), an improved version of [mistralai/Mistral-Nemo-Instruct-2407](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407) built by the **VikhrModels** team and adapted primarily for Russian and English. Training involved several stages, including **SFT** and **SMPO**, our own variation of DPO; see the *"How This Model Was Created"* section for details.

The model is optimized for a wide range of use cases, including reasoning, summarization, code, role-play, and multi-turn dialogue. Vikhr-Nemo supports multilingual generation and offers high-performance RAG capabilities. It has the best scores among its peers on our instruction and RAG benchmarks, so we believe that on some tasks (for example, RAG) it can be on par with OpenAI's gpt-4o-mini.

All training code is available in our [effective_llm_alignment](https://github.com/VikhrModels/effective_llm_alignment/) repository on GitHub, and the main datasets are available in our [HF profile](https://huggingface.co/Vikhrmodels).

### Features
1. High-quality generation in Russian, English, and some other languages, thanks to the [Grandmaster-PRO-MAX](https://huggingface.co/datasets/Vikhrmodels/GrandMaster-PRO-MAX) dataset and the base model
2. Support for system prompts to regulate response style
3. Context of up to 128k tokens thanks to the base model
4. Grounded RAG mode - the model has a special documents role and a dedicated mode in which it finds the identifiers of the documents relevant to the user's question and uses them to answer; inspired by the analogous capability of Command-R

### Metrics and Quality Evaluation

The model was evaluated on our Russian-language open-source SbS benchmark [ru-arena-general](https://github.com/VikhrModels/ru_llm_arena) (50 topics with 10 questions each), where gpt-4-1106-preview acts as the judge, and on a [benchmark](https://colab.research.google.com/drive/16730rWQ4-yGqWoooLs0Ece_16frmOniP?usp=sharing) for RAG based on the test set of [Grounded-RAG-v2](https://huggingface.co/datasets/Vikhrmodels/Grounded-RAG-RU-v2), where the judge is gpt-4o.

#### Results on Ru-Arena-General

The reference answers that models are compared against come from gpt-3.5-turbo-0125, which is why it has a win rate of 50%.

Only part of the leaderboard is shown here; see the benchmark repository for details.

| Model Name | Winrate | 95% CI | Average # Tokens |
|--------------------------------------------------|--------|--------------------|------------------|
| gpt-4-1106-preview | 90.9 | (-1.3, 1.0) | 541 |
| gpt-4o-mini | 83.9 | (-1.8, 1.1) | 448 |
| **vikhr-nemo-12b-instruct-r-21-09-24** | **79.8** | (-2.2, 1.9) | **627** |
| gemma-2-9b-it-sppo-iter3 | 73.6 | (-1.6, 2.2) | 509 |
| gemma-2-9b-it | 69.2 | (-2.5, 1.9) | 459 |
| t-lite-instruct-0.1 | 64.7 | (-2.1, 1.7) | 810 |
| vikhr-llama3.1-8b-instruct-r-21-09-24 | 63.4 | (-2.1, 2.5) | 618 |
| suzume-llama-3-8B-multilingual-orpo-borda-half | 57.1 | (-1.9, 2.2) | 682 |
| mistral-nemo-instruct-2407 | 50.5 | (-2.7, 2.6) | 403 |
| gpt-3.5-turbo-0125 | 50.0 | (0.0, 0.0) | 220 |
| c4ai-command-r-v01 | 49.0 | (-1.7, 2.2) | 529 |
| meta-llama-3.1-8b-instruct | 43.1 | (-2.8, 2.3) | 628 |

#### RAG Benchmark Results

The total test set size is 200 examples: 100 in_domain questions and 100 out_of_domain.

For the quality evaluation, the judge model gpt-4o was instructed to weigh the relevance and factual completeness of the answers against the documents and the reference answer from gpt-4-1106-preview.

For prompt and scoring details, see the benchmark code in the [Colab notebook](https://colab.research.google.com/drive/16730rWQ4-yGqWoooLs0Ece_16frmOniP?usp=sharing)

in_domain - questions related, to one degree or another, to the content of the provided documents \
out_of_domain - questions deliberately unrelated to the content of the provided documents

<table>
<thead>
<tr>
<th rowspan="2">question_type</th>
<th colspan="3">gpt-4o</th>
</tr>
<tr>
<th>judge_correct_percent</th>
<th>avg_answer_match_rougeL</th>
<th>avg_abs_indexes_diff</th>
</tr>
</thead>
<tbody>
<tr>
<td>in_domain</td>
<td>73%</td>
<td>0.34</td>
<td>NaN</td>
</tr>
<tr>
<td>out_of_domain</td>
<td>81%</td>
<td>0.20</td>
<td>NaN</td>
</tr>
</tbody>
</table>

<table>
<thead>
<tr>
<th style="visibility: hidden;" rowspan="2">question_type</th>
<th colspan="3">Vikhr-Nemo-12B-Instruct-R-21-09-24</th>
</tr>
<tr>
<th style="visibility: hidden;">judge_correct_percent</th>
<th style="visibility: hidden;">avg_answer_match_rougeL</th>
<th style="visibility: hidden;">avg_abs_indexes_diff</th>
</tr>
</thead>
<tbody>
<tr>
<td>in_domain</td>
<td>68%</td>
<td>0.41</td>
<td>0</td>
</tr>
<tr>
<td>out_of_domain</td>
<td>92%</td>
<td>0.52</td>
<td>0</td>
</tr>
</tbody>
</table>

<table>
<thead>
<tr>
<th style="visibility: hidden;" rowspan="2">question_type</th>
<th colspan="3">gpt-4o-mini</th>
</tr>
<tr>
<th style="visibility: hidden;">judge_correct_percent</th>
<th style="visibility: hidden;">avg_answer_match_rougeL</th>
<th style="visibility: hidden;">avg_abs_indexes_diff</th>
</tr>
</thead>
<tbody>
<tr>
<td>in_domain</td>
<td>65%</td>
<td>0.33</td>
<td>NaN</td>
</tr>
<tr>
<td>out_of_domain</td>
<td>73%</td>
<td>0.18</td>
<td>NaN</td>
</tr>
</tbody>
</table>

<table>
<thead>
<tr>
<th style="visibility: hidden;" rowspan="2">question_type</th>
<th colspan="3">gpt-3.5-turbo-0125 </th>
</tr>
<tr>
<th style="visibility: hidden;">judge_correct_percent</th>
<th style="visibility: hidden;">avg_answer_match_rougeL</th>
<th style="visibility: hidden;">avg_abs_indexes_diff</th>
</tr>
</thead>
<tbody>
<tr>
<td>in_domain</td>
<td>49%</td>
<td>0.28</td>
<td>NaN</td>
</tr>
<tr>
<td>out_of_domain</td>
<td>76%</td>
<td>0.20</td>
<td>NaN</td>
</tr>
</tbody>
</table>

### How This Model Was Created

#### Instructional SFT Part

For the SFT training stage we prepared a large (150k instructions) synthetic instruction dataset, [Vikhrmodels/GrandMaster-PRO-MAX](https://huggingface.co/datasets/Vikhrmodels/GrandMaster-PRO-MAX). Its distinctive feature is the built-in CoT (Chain-Of-Thought), collected with a modified prompt for gpt-4-turbo; see the dataset card for details.

In addition, to enable RAG grounding, we prepared another synthetic dataset, [Vikhrmodels/Grounded-RAG-RU-v2](https://huggingface.co/datasets/Vikhrmodels/Grounded-RAG-RU-v2) (50k dialogues). Its construction pipeline is too complex for a short description; you can read more about it in its dataset card.

#### SMPO Alignment Stage

To further improve response quality we used the following pipeline:
1) Trained a custom Reward model (it will not be released publicly for now)
2) Deduplicated and filtered the original Vikhrmodels/GrandMaster-PRO-MAX dataset with the RM model, keeping roughly 10k of the highest-quality and most diverse dialogues.
3) Performed Rejection Sampling with the SFT checkpoint using that dataset and the Reward model. (We generated 7 hypotheses and took only the 2 worst as rejected)
4) Fine-tuned the SFT checkpoint with our SMPO method on the dataset from step 3. SMPO was designed and chosen as the method for improving the stability of preference training under Rejection Sampling conditions and for reaching the desired margin.

The implementation of SMPO, rejection sampling, etc. can be found in our [effective_llm_alignment](https://github.com/VikhrModels/effective_llm_alignment/) library on GitHub

The idea of using SMPO rather than another PO method arose from many experiments with classical methods and the need for better control over the convergence process. With careful tuning of other methods (e.g., SimPO) a similar result can be achieved, but we aimed to stabilize this process and combine the best practices from the other methods.

### How to Work with RAG

The documents role is a list of dictionaries describing document content, serialized with `json.dumps(array, ensure_ascii=False)` (see the example below). \
Document content may be given in **3** different formats: **Markdown**, **HTML**, **Plain Text**. The content of each document can be a text chunk of up to 4k characters.

```json
[
  {
    "doc_id": (0..5),
    "title": "(null or str)",
    "content": "(html or markdown or plain text)"
  }
]
```

#### Example of Correct Usage with an OpenAI-like API

Start the vLLM server: `vllm serve --dtype half --max-model-len 32000 -tp 1 Vikhrmodels/Vikhr-Nemo-12B-Instruct-R-21-09-24 --api-key token-abc123`

```python
import json

from openai import OpenAI

# Client for the vLLM server started above; localhost:8000 is vLLM's default address.
llm_client = OpenAI(base_url="http://localhost:8000/v1", api_key="token-abc123")
llm_model = "Vikhrmodels/Vikhr-Nemo-12B-Instruct-R-21-09-24"

GROUNDED_SYSTEM_PROMPT = "Your task is to answer the user's questions using only the information from the provided documents. Give two answers to each question: one with a list of relevant document identifiers and the second with the answer to the question itself, using documents with these identifiers."

documents = [
  {
    "doc_id": 0,
    "title": "Global Warming: Glaciers",
    "content": "Over the past 50 years, the volume of glaciers worldwide has decreased by 30%"
  },
  {
    "doc_id": 1,
    "title": "Global Warming: Sea Level",
    "content": "The global sea level has risen by 20 cm since 1880 and continues to rise by 3.3 mm per year"
  }
]
sample_history = [
    {'role': 'system', 'content': GROUNDED_SYSTEM_PROMPT},
    {'role': 'documents', 'content': json.dumps(documents, ensure_ascii=False)},
    {'role': 'user', 'content': 'Global warming'}
]
relevant_indexes = llm_client.chat.completions.create(
    model=llm_model,
    messages=sample_history,
    temperature=0.0,
    max_tokens=2048
).choices[0].message.content

print('Using documents: ' + relevant_indexes + '\n----')
final_answer = llm_client.chat.completions.create(
    model=llm_model,
    messages=sample_history + [{'role': 'assistant', 'content': relevant_indexes}],
    temperature=0.3,
    max_tokens=2048
).choices[0].message.content

print(final_answer)
```

The response after running the code will look roughly like this:

Using documents: {"relevant_doc_ids": [0, 1]}
----

Global warming is a long-term increase in the average temperature of the Earth's atmosphere and oceans. This phenomenon has numerous consequences for our planet, and two key aspects based on the data available to me are:

1. **Reduction of glacier volume**: Over the past 50 years, there has been a significant reduction in glacier volume worldwide. According to the data, glacier volume has decreased by 30%. This may be related to glacier melting due to rising temperatures, which is one of the signs of global warming.

2. **Sea level rise**: The global sea level is also rising, which is associated with the melting of glaciers and ice sheets, as well as the expansion of water as it warms. Since 1880, the sea level has risen by 20 centimeters, and this process continues, with an annual increase of 3.3 millimeters.

These changes have serious consequences for ecosystems, climate, and human society. Glacier melting leads to sea-level rise, which can result in the flooding of coastal areas and islands, as well as changes in water resources and climate patterns.

Using the model's first response, `relevant_indexes` (JSON), you can tell whether the model found information in the documents: it is trained to return an empty array if there is none, and in that case it will answer that it could not find the information in the knowledge base (when generating the second response).
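For example, a minimal sketch of checking that first response before requesting the second one (the `relevant_doc_ids` key is taken from the sample output above):

```python
import json

doc_ids = json.loads(relevant_indexes)["relevant_doc_ids"]
if not doc_ids:
    # The model found nothing relevant; the final answer will say so.
    print("No relevant documents found")
else:
    print(f"The answer is grounded in documents: {doc_ids}")
```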
### Nuances and Limitations
- The model has a **low level of response safety** and is aimed at correctly and fully following instructions; keep this in mind when using it and test it yourself. This is partially mitigated by system prompts and by extra notes in the user prompt about the importance of safety.
- System prompts are not meant for describing characters; we recommend using them to specify the response style (like "answer only in json format"). Besides, it is preferable to write them **in English**, as that is how they appeared in the dataset; using English in system prompts does not affect the language of the answer.
- RAG mode **requires** the system prompt `GROUNDED_SYSTEM_PROMPT` described in the *How to Work with RAG* section. Also, the model may sometimes add general information from its own knowledge to the answer alongside what is in the documents.
- The model is best used with a low temperature (0.1-0.5) together with top_k (30-50); at a temperature of 1.0, random generation defects were observed.

### Authors
- Sergei Bratchikov, [NLP Wanderer](https://t.me/nlpwanderer), Vikhr Team
- Konstantin Korolev, Vikhr Team
- Aleksandr Nikolich, Vikhr Team

### Cite
```
@inproceedings{nikolich2024vikhr,
  title={Vikhr: Constructing a State-of-the-art Bilingual Open-Source Instruction-Following Large Language Model for Russian},
  author={Aleksandr Nikolich and Konstantin Korolev and Sergei Bratchikov and Igor Kiselev and Artem Shelmanov},
  booktitle={Proceedings of the 4th Multilingual Representation Learning (MRL) Workshop @ EMNLP-2024},
  year={2024},
  organization={Association for Computational Linguistics}
}
```
3.5bpw/Readme_en.md
ADDED
@@ -0,0 +1,283 @@
## Vikhr-Nemo-12B-Instruct-R-21-09-24

### Description

**Vikhr-Nemo** is our flagship unimodal LLM (Large Language Model), representing an improved version of [mistralai/Mistral-Nemo-Instruct-2407](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407) developed by the **VikhrModels** team, primarily adapted for Russian and English languages. The training involved several stages, including **SFT** and **SMPO** - our custom variant of DPO, details of which are available in the *"How This Model Was Created"* section.

The model is optimized for a wide range of use cases, including reasoning, summarization, coding, role-playing, and dialogue maintenance. Vikhr-Nemo offers multilingual generation and high-performance RAG capabilities. It achieves top scores on our instruction and RAG benchmarks, and we believe that for certain tasks (e.g., RAG) it can rival OpenAI's gpt-4o-mini.

All training code is available in our [effective_llm_alignment](https://github.com/VikhrModels/effective_llm_alignment/) repository on GitHub, and the main datasets are available on our [HF profile](https://huggingface.co/Vikhrmodels).

### Features
1. High-quality generation in Russian, English, and several other languages, thanks to the [Grandmaster-PRO-MAX](https://huggingface.co/datasets/Vikhrmodels/GrandMaster-PRO-MAX) dataset and the base model.
2. Support for system prompts to regulate response styles.
3. Up to 128k token context support thanks to the base model.
4. Grounded RAG mode - the model features a special 'documents' role and a mode for identifying the document IDs relevant to the user's query and using them for responses, inspired by Command-R's similar capability.

### Metrics and Quality Evaluation

The model was evaluated on our open-source Russian-language SbS benchmark [ru-arena-general](https://github.com/VikhrModels/ru_llm_arena) (50 topics with 10 questions each), where gpt-4-1106-preview acted as the judge, and the [RAG benchmark](https://colab.research.google.com/drive/16730rWQ4-yGqWoooLs0Ece_16frmOniP?usp=sharing) based on the [Grounded-RAG-v2](https://huggingface.co/datasets/Vikhrmodels/Grounded-RAG-RU-v2) test set, judged by gpt-4o.

#### Results on Ru-Arena-General

The reference answers, to which models are compared, were generated by gpt-3.5-turbo-0125, hence it has a win rate of 50%.

Only part of the leaderboard is shown here; for more details, check the benchmark repository.

| Model Name | Winrate | 95% CI | Average # Tokens |
|--------------------------------------------------|--------|--------------------|------------------|
| gpt-4-1106-preview | 90.9 | (-1.3, 1.0) | 541 |
| gpt-4o-mini | 83.9 | (-1.8, 1.1) | 448 |
| **vikhr-nemo-12b-instruct-r-21-09-24** | **79.8** | (-2.2, 1.9) | **627** |
| gemma-2-9b-it-sppo-iter3 | 73.6 | (-1.6, 2.2) | 509 |
| gemma-2-9b-it | 69.2 | (-2.5, 1.9) | 459 |
| t-lite-instruct-0.1 | 64.7 | (-2.1, 1.7) | 810 |
| vikhr-llama3.1-8b-instruct-r-21-09-24 | 63.4 | (-2.1, 2.5) | 618 |
| suzume-llama-3-8B-multilingual-orpo-borda-half | 57.1 | (-1.9, 2.2) | 682 |
| mistral-nemo-instruct-2407 | 50.5 | (-2.7, 2.6) | 403 |
| gpt-3.5-turbo-0125 | 50.0 | (0.0, 0.0) | 220 |
| c4ai-command-r-v01 | 49.0 | (-1.7, 2.2) | 529 |
| meta-llama-3.1-8b-instruct | 43.1 | (-2.8, 2.3) | 628 |

#### RAG Benchmark Results

The test set comprises 200 examples: 100 in-domain questions and 100 out-of-domain questions.

For evaluation, the judge model gpt-4o was instructed to consider relevance and factual completeness based on the documents and the reference answer from gpt-4-1106-preview.

For prompt details and evaluations, refer to the [Colab notebook](https://colab.research.google.com/drive/16730rWQ4-yGqWoooLs0Ece_16frmOniP?usp=sharing).

**In-Domain**: Questions related to the provided documents.
**Out-of-Domain**: Questions deliberately unrelated to the provided documents.

<table>
<thead>
<tr>
<th rowspan="2">question_type</th>
<th colspan="3">gpt-4o</th>
</tr>
<tr>
<th>judge_correct_percent</th>
<th>avg_answer_match_rougeL</th>
<th>avg_abs_indexes_diff</th>
</tr>
</thead>
<tbody>
<tr>
<td>in_domain</td>
<td>73%</td>
<td>0.34</td>
<td>NaN</td>
</tr>
<tr>
<td>out_of_domain</td>
<td>81%</td>
<td>0.20</td>
<td>NaN</td>
</tr>
</tbody>
</table>

<table>
<thead>
<tr>
<th style="visibility: hidden;" rowspan="2">question_type</th>
<th colspan="3">Vikhr-Nemo-12B-Instruct-R-21-09-24</th>
</tr>
<tr>
<th style="visibility: hidden;">judge_correct_percent</th>
<th style="visibility: hidden;">avg_answer_match_rougeL</th>
<th style="visibility: hidden;">avg_abs_indexes_diff</th>
</tr>
</thead>
<tbody>
<tr>
<td>in_domain</td>
<td>68%</td>
<td>0.41</td>
<td>0</td>
</tr>
<tr>
<td>out_of_domain</td>
<td>92%</td>
<td>0.52</td>
<td>0</td>
</tr>
</tbody>
</table>

<table>
<thead>
<tr>
<th style="visibility: hidden;" rowspan="2">question_type</th>
<th colspan="3">gpt-4o-mini</th>
</tr>
<tr>
<th style="visibility: hidden;">judge_correct_percent</th>
<th style="visibility: hidden;">avg_answer_match_rougeL</th>
<th style="visibility: hidden;">avg_abs_indexes_diff</th>
</tr>
</thead>
<tbody>
<tr>
<td>in_domain</td>
<td>65%</td>
<td>0.33</td>
<td>NaN</td>
</tr>
<tr>
<td>out_of_domain</td>
<td>73%</td>
<td>0.18</td>
<td>NaN</td>
</tr>
</tbody>
</table>

<table>
<thead>
<tr>
<th style="visibility: hidden;" rowspan="2">question_type</th>
<th colspan="3">gpt-3.5-turbo-0125 </th>
</tr>
<tr>
<th style="visibility: hidden;">judge_correct_percent</th>
<th style="visibility: hidden;">avg_answer_match_rougeL</th>
<th style="visibility: hidden;">avg_abs_indexes_diff</th>
</tr>
</thead>
<tbody>
<tr>
<td>in_domain</td>
<td>49%</td>
<td>0.28</td>
<td>NaN</td>
</tr>
<tr>
<td>out_of_domain</td>
<td>76%</td>
<td>0.20</td>
<td>NaN</td>
</tr>
</tbody>
</table>

### How This Model Was Created

#### Instructional SFT Part

For the SFT stage of model training, we prepared a large (150k instructions) synthetic instructional dataset, [Vikhrmodels/GrandMaster-PRO-MAX](https://huggingface.co/datasets/Vikhrmodels/GrandMaster-PRO-MAX). Its unique feature is the built-in Chain-Of-Thought (CoT), which we collected using a modified prompt for gpt-4-turbo. For more details, please refer to the dataset card.

Additionally, to perform RAG Grounding, we prepared another synthetic dataset - [Vikhrmodels/Grounded-RAG-RU-v2](https://huggingface.co/datasets/Vikhrmodels/Grounded-RAG-RU-v2) (50k dialogues). The pipeline for its construction is quite complex, so you can find more information in the dataset card.

#### SMPO Alignment Stage

To further improve the quality of responses, we used the following pipeline:
1) Trained a custom Reward model (it will not be released publicly for now).
2) Deduplicated and filtered the original Vikhrmodels/GrandMaster-PRO-MAX dataset using the Reward model, resulting in around 10k of the highest-quality and most diverse dialogues.
3) Performed Rejection Sampling with the SFT checkpoint using the resulting dataset and Reward model. We generated 7 hypotheses and selected the 2 worst ones as rejected (a simplified sketch of this selection follows below).
4) Fine-tuned the SFT checkpoint using our SMPO method with the dataset obtained from step 3. SMPO was designed and chosen as the method to enhance the stability of preference training under Rejection Sampling conditions and to achieve the desired margin.

The implementation of SMPO, rejection sampling, etc., can be found in our [effective_llm_alignment](https://github.com/VikhrModels/effective_llm_alignment/) library on GitHub.
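The card does not pin down how the chosen side of each preference pair is picked, so the following is only an illustration of the step-3 sampling arithmetic; `generate` and `score` are hypothetical helpers, not the library's actual API:

```python
# Illustrative Rejection Sampling step (hypothetical interfaces):
# sample 7 completions per prompt, rank them with the reward model,
# and keep the 2 worst as the rejected side of preference pairs.
def build_preference_pairs(prompt, sft_model, reward_model, n_hypotheses=7):
    hypotheses = [sft_model.generate(prompt) for _ in range(n_hypotheses)]
    ranked = sorted(hypotheses, key=lambda h: reward_model.score(prompt, h))
    rejected = ranked[:2]   # the 2 lowest-reward completions
    chosen = ranked[-1]     # assumption: top-scoring completion as chosen
    return [(prompt, chosen, r) for r in rejected]
```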
The idea of using SMPO over other PO methods arose from numerous experiments with classical methods and the need for better convergence control. While other methods (e.g., SimPO) can achieve similar results with careful tuning, we aimed to stabilize the process and combine the best practices from other methods.

### How to Work with RAG

The "documents" role is a list of dictionaries describing document content, serialized with `json.dumps(array, ensure_ascii=False)` (see example below). \
The document content can be presented in **3** different formats: **Markdown**, **HTML**, **Plain Text**. The content of each document can be a chunk of text up to 4k characters long.

```json
[
  {
    "doc_id": (0..5),
    "title": "(null or str)",
    "content": "(html or markdown or plain text)"
  }
]
```

#### Example of Correct Usage with an OpenAI-like API

Launching the vLLM server: `vllm serve --dtype half --max-model-len 32000 -tp 1 Vikhrmodels/Vikhr-Nemo-12B-Instruct-R-21-09-24 --api-key token-abc123`

```python
import json

from openai import OpenAI

# Client for the vLLM server launched above; localhost:8000 is vLLM's default address.
llm_client = OpenAI(base_url="http://localhost:8000/v1", api_key="token-abc123")
llm_model = "Vikhrmodels/Vikhr-Nemo-12B-Instruct-R-21-09-24"

GROUNDED_SYSTEM_PROMPT = "Your task is to answer the user's questions using only the information from the provided documents. Give two answers to each question: one with a list of relevant document identifiers and the second with the answer to the question itself, using documents with these identifiers."

documents = [
  {
    "doc_id": 0,
    "title": "Global Warming: Glaciers",
    "content": "Over the past 50 years, the volume of glaciers worldwide has decreased by 30%."
  },
  {
    "doc_id": 1,
    "title": "Global Warming: Sea Level",
    "content": "The global sea level has risen by 20 cm since 1880 and continues to rise by 3.3 mm per year."
  }
]
sample_history = [
    {'role': 'system', 'content': GROUNDED_SYSTEM_PROMPT},
    {'role': 'documents', 'content': json.dumps(documents, ensure_ascii=False)},
    {'role': 'user', 'content': 'Global warming'}
]
relevant_indexes = llm_client.chat.completions.create(
    model=llm_model,
    messages=sample_history,
    temperature=0.0,
    max_tokens=2048
).choices[0].message.content

print('Using documents: ' + relevant_indexes + '\n----')
final_answer = llm_client.chat.completions.create(
    model=llm_model,
    messages=sample_history + [{'role': 'assistant', 'content': relevant_indexes}],
    temperature=0.3,
    max_tokens=2048
).choices[0].message.content

print(final_answer)
```

The response after running the code might look like this:

Using documents: {"relevant_doc_ids": [0, 1]}
----

Global warming is a long-term increase in the Earth's atmosphere and ocean temperatures. This phenomenon has numerous consequences for our planet, and two key aspects based on the data available to me are:

1. **Reduction of glacier volume**: Over the past 50 years, there has been a significant reduction in glacier volume worldwide. According to the data, glacier volume has decreased by 30%. This could be related to glacier melting due to rising temperatures, which is one of the signs of global warming.

2. **Sea level rise**: The global sea level is also rising, which is associated with the melting of glaciers and ice sheets, as well as the expansion of water as it warms. Since 1880, the sea level has risen by 20 centimeters, and this process continues, with an annual increase of 3.3 millimeters.

These changes have serious consequences for ecosystems, climate, and human society. Glacier melting leads to sea-level rise, which can result in the flooding of coastal areas and islands, as well as changes in water resources and climate patterns.

Using the model's first response, `relevant_indexes` (JSON), one can determine whether the model found information in the documents. The model is trained to return an empty array if no information is found, in which case it will state that it couldn't find the information in the knowledge base (when generating the second response).

### Nuances and Limitations
- The model has a **low level of response safety** and is focused on correctly and fully executing instructions. Keep this in mind during usage and test it independently. This can be partially corrected with system prompts and additional user prompt guidance about the importance of safety.
- System prompts are not intended for character descriptions; we recommend using them to specify the response style (e.g., "answer only in JSON format"). Additionally, it's preferable to write them **in English**, as this was the case in the dataset; using English in system prompts does not affect the response language.
- The RAG mode **requires the presence** of the system prompt `GROUNDED_SYSTEM_PROMPT` described in the "How to Work with RAG" section. The model may sometimes add general knowledge information to the response along with the information present in the documents.
- The model works best with low temperatures (0.1-0.5) and top_k (30-50). At a temperature of 1.0, random generation defects were observed.

### Authors
- Sergei Bratchikov, [NLP Wanderer](https://t.me/nlpwanderer), Vikhr Team
- Konstantin Korolev, Vikhr Team
- Aleksandr Nikolich, Vikhr Team

### Cite
```
@inproceedings{nikolich2024vikhr,
  title={Vikhr: Constructing a State-of-the-art Bilingual Open-Source Instruction-Following Large Language Model for Russian},
  author={Aleksandr Nikolich and Konstantin Korolev and Sergei Bratchikov and Igor Kiselev and Artem Shelmanov},
  booktitle={Proceedings of the 4th Multilingual Representation Learning (MRL) Workshop @ EMNLP-2024},
  year={2024},
  organization={Association for Computational Linguistics}
}
```
3.5bpw/config.json
ADDED
@@ -0,0 +1,38 @@
{
    "_name_or_path": "Vikhrmodels/Vikhr-Nemo-12B-Instruct-R-05-09-24",
    "architectures": [
        "MistralForCausalLM"
    ],
    "attention_dropout": 0.0,
    "bos_token_id": 1,
    "eos_token_id": 2,
    "head_dim": 128,
    "hidden_act": "silu",
    "hidden_size": 5120,
    "initializer_range": 0.02,
    "intermediate_size": 14336,
    "max_position_embeddings": 1024000,
    "model_type": "mistral",
    "num_attention_heads": 32,
    "num_hidden_layers": 40,
    "num_key_value_heads": 8,
    "rms_norm_eps": 1e-05,
    "rope_theta": 1000000.0,
    "sliding_window": null,
    "tie_word_embeddings": false,
    "torch_dtype": "bfloat16",
    "transformers_version": "4.44.2",
    "use_cache": true,
    "vocab_size": 131074,
    "quantization_config": {
        "quant_method": "exl2",
        "version": "0.2.3",
        "bits": 3.5,
        "head_bits": 8,
        "calibration": {
            "rows": 100,
            "length": 512,
            "dataset": "rulm"
        }
    }
}
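The `quantization_config` block above marks this folder as an ExLlamaV2 (exl2) quant at 3.5 bits per weight, so it is loaded with the exllamav2 runtime rather than plain transformers. A rough loading sketch, assuming a local copy of this folder; the class names follow exllamav2's published examples and may differ slightly between versions:

```python
from exllamav2 import ExLlamaV2, ExLlamaV2Cache, ExLlamaV2Config, ExLlamaV2Tokenizer

config = ExLlamaV2Config()
config.model_dir = "3.5bpw"   # local path to this quantized folder (assumption)
config.prepare()

model = ExLlamaV2(config)
cache = ExLlamaV2Cache(model, lazy=True)
model.load_autosplit(cache)   # split the weights across available GPUs
tokenizer = ExLlamaV2Tokenizer(config)
```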
3.5bpw/generation_config.json
ADDED
@@ -0,0 +1,6 @@
{
    "_from_model_config": true,
    "bos_token_id": 1,
    "eos_token_id": 2,
    "transformers_version": "4.44.2"
}
3.5bpw/model.safetensors.index.json
ADDED
@@ -0,0 +1,370 @@
{
  "metadata": {
    "total_size": 24495605760
  },
  "weight_map": {
    "lm_head.weight": "model-00005-of-00005.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00005.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00005.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00005.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00002-of-00005.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00002-of-00005.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00002-of-00005.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00002-of-00005.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00002-of-00005.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00001-of-00005.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.20.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.21.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.24.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.25.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.25.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.25.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.25.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.25.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.26.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.26.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.26.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.26.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.26.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.27.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.28.input_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.28.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.28.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.28.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.28.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.28.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.28.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.28.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.29.input_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.29.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.29.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.29.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.29.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.29.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.29.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00001-of-00005.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
    "model.layers.30.input_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.30.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.30.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.30.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.30.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.30.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.30.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.30.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.31.input_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.31.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.31.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
    "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
    "model.layers.31.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
|
239 |
+
"model.layers.31.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
|
240 |
+
"model.layers.31.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
|
241 |
+
"model.layers.31.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
|
242 |
+
"model.layers.32.input_layernorm.weight": "model-00004-of-00005.safetensors",
|
243 |
+
"model.layers.32.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
|
244 |
+
"model.layers.32.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
|
245 |
+
"model.layers.32.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
|
246 |
+
"model.layers.32.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
|
247 |
+
"model.layers.32.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
|
248 |
+
"model.layers.32.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
|
249 |
+
"model.layers.32.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
|
250 |
+
"model.layers.32.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
|
251 |
+
"model.layers.33.input_layernorm.weight": "model-00005-of-00005.safetensors",
|
252 |
+
"model.layers.33.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
|
253 |
+
"model.layers.33.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
|
254 |
+
"model.layers.33.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
|
255 |
+
"model.layers.33.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
|
256 |
+
"model.layers.33.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
|
257 |
+
"model.layers.33.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
|
258 |
+
"model.layers.33.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
|
259 |
+
"model.layers.33.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
|
260 |
+
"model.layers.34.input_layernorm.weight": "model-00005-of-00005.safetensors",
|
261 |
+
"model.layers.34.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
|
262 |
+
"model.layers.34.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
|
263 |
+
"model.layers.34.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
|
264 |
+
"model.layers.34.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
|
265 |
+
"model.layers.34.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
|
266 |
+
"model.layers.34.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
|
267 |
+
"model.layers.34.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
|
268 |
+
"model.layers.34.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
|
269 |
+
"model.layers.35.input_layernorm.weight": "model-00005-of-00005.safetensors",
|
270 |
+
"model.layers.35.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
|
271 |
+
"model.layers.35.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
|
272 |
+
"model.layers.35.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
|
273 |
+
"model.layers.35.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
|
274 |
+
"model.layers.35.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
|
275 |
+
"model.layers.35.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
|
276 |
+
"model.layers.35.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
|
277 |
+
"model.layers.35.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
|
278 |
+
"model.layers.36.input_layernorm.weight": "model-00005-of-00005.safetensors",
|
279 |
+
"model.layers.36.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
|
280 |
+
"model.layers.36.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
|
281 |
+
"model.layers.36.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
|
282 |
+
"model.layers.36.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
|
283 |
+
"model.layers.36.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
|
284 |
+
"model.layers.36.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
|
285 |
+
"model.layers.36.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
|
286 |
+
"model.layers.36.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
|
287 |
+
"model.layers.37.input_layernorm.weight": "model-00005-of-00005.safetensors",
|
288 |
+
"model.layers.37.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
|
289 |
+
"model.layers.37.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
|
290 |
+
"model.layers.37.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
|
291 |
+
"model.layers.37.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
|
292 |
+
"model.layers.37.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
|
293 |
+
"model.layers.37.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
|
294 |
+
"model.layers.37.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
|
295 |
+
"model.layers.37.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
|
296 |
+
"model.layers.38.input_layernorm.weight": "model-00005-of-00005.safetensors",
|
297 |
+
"model.layers.38.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
|
298 |
+
"model.layers.38.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
|
299 |
+
"model.layers.38.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
|
300 |
+
"model.layers.38.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
|
301 |
+
"model.layers.38.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
|
302 |
+
"model.layers.38.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
|
303 |
+
"model.layers.38.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
|
304 |
+
"model.layers.38.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
|
305 |
+
"model.layers.39.input_layernorm.weight": "model-00005-of-00005.safetensors",
|
306 |
+
"model.layers.39.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
|
307 |
+
"model.layers.39.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
|
308 |
+
"model.layers.39.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
|
309 |
+
"model.layers.39.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
|
310 |
+
"model.layers.39.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
|
311 |
+
"model.layers.39.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
|
312 |
+
"model.layers.39.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
|
313 |
+
"model.layers.39.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
|
314 |
+
"model.layers.4.input_layernorm.weight": "model-00001-of-00005.safetensors",
|
315 |
+
"model.layers.4.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
|
316 |
+
"model.layers.4.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
|
317 |
+
"model.layers.4.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
|
318 |
+
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
|
319 |
+
"model.layers.4.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
|
320 |
+
"model.layers.4.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
|
321 |
+
"model.layers.4.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
|
322 |
+
"model.layers.4.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
|
323 |
+
"model.layers.5.input_layernorm.weight": "model-00001-of-00005.safetensors",
|
324 |
+
"model.layers.5.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
|
325 |
+
"model.layers.5.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
|
326 |
+
"model.layers.5.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
|
327 |
+
"model.layers.5.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
|
328 |
+
"model.layers.5.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
|
329 |
+
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
|
330 |
+
"model.layers.5.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
|
331 |
+
"model.layers.5.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
|
332 |
+
"model.layers.6.input_layernorm.weight": "model-00002-of-00005.safetensors",
|
333 |
+
"model.layers.6.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
|
334 |
+
"model.layers.6.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
|
335 |
+
"model.layers.6.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
|
336 |
+
"model.layers.6.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
|
337 |
+
"model.layers.6.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
|
338 |
+
"model.layers.6.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
|
339 |
+
"model.layers.6.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
|
340 |
+
"model.layers.6.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
|
341 |
+
"model.layers.7.input_layernorm.weight": "model-00002-of-00005.safetensors",
|
342 |
+
"model.layers.7.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
|
343 |
+
"model.layers.7.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
|
344 |
+
"model.layers.7.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
|
345 |
+
"model.layers.7.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
|
346 |
+
"model.layers.7.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
|
347 |
+
"model.layers.7.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
|
348 |
+
"model.layers.7.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
|
349 |
+
"model.layers.7.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
|
350 |
+
"model.layers.8.input_layernorm.weight": "model-00002-of-00005.safetensors",
|
351 |
+
"model.layers.8.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
|
352 |
+
"model.layers.8.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
|
353 |
+
"model.layers.8.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
|
354 |
+
"model.layers.8.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
|
355 |
+
"model.layers.8.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
|
356 |
+
"model.layers.8.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
|
357 |
+
"model.layers.8.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
|
358 |
+
"model.layers.8.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
|
359 |
+
"model.layers.9.input_layernorm.weight": "model-00002-of-00005.safetensors",
|
360 |
+
"model.layers.9.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
|
361 |
+
"model.layers.9.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
|
362 |
+
"model.layers.9.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
|
363 |
+
"model.layers.9.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
|
364 |
+
"model.layers.9.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
|
365 |
+
"model.layers.9.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
|
366 |
+
"model.layers.9.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
|
367 |
+
"model.layers.9.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
|
368 |
+
"model.norm.weight": "model-00005-of-00005.safetensors"
|
369 |
+
}
|
370 |
+
}
|
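The `weight_map` above is the standard Hugging Face sharded-checkpoint index: each parameter name is keyed to the shard file that stores it, so a loader can open only the shards it needs. A minimal sketch of the lookup, assuming a local copy of the original sharded checkpoint (note that this upload itself ships only the quantized `output.safetensors`, so the `model-0000N-of-00005.safetensors` shards named here are not part of it):

```python
# Minimal sketch: resolve one tensor through the weight_map, then read it
# from its shard with safetensors. Local paths are assumptions.
import json
from safetensors import safe_open

with open("3.5bpw/model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

name = "model.layers.24.mlp.gate_proj.weight"
shard = weight_map[name]  # "model-00003-of-00005.safetensors" per the map above
with safe_open(f"3.5bpw/{shard}", framework="pt", device="cpu") as shard_file:
    tensor = shard_file.get_tensor(name)
print(name, tuple(tensor.shape))
```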
3.5bpw/output.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a078d015ae5026abad7d6dba9fb5582338f0a139d4e9366ed18e81c769b3b355
+size 6787820356
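These three lines are a Git LFS pointer, not the weights themselves: `oid` is the SHA-256 of the actual ~6.8 GB file, which `git lfs` fetches on checkout. A quick integrity check against the pointer, assuming the real file has already been downloaded to the path shown:

```python
# Verify a downloaded output.safetensors against the LFS pointer above.
# The local path is an assumption; oid and size come from the pointer.
import hashlib

EXPECTED_OID = "a078d015ae5026abad7d6dba9fb5582338f0a139d4e9366ed18e81c769b3b355"
EXPECTED_SIZE = 6787820356

sha = hashlib.sha256()
size = 0
with open("3.5bpw/output.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        sha.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size}"
assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
```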
3.5bpw/special_tokens_map.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "additional_special_tokens": [
+    "<|start_header_id|>",
+    "<|end_header_id|>"
+  ],
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
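`special_tokens_map.json` declares the tokenizer's control tokens: BOS/EOS/PAD/UNK plus the two header markers as additional special tokens. When the folder is loaded with transformers, these surface directly as tokenizer attributes; a sketch, assuming a local copy of the folder:

```python
# Sketch: transformers picks up special_tokens_map.json automatically when
# loading from a directory; the attributes below mirror the JSON above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("3.5bpw")  # assumed local directory
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
# expected: <s> </s> <pad> <unk>
print(tok.additional_special_tokens)
# expected: ['<|start_header_id|>', '<|end_header_id|>']
```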
3.5bpw/tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
3.5bpw/tokenizer_config.json
ADDED
The diff for this file is too large to render.
See raw diff
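Since the commit was made with huggingface_hub, the same library can pull just this folder back down. A sketch with a placeholder repo id (the real id is not shown in this diff):

```python
# Sketch: fetch only the 3.5bpw folder added in this commit.
# The repo_id below is a hypothetical placeholder, not the real repository.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="user/model-exl2",      # hypothetical repo id
    allow_patterns=["3.5bpw/*"],    # restrict download to this folder
)
print(local_dir)
```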