morriszms committed
Commit: afbe2c5
1 Parent(s): 2458655

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,18 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q6_K/calme-2.4-llama3-70b-Q6_K-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q6_K/calme-2.4-llama3-70b-Q6_K-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q8_0/calme-2.4-llama3-70b-Q8_0-00001-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q8_0/calme-2.4-llama3-70b-Q8_0-00002-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
+ calme-2.4-llama3-70b-Q8_0/calme-2.4-llama3-70b-Q8_0-00003-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,295 @@
---
language:
- en
license: llama3
library_name: transformers
tags:
- axolotl
- finetune
- dpo
- facebook
- meta
- pytorch
- llama
- llama-3
- chatml
- TensorBlock
- GGUF
base_model: MaziyarPanahi/calme-2.4-llama3-70b
datasets:
- argilla/ultrafeedback-binarized-preferences
pipeline_tag: text-generation
license_name: llama3
license_link: LICENSE
inference: false
model_creator: MaziyarPanahi
quantized_by: MaziyarPanahi
model-index:
- name: calme-2.4-llama3-70b
  results:
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: AI2 Reasoning Challenge (25-Shot)
      type: ai2_arc
      config: ARC-Challenge
      split: test
      args:
        num_few_shot: 25
    metrics:
    - type: acc_norm
      value: 72.61
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-llama3-70b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: HellaSwag (10-Shot)
      type: hellaswag
      split: validation
      args:
        num_few_shot: 10
    metrics:
    - type: acc_norm
      value: 86.03
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-llama3-70b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MMLU (5-Shot)
      type: cais/mmlu
      config: all
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 80.5
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-llama3-70b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: TruthfulQA (0-shot)
      type: truthful_qa
      config: multiple_choice
      split: validation
      args:
        num_few_shot: 0
    metrics:
    - type: mc2
      value: 63.26
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-llama3-70b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: Winogrande (5-shot)
      type: winogrande
      config: winogrande_xl
      split: validation
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 83.58
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-llama3-70b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: GSM8k (5-shot)
      type: gsm8k
      config: main
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 87.34
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-llama3-70b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: IFEval (0-Shot)
      type: HuggingFaceH4/ifeval
      args:
        num_few_shot: 0
    metrics:
    - type: inst_level_strict_acc and prompt_level_strict_acc
      value: 50.27
      name: strict accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-llama3-70b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: BBH (3-Shot)
      type: BBH
      args:
        num_few_shot: 3
    metrics:
    - type: acc_norm
      value: 48.4
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-llama3-70b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MATH Lvl 5 (4-Shot)
      type: hendrycks/competition_math
      args:
        num_few_shot: 4
    metrics:
    - type: exact_match
      value: 22.66
      name: exact match
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-llama3-70b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: GPQA (0-shot)
      type: Idavidrein/gpqa
      args:
        num_few_shot: 0
    metrics:
    - type: acc_norm
      value: 11.97
      name: acc_norm
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-llama3-70b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MuSR (0-shot)
      type: TAUR-Lab/MuSR
      args:
        num_few_shot: 0
    metrics:
    - type: acc_norm
      value: 13.1
      name: acc_norm
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-llama3-70b
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MMLU-PRO (5-shot)
      type: TIGER-Lab/MMLU-Pro
      config: main
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 46.71
      name: accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=MaziyarPanahi/calme-2.4-llama3-70b
      name: Open LLM Leaderboard
---

<div style="width: auto; margin-left: auto; margin-right: auto">
<img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
</div>
<div style="display: flex; justify-content: space-between; width: 100%;">
<div style="display: flex; flex-direction: column; align-items: flex-start;">
<p style="margin-top: 0.5em; margin-bottom: 0em;">
Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
</p>
</div>
</div>

## MaziyarPanahi/calme-2.4-llama3-70b - GGUF

This repo contains GGUF format model files for [MaziyarPanahi/calme-2.4-llama3-70b](https://huggingface.co/MaziyarPanahi/calme-2.4-llama3-70b).

The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4242](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).

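As a quick illustration of that compatibility, the sketch below serves one of the quantized files with llama.cpp's `llama-server`. It is only a sketch: the file name, local directory, port, and context size are placeholders, so adjust them to whatever you actually downloaded and to your hardware.

```shell
# Sketch: serve a downloaded quant with llama.cpp's OpenAI-compatible server.
# Assumes llama.cpp (build b4242 or newer) is compiled and the Q4_K_M file
# already sits in MY_LOCAL_DIR; paths, port, and context size are placeholders.
./llama-server \
  -m MY_LOCAL_DIR/calme-2.4-llama3-70b-Q4_K_M.gguf \
  -c 4096 \
  --host 0.0.0.0 \
  --port 8080

# Once it is up, it can be queried with any OpenAI-style client, e.g.:
# curl http://localhost:8080/v1/chat/completions \
#   -H "Content-Type: application/json" \
#   -d '{"messages": [{"role": "user", "content": "Hello!"}]}'
```

The `/v1/chat/completions` route typically applies the chat template embedded in the GGUF metadata, so the ChatML markers shown in the prompt template section below do not need to be added by hand when using the server.
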
<div style="text-align: left; margin: 20px 0;">
<a href="https://tensorblock.co/waitlist/client" style="display: inline-block; padding: 10px 20px; background-color: #007bff; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
Run them on the TensorBlock client using your local machine ↗
</a>
</div>

## Prompt template

```
<|im_start|>system
{system_prompt}<|im_end|>
<|im_start|>user
{prompt}<|im_end|>
<|im_start|>assistant
```
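
For a one-off generation from the command line, a prompt following this template can be passed straight to llama.cpp's `llama-cli`. The sketch below is only an illustration: the model path, system prompt, user message, and generation length are all placeholders.

```shell
# Sketch: single-turn generation with llama-cli using the ChatML template above.
# bash/zsh $'...' quoting turns the \n sequences into real newlines.
# Add -ngl 99 to offload layers to a GPU if one is available.
./llama-cli \
  -m MY_LOCAL_DIR/calme-2.4-llama3-70b-Q4_K_M.gguf \
  -n 256 \
  -p $'<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nWrite a haiku about quantization.<|im_end|>\n<|im_start|>assistant\n'
```

Recent llama.cpp builds also offer an interactive conversation mode (`-cnv`) that applies the embedded chat template automatically, which avoids hand-writing the ChatML markers.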

## Model file specification

| Filename | Quant type | File Size | Description |
| -------- | ---------- | --------- | ----------- |
| [calme-2.4-llama3-70b-Q2_K.gguf](https://huggingface.co/tensorblock/calme-2.4-llama3-70b-GGUF/blob/main/calme-2.4-llama3-70b-Q2_K.gguf) | Q2_K | 26.375 GB | smallest, significant quality loss - not recommended for most purposes |
| [calme-2.4-llama3-70b-Q3_K_S.gguf](https://huggingface.co/tensorblock/calme-2.4-llama3-70b-GGUF/blob/main/calme-2.4-llama3-70b-Q3_K_S.gguf) | Q3_K_S | 30.912 GB | very small, high quality loss |
| [calme-2.4-llama3-70b-Q3_K_M.gguf](https://huggingface.co/tensorblock/calme-2.4-llama3-70b-GGUF/blob/main/calme-2.4-llama3-70b-Q3_K_M.gguf) | Q3_K_M | 34.268 GB | very small, high quality loss |
| [calme-2.4-llama3-70b-Q3_K_L.gguf](https://huggingface.co/tensorblock/calme-2.4-llama3-70b-GGUF/blob/main/calme-2.4-llama3-70b-Q3_K_L.gguf) | Q3_K_L | 37.141 GB | small, substantial quality loss |
| [calme-2.4-llama3-70b-Q4_0.gguf](https://huggingface.co/tensorblock/calme-2.4-llama3-70b-GGUF/blob/main/calme-2.4-llama3-70b-Q4_0.gguf) | Q4_0 | 39.970 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
| [calme-2.4-llama3-70b-Q4_K_S.gguf](https://huggingface.co/tensorblock/calme-2.4-llama3-70b-GGUF/blob/main/calme-2.4-llama3-70b-Q4_K_S.gguf) | Q4_K_S | 40.347 GB | small, greater quality loss |
| [calme-2.4-llama3-70b-Q4_K_M.gguf](https://huggingface.co/tensorblock/calme-2.4-llama3-70b-GGUF/blob/main/calme-2.4-llama3-70b-Q4_K_M.gguf) | Q4_K_M | 42.520 GB | medium, balanced quality - recommended |
| [calme-2.4-llama3-70b-Q5_0.gguf](https://huggingface.co/tensorblock/calme-2.4-llama3-70b-GGUF/blob/main/calme-2.4-llama3-70b-Q5_0.gguf) | Q5_0 | 48.657 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
| [calme-2.4-llama3-70b-Q5_K_S.gguf](https://huggingface.co/tensorblock/calme-2.4-llama3-70b-GGUF/blob/main/calme-2.4-llama3-70b-Q5_K_S.gguf) | Q5_K_S | 48.657 GB | large, low quality loss - recommended |
| [calme-2.4-llama3-70b-Q5_K_M.gguf](https://huggingface.co/tensorblock/calme-2.4-llama3-70b-GGUF/blob/main/calme-2.4-llama3-70b-Q5_K_M.gguf) | Q5_K_M | 49.950 GB | large, very low quality loss - recommended |
| [calme-2.4-llama3-70b-Q6_K](https://huggingface.co/tensorblock/calme-2.4-llama3-70b-GGUF/blob/main/calme-2.4-llama3-70b-Q6_K) | Q6_K | 57.888 GB | very large, extremely low quality loss |
| [calme-2.4-llama3-70b-Q8_0](https://huggingface.co/tensorblock/calme-2.4-llama3-70b-GGUF/blob/main/calme-2.4-llama3-70b-Q8_0) | Q8_0 | 74.975 GB | very large, extremely low quality loss - not recommended |


## Downloading instructions

### Command line

First, install the Hugging Face CLI:

```shell
pip install -U "huggingface_hub[cli]"
```

Then, download an individual model file to a local directory:

```shell
huggingface-cli download tensorblock/calme-2.4-llama3-70b-GGUF --include "calme-2.4-llama3-70b-Q2_K.gguf" --local-dir MY_LOCAL_DIR
```

If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:

```shell
huggingface-cli download tensorblock/calme-2.4-llama3-70b-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
```
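
Note that the Q6_K and Q8_0 quants in the table above are sharded across multiple GGUF files and live in their own folders. The sketch below shows one way to fetch and use a sharded quant; it assumes llama.cpp's usual behaviour of locating the remaining shards automatically when the first one is passed, and all paths are examples.

```shell
# Sketch: download all shards of the Q6_K quant into a local directory.
huggingface-cli download tensorblock/calme-2.4-llama3-70b-GGUF \
  --include "calme-2.4-llama3-70b-Q6_K/*" --local-dir MY_LOCAL_DIR

# llama.cpp can load a sharded model by pointing at the first shard;
# the remaining *-of-00002.gguf files just need to sit in the same folder.
./llama-cli \
  -m MY_LOCAL_DIR/calme-2.4-llama3-70b-Q6_K/calme-2.4-llama3-70b-Q6_K-00001-of-00002.gguf \
  -p "Hello"

# Optionally, the shards can be merged into a single file with the
# llama-gguf-split tool that ships with llama.cpp:
# ./llama-gguf-split --merge \
#   MY_LOCAL_DIR/calme-2.4-llama3-70b-Q6_K/calme-2.4-llama3-70b-Q6_K-00001-of-00002.gguf \
#   MY_LOCAL_DIR/calme-2.4-llama3-70b-Q6_K.gguf
```
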
calme-2.4-llama3-70b-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c983d72950a02cb416b82fadb9bddcdfaa22be31dbba6723e0cdfaffbba5c325
size 26375118688

calme-2.4-llama3-70b-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e50872a77a32c6d7ccc4fafd4a7bf08d03b3edaf859eabc4a59c239da7b5ee12
size 37140603552

calme-2.4-llama3-70b-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:53c53a3c886c03c18767056e8055fc6bd6277015da9538f411c82d8f56bbb12f
size 34267505312

calme-2.4-llama3-70b-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:07f6d747588154c22d705faa0a65dc0f3bfb8fbe429103a99f253d5304ad4587
size 30912062112

calme-2.4-llama3-70b-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dfe056102f5635fad4c6b766a34e9f41801edb06f3873e693fcc297ef8f49de4
size 39969744608

calme-2.4-llama3-70b-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8c69bbe220190a1d0e06e0e55442e3b8833119eda4a8809f30ac45a2b0b935e5
size 42520405728

calme-2.4-llama3-70b-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f95a16c6dd2d763d0dc7a385201045b766c78c2c870f5d484b1ba6e7bd279e9
size 40347231968

calme-2.4-llama3-70b-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:19836a6913bda2c2d8c7389272edcd66c79fb2e72f6d4a56acefccb951fe5245
size 48657459936

calme-2.4-llama3-70b-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:48c95cc8542a32087661dcaaae73384094bfb1d602a88ea28148475cef1f2ab3
size 49949829856

calme-2.4-llama3-70b-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:796a183bcf2f0c6dc7174a8b2252b93414e3813fd0fc8f454298202ee77c329a
size 48657459936

calme-2.4-llama3-70b-Q6_K/calme-2.4-llama3-70b-Q6_K-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:032d865209389842181c189dcfa1ed6fe38003d07ab71b5034c122aba68111f3
size 34847484736

calme-2.4-llama3-70b-Q6_K/calme-2.4-llama3-70b-Q6_K-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ed8ab441baf21dd02690588cf0cd48dba703f1342dd766d0372bb5abb354dfe2
size 23040672928

calme-2.4-llama3-70b-Q8_0/calme-2.4-llama3-70b-Q8_0-00001-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ec582ac6f7222f798360b13b77209e205a4659febd73029d1d849dec1fbe9087
size 34980029152

calme-2.4-llama3-70b-Q8_0/calme-2.4-llama3-70b-Q8_0-00002-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c81958428c5ef5b997b3833b195b2fc7121da8363f803ea62b5746a8462c3782
size 34949976384

calme-2.4-llama3-70b-Q8_0/calme-2.4-llama3-70b-Q8_0-00003-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ce6dab21d684719c5142604eb04818af647b243676326bd3c941db8a4d40fe8b
size 5045062560