morriszms commited on
Commit
a23faa7
1 Parent(s): 502ac56

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,18 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ calme-2.3-llama3-70b-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
37
+ calme-2.3-llama3-70b-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
38
+ calme-2.3-llama3-70b-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
39
+ calme-2.3-llama3-70b-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
40
+ calme-2.3-llama3-70b-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
41
+ calme-2.3-llama3-70b-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
42
+ calme-2.3-llama3-70b-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
43
+ calme-2.3-llama3-70b-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
44
+ calme-2.3-llama3-70b-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ calme-2.3-llama3-70b-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ calme-2.3-llama3-70b-Q6_K/calme-2.3-llama3-70b-Q6_K-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
47
+ calme-2.3-llama3-70b-Q6_K/calme-2.3-llama3-70b-Q6_K-00002-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
48
+ calme-2.3-llama3-70b-Q8_0/calme-2.3-llama3-70b-Q8_0-00001-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
49
+ calme-2.3-llama3-70b-Q8_0/calme-2.3-llama3-70b-Q8_0-00002-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
50
+ calme-2.3-llama3-70b-Q8_0/calme-2.3-llama3-70b-Q8_0-00003-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ license: llama3
5
+ library_name: transformers
6
+ tags:
7
+ - axolotl
8
+ - finetune
9
+ - dpo
10
+ - facebook
11
+ - meta
12
+ - pytorch
13
+ - llama
14
+ - llama-3
15
+ - chatml
16
+ - TensorBlock
17
+ - GGUF
18
+ base_model: MaziyarPanahi/calme-2.3-llama3-70b
19
+ datasets:
20
+ - MaziyarPanahi/truthy-dpo-v0.1-axolotl
21
+ pipeline_tag: text-generation
22
+ license_name: llama3
23
+ license_link: LICENSE
24
+ inference: false
25
+ model_creator: MaziyarPanahi
26
+ quantized_by: MaziyarPanahi
27
+ model-index:
28
+ - name: calme-2.3-llama3-70b
29
+ results:
30
+ - task:
31
+ type: text-generation
32
+ name: Text Generation
33
+ dataset:
34
+ name: AI2 Reasoning Challenge (25-Shot)
35
+ type: ai2_arc
36
+ config: ARC-Challenge
37
+ split: test
38
+ args:
39
+ num_few_shot: 25
40
+ metrics:
41
+ - type: acc_norm
42
+ value: 72.35
43
+ name: normalized accuracy
44
+ source:
45
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=MaziyarPanahi/calme-2.3-llama3-70b
46
+ name: Open LLM Leaderboard
47
+ - task:
48
+ type: text-generation
49
+ name: Text Generation
50
+ dataset:
51
+ name: HellaSwag (10-Shot)
52
+ type: hellaswag
53
+ split: validation
54
+ args:
55
+ num_few_shot: 10
56
+ metrics:
57
+ - type: acc_norm
58
+ value: 86
59
+ name: normalized accuracy
60
+ source:
61
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=MaziyarPanahi/calme-2.3-llama3-70b
62
+ name: Open LLM Leaderboard
63
+ - task:
64
+ type: text-generation
65
+ name: Text Generation
66
+ dataset:
67
+ name: MMLU (5-Shot)
68
+ type: cais/mmlu
69
+ config: all
70
+ split: test
71
+ args:
72
+ num_few_shot: 5
73
+ metrics:
74
+ - type: acc
75
+ value: 80.47
76
+ name: accuracy
77
+ source:
78
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=MaziyarPanahi/calme-2.3-llama3-70b
79
+ name: Open LLM Leaderboard
80
+ - task:
81
+ type: text-generation
82
+ name: Text Generation
83
+ dataset:
84
+ name: TruthfulQA (0-shot)
85
+ type: truthful_qa
86
+ config: multiple_choice
87
+ split: validation
88
+ args:
89
+ num_few_shot: 0
90
+ metrics:
91
+ - type: mc2
92
+ value: 63.45
93
+ source:
94
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=MaziyarPanahi/calme-2.3-llama3-70b
95
+ name: Open LLM Leaderboard
96
+ - task:
97
+ type: text-generation
98
+ name: Text Generation
99
+ dataset:
100
+ name: Winogrande (5-shot)
101
+ type: winogrande
102
+ config: winogrande_xl
103
+ split: validation
104
+ args:
105
+ num_few_shot: 5
106
+ metrics:
107
+ - type: acc
108
+ value: 82.95
109
+ name: accuracy
110
+ source:
111
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=MaziyarPanahi/calme-2.3-llama3-70b
112
+ name: Open LLM Leaderboard
113
+ - task:
114
+ type: text-generation
115
+ name: Text Generation
116
+ dataset:
117
+ name: GSM8k (5-shot)
118
+ type: gsm8k
119
+ config: main
120
+ split: test
121
+ args:
122
+ num_few_shot: 5
123
+ metrics:
124
+ - type: acc
125
+ value: 87.19
126
+ name: accuracy
127
+ source:
128
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=MaziyarPanahi/calme-2.3-llama3-70b
129
+ name: Open LLM Leaderboard
130
+ ---
131
+
132
+ <div style="width: auto; margin-left: auto; margin-right: auto">
133
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
134
+ </div>
135
+ <div style="display: flex; justify-content: space-between; width: 100%;">
136
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
137
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
138
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
139
+ </p>
140
+ </div>
141
+ </div>
142
+
143
+ ## MaziyarPanahi/calme-2.3-llama3-70b - GGUF
144
+
145
+ This repo contains GGUF format model files for [MaziyarPanahi/calme-2.3-llama3-70b](https://huggingface.co/MaziyarPanahi/calme-2.3-llama3-70b).
146
+
147
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4242](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
148
+
149
+ <div style="text-align: left; margin: 20px 0;">
150
+ <a href="https://tensorblock.co/waitlist/client" style="display: inline-block; padding: 10px 20px; background-color: #007bff; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
151
+ Run them on the TensorBlock client using your local machine ↗
152
+ </a>
153
+ </div>
154
+
155
+ ## Prompt template
156
+
157
+ ```
158
+ <|im_start|>system
159
+ {system_prompt}<|im_end|>
160
+ <|im_start|>user
161
+ {prompt}<|im_end|>
162
+ <|im_start|>assistant
163
+ ```
164
+
165
+ ## Model file specification
166
+
167
+ | Filename | Quant type | File Size | Description |
168
+ | -------- | ---------- | --------- | ----------- |
169
+ | [calme-2.3-llama3-70b-Q2_K.gguf](https://huggingface.co/tensorblock/calme-2.3-llama3-70b-GGUF/blob/main/calme-2.3-llama3-70b-Q2_K.gguf) | Q2_K | 26.375 GB | smallest, significant quality loss - not recommended for most purposes |
170
+ | [calme-2.3-llama3-70b-Q3_K_S.gguf](https://huggingface.co/tensorblock/calme-2.3-llama3-70b-GGUF/blob/main/calme-2.3-llama3-70b-Q3_K_S.gguf) | Q3_K_S | 30.912 GB | very small, high quality loss |
171
+ | [calme-2.3-llama3-70b-Q3_K_M.gguf](https://huggingface.co/tensorblock/calme-2.3-llama3-70b-GGUF/blob/main/calme-2.3-llama3-70b-Q3_K_M.gguf) | Q3_K_M | 34.268 GB | very small, high quality loss |
172
+ | [calme-2.3-llama3-70b-Q3_K_L.gguf](https://huggingface.co/tensorblock/calme-2.3-llama3-70b-GGUF/blob/main/calme-2.3-llama3-70b-Q3_K_L.gguf) | Q3_K_L | 37.141 GB | small, substantial quality loss |
173
+ | [calme-2.3-llama3-70b-Q4_0.gguf](https://huggingface.co/tensorblock/calme-2.3-llama3-70b-GGUF/blob/main/calme-2.3-llama3-70b-Q4_0.gguf) | Q4_0 | 39.970 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
174
+ | [calme-2.3-llama3-70b-Q4_K_S.gguf](https://huggingface.co/tensorblock/calme-2.3-llama3-70b-GGUF/blob/main/calme-2.3-llama3-70b-Q4_K_S.gguf) | Q4_K_S | 40.347 GB | small, greater quality loss |
175
+ | [calme-2.3-llama3-70b-Q4_K_M.gguf](https://huggingface.co/tensorblock/calme-2.3-llama3-70b-GGUF/blob/main/calme-2.3-llama3-70b-Q4_K_M.gguf) | Q4_K_M | 42.520 GB | medium, balanced quality - recommended |
176
+ | [calme-2.3-llama3-70b-Q5_0.gguf](https://huggingface.co/tensorblock/calme-2.3-llama3-70b-GGUF/blob/main/calme-2.3-llama3-70b-Q5_0.gguf) | Q5_0 | 48.657 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
177
+ | [calme-2.3-llama3-70b-Q5_K_S.gguf](https://huggingface.co/tensorblock/calme-2.3-llama3-70b-GGUF/blob/main/calme-2.3-llama3-70b-Q5_K_S.gguf) | Q5_K_S | 48.657 GB | large, low quality loss - recommended |
178
+ | [calme-2.3-llama3-70b-Q5_K_M.gguf](https://huggingface.co/tensorblock/calme-2.3-llama3-70b-GGUF/blob/main/calme-2.3-llama3-70b-Q5_K_M.gguf) | Q5_K_M | 49.950 GB | large, very low quality loss - recommended |
179
+ | [calme-2.3-llama3-70b-Q6_K](https://huggingface.co/tensorblock/calme-2.3-llama3-70b-GGUF/blob/main/calme-2.3-llama3-70b-Q6_K) | Q6_K | 57.888 GB | very large, extremely low quality loss |
180
+ | [calme-2.3-llama3-70b-Q8_0](https://huggingface.co/tensorblock/calme-2.3-llama3-70b-GGUF/blob/main/calme-2.3-llama3-70b-Q8_0) | Q8_0 | 74.975 GB | very large, extremely low quality loss - not recommended |
181
+
182
+
183
+ ## Downloading instruction
184
+
185
+ ### Command line
186
+
187
+ Firstly, install Huggingface Client
188
+
189
+ ```shell
190
+ pip install -U "huggingface_hub[cli]"
191
+ ```
192
+
193
+ Then, download the individual model file to a local directory:
194
+
195
+ ```shell
196
+ huggingface-cli download tensorblock/calme-2.3-llama3-70b-GGUF --include "calme-2.3-llama3-70b-Q2_K.gguf" --local-dir MY_LOCAL_DIR
197
+ ```
198
+
199
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
200
+
201
+ ```shell
202
+ huggingface-cli download tensorblock/calme-2.3-llama3-70b-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
203
+ ```
calme-2.3-llama3-70b-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:32a4a635d5a2700cb1b43edab767624414da446795b0dc1381571cf8edc22b0d
3
+ size 26375118656
calme-2.3-llama3-70b-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f34d21aa8e2711eab602226d55113e55a60849759142b43a6945bb307b1b09cc
3
+ size 37140603520
calme-2.3-llama3-70b-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d196023f357fac37cfef27f7675d1ba281006e7858dfd24b95c4029e060ccaa8
3
+ size 34267505280
calme-2.3-llama3-70b-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bcb48683c52f01b03c5612e7864962d9e11ab359ca71951f2e00341e9a914269
3
+ size 30912062080
calme-2.3-llama3-70b-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac6d6cd6c56a016f1bf7eb4eaf5ff8b46e7f67ce2d0c02629e79ba840129db74
3
+ size 39969744576
calme-2.3-llama3-70b-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d545188806b47e7b5e342b6ef409698f2903431aa750c6fa26170a1eaad72796
3
+ size 42520405696
calme-2.3-llama3-70b-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11bd972ac1f4f66755c9a5a82a0ab2aa2b5f38cf94a389ae507365811d81897d
3
+ size 40347231936
calme-2.3-llama3-70b-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a78e2920ecb36931314883d54de0b4bc37db2bb2f841148882ba95b6e333b065
3
+ size 48657459904
calme-2.3-llama3-70b-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c92c080633b985414777e9e801786e9ea1d89bb7ae710260793040f506d88a3d
3
+ size 49949829824
calme-2.3-llama3-70b-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ffe292fae17c1d4a194e445b0fe44cc02f477975942ced7de53fdeb2411fb0f0
3
+ size 48657459904
calme-2.3-llama3-70b-Q6_K/calme-2.3-llama3-70b-Q6_K-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a17d13f78033ff19e4567e0084ecc43ce740cb74fe1863e48c6d66242731604
3
+ size 34847484704
calme-2.3-llama3-70b-Q6_K/calme-2.3-llama3-70b-Q6_K-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b67d7d74f2f4f6ce87d7b395296f10b1713fe52f9136ddc7ca735c19472b84b
3
+ size 23040672928
calme-2.3-llama3-70b-Q8_0/calme-2.3-llama3-70b-Q8_0-00001-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b867a3be398d5bd3a90f4e039a1d43d65073556602d609671c2c660dc5c1c43f
3
+ size 34980029120
calme-2.3-llama3-70b-Q8_0/calme-2.3-llama3-70b-Q8_0-00002-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a10af57c60497d3aa076e0832b7c3a6de662c8e615121b3cd2c68ffb76ad162
3
+ size 34949976384
calme-2.3-llama3-70b-Q8_0/calme-2.3-llama3-70b-Q8_0-00003-of-00003.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c029f4742b546fcf07319f81a36a41258725e5b69d60db58020a72e8d83020e5
3
+ size 5045062560