asiansoul committed

Commit a8ce4ce · verified · 1 Parent(s): 2a35896

Upload folder using huggingface_hub
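The commit message says the folder was uploaded with huggingface_hub. A minimal sketch of how such an upload is typically done; the local folder path and repo id below are assumptions for illustration, not values taken from this commit:

```python
from huggingface_hub import HfApi

api = HfApi()  # uses the token stored by `huggingface-cli login` by default

# Upload every file in a local folder to a model repo on the Hub.
# Large binaries such as *.gguf are routed through Git LFS automatically.
api.upload_folder(
    folder_path="./llama-3.2-koen-gx-3b-instruct-gguf",        # assumed local path
    repo_id="asiansoul/llama-3.2-koen-gx-3b-instruct-gguf",    # assumed repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```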
.gitattributes CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ llama-3.2-koen-gx-3b-instruct-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ llama-3.2-koen-gx-3b-instruct-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ llama-3.2-koen-gx-3b-instruct-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ llama-3.2-koen-gx-3b-instruct-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ llama-3.2-koen-gx-3b-instruct-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
Modelfile_3b ADDED
@@ -0,0 +1,48 @@
+ FROM llama-3.2-koen-gx-3b-instruct-Q5_K_M.gguf
+ TEMPLATE """<|start_header_id|>system<|end_header_id|>
+
+ Cutting Knowledge Date: December 2023
+
+ {{ if .System }}{{ .System }}
+ {{- end }}
+ {{- if .Tools }}When you receive a tool call response, use the output to format an answer to the original user question.
+
+ You are a helpful assistant with tool calling capabilities.
+ {{- end }}<|eot_id|>
+ {{- range $i, $_ := .Messages }}
+ {{- $last := eq (len (slice $.Messages $i)) 1 }}
+ {{- if eq .Role "user" }}<|start_header_id|>user<|end_header_id|>
+ {{- if and $.Tools $last }}
+
+ Given the following functions, please respond with a JSON for a function call with its proper arguments that best answers the given prompt.
+
+ Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. Do not use variables.
+
+ {{ range $.Tools }}
+ {{- . }}
+ {{ end }}
+ {{ .Content }}<|eot_id|>
+ {{- else }}
+
+ {{ .Content }}<|eot_id|>
+ {{- end }}{{ if $last }}<|start_header_id|>assistant<|end_header_id|>
+
+ {{ end }}
+ {{- else if eq .Role "assistant" }}<|start_header_id|>assistant<|end_header_id|>
+ {{- if .ToolCalls }}
+ {{ range .ToolCalls }}
+ {"name": "{{ .Function.Name }}", "parameters": {{ .Function.Arguments }}}{{ end }}
+ {{- else }}
+
+ {{ .Content }}
+ {{- end }}{{ if not $last }}<|eot_id|>{{ end }}
+ {{- else if eq .Role "tool" }}<|start_header_id|>ipython<|end_header_id|>
+
+ {{ .Content }}<|eot_id|>{{ if $last }}<|start_header_id|>assistant<|end_header_id|>
+
+ {{ end }}
+ {{- end }}
+ {{- end }}"""
+ PARAMETER stop <|start_header_id|>
+ PARAMETER stop <|end_header_id|>
+ PARAMETER stop <|eot_id|>
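Modelfile_3b pairs the Q5_K_M GGUF with a Llama 3.2 chat/tool-calling template and stop tokens for Ollama. A hedged sketch of registering and querying the model locally, driving the Ollama CLI from Python; the model name `koen-gx-3b` is an arbitrary choice, and the Modelfile and GGUF are assumed to sit in the current directory:

```python
import subprocess

# Build a local Ollama model from the Modelfile above; its FROM line expects
# llama-3.2-koen-gx-3b-instruct-Q5_K_M.gguf to be present alongside it.
subprocess.run(
    ["ollama", "create", "koen-gx-3b", "-f", "Modelfile_3b"],
    check=True,
)

# Ask a single question; the PARAMETER stop tokens end generation at the
# Llama 3 header/eot markers declared in the template.
result = subprocess.run(
    ["ollama", "run", "koen-gx-3b", "Who wrote The Old Man and the Sea?"],
    check=True,
    capture_output=True,
    text=True,
)
print(result.stdout)
```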
llama-3.2-koen-gx-3b-instruct-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b17c2923e32209c34c8568ac618fed574ae75f22a28f091a716efe571573876
+ size 1687159648
llama-3.2-koen-gx-3b-instruct-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e83322afb0472cf07ec0d30dff1f2ce044219e87de270de70365e18fa4db3741
+ size 2019378016
llama-3.2-koen-gx-3b-instruct-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7e60df75175e0bfe4bad1c7715247f122a03893d8eb45f052613ffeca193af1
+ size 2322154336
llama-3.2-koen-gx-3b-instruct-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3be7c7b0714a0c8e43c5ba5f646204116687e02efd64dac2ff4b129422f70831
+ size 2643854176
llama-3.2-koen-gx-3b-instruct-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:705886cf4d670a095ad51727b28738e2517ce6818c5780e5f1014fa3293fd809
+ size 3421899616
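Each *.gguf entry above is only a Git LFS pointer (version, oid, size); the actual weights live in LFS storage. A minimal sketch of fetching one quantization with huggingface_hub; the repo id is an assumption based on the committer name, not stated in this diff:

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the real ~2.3 GB Q5_K_M file
# into the local Hub cache, returning the path to the downloaded file.
gguf_path = hf_hub_download(
    repo_id="asiansoul/llama-3.2-koen-gx-3b-instruct-gguf",   # assumed repo id
    filename="llama-3.2-koen-gx-3b-instruct-Q5_K_M.gguf",
)
print(gguf_path)
```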