Arrcttacsrks committed on
Upload llama.cpp/Package.swift with huggingface_hub
llama.cpp/Package.swift  +82  -0
llama.cpp/Package.swift
ADDED
@@ -0,0 +1,82 @@
+// swift-tools-version:5.5
+
+import PackageDescription
+
+var sources = [
+    "src/llama.cpp",
+    "src/llama-vocab.cpp",
+    "src/llama-grammar.cpp",
+    "src/llama-sampling.cpp",
+    "src/unicode.cpp",
+    "src/unicode-data.cpp",
+    "ggml/src/ggml.c",
+    "ggml/src/ggml-cpu.c",
+    "ggml/src/ggml-alloc.c",
+    "ggml/src/ggml-backend.cpp",
+    "ggml/src/ggml-quants.c",
+    "ggml/src/ggml-aarch64.c",
+]
+
+var resources: [Resource] = []
+var linkerSettings: [LinkerSetting] = []
+var cSettings: [CSetting] = [
+    .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
+    .unsafeFlags(["-fno-objc-arc"]),
+    // NOTE: NEW_LAPACK will require iOS version 16.4+
+    // We should consider adding this in the future when we drop support for iOS 14
+    // (ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
+    // .define("ACCELERATE_NEW_LAPACK"),
+    // .define("ACCELERATE_LAPACK_ILP64")
+]
+
+#if canImport(Darwin)
+sources.append("ggml/src/ggml-metal.m")
+resources.append(.process("ggml/src/ggml-metal.metal"))
+linkerSettings.append(.linkedFramework("Accelerate"))
+cSettings.append(
+    contentsOf: [
+        .define("GGML_USE_ACCELERATE"),
+        .define("GGML_USE_METAL")
+    ]
+)
+#endif
+
+#if os(Linux)
+cSettings.append(.define("_GNU_SOURCE"))
+#endif
+
+let package = Package(
+    name: "llama",
+    platforms: [
+        .macOS(.v12),
+        .iOS(.v14),
+        .watchOS(.v4),
+        .tvOS(.v14)
+    ],
+    products: [
+        .library(name: "llama", targets: ["llama"]),
+    ],
+    targets: [
+        .target(
+            name: "llama",
+            path: ".",
+            exclude: [
+                "build",
+                "cmake",
+                "examples",
+                "scripts",
+                "models",
+                "tests",
+                "CMakeLists.txt",
+                "Makefile",
+                "ggml/src/ggml-metal-embed.metal"
+            ],
+            sources: sources,
+            resources: resources,
+            publicHeadersPath: "spm-headers",
+            cSettings: cSettings,
+            linkerSettings: linkerSettings
+        )
+    ],
+    cxxLanguageStandard: .cxx11
+)
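For context, here is a minimal sketch of how a downstream SwiftPM project might consume the "llama" library product that this manifest declares. The repository URL, branch, and app target name below are illustrative assumptions, not part of this commit.

// swift-tools-version:5.5
// Hypothetical consumer manifest: the package URL, branch, and target name
// are placeholders chosen for illustration, not taken from the upload above.
import PackageDescription

let package = Package(
    name: "MyLlamaApp",
    // Mirrors the platform minimums declared in the uploaded manifest.
    platforms: [.macOS(.v12), .iOS(.v14)],
    dependencies: [
        // Point this at wherever the llama.cpp sources (with the Package.swift above) are hosted.
        .package(url: "https://github.com/ggerganov/llama.cpp", branch: "master")
    ],
    targets: [
        .executableTarget(
            name: "MyLlamaApp",
            dependencies: [
                // "llama" is the library product defined in the uploaded manifest.
                .product(name: "llama", package: "llama.cpp")
            ]
        )
    ]
)

With a dependency like this in place, building on macOS or iOS picks up the Metal and Accelerate paths guarded by canImport(Darwin) in the manifest, while a Linux build falls back to the plain CPU sources with _GNU_SOURCE defined.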