Commit 9938c27 (parent 14fb7ff), committed by Illumotion: Upload folder using huggingface_hub

Changed files:
- convert.py +6 -0
- examples/alpaca.sh +1 -1
- examples/common.h +2 -1
- examples/embd-input/embd-input-lib.cpp +1 -1
- examples/embedding/embedding.cpp +1 -1
- examples/main/main.cpp +1 -1
- examples/perplexity/perplexity.cpp +1 -1
- examples/quantize-stats/quantize-stats.cpp +7 -7
- examples/server/README.md +52 -8
- examples/server/api_like_OAI.py +219 -0
- examples/server/completion.js.hpp +375 -0
- examples/server/deps.sh +18 -0
- examples/server/index.html.hpp +899 -0
- examples/server/index.js.hpp +0 -0
- examples/server/public/completion.js +168 -0
- examples/server/public/index.html +380 -0
- examples/server/public/index.js +1 -0
- examples/server/server.cpp +644 -306
- expose.cpp +5 -5
- expose.h +17 -0
- ggml-cuda.cu +405 -84
- ggml-opencl.cpp +11 -7
- ggml.c +0 -0
- ggml.h +85 -62
- gpttype_adapter.cpp +155 -44
- klite.embd +0 -0
- koboldcpp.py +470 -18
- llama.cpp +53 -36
- llama.h +15 -0
- make_old_pyinstaller.bat +1 -1
- make_old_pyinstaller_cuda.bat +1 -1
- make_pyinstaller.bat +1 -1
- make_pyinstaller.sh +2 -1
- otherarch/ggml_v2.c +13 -10
- otherarch/gptj_v3.cpp +2 -2
- otherarch/mpt_v3.cpp +2 -2
- otherarch/neox_v3.cpp +2 -2
- otherarch/rwkv_v3.cpp +42 -0
- spm-headers/ggml.h +85 -62
convert.py CHANGED
@@ -154,9 +154,15 @@ class Params:
         # try transformer naming first
         if "model.layers.0.self_attn.q_proj.weight" in model:
             n_layer=next(i for i in itertools.count() if f"model.layers.{i}.self_attn.q_proj.weight" not in model)
+        elif "model.layers.0.self_attn.W_pack.weight" in model: # next: try baichuan naming
+            n_layer=next(i for i in itertools.count() if f"model.layers.{i}.self_attn.W_pack.weight" not in model)
         else:
             n_layer=next(i for i in itertools.count() if f"layers.{i}.attention.wq.weight" not in model)
 
+        if n_layer < 1:
+            raise Exception("failed to guess 'n_layer'. This model is unknown or unsupported.\n"
+                            "Suggestion: provide 'config.json' of the model in the same directory containing model files.")
+
         n_head=n_embd // 128 # guessed
 
         return Params(
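The layer-count guess above probes consecutive tensor names until one is missing. A minimal sketch of the same pattern (not part of the commit; the toy dict stands in for a real checkpoint and uses the baichuan-style naming the diff adds):

```python
import itertools

# Toy stand-in for a loaded checkpoint: 32 layers with baichuan-style W_pack weights.
model = {f"model.layers.{i}.self_attn.W_pack.weight": object() for i in range(32)}

# Same probing pattern as convert.py: count up until the key for layer i is absent.
n_layer = next(i for i in itertools.count()
               if f"model.layers.{i}.self_attn.W_pack.weight" not in model)

if n_layer < 1:
    raise Exception("failed to guess 'n_layer'.")

print(n_layer)  # 32
```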
examples/alpaca.sh CHANGED
@@ -7,7 +7,7 @@
 cd `dirname $0`
 cd ..
 
-./main -m ./models/
+./main -m ./models/alpaca.13b.ggmlv3.q8_0.bin \
 --color \
 -f ./prompts/alpaca.txt \
 --ctx_size 2048 \
examples/common.h CHANGED
@@ -31,7 +31,7 @@ struct gpt_params {
     int32_t n_gpu_layers = 0; // number of layers to store in VRAM
     int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
     float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
-
+    int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens.
 
     // sampling parameters
     std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
@@ -59,6 +59,7 @@ struct gpt_params {
     std::string lora_adapter = ""; // lora adapter path
     std::string lora_base = ""; // base model path for the lora adapter
 
+    bool low_vram = false; // if true, reduce VRAM usage at the cost of performance
     bool memory_f16 = true; // use f16 instead of f32 for memory kv
     bool random_prompt = false; // do not randomize prompt if none provided
     bool use_color = false; // use color to distinguish generations and inputs
examples/embd-input/embd-input-lib.cpp CHANGED
@@ -29,7 +29,7 @@ struct MyModel* create_mymodel(int argc, char ** argv) {
 
     fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);
 
-    if (params.seed
+    if (params.seed == LLAMA_DEFAULT_SEED) {
         params.seed = time(NULL);
     }
     fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);
examples/embedding/embedding.cpp CHANGED
@@ -18,7 +18,7 @@ int main(int argc, char ** argv) {
     params.embedding = true;
 
     if (params.n_ctx > 2048) {
-        fprintf(stderr, "%s: warning: model
+        fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified);"
                 "expect poor results\n", __func__, params.n_ctx);
     }
 
examples/main/main.cpp CHANGED
@@ -85,7 +85,7 @@ int main(int argc, char ** argv) {
     }
 
     if (params.n_ctx > 2048) {
-        fprintf(stderr, "%s: warning: model
+        fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified);"
                 "expect poor results\n", __func__, params.n_ctx);
     } else if (params.n_ctx < 8) {
         fprintf(stderr, "%s: warning: minimum context size is 8, using minimum size.\n", __func__);
examples/perplexity/perplexity.cpp CHANGED
@@ -130,7 +130,7 @@ int main(int argc, char ** argv) {
     params.n_batch = std::min(params.n_batch, params.n_ctx);
 
     if (params.n_ctx > 2048) {
-        fprintf(stderr, "%s: warning: model
+        fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified);"
                 "expect poor results\n", __func__, params.n_ctx);
     }
 
examples/quantize-stats/quantize-stats.cpp CHANGED
@@ -147,7 +147,7 @@ void test_roundtrip_on_chunk(
     const ggml_tensor * layer,
     int64_t offset,
     int64_t chunk_size,
-    const
+    const ggml_type_traits_t & qfns,
     bool use_reference,
     float * input_scratch,
     char * quantized_scratch,
@@ -163,11 +163,11 @@ void test_roundtrip_on_chunk(
     }
 
     if (use_reference) {
-        qfns.
+        qfns.from_float_reference(input_scratch, quantized_scratch, chunk_size);
     } else {
-        qfns.
+        qfns.from_float(input_scratch, quantized_scratch, chunk_size);
     }
-    qfns.
+    qfns.to_float(quantized_scratch, output_scratch, chunk_size);
 
     update_error_stats(chunk_size, input_scratch, output_scratch, stats);
 }
@@ -177,7 +177,7 @@ void test_roundtrip_on_chunk(
 void test_roundtrip_on_layer(
     std::string & name,
     bool print_layer_stats,
-    const
+    const ggml_type_traits_t & qfns,
     bool use_reference,
     const ggml_tensor * layer,
     std::vector<float> & input_scratch,
@@ -388,8 +388,8 @@ int main(int argc, char ** argv) {
         if (!params.include_types.empty() && std::find(params.include_types.begin(), params.include_types.end(), i) == params.include_types.end()) {
             continue;
         }
-
-        if (qfns.
+        ggml_type_traits_t qfns = ggml_internal_get_type_traits(type);
+        if (qfns.from_float && qfns.to_float) {
             if (params.verbose) {
                 printf("testing %s ...\n", ggml_type_name(type));
             }
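For context, the round-trip test above quantizes a chunk with the type's `from_float` (or the reference variant), dequantizes it with `to_float`, and accumulates error statistics against the original floats. A rough Python sketch of the same idea, using simple symmetric int8 quantization as a stand-in for the ggml type traits (the helpers here are illustrative, not ggml APIs):

```python
import numpy as np

def from_float(x, scale):
    # quantize: float32 -> int8 (stand-in for qfns.from_float)
    return np.clip(np.round(x / scale), -127, 127).astype(np.int8)

def to_float(q, scale):
    # dequantize: int8 -> float32 (stand-in for qfns.to_float)
    return q.astype(np.float32) * scale

chunk = np.random.randn(1024).astype(np.float32)
scale = np.abs(chunk).max() / 127.0

roundtrip = to_float(from_float(chunk, scale), scale)
rmse = np.sqrt(np.mean((chunk - roundtrip) ** 2))
print(f"round-trip RMSE: {rmse:.6f}")
```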
examples/server/README.md CHANGED
@@ -1,13 +1,13 @@
 # llama.cpp/example/server
 
-This example demonstrates a simple HTTP API server to interact with llama.cpp.
+This example demonstrates a simple HTTP API server and a simple web front end to interact with llama.cpp.
 
 Command line options:
 
 - `--threads N`, `-t N`: Set the number of threads to use during computation.
 - `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.bin`).
 - `-m ALIAS`, `--alias ALIAS`: Set an alias for the model. The alias will be returned in API responses.
-- `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference.
+- `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference. The size may differ in other models, for example, baichuan models were build with a context of 4096.
 - `-ngl N`, `--n-gpu-layers N`: When compiled with appropriate support (currently CLBlast or cuBLAS), this option allows offloading some layers to the GPU for computation. Generally results in increased performance.
 - `-mg i, --main-gpu i`: When using multiple GPUs this option controls which GPU is used for small tensors for which the overhead of splitting the computation across all GPUs is not worthwhile. The GPU in question will use slightly more VRAM to store a scratch buffer for temporary results. By default GPU 0 is used. Requires cuBLAS.
 - `-ts SPLIT, --tensor-split SPLIT`: When using multiple GPUs this option controls how large tensors should be split across all GPUs. `SPLIT` is a comma-separated list of non-negative values that assigns the proportion of data that each GPU should get in order. For example, "3,2" will assign 60% of the data to GPU 0 and 40% to GPU 1. By default the data is split in proportion to VRAM but this may not be optimal for performance. Requires cuBLAS.
@@ -21,24 +21,22 @@ Command line options:
 - `-to N`, `--timeout N`: Server read/write timeout in seconds. Default `600`.
 - `--host`: Set the hostname or ip address to listen. Default `127.0.0.1`.
 - `--port`: Set the port to listen. Default: `8080`.
+- `--path`: path from which to serve static files (default examples/server/public)
 - `--embedding`: Enable embedding extraction, Default: disabled.
 
 ## Build
 
-
+server is build alongside everything else from the root of the project
 
 - Using `make`:
 
   ```bash
-
+  make
   ```
 
 - Using `CMake`:
 
   ```bash
-  mkdir build-server
-  cd build-server
-  cmake -DLLAMA_BUILD_SERVER=ON ..
   cmake --build . --config Release
   ```
 
@@ -59,7 +57,7 @@ server.exe -m models\7B\ggml-model.bin -c 2048
 ```
 
 The above command will start a server that by default listens on `127.0.0.1:8080`.
-You can consume the endpoints with Postman or NodeJS with axios library.
+You can consume the endpoints with Postman or NodeJS with axios library. You can visit the web front end at the same url.
 
 ## Testing with CURL
 
@@ -190,3 +188,49 @@ Run with bash:
 ```sh
 bash chat.sh
 ```
+
+### API like OAI
+
+API example using Python Flask: [api_like_OAI.py](api_like_OAI.py)
+This example must be used with server.cpp
+
+```sh
+python api_like_OAI.py
+```
+
+After running the API server, you can use it in Python by setting the API base URL.
+```python
+openai.api_base = "http://<Your api-server IP>:port"
+```
+
+Then you can utilize llama.cpp as an OpenAI's **chat.completion** or **text_completion** API
+
+### Extending or building alternative Web Front End
+
+The default location for the static files is `examples/server/public`. You can extend the front end by running the server binary with `--path` set to `./your-directory` and importing `/completion.js` to get access to the llamaComplete() method.
+
+Read the documentation in `/completion.js` to see convenient ways to access llama.
+
+A simple example is below:
+
+```html
+<html>
+  <body>
+    <pre>
+      <script type="module">
+        import { llama } from '/completion.js'
+
+        const prompt = `### Instruction:
+Write dad jokes, each one paragraph.
+You can use html formatting if needed.
+
+### Response:`
+
+        for await (const chunk of llama(prompt)) {
+          document.write(chunk.data.content)
+        }
+      </script>
+    </pre>
+  </body>
+</html>
+```
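As a hedged illustration outside the diff: the `/completion` endpoint that both the web front end and `api_like_OAI.py` talk to streams Server-Sent Events, one `data: {...}` line per generated chunk. A minimal Python client along the same lines as the proxy's `generate()` loop, assuming the server from this commit is running on 127.0.0.1:8080:

```python
import json
import requests

payload = {
    "prompt": "### Instruction:\nTell me a joke.\n\n### Response:",
    "n_predict": 64,
    "stream": True,
}

with requests.post("http://127.0.0.1:8080/completion",
                   data=json.dumps(payload), stream=True) as resp:
    for line in resp.iter_lines():
        if not line:
            continue
        # Each SSE line looks like: data: {"content": "...", "stop": false, ...}
        chunk = json.loads(line.decode("utf-8")[len("data: "):])
        print(chunk["content"], end="", flush=True)
        if chunk.get("stop"):
            break
```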
examples/server/api_like_OAI.py ADDED
@@ -0,0 +1,219 @@
import argparse
from flask import Flask, jsonify, request, Response
import urllib.parse
import requests
import time
import json


app = Flask(__name__)

parser = argparse.ArgumentParser(description="An example of using server.cpp with a similar API to OAI. It must be used together with server.cpp.")
parser.add_argument("--chat-prompt", type=str, help="the top prompt in chat completions(default: 'A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')", default='A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')
parser.add_argument("--user-name", type=str, help="USER name in chat completions(default: '\\nUSER: ')", default="\\nUSER: ")
parser.add_argument("--ai-name", type=str, help="ASSISTANT name in chat completions(default: '\\nASSISTANT: ')", default="\\nASSISTANT: ")
parser.add_argument("--system-name", type=str, help="SYSTEM name in chat completions(default: '\\nASSISTANT's RULE: ')", default="\\nASSISTANT's RULE: ")
parser.add_argument("--stop", type=str, help="the end of response in chat completions(default: '</s>')", default="</s>")
parser.add_argument("--llama-api", type=str, help="Set the address of server.cpp in llama.cpp(default: http://127.0.0.1:8080)", default='http://127.0.0.1:8080')
parser.add_argument("--api-key", type=str, help="Set the api key to allow only few user(default: NULL)", default="")
parser.add_argument("--host", type=str, help="Set the ip address to listen.(default: 127.0.0.1)", default='127.0.0.1')
parser.add_argument("--port", type=int, help="Set the port to listen.(default: 8081)", default=8081)

args = parser.parse_args()

def is_present(json, key):
    try:
        buf = json[key]
    except KeyError:
        return False
    return True



#convert chat to prompt
def convert_chat(messages):
    prompt = "" + args.chat_prompt.replace("\\n", "\n")

    system_n = args.system_name.replace("\\n", "\n")
    user_n = args.user_name.replace("\\n", "\n")
    ai_n = args.ai_name.replace("\\n", "\n")
    stop = args.stop.replace("\\n", "\n")


    for line in messages:
        if (line["role"] == "system"):
            prompt += f"{system_n}{line['content']}"
        if (line["role"] == "user"):
            prompt += f"{user_n}{line['content']}"
        if (line["role"] == "assistant"):
            prompt += f"{ai_n}{line['content']}{stop}"
    prompt += ai_n.rstrip()

    return prompt

def make_postData(body, chat=False, stream=False):
    postData = {}
    if (chat):
        postData["prompt"] = convert_chat(body["messages"])
    else:
        postData["prompt"] = body["prompt"]
    if(is_present(body, "temperature")): postData["temperature"] = body["temperature"]
    if(is_present(body, "top_k")): postData["top_k"] = body["top_k"]
    if(is_present(body, "top_p")): postData["top_p"] = body["top_p"]
    if(is_present(body, "max_tokens")): postData["n_predict"] = body["max_tokens"]
    if(is_present(body, "presence_penalty")): postData["presence_penalty"] = body["presence_penalty"]
    if(is_present(body, "frequency_penalty")): postData["frequency_penalty"] = body["frequency_penalty"]
    if(is_present(body, "repeat_penalty")): postData["repeat_penalty"] = body["repeat_penalty"]
    if(is_present(body, "mirostat")): postData["mirostat"] = body["mirostat"]
    if(is_present(body, "mirostat_tau")): postData["mirostat_tau"] = body["mirostat_tau"]
    if(is_present(body, "mirostat_eta")): postData["mirostat_eta"] = body["mirostat_eta"]
    if(is_present(body, "seed")): postData["seed"] = body["seed"]
    if(is_present(body, "logit_bias")): postData["logit_bias"] = [[int(token), body["logit_bias"][token]] for token in body["logit_bias"].keys()]
    if (args.stop != ""):
        postData["stop"] = [args.stop]
    else:
        postData["stop"] = []
    if(is_present(body, "stop")): postData["stop"] += body["stop"]
    postData["n_keep"] = -1
    postData["stream"] = stream

    return postData

def make_resData(data, chat=False, promptToken=[]):
    resData = {
        "id": "chatcmpl" if (chat) else "cmpl",
        "object": "chat.completion" if (chat) else "text_completion",
        "created": int(time.time()),
        "truncated": data["truncated"],
        "model": "LLaMA_CPP",
        "usage": {
            "prompt_tokens": data["tokens_evaluated"],
            "completion_tokens": data["tokens_predicted"],
            "total_tokens": data["tokens_evaluated"] + data["tokens_predicted"]
        }
    }
    if (len(promptToken) != 0):
        resData["promptToken"] = promptToken
    if (chat):
        #only one choice is supported
        resData["choices"] = [{
            "index": 0,
            "message": {
                "role": "assistant",
                "content": data["content"],
            },
            "finish_reason": "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length"
        }]
    else:
        #only one choice is supported
        resData["choices"] = [{
            "text": data["content"],
            "index": 0,
            "logprobs": None,
            "finish_reason": "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length"
        }]
    return resData

def make_resData_stream(data, chat=False, time_now = 0, start=False):
    resData = {
        "id": "chatcmpl" if (chat) else "cmpl",
        "object": "chat.completion.chunk" if (chat) else "text_completion.chunk",
        "created": time_now,
        "model": "LLaMA_CPP",
        "choices": [
            {
                "finish_reason": None,
                "index": 0
            }
        ]
    }
    if (chat):
        if (start):
            resData["choices"][0]["delta"] = {
                "role": "assistant"
            }
        else:
            resData["choices"][0]["delta"] = {
                "content": data["content"]
            }
            if (data["stop"]):
                resData["choices"][0]["finish_reason"] = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length"
    else:
        resData["choices"][0]["text"] = data["content"]
        if (data["stop"]):
            resData["choices"][0]["finish_reason"] = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length"

    return resData


@app.route('/chat/completions', methods=['POST'])
@app.route('/v1/chat/completions', methods=['POST'])
def chat_completions():
    if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key):
        return Response(status=403)
    body = request.get_json()
    stream = False
    tokenize = False
    if(is_present(body, "stream")): stream = body["stream"]
    if(is_present(body, "tokenize")): tokenize = body["tokenize"]
    postData = make_postData(body, chat=True, stream=stream)

    promptToken = []
    if (tokenize):
        tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"), data=json.dumps({"content": postData["prompt"]})).json()
        promptToken = tokenData["tokens"]

    if (not stream):
        data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData))
        print(data.json())
        resData = make_resData(data.json(), chat=True, promptToken=promptToken)
        return jsonify(resData)
    else:
        def generate():
            data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData), stream=True)
            time_now = int(time.time())
            resData = make_resData_stream({}, chat=True, time_now=time_now, start=True)
            yield 'data: {}\n'.format(json.dumps(resData))
            for line in data.iter_lines():
                if line:
                    decoded_line = line.decode('utf-8')
                    resData = make_resData_stream(json.loads(decoded_line[6:]), chat=True, time_now=time_now)
                    yield 'data: {}\n'.format(json.dumps(resData))
        return Response(generate(), mimetype='text/event-stream')


@app.route('/completions', methods=['POST'])
@app.route('/v1/completions', methods=['POST'])
def completion():
    if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key):
        return Response(status=403)
    body = request.get_json()
    stream = False
    tokenize = False
    if(is_present(body, "stream")): stream = body["stream"]
    if(is_present(body, "tokenize")): tokenize = body["tokenize"]
    postData = make_postData(body, chat=False, stream=stream)

    promptToken = []
    if (tokenize):
        tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"), data=json.dumps({"content": postData["prompt"]})).json()
        promptToken = tokenData["tokens"]

    if (not stream):
        data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData))
        print(data.json())
        resData = make_resData(data.json(), chat=False, promptToken=promptToken)
        return jsonify(resData)
    else:
        def generate():
            data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData), stream=True)
            time_now = int(time.time())
            for line in data.iter_lines():
                if line:
                    decoded_line = line.decode('utf-8')
                    resData = make_resData_stream(json.loads(decoded_line[6:]), chat=False, time_now=time_now)
                    yield 'data: {}\n'.format(json.dumps(resData))
        return Response(generate(), mimetype='text/event-stream')

if __name__ == '__main__':
    app.run(args.host, port=args.port)
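As a usage sketch (not part of the commit), once `api_like_OAI.py` is running on its default port 8081 alongside server.cpp, the chat route can be exercised with plain `requests`; the request fields map onto `make_postData()` above and the response shape comes from `make_resData()`:

```python
import requests

# Non-streaming chat completion against the OAI-like proxy defined above.
resp = requests.post(
    "http://127.0.0.1:8081/v1/chat/completions",
    json={
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Tell me a joke."},
        ],
        "max_tokens": 64,
        "temperature": 0.7,
    },
)
print(resp.json()["choices"][0]["message"]["content"])
```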
examples/server/completion.js.hpp ADDED
@@ -0,0 +1,375 @@
unsigned char completion_js[] = {
  0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x44,
  0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x20, 0x3d, 0x20, 0x7b, 0x0a,
  [auto-generated hex dump of examples/server/public/completion.js; the remaining lines of this 375-line byte array are truncated in this view]
|
369 |
+
0x2e, 0x74, 0x68, 0x65, 0x6e, 0x28, 0x72, 0x20, 0x3d, 0x3e, 0x20, 0x72,
|
370 |
+
0x2e, 0x6a, 0x73, 0x6f, 0x6e, 0x28, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20,
|
371 |
+
0x7d, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x67,
|
372 |
+
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65,
|
373 |
+
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x3b, 0x0a, 0x7d, 0x0a
|
374 |
+
};
|
375 |
+
unsigned int completion_js_len = 4462;
|
examples/server/deps.sh
ADDED
@@ -0,0 +1,18 @@
#!/bin/bash
# Download and update deps for binary

# get the directory of this script file
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
PUBLIC=$DIR/public

echo "download js bundle files"
curl https://npm.reversehttp.com/@preact/signals-core,@preact/signals,htm/preact,preact,preact/hooks > $PUBLIC/index.js
echo >> $PUBLIC/index.js # add newline

FILES=$(ls $PUBLIC)

for FILE in $FILES; do
  func=$(echo $FILE | tr '.' '_')
  echo "generate $FILE.hpp ($func)"
  xxd -n $func -i $PUBLIC/$FILE > $DIR/$FILE.hpp
done
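For orientation, the script above runs `xxd -n <name> -i` over each file in `public/`, which emits a C header containing an `unsigned char <name>[]` array plus an `unsigned int <name>_len` counter (as seen at the end of completion.js.hpp above). The sketch below shows one way such an embedded asset could be served over HTTP; it is illustrative only and not the actual server.cpp wiring. It assumes the cpp-httplib header (httplib.h) bundled with the server example and the generated completion.js.hpp are on the include path, and the route, port, and MIME type are placeholders.

// Minimal sketch (assumption, not the real server code): serve one embedded asset.
#include "httplib.h"            // cpp-httplib, vendored with the server example
#include "completion.js.hpp"    // provides completion_js[] and completion_js_len

int main() {
    httplib::Server svr;
    // Return the embedded JavaScript bundle from memory instead of reading a file.
    svr.Get("/completion.js", [](const httplib::Request &, httplib::Response & res) {
        res.set_content(reinterpret_cast<const char *>(completion_js),
                        completion_js_len, "application/javascript");
    });
    svr.listen("127.0.0.1", 8080);
    return 0;
}

The same pattern would apply to index_html and index_js; embedding the assets this way keeps the server a single self-contained binary, which is presumably why deps.sh regenerates the .hpp files rather than shipping the public/ directory.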
examples/server/index.html.hpp
ADDED
@@ -0,0 +1,899 @@
unsigned char index_html[] = {
[... xxd-generated byte dump of examples/server/public/index.html (the "llama.cpp - chat" web UI) ...]
|
637 |
+
0x6c, 0x61, 0x74, 0x65, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e,
|
638 |
+
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
639 |
+
0x20, 0x20, 0x20, 0x3c, 0x74, 0x65, 0x78, 0x74, 0x61, 0x72, 0x65, 0x61,
|
640 |
+
0x20, 0x69, 0x64, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74,
|
641 |
+
0x65, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x68, 0x69, 0x73,
|
642 |
+
0x74, 0x6f, 0x72, 0x79, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65,
|
643 |
+
0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x73,
|
644 |
+
0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
645 |
+
0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x65, 0x6d, 0x70,
|
646 |
+
0x6c, 0x61, 0x74, 0x65, 0x7d, 0x22, 0x20, 0x72, 0x6f, 0x77, 0x73, 0x3d,
|
647 |
+
0x31, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b,
|
648 |
+
0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f,
|
649 |
+
0x6e, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
650 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a,
|
651 |
+
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
652 |
+
0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
653 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61,
|
654 |
+
0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x74, 0x65, 0x6d,
|
655 |
+
0x70, 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x3e, 0x54, 0x65,
|
656 |
+
0x6d, 0x70, 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3c, 0x2f, 0x6c,
|
657 |
+
0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
658 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70,
|
659 |
+
0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x6e,
|
660 |
+
0x67, 0x65, 0x22, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70,
|
661 |
+
0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x20, 0x6d, 0x69, 0x6e,
|
662 |
+
0x3d, 0x22, 0x30, 0x2e, 0x30, 0x22, 0x20, 0x6d, 0x61, 0x78, 0x3d, 0x22,
|
663 |
+
0x31, 0x2e, 0x30, 0x22, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3d, 0x22, 0x30,
|
664 |
+
0x2e, 0x30, 0x31, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x74,
|
665 |
+
0x65, 0x6d, 0x70, 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x20,
|
666 |
+
0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x70, 0x61, 0x72,
|
667 |
+
0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x65,
|
668 |
+
0x6d, 0x70, 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x7d, 0x22, 0x20,
|
669 |
+
0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70,
|
670 |
+
0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x46, 0x6c,
|
671 |
+
0x6f, 0x61, 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
672 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73,
|
673 |
+
0x70, 0x61, 0x6e, 0x3e, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
|
674 |
+
0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x65,
|
675 |
+
0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61,
|
676 |
+
0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
677 |
+
0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x0a, 0x20,
|
678 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c,
|
679 |
+
0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
680 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65,
|
681 |
+
0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x6e, 0x50, 0x72, 0x65, 0x64,
|
682 |
+
0x69, 0x63, 0x74, 0x22, 0x3e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74,
|
683 |
+
0x69, 0x6f, 0x6e, 0x73, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e,
|
684 |
+
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
685 |
+
0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79,
|
686 |
+
0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x20, 0x69,
|
687 |
+
0x64, 0x3d, 0x22, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x22,
|
688 |
+
0x20, 0x6d, 0x69, 0x6e, 0x3d, 0x22, 0x31, 0x22, 0x20, 0x6d, 0x61, 0x78,
|
689 |
+
0x3d, 0x22, 0x32, 0x30, 0x34, 0x38, 0x22, 0x20, 0x73, 0x74, 0x65, 0x70,
|
690 |
+
0x3d, 0x22, 0x31, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x6e,
|
691 |
+
0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x22, 0x20, 0x76, 0x61,
|
692 |
+
0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d,
|
693 |
+
0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x5f, 0x70, 0x72,
|
694 |
+
0x65, 0x64, 0x69, 0x63, 0x74, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e,
|
695 |
+
0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
|
696 |
+
0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x7d,
|
697 |
+
0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
698 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e,
|
699 |
+
0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c,
|
700 |
+
0x75, 0x65, 0x2e, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74,
|
701 |
+
0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20,
|
702 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64,
|
703 |
+
0x69, 0x76, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
704 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20,
|
705 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
706 |
+
0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d,
|
707 |
+
0x22, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, 0x65, 0x6e, 0x61,
|
708 |
+
0x6c, 0x74, 0x79, 0x22, 0x3e, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x69, 0x7a,
|
709 |
+
0x65, 0x20, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x20, 0x73, 0x65, 0x71,
|
710 |
+
0x75, 0x65, 0x6e, 0x63, 0x65, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c,
|
711 |
+
0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
712 |
+
0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74,
|
713 |
+
0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x20,
|
714 |
+
0x69, 0x64, 0x3d, 0x22, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70,
|
715 |
+
0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x22, 0x20, 0x6d, 0x69, 0x6e, 0x3d,
|
716 |
+
0x22, 0x30, 0x2e, 0x30, 0x22, 0x20, 0x6d, 0x61, 0x78, 0x3d, 0x22, 0x32,
|
717 |
+
0x2e, 0x30, 0x22, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3d, 0x22, 0x30, 0x2e,
|
718 |
+
0x30, 0x31, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x72, 0x65,
|
719 |
+
0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79,
|
720 |
+
0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x70,
|
721 |
+
0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e,
|
722 |
+
0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c,
|
723 |
+
0x74, 0x79, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74,
|
724 |
+
0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72,
|
725 |
+
0x61, 0x6d, 0x73, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x7d, 0x20, 0x2f, 0x3e,
|
726 |
+
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
727 |
+
0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x24, 0x7b, 0x70,
|
728 |
+
0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e,
|
729 |
+
0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c,
|
730 |
+
0x74, 0x79, 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20,
|
731 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c,
|
732 |
+
0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
733 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e,
|
734 |
+
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
735 |
+
0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f,
|
736 |
+
0x72, 0x3d, 0x22, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x6c, 0x61,
|
737 |
+
0x73, 0x74, 0x5f, 0x6e, 0x22, 0x3e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x64,
|
738 |
+
0x65, 0x72, 0x20, 0x4e, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x20,
|
739 |
+
0x66, 0x6f, 0x72, 0x20, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65,
|
740 |
+
0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20,
|
741 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c,
|
742 |
+
0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22,
|
743 |
+
0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x72,
|
744 |
+
0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e,
|
745 |
+
0x22, 0x20, 0x6d, 0x69, 0x6e, 0x3d, 0x22, 0x30, 0x2e, 0x30, 0x22, 0x20,
|
746 |
+
0x6d, 0x61, 0x78, 0x3d, 0x22, 0x32, 0x30, 0x34, 0x38, 0x22, 0x20, 0x6e,
|
747 |
+
0x61, 0x6d, 0x65, 0x3d, 0x22, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f,
|
748 |
+
0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75,
|
749 |
+
0x65, 0x3d, 0x22, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e,
|
750 |
+
0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74,
|
751 |
+
0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x7d, 0x22, 0x20, 0x6f, 0x6e,
|
752 |
+
0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61,
|
753 |
+
0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x46, 0x6c, 0x6f, 0x61,
|
754 |
+
0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
755 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61,
|
756 |
+
0x6e, 0x3e, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76,
|
757 |
+
0x61, 0x6c, 0x75, 0x65, 0x2e, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f,
|
758 |
+
0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61,
|
759 |
+
0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
760 |
+
0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x0a, 0x20,
|
761 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66,
|
762 |
+
0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20,
|
763 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x6f, 0x72, 0x6d, 0x3e,
|
764 |
+
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20,
|
765 |
+
0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x70, 0x6f,
|
766 |
+
0x6f, 0x72, 0x20, 0x6d, 0x61, 0x6e, 0x73, 0x20, 0x6d, 0x61, 0x72, 0x6b,
|
767 |
+
0x64, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65,
|
768 |
+
0x6d, 0x65, 0x6e, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e,
|
769 |
+
0x73, 0x74, 0x20, 0x4d, 0x61, 0x72, 0x6b, 0x64, 0x6f, 0x77, 0x6e, 0x69,
|
770 |
+
0x73, 0x68, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
|
771 |
+
0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
772 |
+
0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6d, 0x64, 0x20, 0x3d, 0x20,
|
773 |
+
0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x74, 0x65, 0x78, 0x74, 0x0a,
|
774 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70,
|
775 |
+
0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5e, 0x23, 0x7b, 0x31, 0x2c, 0x36,
|
776 |
+
0x7d, 0x20, 0x28, 0x2e, 0x2a, 0x29, 0x24, 0x2f, 0x67, 0x69, 0x6d, 0x2c,
|
777 |
+
0x20, 0x27, 0x3c, 0x68, 0x33, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x68, 0x33,
|
778 |
+
0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
779 |
+
0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5c, 0x2a,
|
780 |
+
0x5c, 0x2a, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5c, 0x2a, 0x5c, 0x2a, 0x2f,
|
781 |
+
0x67, 0x2c, 0x20, 0x27, 0x3c, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e,
|
782 |
+
0x24, 0x31, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x27,
|
783 |
+
0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72,
|
784 |
+
0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5f, 0x5f, 0x28, 0x2e,
|
785 |
+
0x2a, 0x3f, 0x29, 0x5f, 0x5f, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x73,
|
786 |
+
0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x73, 0x74,
|
787 |
+
0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
788 |
+
0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65,
|
789 |
+
0x28, 0x2f, 0x5c, 0x2a, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5c, 0x2a, 0x2f,
|
790 |
+
0x67, 0x2c, 0x20, 0x27, 0x3c, 0x65, 0x6d, 0x3e, 0x24, 0x31, 0x3c, 0x2f,
|
791 |
+
0x65, 0x6d, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
792 |
+
0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f,
|
793 |
+
0x5f, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5f, 0x2f, 0x67, 0x2c, 0x20, 0x27,
|
794 |
+
0x3c, 0x65, 0x6d, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x65, 0x6d, 0x3e, 0x27,
|
795 |
+
0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72,
|
796 |
+
0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x60, 0x60, 0x60, 0x2e,
|
797 |
+
0x2a, 0x3f, 0x5c, 0x6e, 0x28, 0x5b, 0x5c, 0x73, 0x5c, 0x53, 0x5d, 0x2a,
|
798 |
+
0x3f, 0x29, 0x60, 0x60, 0x60, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x70,
|
799 |
+
0x72, 0x65, 0x3e, 0x3c, 0x63, 0x6f, 0x64, 0x65, 0x3e, 0x24, 0x31, 0x3c,
|
800 |
+
0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3e, 0x3c, 0x2f, 0x70, 0x72, 0x65, 0x3e,
|
801 |
+
0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e,
|
802 |
+
0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x60, 0x28, 0x2e,
|
803 |
+
0x2a, 0x3f, 0x29, 0x60, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x63, 0x6f,
|
804 |
+
0x64, 0x65, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3e,
|
805 |
+
0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e,
|
806 |
+
0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5c, 0x6e, 0x2f,
|
807 |
+
0x67, 0x69, 0x6d, 0x2c, 0x20, 0x27, 0x3c, 0x62, 0x72, 0x20, 0x2f, 0x3e,
|
808 |
+
0x27, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65,
|
809 |
+
0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x73,
|
810 |
+
0x70, 0x61, 0x6e, 0x20, 0x64, 0x61, 0x6e, 0x67, 0x65, 0x72, 0x6f, 0x75,
|
811 |
+
0x73, 0x6c, 0x79, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x6e, 0x65, 0x72, 0x48,
|
812 |
+
0x54, 0x4d, 0x4c, 0x3d, 0x24, 0x7b, 0x7b, 0x20, 0x5f, 0x5f, 0x68, 0x74,
|
813 |
+
0x6d, 0x6c, 0x3a, 0x20, 0x6d, 0x64, 0x20, 0x7d, 0x7d, 0x20, 0x2f, 0x3e,
|
814 |
+
0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20,
|
815 |
+
0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x4d, 0x6f, 0x64,
|
816 |
+
0x65, 0x6c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
817 |
+
0x49, 0x6e, 0x66, 0x6f, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61,
|
818 |
+
0x6d, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
819 |
+
0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x6c, 0x6c, 0x61, 0x6d,
|
820 |
+
0x61, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
821 |
+
0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
822 |
+
0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60,
|
823 |
+
0x3c, 0x73, 0x70, 0x61, 0x6e, 0x2f, 0x3e, 0x60, 0x0a, 0x20, 0x20, 0x20,
|
824 |
+
0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72,
|
825 |
+
0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a,
|
826 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61,
|
827 |
+
0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
828 |
+
0x20, 0x24, 0x7b, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74,
|
829 |
+
0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x64,
|
830 |
+
0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x6f,
|
831 |
+
0x6b, 0x65, 0x6e, 0x5f, 0x6d, 0x73, 0x2e, 0x74, 0x6f, 0x46, 0x69, 0x78,
|
832 |
+
0x65, 0x64, 0x28, 0x29, 0x7d, 0x6d, 0x73, 0x20, 0x70, 0x65, 0x72, 0x20,
|
833 |
+
0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2c, 0x20, 0x24, 0x7b, 0x6c, 0x6c, 0x61,
|
834 |
+
0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75,
|
835 |
+
0x65, 0x2e, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f,
|
836 |
+
0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x2e, 0x74,
|
837 |
+
0x6f, 0x46, 0x69, 0x78, 0x65, 0x64, 0x28, 0x32, 0x29, 0x7d, 0x20, 0x74,
|
838 |
+
0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x20, 0x70, 0x65, 0x72, 0x20, 0x73, 0x65,
|
839 |
+
0x63, 0x6f, 0x6e, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
840 |
+
0x20, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20,
|
841 |
+
0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a,
|
842 |
+
0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e,
|
843 |
+
0x20, 0x41, 0x70, 0x70, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, 0x20,
|
844 |
+
0x7b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74,
|
845 |
+
0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20,
|
846 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, 0x69,
|
847 |
+
0x64, 0x3d, 0x22, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
|
848 |
+
0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
849 |
+
0x20, 0x3c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x3e, 0x0a, 0x20, 0x20,
|
850 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x68,
|
851 |
+
0x31, 0x3e, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, 0x3c,
|
852 |
+
0x2f, 0x68, 0x31, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
853 |
+
0x20, 0x20, 0x20, 0x3c, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x3e,
|
854 |
+
0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
855 |
+
0x3c, 0x6d, 0x61, 0x69, 0x6e, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x63, 0x6f,
|
856 |
+
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
857 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 0x63,
|
858 |
+
0x68, 0x61, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x2e, 0x76,
|
859 |
+
0x61, 0x6c, 0x75, 0x65, 0x20, 0x3f, 0x20, 0x43, 0x68, 0x61, 0x74, 0x4c,
|
860 |
+
0x6f, 0x67, 0x20, 0x3a, 0x20, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46,
|
861 |
+
0x6f, 0x72, 0x6d, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
862 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x6d, 0x61, 0x69, 0x6e,
|
863 |
+
0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
864 |
+
0x20, 0x3c, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x64,
|
865 |
+
0x3d, 0x22, 0x77, 0x72, 0x69, 0x74, 0x65, 0x22, 0x3e, 0x0a, 0x20, 0x20,
|
866 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24,
|
867 |
+
0x7b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x70, 0x75,
|
868 |
+
0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
869 |
+
0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f,
|
870 |
+
0x6e, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
871 |
+
0x20, 0x20, 0x3c, 0x66, 0x6f, 0x6f, 0x74, 0x65, 0x72, 0x3e, 0x0a, 0x20,
|
872 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c,
|
873 |
+
0x70, 0x3e, 0x3c, 0x24, 0x7b, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x47, 0x65,
|
874 |
+
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f,
|
875 |
+
0x7d, 0x20, 0x2f, 0x3e, 0x3c, 0x2f, 0x70, 0x3e, 0x0a, 0x20, 0x20, 0x20,
|
876 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x70, 0x3e,
|
877 |
+
0x50, 0x6f, 0x77, 0x65, 0x72, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x3c,
|
878 |
+
0x61, 0x20, 0x68, 0x72, 0x65, 0x66, 0x3d, 0x22, 0x68, 0x74, 0x74, 0x70,
|
879 |
+
0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
|
880 |
+
0x6f, 0x6d, 0x2f, 0x67, 0x67, 0x65, 0x72, 0x67, 0x61, 0x6e, 0x6f, 0x76,
|
881 |
+
0x2f, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, 0x22, 0x3e,
|
882 |
+
0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, 0x3c, 0x2f, 0x61,
|
883 |
+
0x3e, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x3c, 0x61, 0x20, 0x68, 0x72, 0x65,
|
884 |
+
0x66, 0x3d, 0x22, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67,
|
885 |
+
0x67, 0x6d, 0x6c, 0x2e, 0x61, 0x69, 0x22, 0x3e, 0x67, 0x67, 0x6d, 0x6c,
|
886 |
+
0x2e, 0x61, 0x69, 0x3c, 0x2f, 0x61, 0x3e, 0x2e, 0x3c, 0x2f, 0x70, 0x3e,
|
887 |
+
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c,
|
888 |
+
0x2f, 0x66, 0x6f, 0x6f, 0x74, 0x65, 0x72, 0x3e, 0x0a, 0x20, 0x20, 0x20,
|
889 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a,
|
890 |
+
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20,
|
891 |
+
0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6e, 0x64,
|
892 |
+
0x65, 0x72, 0x28, 0x68, 0x28, 0x41, 0x70, 0x70, 0x29, 0x2c, 0x20, 0x64,
|
893 |
+
0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x62, 0x6f, 0x64, 0x79,
|
894 |
+
0x29, 0x3b, 0x0a, 0x20, 0x20, 0x3c, 0x2f, 0x73, 0x63, 0x72, 0x69, 0x70,
|
895 |
+
0x74, 0x3e, 0x0a, 0x3c, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x3e, 0x0a, 0x0a,
|
896 |
+
0x3c, 0x62, 0x6f, 0x64, 0x79, 0x3e, 0x0a, 0x3c, 0x2f, 0x62, 0x6f, 0x64,
|
897 |
+
0x79, 0x3e, 0x0a, 0x0a, 0x3c, 0x2f, 0x68, 0x74, 0x6d, 0x6c, 0x3e, 0x0a
|
898 |
+
};
|
899 |
+
unsigned int index_html_len = 10752;
|
examples/server/index.js.hpp
ADDED
The diff for this file is too large to render.
See raw diff
examples/server/public/completion.js
ADDED
@@ -0,0 +1,168 @@
+const paramDefaults = {
+  stream: true,
+  n_predict: 500,
+  temperature: 0.2,
+  stop: ["</s>"]
+};
+
+let generation_settings = null;
+
+
+// Completes the prompt as a generator. Recommended for most use cases.
+//
+// Example:
+//
+//    import { llama } from '/completion.js'
+//
+//    const request = llama("Tell me a joke", {n_predict: 800})
+//    for await (const chunk of request) {
+//      document.write(chunk.data.content)
+//    }
+//
+export async function* llama(prompt, params = {}, config = {}) {
+  let controller = config.controller;
+
+  if (!controller) {
+    controller = new AbortController();
+  }
+
+  const completionParams = { ...paramDefaults, ...params, prompt };
+
+  const response = await fetch("/completion", {
+    method: 'POST',
+    body: JSON.stringify(completionParams),
+    headers: {
+      'Connection': 'keep-alive',
+      'Content-Type': 'application/json',
+      'Accept': 'text/event-stream'
+    },
+    signal: controller.signal,
+  });
+
+  const reader = response.body.getReader();
+  const decoder = new TextDecoder();
+
+  let content = "";
+
+  try {
+    let cont = true;
+
+    while (cont) {
+      const result = await reader.read();
+      if (result.done) {
+        break;
+      }
+
+      // sse answers in the form multiple lines of: value\n with data always present as a key. in our case we
+      // mainly care about the data: key here, which we expect as json
+      const text = decoder.decode(result.value);
+
+      // parse all sse events and add them to result
+      const regex = /^(\S+):\s(.*)$/gm;
+      for (const match of text.matchAll(regex)) {
+        result[match[1]] = match[2]
+      }
+
+      // since we know this is llama.cpp, let's just decode the json in data
+      result.data = JSON.parse(result.data);
+      content += result.data.content;
+
+      // yield
+      yield result;
+
+      // if we got a stop token from server, we will break here
+      if (result.data.stop) {
+        if (result.data.generation_settings) {
+          generation_settings = result.data.generation_settings;
+        }
+        break;
+      }
+    }
+  } catch (e) {
+    if (e.name !== 'AbortError') {
+      console.error("llama error: ", e);
+    }
+    throw e;
+  }
+  finally {
+    controller.abort();
+  }
+
+  return content;
+}
+
+// Call llama, return an event target that you can subscribe to
+//
+// Example:
+//
+//    import { llamaEventTarget } from '/completion.js'
+//
+//    const conn = llamaEventTarget(prompt)
+//    conn.addEventListener("message", (chunk) => {
+//      document.write(chunk.detail.content)
+//    })
+//
+export const llamaEventTarget = (prompt, params = {}, config = {}) => {
+  const eventTarget = new EventTarget();
+  (async () => {
+    let content = "";
+    for await (const chunk of llama(prompt, params, config)) {
+      if (chunk.data) {
+        content += chunk.data.content;
+        eventTarget.dispatchEvent(new CustomEvent("message", { detail: chunk.data }));
+      }
+      if (chunk.data.generation_settings) {
+        eventTarget.dispatchEvent(new CustomEvent("generation_settings", { detail: chunk.data.generation_settings }));
+      }
+      if (chunk.data.timings) {
+        eventTarget.dispatchEvent(new CustomEvent("timings", { detail: chunk.data.timings }));
+      }
+    }
+    eventTarget.dispatchEvent(new CustomEvent("done", { detail: { content } }));
+  })();
+  return eventTarget;
+}
+
+// Call llama, return a promise that resolves to the completed text. This does not support streaming
+//
+// Example:
+//
+//     llamaPromise(prompt).then((content) => {
+//       document.write(content)
+//     })
+//
+//     or
+//
+//     const content = await llamaPromise(prompt)
+//     document.write(content)
+//
+export const llamaPromise = (prompt, params = {}, config = {}) => {
+  return new Promise(async (resolve, reject) => {
+    let content = "";
+    try {
+      for await (const chunk of llama(prompt, params, config)) {
+        content += chunk.data.content;
+      }
+      resolve(content);
+    } catch (error) {
+      reject(error);
+    }
+  });
+};
+
+/**
+ * (deprecated)
+ */
+export const llamaComplete = async (params, controller, callback) => {
+  for await (const chunk of llama(params.prompt, params, { controller })) {
+    callback(chunk);
+  }
+}
+
+// Get the model info from the server. This is useful for getting the context window and so on.
+export const llamaModelInfo = async () => {
+  if (!generation_settings) {
+    generation_settings = await fetch("/model.json").then(r => r.json());
+  }
+  return generation_settings;
+}
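A minimal usage sketch, added here for illustration and not part of the commit: it assumes the page is served by this server example, so that /completion, /model.json and the helpers above are reachable, and shows how a caller can pass its own AbortController through config.controller to stop a stream early. The prompt text and the n_predict value are arbitrary example values.

    // Illustrative only: uses the helpers exported by completion.js above.
    import { llama, llamaModelInfo } from '/completion.js';

    const controller = new AbortController();   // lets the caller cancel generation
    const settings = await llamaModelInfo();    // cached fetch of /model.json
    console.log('generation settings:', settings);

    let text = "";
    try {
      for await (const chunk of llama("Tell me a joke", { n_predict: 64 }, { controller })) {
        text += chunk.data.content;              // each SSE chunk carries a data.content piece
        if (text.length > 200) {
          controller.abort();                    // stop the stream early once enough text arrived
        }
      }
    } catch (e) {
      if (e.name !== 'AbortError') throw e;      // an early abort surfaces as AbortError
    }
    console.log(text);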
examples/server/public/index.html
ADDED
@@ -0,0 +1,380 @@
+<html>
+
+<head>
+  <meta charset="UTF-8">
+  <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1" />
+  <title>llama.cpp - chat</title>
+
+  <style>
+    body {
+      background-color: #fff;
+      color: #000;
+      font-family: system-ui;
+      font-size: 90%;
+    }
+
+    #container {
+      margin: 0em auto;
+      display: flex;
+      flex-direction: column;
+      justify-content: space-between;
+      height: 100%;
+    }
+
+    main {
+      margin: 3px;
+      display: flex;
+      flex-direction: column;
+      justify-content: space-between;
+      gap: 1em;
+
+      flex-grow: 1;
+      overflow-y: auto;
+
+      border: 1px solid #ccc;
+      border-radius: 5px;
+      padding: 0.5em;
+    }
+
+    body {
+      max-width: 600px;
+      min-width: 300px;
+      line-height: 1.2;
+      margin: 0 auto;
+      padding: 0 0.5em;
+    }
+
+    p {
+      overflow-wrap: break-word;
+      word-wrap: break-word;
+      hyphens: auto;
+      margin-top: 0.5em;
+      margin-bottom: 0.5em;
+    }
+
+    #write form {
+      margin: 1em 0 0 0;
+      display: flex;
+      flex-direction: column;
+      gap: 0.5em;
+      align-items: stretch;
+    }
+
+    .right {
+      display: flex;
+      flex-direction: row;
+      gap: 0.5em;
+      justify-content: flex-end;
+    }
+
+    fieldset {
+      border: none;
+      padding: 0;
+      margin: 0;
+    }
+
+    textarea {
+      padding: 5px;
+      flex-grow: 1;
+      width: 100%;
+    }
+
+    pre code {
+      display: block;
+      background-color: #222;
+      color: #ddd;
+    }
+    code {
+      font-family: monospace;
+      padding: 0.1em 0.3em;
+      border-radius: 3px;
+    }
+
+    fieldset label {
+      margin: 0.5em 0;
+      display: block;
+    }
+
+    header, footer {
+      text-align: center;
+    }
+
+    footer {
+      font-size: 80%;
+      color: #888;
+    }
+  </style>
+
+  <script type="module">
+    import {
+      html, h, signal, effect, computed, render, useSignal, useEffect, useRef
+    } from '/index.js';
+
+    import { llama } from '/completion.js';
+
+    const session = signal({
+      prompt: "This is a conversation between user and llama, a friendly chatbot. respond in simple markdown.",
+      template: "{{prompt}}\n\n{{history}}\n{{char}}:",
+      historyTemplate: "{{name}}: {{message}}",
+      transcript: [],
+      type: "chat",
+      char: "llama",
+      user: "User",
+    })
+
+    const params = signal({
+      n_predict: 400,
+      temperature: 0.7,
+      repeat_last_n: 256,
+      repeat_penalty: 1.18,
+      top_k: 40,
+      top_p: 0.5,
+    })
+
+    const llamaStats = signal(null)
+    const controller = signal(null)
+
+    const generating = computed(() => controller.value == null )
+    const chatStarted = computed(() => session.value.transcript.length > 0)
+
+    const transcriptUpdate = (transcript) => {
+      session.value = {
+        ...session.value,
+        transcript
+      }
+    }
+
+    // simple template replace
+    const template = (str, extraSettings) => {
+      let settings = session.value;
+      if (extraSettings) {
+        settings = { ...settings, ...extraSettings };
+      }
+      return String(str).replaceAll(/\{\{(.*?)\}\}/g, (_, key) => template(settings[key]));
+    }
+
+    // send message to server
+    const chat = async (msg) => {
+      if (controller.value) {
+        console.log('already running...');
+        return;
+      }
+      controller.value = new AbortController();
+
+      transcriptUpdate([...session.value.transcript, ["{{user}}", msg]])
+
+      const prompt = template(session.value.template, {
+        message: msg,
+        history: session.value.transcript.flatMap(([name, message]) => template(session.value.historyTemplate, {name, message})).join("\n"),
+      });
+
+      let currentMessage = '';
+      const history = session.value.transcript
+
+      const llamaParams = {
+        ...params.value,
+        stop: ["</s>", template("{{char}}:"), template("{{user}}:")],
+      }
+
+      for await (const chunk of llama(prompt, llamaParams, { controller: controller.value })) {
+        const data = chunk.data;
+        currentMessage += data.content;
+
+        // remove leading whitespace
+        currentMessage = currentMessage.replace(/^\s+/, "")
+
+        transcriptUpdate([...history, ["{{char}}", currentMessage]])
+
+        if (data.stop) {
+          console.log("Completion finished: '", currentMessage, "', summary: ", data);
+        }
+
+        if (data.timings) {
+          llamaStats.value = data.timings;
+        }
+      }
+
+      controller.value = null;
+    }
+
+    function MessageInput() {
+      const message = useSignal("")
+
+      const stop = (e) => {
+        e.preventDefault();
+        if (controller.value) {
+          controller.value.abort();
+          controller.value = null;
+        }
+      }
+
+      const reset = (e) => {
+        stop(e);
+        transcriptUpdate([]);
+      }
+
+      const submit = (e) => {
+        stop(e);
+        chat(message.value);
+        message.value = "";
+      }
+
+      const enterSubmits = (event) => {
+        if (event.which === 13 && !event.shiftKey) {
+          submit(event);
+        }
+      }
+
+      return html`
+        <form onsubmit=${submit}>
+          <div>
+            <textarea type="text" rows=2 onkeypress=${enterSubmits} value="${message}" oninput=${(e) => message.value = e.target.value} placeholder="Say something..."/>
+          </div>
+          <div class="right">
+            <button type="submit" disabled=${!generating.value} >Send</button>
+            <button onclick=${stop} disabled=${generating}>Stop</button>
+            <button onclick=${reset}>Reset</button>
+          </div>
+        </form>
+      `
+    }
+
+    const ChatLog = (props) => {
+      const messages = session.value.transcript;
+      const container = useRef(null)
+
+      useEffect(() => {
+        // scroll to bottom (if needed)
+        if (container.current && container.current.scrollHeight <= container.current.scrollTop + container.current.offsetHeight + 300) {
+          container.current.scrollTo(0, container.current.scrollHeight)
+        }
+      }, [messages])
+
+      const chatLine = ([user, msg]) => {
+        return html`<p key=${msg}><strong>${template(user)}:</strong> <${Markdownish} text=${template(msg)} /></p>`
+      };
+
+      return html`
+        <section id="chat" ref=${container}>
+          ${messages.flatMap(chatLine)}
+        </section>`;
+    };
+
+    const ConfigForm = (props) => {
+      const updateSession = (el) => session.value = { ...session.value, [el.target.name]: el.target.value }
+      const updateParams = (el) => params.value = { ...params.value, [el.target.name]: el.target.value }
+      const updateParamsFloat = (el) => params.value = { ...params.value, [el.target.name]: parseFloat(el.target.value) }
+
+      return html`
+        <form>
+          <fieldset>
+            <div>
+              <label for="prompt">Prompt</label>
+              <textarea type="text" name="prompt" value="${session.value.prompt}" rows=4 oninput=${updateSession}/>
+            </div>
+
+            <div>
+              <label for="user">User name</label>
+              <input type="text" name="user" value="${session.value.user}" oninput=${updateSession} />
+            </div>
+
+            <div>
+              <label for="bot">Bot name</label>
+              <input type="text" name="char" value="${session.value.char}" oninput=${updateSession} />
+            </div>
+
+            <div>
+              <label for="template">Prompt template</label>
+              <textarea id="template" name="template" value="${session.value.template}" rows=4 oninput=${updateSession}/>
+            </div>
+
+            <div>
+              <label for="template">Chat history template</label>
+              <textarea id="template" name="historyTemplate" value="${session.value.historyTemplate}" rows=1 oninput=${updateSession}/>
+            </div>
+
+            <div>
+              <label for="temperature">Temperature</label>
+              <input type="range" id="temperature" min="0.0" max="1.0" step="0.01" name="temperature" value="${params.value.temperature}" oninput=${updateParamsFloat} />
+              <span>${params.value.temperature}</span>
+            </div>
+
+            <div>
+              <label for="nPredict">Predictions</label>
+              <input type="range" id="nPredict" min="1" max="2048" step="1" name="n_predict" value="${params.value.n_predict}" oninput=${updateParamsFloat} />
+              <span>${params.value.n_predict}</span>
+            </div>
+
+            <div>
+              <label for="repeat_penalty">Penalize repeat sequence</label>
+              <input type="range" id="repeat_penalty" min="0.0" max="2.0" step="0.01" name="repeat_penalty" value="${params.value.repeat_penalty}" oninput=${updateParamsFloat} />
+              <span>${params.value.repeat_penalty}</span>
+            </div>
+
+            <div>
+              <label for="repeat_last_n">Consider N tokens for penalize</label>
+              <input type="range" id="repeat_last_n" min="0.0" max="2048" name="repeat_last_n" value="${params.value.repeat_last_n}" oninput=${updateParamsFloat} />
+              <span>${params.value.repeat_last_n}</span>
+            </div>
+
+          </fieldset>
+        </form>
+      `
+    }
+    // poor man's markdown replacement
+    const Markdownish = (params) => {
+      const md = params.text
+        .replace(/^#{1,6} (.*)$/gim, '<h3>$1</h3>')
+        .replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>')
+        .replace(/__(.*?)__/g, '<strong>$1</strong>')
+        .replace(/\*(.*?)\*/g, '<em>$1</em>')
+        .replace(/_(.*?)_/g, '<em>$1</em>')
+        .replace(/```.*?\n([\s\S]*?)```/g, '<pre><code>$1</code></pre>')
+        .replace(/`(.*?)`/g, '<code>$1</code>')
+        .replace(/\n/gim, '<br />');
+      return html`<span dangerouslySetInnerHTML=${{ __html: md }} />`;
+    };
+
+    const ModelGenerationInfo = (params) => {
+      if (!llamaStats.value) {
+        return html`<span/>`
+      }
+      return html`
+        <span>
+          ${llamaStats.value.predicted_per_token_ms.toFixed()}ms per token, ${llamaStats.value.predicted_per_second.toFixed(2)} tokens per second
+        </span>
+      `
+    }
+
+    function App(props) {
+
+      return html`
+        <div id="container">
+          <header>
+            <h1>llama.cpp</h1>
+          </header>
+
+          <main id="content">
+            <${chatStarted.value ? ChatLog : ConfigForm} />
+          </main>
+
+          <section id="write">
+            <${MessageInput} />
+          </section>
+
+          <footer>
+            <p><${ModelGenerationInfo} /></p>
+            <p>Powered by <a href="https://github.com/ggerganov/llama.cpp">llama.cpp</a> and <a href="https://ggml.ai">ggml.ai</a>.</p>
+          </footer>
+        </div>
+      `;
+    }
+
+    render(h(App), document.body);
+  </script>
+</head>
+
+<body>
+</body>
+
+</html>
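The chat prompt is assembled entirely client-side: template() above recursively expands {{...}} placeholders from the session signal, and chat() sends the result to llama() with stop strings built from the user and character names. A simplified, stand-alone sketch of that expansion, added for illustration only (the real helper reads its values from session.value and has no fallback for unknown keys):

    // Simplified re-implementation of the page's template() helper, for illustration.
    const fill = (str, vars) =>
      String(str).replaceAll(/\{\{(.*?)\}\}/g, (_, key) => fill(vars[key] ?? "", vars));

    const vars = {
      prompt: "This is a conversation between user and llama, a friendly chatbot. respond in simple markdown.",
      char: "llama",
      user: "User",
      // history is itself built from historyTemplate "{{name}}: {{message}}"
      history: "User: Hello!",
    };

    console.log(fill("{{prompt}}\n\n{{history}}\n{{char}}:", vars));
    // prints the system prompt, a blank line, "User: Hello!", and a trailing "llama:" cue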
examples/server/public/index.js
ADDED
@@ -0,0 +1 @@
function t(){throw new Error("Cycle detected")}function n(){if(o>1){o--;return}let t,n=!1;while(void 0!==_){let i=_;_=void 0;r++;while(void 0!==i){const _=i.o;i.o=void 0;i.f&=-3;if(!(8&i.f)&&c(i))try{i.c()}catch(e){if(!n){t=e;n=!0}}i=_}}r=0;o--;if(n)throw t}function e(t){if(o>0)return t();o++;try{return t()}finally{n()}}let i,_,o=0,r=0,u=0;function l(t){if(void 0===i)return;let n=t.n;if(void 0===n||n.t!==i){n={i:0,S:t,p:i.s,n:void 0,t:i,e:void 0,x:void 0,r:n};if(void 0!==i.s)i.s.n=n;i.s=n;t.n=n;if(32&i.f)t.S(n);return n}else if(-1===n.i){n.i=0;if(void 0!==n.n){n.n.p=n.p;if(void 0!==n.p)n.p.n=n.n;n.p=i.s;n.n=void 0;i.s.n=n;i.s=n}return n}}function f(t){this.v=t;this.i=0;this.n=void 0;this.t=void 0}f.prototype.h=function(){return!0};f.prototype.S=function(t){if(this.t!==t&&void 0===t.e){t.x=this.t;if(void 0!==this.t)this.t.e=t;this.t=t}};f.prototype.U=function(t){if(void 0!==this.t){const n=t.e,e=t.x;if(void 0!==n){n.x=e;t.e=void 0}if(void 0!==e){e.e=n;t.x=void 0}if(t===this.t)this.t=e}};f.prototype.subscribe=function(t){const n=this;return b((function(){const e=n.value,i=32&this.f;this.f&=-33;try{t(e)}finally{this.f|=i}}))};f.prototype.valueOf=function(){return this.value};f.prototype.toString=function(){return this.value+""};f.prototype.toJSON=function(){return this.value};f.prototype.peek=function(){return this.v};Object.defineProperty(f.prototype,"value",{get(){const t=l(this);if(void 0!==t)t.i=this.i;return this.v},set(e){if(i instanceof p)!function(){throw new Error("Computed cannot have side-effects")}();if(e!==this.v){if(r>100)t();this.v=e;this.i++;u++;o++;try{for(let t=this.t;void 0!==t;t=t.x)t.t.N()}finally{n()}}}});function s(t){return new f(t)}function c(t){for(let n=t.s;void 0!==n;n=n.n)if(n.S.i!==n.i||!n.S.h()||n.S.i!==n.i)return!0;return!1}function h(t){for(let n=t.s;void 0!==n;n=n.n){const e=n.S.n;if(void 0!==e)n.r=e;n.S.n=n;n.i=-1;if(void 0===n.n){t.s=n;break}}}function a(t){let n,e=t.s;while(void 0!==e){const t=e.p;if(-1===e.i){e.S.U(e);if(void 0!==t)t.n=e.n;if(void 0!==e.n)e.n.p=t}else n=e;e.S.n=e.r;if(void 0!==e.r)e.r=void 0;e=t}t.s=n}function p(t){f.call(this,void 0);this.x=t;this.s=void 0;this.g=u-1;this.f=4}(p.prototype=new f).h=function(){this.f&=-3;if(1&this.f)return!1;if(32==(36&this.f))return!0;this.f&=-5;if(this.g===u)return!0;this.g=u;this.f|=1;if(this.i>0&&!c(this)){this.f&=-2;return!0}const t=i;try{h(this);i=this;const t=this.x();if(16&this.f||this.v!==t||0===this.i){this.v=t;this.f&=-17;this.i++}}catch(t){this.v=t;this.f|=16;this.i++}i=t;a(this);this.f&=-2;return!0};p.prototype.S=function(t){if(void 0===this.t){this.f|=36;for(let t=this.s;void 0!==t;t=t.n)t.S.S(t)}f.prototype.S.call(this,t)};p.prototype.U=function(t){if(void 0!==this.t){f.prototype.U.call(this,t);if(void 0===this.t){this.f&=-33;for(let t=this.s;void 0!==t;t=t.n)t.S.U(t)}}};p.prototype.N=function(){if(!(2&this.f)){this.f|=6;for(let t=this.t;void 0!==t;t=t.x)t.t.N()}};p.prototype.peek=function(){if(!this.h())t();if(16&this.f)throw this.v;return this.v};Object.defineProperty(p.prototype,"value",{get(){if(1&this.f)t();const n=l(this);this.h();if(void 0!==n)n.i=this.i;if(16&this.f)throw this.v;return this.v}});function d(t){return new p(t)}function v(t){const e=t.u;t.u=void 0;if("function"==typeof e){o++;const _=i;i=void 0;try{e()}catch(n){t.f&=-2;t.f|=8;y(t);throw n}finally{i=_;n()}}}function y(t){for(let n=t.s;void 0!==n;n=n.n)n.S.U(n);t.x=void 0;t.s=void 0;v(t)}function m(t){if(i!==this)throw new Error("Out-of-order effect");a(this);i=t;this.f&=-2;if(8&this.f)y(this);n()}function 
g(t){this.x=t;this.u=void 0;this.s=void 0;this.o=void 0;this.f=32}g.prototype.c=function(){const t=this.S();try{if(8&this.f)return;if(void 0===this.x)return;const n=this.x();if("function"==typeof n)this.u=n}finally{t()}};g.prototype.S=function(){if(1&this.f)t();this.f|=1;this.f&=-9;v(this);h(this);o++;const n=i;i=this;return m.bind(this,n)};g.prototype.N=function(){if(!(2&this.f)){this.f|=2;this.o=_;_=this}};g.prototype.d=function(){this.f|=8;if(!(1&this.f))y(this)};function b(t){const n=new g(t);try{n.c()}catch(t){n.d();throw t}return n.d.bind(n)}var k,S,x,w,C,E,U,H,N,P={},D=[],$=/acit|ex(?:s|g|n|p|$)|rph|grid|ows|mnc|ntw|ine[ch]|zoo|^ord|itera/i,T=Array.isArray;function V(t,n){for(var e in n)t[e]=n[e];return t}function A(t){var n=t.parentNode;n&&n.removeChild(t)}function F(t,n,e){var i,_,o,r={};for(o in n)"key"==o?i=n[o]:"ref"==o?_=n[o]:r[o]=n[o];if(arguments.length>2&&(r.children=arguments.length>3?k.call(arguments,2):e),"function"==typeof t&&null!=t.defaultProps)for(o in t.defaultProps)void 0===r[o]&&(r[o]=t.defaultProps[o]);return M(t,r,i,_,null)}function M(t,n,e,i,_){var o={type:t,props:n,key:e,ref:i,__k:null,__:null,__b:0,__e:null,__d:void 0,__c:null,__h:null,constructor:void 0,__v:null==_?++x:_};return null==_&&null!=S.vnode&&S.vnode(o),o}function W(){return{current:null}}function O(t){return t.children}function L(t,n){this.props=t,this.context=n}function R(t,n){if(null==n)return t.__?R(t.__,t.__.__k.indexOf(t)+1):null;for(var e;n<t.__k.length;n++)if(null!=(e=t.__k[n])&&null!=e.__e)return e.__e;return"function"==typeof t.type?R(t):null}function I(t){var n,e;if(null!=(t=t.__)&&null!=t.__c){for(t.__e=t.__c.base=null,n=0;n<t.__k.length;n++)if(null!=(e=t.__k[n])&&null!=e.__e){t.__e=t.__c.base=e.__e;break}return I(t)}}function j(t){(!t.__d&&(t.__d=!0)&&C.push(t)&&!q.__r++||E!==S.debounceRendering)&&((E=S.debounceRendering)||U)(q)}function q(){var t,n,e,i,_,o,r,u;for(C.sort(H);t=C.shift();)t.__d&&(n=C.length,i=void 0,_=void 0,r=(o=(e=t).__v).__e,(u=e.__P)&&(i=[],(_=V({},o)).__v=o.__v+1,nt(u,o,_,e.__n,void 0!==u.ownerSVGElement,null!=o.__h?[r]:null,i,null==r?R(o):r,o.__h),et(i,o),o.__e!=r&&I(o)),C.length>n&&C.sort(H));q.__r=0}function B(t,n,e,i,_,o,r,u,l,f){var s,c,h,a,p,d,v,y=i&&i.__k||D,m=y.length;for(e.__k=[],s=0;s<n.length;s++)if(null!=(a=e.__k[s]=null==(a=n[s])||"boolean"==typeof a||"function"==typeof a?null:"string"==typeof a||"number"==typeof a||"bigint"==typeof a?M(null,a,null,null,a):T(a)?M(O,{children:a},null,null,null):a.__b>0?M(a.type,a.props,a.key,a.ref?a.ref:null,a.__v):a)){if(a.__=e,a.__b=e.__b+1,null===(h=y[s])||h&&a.key==h.key&&a.type===h.type)y[s]=void 0;else for(c=0;c<m;c++){if((h=y[c])&&a.key==h.key&&a.type===h.type){y[c]=void 0;break}h=null}nt(t,a,h=h||P,_,o,r,u,l,f),p=a.__e,(c=a.ref)&&h.ref!=c&&(v||(v=[]),h.ref&&v.push(h.ref,null,a),v.push(c,a.__c||p,a)),null!=p?(null==d&&(d=p),"function"==typeof a.type&&a.__k===h.__k?a.__d=l=G(a,l,t):l=J(t,a,h,y,p,l),"function"==typeof e.type&&(e.__d=l)):l&&h.__e==l&&l.parentNode!=t&&(l=R(h))}for(e.__e=d,s=m;s--;)null!=y[s]&&("function"==typeof e.type&&null!=y[s].__e&&y[s].__e==e.__d&&(e.__d=K(i).nextSibling),ot(y[s],y[s]));if(v)for(s=0;s<v.length;s++)_t(v[s],v[++s],v[++s])}function G(t,n,e){for(var i,_=t.__k,o=0;_&&o<_.length;o++)(i=_[o])&&(i.__=t,n="function"==typeof i.type?G(i,n,e):J(e,i,i,_,i.__e,n));return n}function z(t,n){return n=n||[],null==t||"boolean"==typeof t||(T(t)?t.some((function(t){z(t,n)})):n.push(t)),n}function J(t,n,e,i,_,o){var r,u,l;if(void 0!==n.__d)r=n.__d,n.__d=void 0;else 
if(null==e||_!=o||null==_.parentNode)t:if(null==o||o.parentNode!==t)t.appendChild(_),r=null;else{for(u=o,l=0;(u=u.nextSibling)&&l<i.length;l+=1)if(u==_)break t;t.insertBefore(_,o),r=o}return void 0!==r?r:_.nextSibling}function K(t){var n,e,i;if(null==t.type||"string"==typeof t.type)return t.__e;if(t.__k)for(n=t.__k.length-1;n>=0;n--)if((e=t.__k[n])&&(i=K(e)))return i;return null}function Q(t,n,e,i,_){var o;for(o in e)"children"===o||"key"===o||o in n||Y(t,o,null,e[o],i);for(o in n)_&&"function"!=typeof n[o]||"children"===o||"key"===o||"value"===o||"checked"===o||e[o]===n[o]||Y(t,o,n[o],e[o],i)}function X(t,n,e){"-"===n[0]?t.setProperty(n,null==e?"":e):t[n]=null==e?"":"number"!=typeof e||$.test(n)?e:e+"px"}function Y(t,n,e,i,_){var o;t:if("style"===n)if("string"==typeof e)t.style.cssText=e;else{if("string"==typeof i&&(t.style.cssText=i=""),i)for(n in i)e&&n in e||X(t.style,n,"");if(e)for(n in e)i&&e[n]===i[n]||X(t.style,n,e[n])}else if("o"===n[0]&&"n"===n[1])o=n!==(n=n.replace(/Capture$/,"")),n=n.toLowerCase()in t?n.toLowerCase().slice(2):n.slice(2),t.l||(t.l={}),t.l[n+o]=e,e?i||t.addEventListener(n,o?tt:Z,o):t.removeEventListener(n,o?tt:Z,o);else if("dangerouslySetInnerHTML"!==n){if(_)n=n.replace(/xlink(H|:h)/,"h").replace(/sName$/,"s");else if("width"!==n&&"height"!==n&&"href"!==n&&"list"!==n&&"form"!==n&&"tabIndex"!==n&&"download"!==n&&"rowSpan"!==n&&"colSpan"!==n&&n in t)try{t[n]=null==e?"":e;break t}catch(t){}"function"==typeof e||(null==e||!1===e&&"-"!==n[4]?t.removeAttribute(n):t.setAttribute(n,e))}}function Z(t){return this.l[t.type+!1](S.event?S.event(t):t)}function tt(t){return this.l[t.type+!0](S.event?S.event(t):t)}function nt(t,n,e,i,_,o,r,u,l){var f,s,c,h,a,p,d,v,y,m,g,b,k,x,w,C=n.type;if(void 0!==n.constructor)return null;null!=e.__h&&(l=e.__h,u=n.__e=e.__e,n.__h=null,o=[u]),(f=S.__b)&&f(n);try{t:if("function"==typeof C){if(v=n.props,y=(f=C.contextType)&&i[f.__c],m=f?y?y.props.value:f.__:i,e.__c?d=(s=n.__c=e.__c).__=s.__E:("prototype"in C&&C.prototype.render?n.__c=s=new C(v,m):(n.__c=s=new L(v,m),s.constructor=C,s.render=rt),y&&y.sub(s),s.props=v,s.state||(s.state={}),s.context=m,s.__n=i,c=s.__d=!0,s.__h=[],s._sb=[]),null==s.__s&&(s.__s=s.state),null!=C.getDerivedStateFromProps&&(s.__s==s.state&&(s.__s=V({},s.__s)),V(s.__s,C.getDerivedStateFromProps(v,s.__s))),h=s.props,a=s.state,s.__v=n,c)null==C.getDerivedStateFromProps&&null!=s.componentWillMount&&s.componentWillMount(),null!=s.componentDidMount&&s.__h.push(s.componentDidMount);else{if(null==C.getDerivedStateFromProps&&v!==h&&null!=s.componentWillReceiveProps&&s.componentWillReceiveProps(v,m),!s.__e&&null!=s.shouldComponentUpdate&&!1===s.shouldComponentUpdate(v,s.__s,m)||n.__v===e.__v){for(n.__v!==e.__v&&(s.props=v,s.state=s.__s,s.__d=!1),s.__e=!1,n.__e=e.__e,n.__k=e.__k,n.__k.forEach((function(t){t&&(t.__=n)})),g=0;g<s._sb.length;g++)s.__h.push(s._sb[g]);s._sb=[],s.__h.length&&r.push(s);break t}null!=s.componentWillUpdate&&s.componentWillUpdate(v,s.__s,m),null!=s.componentDidUpdate&&s.__h.push((function(){s.componentDidUpdate(h,a,p)}))}if(s.context=m,s.props=v,s.__P=t,b=S.__r,k=0,"prototype"in C&&C.prototype.render){for(s.state=s.__s,s.__d=!1,b&&b(n),f=s.render(s.props,s.state,s.context),x=0;x<s._sb.length;x++)s.__h.push(s._sb[x]);s._sb=[]}else 
do{s.__d=!1,b&&b(n),f=s.render(s.props,s.state,s.context),s.state=s.__s}while(s.__d&&++k<25);s.state=s.__s,null!=s.getChildContext&&(i=V(V({},i),s.getChildContext())),c||null==s.getSnapshotBeforeUpdate||(p=s.getSnapshotBeforeUpdate(h,a)),B(t,T(w=null!=f&&f.type===O&&null==f.key?f.props.children:f)?w:[w],n,e,i,_,o,r,u,l),s.base=n.__e,n.__h=null,s.__h.length&&r.push(s),d&&(s.__E=s.__=null),s.__e=!1}else null==o&&n.__v===e.__v?(n.__k=e.__k,n.__e=e.__e):n.__e=it(e.__e,n,e,i,_,o,r,l);(f=S.diffed)&&f(n)}catch(t){n.__v=null,(l||null!=o)&&(n.__e=u,n.__h=!!l,o[o.indexOf(u)]=null),S.__e(t,n,e)}}function et(t,n){S.__c&&S.__c(n,t),t.some((function(n){try{t=n.__h,n.__h=[],t.some((function(t){t.call(n)}))}catch(t){S.__e(t,n.__v)}}))}function it(t,n,e,i,_,o,r,u){var l,f,s,c=e.props,h=n.props,a=n.type,p=0;if("svg"===a&&(_=!0),null!=o)for(;p<o.length;p++)if((l=o[p])&&"setAttribute"in l==!!a&&(a?l.localName===a:3===l.nodeType)){t=l,o[p]=null;break}if(null==t){if(null===a)return document.createTextNode(h);t=_?document.createElementNS("http://www.w3.org/2000/svg",a):document.createElement(a,h.is&&h),o=null,u=!1}if(null===a)c===h||u&&t.data===h||(t.data=h);else{if(o=o&&k.call(t.childNodes),f=(c=e.props||P).dangerouslySetInnerHTML,s=h.dangerouslySetInnerHTML,!u){if(null!=o)for(c={},p=0;p<t.attributes.length;p++)c[t.attributes[p].name]=t.attributes[p].value;(s||f)&&(s&&(f&&s.__html==f.__html||s.__html===t.innerHTML)||(t.innerHTML=s&&s.__html||""))}if(Q(t,h,c,_,u),s)n.__k=[];else if(B(t,T(p=n.props.children)?p:[p],n,e,i,_&&"foreignObject"!==a,o,r,o?o[0]:e.__k&&R(e,0),u),null!=o)for(p=o.length;p--;)null!=o[p]&&A(o[p]);u||("value"in h&&void 0!==(p=h.value)&&(p!==t.value||"progress"===a&&!p||"option"===a&&p!==c.value)&&Y(t,"value",p,c.value,!1),"checked"in h&&void 0!==(p=h.checked)&&p!==t.checked&&Y(t,"checked",p,c.checked,!1))}return t}function _t(t,n,e){try{"function"==typeof t?t(n):t.current=n}catch(t){S.__e(t,e)}}function ot(t,n,e){var i,_;if(S.unmount&&S.unmount(t),(i=t.ref)&&(i.current&&i.current!==t.__e||_t(i,null,n)),null!=(i=t.__c)){if(i.componentWillUnmount)try{i.componentWillUnmount()}catch(t){S.__e(t,n)}i.base=i.__P=null,t.__c=void 0}if(i=t.__k)for(_=0;_<i.length;_++)i[_]&&ot(i[_],n,e||"function"!=typeof t.type);e||null==t.__e||A(t.__e),t.__=t.__e=t.__d=void 0}function rt(t,n,e){return this.constructor(t,e)}function ut(t,n,e){var i,_,o;S.__&&S.__(t,n),_=(i="function"==typeof e)?null:e&&e.__k||n.__k,o=[],nt(n,t=(!i&&e||n).__k=F(O,null,[t]),_||P,P,void 0!==n.ownerSVGElement,!i&&e?[e]:_?null:n.firstChild?k.call(n.childNodes):null,o,!i&&e?e:_?_.__e:n.firstChild,i),et(o,t)}function lt(t,n){ut(t,n,lt)}function ft(t,n,e){var i,_,o,r,u=V({},t.props);for(o in t.type&&t.type.defaultProps&&(r=t.type.defaultProps),n)"key"==o?i=n[o]:"ref"==o?_=n[o]:u[o]=void 0===n[o]&&void 0!==r?r[o]:n[o];return arguments.length>2&&(u.children=arguments.length>3?k.call(arguments,2):e),M(t.type,u,i||t.key,_||t.ref,null)}function st(t,n){var e={__c:n="__cC"+N++,__:t,Consumer:function(t,n){return t.children(n)},Provider:function(t){var e,i;return this.getChildContext||(e=[],(i={})[n]=this,this.getChildContext=function(){return i},this.shouldComponentUpdate=function(t){this.props.value!==t.value&&e.some((function(t){t.__e=!0,j(t)}))},this.sub=function(t){e.push(t);var n=t.componentWillUnmount;t.componentWillUnmount=function(){e.splice(e.indexOf(t),1),n&&n.call(t)}}),t.children}};return e.Provider.__=e.Consumer.contextType=e}k=D.slice,S={__e:function(t,n,e,i){for(var 
_,o,r;n=n.__;)if((_=n.__c)&&!_.__)try{if((o=_.constructor)&&null!=o.getDerivedStateFromError&&(_.setState(o.getDerivedStateFromError(t)),r=_.__d),null!=_.componentDidCatch&&(_.componentDidCatch(t,i||{}),r=_.__d),r)return _.__E=_}catch(n){t=n}throw t}},x=0,w=function(t){return null!=t&&void 0===t.constructor},L.prototype.setState=function(t,n){var e;e=null!=this.__s&&this.__s!==this.state?this.__s:this.__s=V({},this.state),"function"==typeof t&&(t=t(V({},e),this.props)),t&&V(e,t),null!=t&&this.__v&&(n&&this._sb.push(n),j(this))},L.prototype.forceUpdate=function(t){this.__v&&(this.__e=!0,t&&this.__h.push(t),j(this))},L.prototype.render=O,C=[],U="function"==typeof Promise?Promise.prototype.then.bind(Promise.resolve()):setTimeout,H=function(t,n){return t.__v.__b-n.__v.__b},q.__r=0,N=0;var ct,ht,at,pt,dt=0,vt=[],yt=[],mt=S.__b,gt=S.__r,bt=S.diffed,kt=S.__c,St=S.unmount;function xt(t,n){S.__h&&S.__h(ht,t,dt||n),dt=0;var e=ht.__H||(ht.__H={__:[],__h:[]});return t>=e.__.length&&e.__.push({__V:yt}),e.__[t]}function wt(t){return dt=1,Ct(It,t)}function Ct(t,n,e){var i=xt(ct++,2);if(i.t=t,!i.__c&&(i.__=[e?e(n):It(void 0,n),function(t){var n=i.__N?i.__N[0]:i.__[0],e=i.t(n,t);n!==e&&(i.__N=[e,i.__[1]],i.__c.setState({}))}],i.__c=ht,!ht.u)){var _=function(t,n,e){if(!i.__c.__H)return!0;var _=i.__c.__H.__.filter((function(t){return t.__c}));if(_.every((function(t){return!t.__N})))return!o||o.call(this,t,n,e);var r=!1;return _.forEach((function(t){if(t.__N){var n=t.__[0];t.__=t.__N,t.__N=void 0,n!==t.__[0]&&(r=!0)}})),!(!r&&i.__c.props===t)&&(!o||o.call(this,t,n,e))};ht.u=!0;var o=ht.shouldComponentUpdate,r=ht.componentWillUpdate;ht.componentWillUpdate=function(t,n,e){if(this.__e){var i=o;o=void 0,_(t,n,e),o=i}r&&r.call(this,t,n,e)},ht.shouldComponentUpdate=_}return i.__N||i.__}function Et(t,n){var e=xt(ct++,3);!S.__s&&Rt(e.__H,n)&&(e.__=t,e.i=n,ht.__H.__h.push(e))}function Ut(t,n){var e=xt(ct++,4);!S.__s&&Rt(e.__H,n)&&(e.__=t,e.i=n,ht.__h.push(e))}function Ht(t){return dt=5,Pt((function(){return{current:t}}),[])}function Nt(t,n,e){dt=6,Ut((function(){return"function"==typeof t?(t(n()),function(){return t(null)}):t?(t.current=n(),function(){return t.current=null}):void 0}),null==e?e:e.concat(t))}function Pt(t,n){var e=xt(ct++,7);return Rt(e.__H,n)?(e.__V=t(),e.i=n,e.__h=t,e.__V):e.__}function Dt(t,n){return dt=8,Pt((function(){return t}),n)}function $t(t){var n=ht.context[t.__c],e=xt(ct++,9);return e.c=t,n?(null==e.__&&(e.__=!0,n.sub(ht)),n.props.value):t.__}function Tt(t,n){S.useDebugValue&&S.useDebugValue(n?n(t):t)}function Vt(t){var n=xt(ct++,10),e=wt();return n.__=t,ht.componentDidCatch||(ht.componentDidCatch=function(t,i){n.__&&n.__(t,i),e[1](t)}),[e[0],function(){e[1](void 0)}]}function At(){var t=xt(ct++,11);if(!t.__){for(var n=ht.__v;null!==n&&!n.__m&&null!==n.__;)n=n.__;var e=n.__m||(n.__m=[0,0]);t.__="P"+e[0]+"-"+e[1]++}return t.__}function Ft(){for(var t;t=vt.shift();)if(t.__P&&t.__H)try{t.__H.__h.forEach(Ot),t.__H.__h.forEach(Lt),t.__H.__h=[]}catch(u){t.__H.__h=[],S.__e(u,t.__v)}}S.__b=function(t){ht=null,mt&&mt(t)},S.__r=function(t){gt&>(t),ct=0;var n=(ht=t.__c).__H;n&&(at===ht?(n.__h=[],ht.__h=[],n.__.forEach((function(t){t.__N&&(t.__=t.__N),t.__V=yt,t.__N=t.i=void 0}))):(n.__h.forEach(Ot),n.__h.forEach(Lt),n.__h=[],ct=0)),at=ht},S.diffed=function(t){bt&&bt(t);var n=t.__c;n&&n.__H&&(n.__H.__h.length&&(1!==vt.push(n)&&pt===S.requestAnimationFrame||((pt=S.requestAnimationFrame)||Wt)(Ft)),n.__H.__.forEach((function(t){t.i&&(t.__H=t.i),t.__V!==yt&&(t.__=t.__V),t.i=void 
0,t.__V=yt}))),at=ht=null},S.__c=function(t,n){n.some((function(t){try{t.__h.forEach(Ot),t.__h=t.__h.filter((function(t){return!t.__||Lt(t)}))}catch(s){n.some((function(t){t.__h&&(t.__h=[])})),n=[],S.__e(s,t.__v)}})),kt&&kt(t,n)},S.unmount=function(t){St&&St(t);var n,e=t.__c;e&&e.__H&&(e.__H.__.forEach((function(t){try{Ot(t)}catch(t){n=t}})),e.__H=void 0,n&&S.__e(n,e.__v))};var Mt="function"==typeof requestAnimationFrame;function Wt(t){var n,e=function(){clearTimeout(i),Mt&&cancelAnimationFrame(n),setTimeout(t)},i=setTimeout(e,100);Mt&&(n=requestAnimationFrame(e))}function Ot(t){var n=ht,e=t.__c;"function"==typeof e&&(t.__c=void 0,e()),ht=n}function Lt(t){var n=ht;t.__c=t.__(),ht=n}function Rt(t,n){return!t||t.length!==n.length||n.some((function(n,e){return n!==t[e]}))}function It(t,n){return"function"==typeof n?n(t):n}function jt(t,n){S[t]=n.bind(null,S[t]||(()=>{}))}let qt,Bt;function Gt(t){if(Bt)Bt();Bt=t&&t.S()}function zt({data:t}){const n=Kt(t);n.value=t;const e=Pt(()=>{let t=this.__v;while(t=t.__)if(t.__c){t.__c.__$f|=4;break}this.__$u.c=()=>{this.base.data=e.peek()};return d(()=>{let t=n.value.value;return 0===t?0:!0===t?"":t||""})},[]);return e.value}zt.displayName="_st";Object.defineProperties(f.prototype,{constructor:{configurable:!0,value:void 0},type:{configurable:!0,value:zt},props:{configurable:!0,get(){return{data:this}}},__b:{configurable:!0,value:1}});jt("__b",(t,n)=>{if("string"==typeof n.type){let t,e=n.props;for(let i in e){if("children"===i)continue;let _=e[i];if(_ instanceof f){if(!t)n.__np=t={};t[i]=_;e[i]=_.peek()}}}t(n)});jt("__r",(t,n)=>{Gt();let e,i=n.__c;if(i){i.__$f&=-2;e=i.__$u;if(void 0===e)i.__$u=e=function(t){let n;b((function(){n=this}));n.c=()=>{i.__$f|=1;i.setState({})};return n}()}qt=i;Gt(e);t(n)});jt("__e",(t,n,e,i)=>{Gt();qt=void 0;t(n,e,i)});jt("diffed",(t,n)=>{Gt();qt=void 0;let e;if("string"==typeof n.type&&(e=n.__e)){let t=n.__np,i=n.props;if(t){let n=e.U;if(n)for(let e in n){let i=n[e];if(void 0!==i&&!(e in t)){i.d();n[e]=void 0}}else{n={};e.U=n}for(let _ in t){let o=n[_],r=t[_];if(void 0===o){o=Jt(e,_,r,i);n[_]=o}else o.o(r,i)}}}t(n)});function Jt(t,n,e,i){const _=n in t&&void 0===t.ownerSVGElement,o=s(e);return{o:(t,n)=>{o.value=t;i=n},d:b(()=>{const e=o.value.value;if(i[n]!==e){i[n]=e;if(_)t[n]=e;else if(e)t.setAttribute(n,e);else t.removeAttribute(n)}})}}jt("unmount",(t,n)=>{if("string"==typeof n.type){let t=n.__e;if(t){const n=t.U;if(n){t.U=void 0;for(let t in n){let e=n[t];if(e)e.d()}}}}else{let t=n.__c;if(t){const n=t.__$u;if(n){t.__$u=void 0;n.d()}}}t(n)});jt("__h",(t,n,e,i)=>{if(i<3)n.__$f|=2;t(n,e,i)});L.prototype.shouldComponentUpdate=function(t,n){const e=this.__$u;if(!(e&&void 0!==e.s||4&this.__$f))return!0;if(3&this.__$f)return!0;for(let i in n)return!0;for(let i in t)if("__source"!==i&&t[i]!==this.props[i])return!0;for(let i in this.props)if(!(i in t))return!0;return!1};function Kt(t){return Pt(()=>s(t),[])}function Qt(t){const n=Ht(t);n.current=t;qt.__$f|=4;return Pt(()=>d(()=>n.current()),[])}function Xt(t){const n=Ht(t);n.current=t;Et(()=>b(()=>n.current()),[])}var Yt=function(t,n,e,i){var _;n[0]=0;for(var o=1;o<n.length;o++){var r=n[o++],u=n[o]?(n[0]|=r?1:2,e[n[o++]]):n[++o];3===r?i[0]=u:4===r?i[1]=Object.assign(i[1]||{},u):5===r?(i[1]=i[1]||{})[n[++o]]=u:6===r?i[1][n[++o]]+=u+"":r?(_=t.apply(u,Yt(t,u,e,["",null])),i.push(_),u[0]?n[0]|=2:(n[o-2]=0,n[o]=_)):i.push(u)}return i},Zt=new Map;function tn(t){var n=Zt.get(this);return n||(n=new Map,Zt.set(this,n)),(n=Yt(this,n.get(t)||(n.set(t,n=function(t){for(var 
n,e,i=1,_="",o="",r=[0],u=function(t){1===i&&(t||(_=_.replace(/^\s*\n\s*|\s*\n\s*$/g,"")))?r.push(0,t,_):3===i&&(t||_)?(r.push(3,t,_),i=2):2===i&&"..."===_&&t?r.push(4,t,0):2===i&&_&&!t?r.push(5,0,!0,_):i>=5&&((_||!t&&5===i)&&(r.push(i,0,_,e),i=6),t&&(r.push(i,t,0,e),i=6)),_=""},l=0;l<t.length;l++){l&&(1===i&&u(),u(l));for(var f=0;f<t[l].length;f++)n=t[l][f],1===i?"<"===n?(u(),r=[r],i=3):_+=n:4===i?"--"===_&&">"===n?(i=1,_=""):_=n+_[0]:o?n===o?o="":_+=n:'"'===n||"'"===n?o=n:">"===n?(u(),i=1):i&&("="===n?(i=5,e=_,_=""):"/"===n&&(i<5||">"===t[l][f+1])?(u(),3===i&&(r=r[0]),i=r,(r=r[0]).push(2,0,i),i=0):" "===n||"\t"===n||"\n"===n||"\r"===n?(u(),i=2):_+=n),3===i&&"!--"===_&&(i=4,r=r[0])}return u(),r}(t)),n),arguments,[])).length>1?n:n[0]}var nn=tn.bind(F);export{L as Component,O as Fragment,f as Signal,e as batch,ft as cloneElement,d as computed,st as createContext,F as createElement,W as createRef,b as effect,F as h,nn as html,lt as hydrate,w as isValidElement,S as options,ut as render,s as signal,z as toChildArray,Dt as useCallback,Qt as useComputed,$t as useContext,Tt as useDebugValue,Et as useEffect,Vt as useErrorBoundary,At as useId,Nt as useImperativeHandle,Ut as useLayoutEffect,Pt as useMemo,Ct as useReducer,Ht as useRef,Kt as useSignal,Xt as useSignalEffect,wt as useState};
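The export list at the end of the bundle above shows the public API this file exposes (Component, html, render, signal, the hook wrappers, and so on). A minimal usage sketch of that API, assuming the bundle is served by the server as /index.js; the import path, file name, and component are assumptions for illustration and are not part of the commit:

// counter.js — uses only names that appear in the bundle's export list: html, render, signal
import { html, render, signal } from '/index.js';

const count = signal(0);              // reactive value
function Counter() {
  // reading count.value inside the component re-renders it when the signal changes
  return html`<button onClick=${() => count.value++}>clicks: ${count.value}</button>`;
}
render(html`<${Counter} />`, document.body);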
|
examples/server/server.cpp
CHANGED
|
2 |
#include "llama.h"
|
3 |
#include "build-info.h"
|
4 |
|
|
|
|
|
5 |
#ifndef NDEBUG
|
6 |
// crash the server in debug mode, otherwise send an http 500 error
|
7 |
#define CPPHTTPLIB_NO_EXCEPTIONS 1
|
|
|
10 |
#include "httplib.h"
|
11 |
#include "json.hpp"
|
12 |
|
13 |
+
// auto generated files (update with ./deps.sh)
|
14 |
+
#include "index.html.hpp"
|
15 |
+
#include "index.js.hpp"
|
16 |
+
#include "completion.js.hpp"
|
17 |
+
|
18 |
#ifndef SERVER_VERBOSE
|
19 |
#define SERVER_VERBOSE 1
|
20 |
#endif
|
|
|
22 |
using namespace httplib;
|
23 |
using json = nlohmann::json;
|
24 |
|
25 |
+
struct server_params
|
26 |
+
{
|
27 |
std::string hostname = "127.0.0.1";
|
28 |
+
std::string public_path = "examples/server/public";
|
29 |
int32_t port = 8080;
|
30 |
int32_t read_timeout = 600;
|
31 |
int32_t write_timeout = 600;
|
32 |
};
|
33 |
|
34 |
+
// completion token output with probabilities
|
35 |
+
struct completion_token_output
|
36 |
+
{
|
37 |
+
struct token_prob
|
38 |
+
{
|
39 |
+
llama_token tok;
|
40 |
+
float prob;
|
41 |
+
};
|
42 |
+
|
43 |
+
std::vector<token_prob> probs;
|
44 |
+
llama_token tok;
|
45 |
+
};
|
46 |
+
|
47 |
+
static size_t common_part(const std::vector<llama_token> &a, const std::vector<llama_token> &b)
|
48 |
+
{
|
49 |
size_t i;
|
50 |
+
for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++)
|
51 |
+
{
|
52 |
+
}
|
53 |
return i;
|
54 |
}
|
55 |
|
56 |
+
enum stop_type
|
57 |
+
{
|
58 |
STOP_FULL,
|
59 |
STOP_PARTIAL,
|
60 |
};
|
61 |
|
62 |
+
static bool ends_with(const std::string &str, const std::string &suffix)
|
63 |
+
{
|
64 |
return str.size() >= suffix.size() &&
|
65 |
+
0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
|
66 |
}
|
67 |
|
68 |
+
static size_t find_partial_stop_string(const std::string &stop,
|
69 |
+
const std::string &text)
|
70 |
+
{
|
71 |
+
if (!text.empty() && !stop.empty())
|
72 |
+
{
|
73 |
const char text_last_char = text.back();
|
74 |
+
for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--)
|
75 |
+
{
|
76 |
+
if (stop[char_index] == text_last_char)
|
77 |
+
{
|
78 |
const std::string current_partial = stop.substr(0, char_index + 1);
|
79 |
+
if (ends_with(text, current_partial))
|
80 |
+
{
|
81 |
return text.size() - char_index - 1;
|
82 |
}
|
83 |
}
|
|
|
86 |
return std::string::npos;
|
87 |
}
|
88 |
|
89 |
+
template <class Iter>
|
90 |
+
static std::string tokens_to_str(llama_context *ctx, Iter begin, Iter end)
|
91 |
+
{
|
92 |
std::string ret;
|
93 |
+
for (; begin != end; ++begin)
|
94 |
+
{
|
95 |
ret += llama_token_to_str(ctx, *begin);
|
96 |
}
|
97 |
return ret;
|
98 |
}
|
99 |
|
100 |
+
static void server_log(const char *level, const char *function, int line,
|
101 |
+
const char *message, const nlohmann::ordered_json &extra)
|
102 |
+
{
|
103 |
+
nlohmann::ordered_json log{
|
104 |
+
{"timestamp", time(nullptr)},
|
105 |
+
{"level", level},
|
106 |
+
{"function", function},
|
107 |
+
{"line", line},
|
108 |
+
{"message", message},
|
109 |
};
|
110 |
|
111 |
+
if (!extra.empty())
|
112 |
+
{
|
113 |
log.merge_patch(extra);
|
114 |
}
|
115 |
|
|
|
118 |
fflush(stdout);
|
119 |
}
|
120 |
|
121 |
+
// format incomplete utf-8 multibyte character for output
|
122 |
+
static std::string tokens_to_output_formatted_string(const llama_context *ctx, const llama_token token)
|
123 |
+
{
|
124 |
+
std::string out = token == -1 ? "" : llama_token_to_str(ctx, token);
|
125 |
+
// if first bit is 1, meaning it's a partial character
|
126 |
+
if (out.size() > 0 && (out[0] & 0x80) == 0x80)
|
127 |
+
{
|
128 |
+
std::stringstream ss;
|
129 |
+
ss << std::hex << (out[0] & 0xff);
|
130 |
+
std::string res(ss.str());
|
131 |
+
out = "byte: \\x" + res;
|
132 |
+
}
|
133 |
+
return out;
|
134 |
+
}
|
135 |
+
|
136 |
+
// convert a vector of completion_token_output to json
|
137 |
+
static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> probs)
|
138 |
+
{
|
139 |
+
json out = json::array();
|
140 |
+
for (const auto &prob : probs)
|
141 |
+
{
|
142 |
+
json probs_for_token = json::array();
|
143 |
+
for (const auto &p : prob.probs)
|
144 |
+
{
|
145 |
+
std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
|
146 |
+
probs_for_token.push_back(json{
|
147 |
+
{"tok_str", tok_str},
|
148 |
+
{"prob", p.prob},
|
149 |
+
});
|
150 |
+
}
|
151 |
+
std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
|
152 |
+
out.push_back(json{
|
153 |
+
{"content", tok_str},
|
154 |
+
{"probs", probs_for_token},
|
155 |
+
});
|
156 |
+
}
|
157 |
+
return out;
|
158 |
+
}
|
159 |
+
|
160 |
static bool server_verbose = false;
|
161 |
|
162 |
#if SERVER_VERBOSE != 1
|
163 |
+
#define LOG_VERBOSE(MSG, ...)
|
164 |
#else
|
165 |
+
#define LOG_VERBOSE(MSG, ...) \
|
166 |
+
do \
|
167 |
+
{ \
|
168 |
+
if (server_verbose) \
|
169 |
+
{ \
|
170 |
server_log("VERBOSE", __func__, __LINE__, MSG, __VA_ARGS__); \
|
171 |
} \
|
172 |
+
} while (0)
|
173 |
#endif
|
174 |
|
175 |
#define LOG_ERROR(MSG, ...) server_log("ERROR", __func__, __LINE__, MSG, __VA_ARGS__)
|
176 |
#define LOG_WARNING(MSG, ...) server_log("WARNING", __func__, __LINE__, MSG, __VA_ARGS__)
|
177 |
#define LOG_INFO(MSG, ...) server_log("INFO", __func__, __LINE__, MSG, __VA_ARGS__)
|
178 |
|
179 |
+
struct llama_server_context
|
180 |
+
{
|
181 |
bool stream = false;
|
182 |
bool has_next_token = false;
|
183 |
std::string generated_text;
|
184 |
+
std::vector<completion_token_output> generated_token_probs;
|
185 |
|
186 |
+
size_t num_prompt_tokens = 0;
|
187 |
size_t num_tokens_predicted = 0;
|
188 |
size_t n_past = 0;
|
189 |
size_t n_remain = 0;
|
|
|
191 |
std::vector<llama_token> embd;
|
192 |
std::vector<llama_token> last_n_tokens;
|
193 |
|
194 |
+
llama_model *model = nullptr;
|
195 |
+
llama_context *ctx = nullptr;
|
196 |
gpt_params params;
|
197 |
|
198 |
bool truncated = false;
|
|
|
202 |
std::string stopping_word;
|
203 |
int32_t multibyte_pending = 0;
|
204 |
|
205 |
+
std::mutex mutex;
|
206 |
+
|
207 |
+
std::unique_lock<std::mutex> lock()
|
208 |
+
{
|
209 |
+
return std::unique_lock<std::mutex>(mutex);
|
210 |
+
}
|
211 |
+
|
212 |
+
~llama_server_context()
|
213 |
+
{
|
214 |
+
if (ctx)
|
215 |
+
{
|
216 |
llama_free(ctx);
|
217 |
ctx = nullptr;
|
218 |
}
|
219 |
+
if (model)
|
220 |
+
{
|
221 |
llama_free_model(model);
|
222 |
model = nullptr;
|
223 |
}
|
224 |
}
|
225 |
|
226 |
+
void rewind()
|
227 |
+
{
|
228 |
params.antiprompt.clear();
|
229 |
+
num_prompt_tokens = 0;
|
230 |
num_tokens_predicted = 0;
|
231 |
generated_text = "";
|
232 |
generated_text.reserve(params.n_ctx);
|
233 |
+
generated_token_probs.clear();
|
234 |
truncated = false;
|
235 |
stopped_eos = false;
|
236 |
stopped_word = false;
|
|
|
242 |
n_past = 0;
|
243 |
}
|
244 |
|
245 |
+
bool loadModel(const gpt_params ¶ms_)
|
246 |
+
{
|
247 |
params = params_;
|
248 |
std::tie(model, ctx) = llama_init_from_gpt_params(params);
|
249 |
+
if (model == nullptr)
|
250 |
+
{
|
251 |
+
LOG_ERROR("unable to load model", {{"model", params_.model}});
|
252 |
return false;
|
253 |
}
|
254 |
|
|
|
257 |
return true;
|
258 |
}
|
259 |
|
260 |
+
void loadPrompt()
|
261 |
+
{
|
262 |
params.prompt.insert(0, 1, ' '); // always add a first space
|
263 |
std::vector<llama_token> prompt_tokens = ::llama_tokenize(ctx, params.prompt, true);
|
264 |
+
num_prompt_tokens = prompt_tokens.size();
|
265 |
|
266 |
+
if (params.n_keep < 0)
|
267 |
+
{
|
268 |
+
params.n_keep = (int)num_prompt_tokens;
|
269 |
}
|
270 |
params.n_keep = std::min(params.n_ctx - 4, params.n_keep);
|
271 |
|
272 |
// if input prompt is too big, truncate like normal
|
273 |
+
if (num_prompt_tokens >= (size_t)params.n_ctx)
|
274 |
+
{
|
275 |
const int n_left = (params.n_ctx - params.n_keep) / 2;
|
276 |
std::vector<llama_token> new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + params.n_keep);
|
277 |
+
const int erased_blocks = (num_prompt_tokens - params.n_keep - n_left - 1) / n_left;
|
278 |
new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + params.n_keep + erased_blocks * n_left, prompt_tokens.end());
|
279 |
std::copy(prompt_tokens.end() - params.n_ctx, prompt_tokens.end(), last_n_tokens.begin());
|
280 |
|
281 |
LOG_VERBOSE("input truncated", {
|
282 |
+
{"n_ctx", params.n_ctx},
|
283 |
+
{"n_keep", params.n_keep},
|
284 |
+
{"n_left", n_left},
|
285 |
+
{"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
|
286 |
+
});
|
287 |
|
288 |
truncated = true;
|
289 |
prompt_tokens = new_tokens;
|
290 |
+
}
|
291 |
+
else
|
292 |
+
{
|
293 |
+
const size_t ps = num_prompt_tokens;
|
294 |
std::fill(last_n_tokens.begin(), last_n_tokens.end() - ps, 0);
|
295 |
std::copy(prompt_tokens.begin(), prompt_tokens.end(), last_n_tokens.end() - ps);
|
296 |
}
|
|
|
298 |
// compare the evaluated prompt with the new prompt
|
299 |
n_past = common_part(embd, prompt_tokens);
|
300 |
embd = prompt_tokens;
|
301 |
+
if (n_past == num_prompt_tokens)
|
302 |
+
{
|
303 |
// we have to evaluate at least 1 token to generate logits.
|
304 |
n_past--;
|
305 |
}
|
306 |
|
307 |
LOG_VERBOSE("prompt ingested", {
|
308 |
+
{"n_past", n_past},
|
309 |
+
{"cached", tokens_to_str(ctx, embd.cbegin(), embd.cbegin() + n_past)},
|
310 |
+
{"to_eval", tokens_to_str(ctx, embd.cbegin() + n_past, embd.cend())},
|
311 |
+
});
|
312 |
|
313 |
has_next_token = true;
|
314 |
}
|
315 |
|
316 |
+
void beginCompletion()
|
317 |
+
{
|
318 |
// number of tokens to keep when resetting context
|
319 |
n_remain = params.n_predict;
|
320 |
llama_set_rng_seed(ctx, params.seed);
|
321 |
}
|
322 |
|
323 |
+
completion_token_output nextToken()
|
324 |
+
{
|
325 |
+
completion_token_output result;
|
326 |
+
result.tok = -1;
|
327 |
|
328 |
+
if (embd.size() >= (size_t)params.n_ctx)
|
329 |
+
{
|
330 |
// Reset context
|
331 |
const int n_left = (params.n_ctx - params.n_keep) / 2;
|
332 |
|
|
|
336 |
n_past = params.n_keep;
|
337 |
truncated = true;
|
338 |
LOG_VERBOSE("input truncated", {
|
339 |
+
{"n_ctx", params.n_ctx},
|
340 |
+
{"n_keep", params.n_keep},
|
341 |
+
{"n_left", n_left},
|
342 |
+
{"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
|
343 |
+
});
|
344 |
}
|
345 |
|
346 |
+
while (n_past < embd.size())
|
347 |
+
{
|
348 |
int n_eval = (int)embd.size() - n_past;
|
349 |
+
if (n_eval > params.n_batch)
|
350 |
+
{
|
351 |
n_eval = params.n_batch;
|
352 |
}
|
353 |
+
if (llama_eval(ctx, &embd[n_past], n_eval, n_past, params.n_threads))
|
354 |
+
{
|
355 |
LOG_ERROR("failed to eval", {
|
356 |
+
{"n_eval", n_eval},
|
357 |
+
{"n_past", n_past},
|
358 |
+
{"n_threads", params.n_threads},
|
359 |
+
{"embd", tokens_to_str(ctx, embd.cbegin() + n_past, embd.cend())},
|
360 |
+
});
|
361 |
has_next_token = false;
|
362 |
return result;
|
363 |
}
|
364 |
n_past += n_eval;
|
365 |
}
|
366 |
|
367 |
+
if (params.n_predict == 0)
|
368 |
+
{
|
369 |
has_next_token = false;
|
370 |
+
result.tok = llama_token_eos();
|
371 |
+
return result;
|
372 |
}
|
373 |
|
374 |
// out of user input, sample next token
|
|
|
385 |
const float mirostat_tau = params.mirostat_tau;
|
386 |
const float mirostat_eta = params.mirostat_eta;
|
387 |
const bool penalize_nl = params.penalize_nl;
|
388 |
+
const int32_t n_probs = params.n_probs;
|
389 |
|
390 |
{
|
391 |
+
auto *logits = llama_get_logits(ctx);
|
392 |
auto n_vocab = llama_n_vocab(ctx);
|
393 |
|
394 |
// Apply params.logit_bias map
|
395 |
+
for (const auto &it : params.logit_bias)
|
396 |
+
{
|
397 |
logits[it.first] += it.second;
|
398 |
}
|
399 |
|
400 |
std::vector<llama_token_data> candidates;
|
401 |
candidates.reserve(n_vocab);
|
402 |
+
for (llama_token token_id = 0; token_id < n_vocab; token_id++)
|
403 |
+
{
|
404 |
+
candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
|
405 |
}
|
406 |
|
407 |
+
llama_token_data_array candidates_p = {candidates.data(), candidates.size(), false};
|
408 |
|
409 |
// Apply penalties
|
410 |
float nl_logit = logits[llama_token_nl()];
|
411 |
auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), params.n_ctx);
|
412 |
llama_sample_repetition_penalty(ctx, &candidates_p,
|
413 |
+
last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
|
414 |
+
last_n_repeat, repeat_penalty);
|
415 |
llama_sample_frequency_and_presence_penalties(ctx, &candidates_p,
|
416 |
+
last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
|
417 |
+
last_n_repeat, alpha_frequency, alpha_presence);
|
418 |
+
if (!penalize_nl)
|
419 |
+
{
|
420 |
logits[llama_token_nl()] = nl_logit;
|
421 |
}
|
422 |
|
423 |
+
if (temp <= 0)
|
424 |
+
{
|
425 |
// Greedy sampling
|
426 |
+
result.tok = llama_sample_token_greedy(ctx, &candidates_p);
|
427 |
+
if (n_probs > 0)
|
428 |
+
{
|
429 |
+
llama_sample_softmax(ctx, &candidates_p);
|
430 |
+
}
|
431 |
+
}
|
432 |
+
else
|
433 |
+
{
|
434 |
+
if (mirostat == 1)
|
435 |
+
{
|
436 |
static float mirostat_mu = 2.0f * mirostat_tau;
|
437 |
const int mirostat_m = 100;
|
438 |
llama_sample_temperature(ctx, &candidates_p, temp);
|
439 |
+
result.tok = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
|
440 |
+
}
|
441 |
+
else if (mirostat == 2)
|
442 |
+
{
|
443 |
static float mirostat_mu = 2.0f * mirostat_tau;
|
444 |
llama_sample_temperature(ctx, &candidates_p, temp);
|
445 |
+
result.tok = llama_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
|
446 |
+
}
|
447 |
+
else
|
448 |
+
{
|
449 |
// Temperature sampling
|
450 |
+
size_t min_keep = std::max(1, n_probs);
|
451 |
+
llama_sample_top_k(ctx, &candidates_p, top_k, min_keep);
|
452 |
+
llama_sample_tail_free(ctx, &candidates_p, tfs_z, min_keep);
|
453 |
+
llama_sample_typical(ctx, &candidates_p, typical_p, min_keep);
|
454 |
+
llama_sample_top_p(ctx, &candidates_p, top_p, min_keep);
|
455 |
llama_sample_temperature(ctx, &candidates_p, temp);
|
456 |
+
result.tok = llama_sample_token(ctx, &candidates_p);
|
457 |
}
|
458 |
}
|
459 |
+
|
460 |
+
for (size_t i = 0; i < std::min(candidates_p.size, (size_t)n_probs); ++i)
|
461 |
+
{
|
462 |
+
result.probs.push_back({candidates_p.data[i].id, candidates_p.data[i].p});
|
463 |
+
}
|
464 |
last_n_tokens.erase(last_n_tokens.begin());
|
465 |
+
last_n_tokens.push_back(result.tok);
|
466 |
num_tokens_predicted++;
|
467 |
}
|
468 |
|
469 |
// add it to the context
|
470 |
+
embd.push_back(result.tok);
|
|
|
471 |
// decrement remaining sampling budget
|
472 |
--n_remain;
|
473 |
|
474 |
+
if (!embd.empty() && embd.back() == llama_token_eos())
|
475 |
+
{
|
476 |
+
// stopping_word = llama_token_to_str(ctx, embd.back());
|
477 |
has_next_token = false;
|
478 |
stopped_eos = true;
|
479 |
LOG_VERBOSE("eos token found", {});
|
|
|
484 |
return result;
|
485 |
}
|
486 |
|
487 |
+
size_t findStoppingStrings(const std::string &text, const size_t last_token_size,
|
488 |
+
const stop_type type)
|
489 |
+
{
|
490 |
size_t stop_pos = std::string::npos;
|
491 |
+
for (const std::string &word : params.antiprompt)
|
492 |
+
{
|
493 |
size_t pos;
|
494 |
+
if (type == STOP_FULL)
|
495 |
+
{
|
496 |
const size_t tmp = word.size() + last_token_size;
|
497 |
const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0;
|
498 |
pos = text.find(word, from_pos);
|
499 |
}
|
500 |
+
else
|
501 |
+
{
|
502 |
pos = find_partial_stop_string(word, text);
|
503 |
}
|
504 |
if (pos != std::string::npos &&
|
505 |
+
(stop_pos == std::string::npos || pos < stop_pos))
|
506 |
+
{
|
507 |
+
if (type == STOP_FULL)
|
508 |
+
{
|
509 |
stopping_word = word;
|
510 |
stopped_word = true;
|
511 |
has_next_token = false;
|
|
|
516 |
return stop_pos;
|
517 |
}
|
518 |
|
519 |
+
completion_token_output doCompletion()
|
520 |
+
{
|
521 |
+
const completion_token_output token_with_probs = nextToken();
|
522 |
|
523 |
+
const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(ctx, token_with_probs.tok);
|
524 |
generated_text += token_text;
|
525 |
|
526 |
+
if (params.n_probs > 0)
|
527 |
+
{
|
528 |
+
generated_token_probs.push_back(token_with_probs);
|
529 |
+
}
|
530 |
+
|
531 |
+
if (multibyte_pending > 0)
|
532 |
+
{
|
533 |
multibyte_pending -= token_text.size();
|
534 |
+
}
|
535 |
+
else if (token_text.size() == 1)
|
536 |
+
{
|
537 |
const char c = token_text[0];
|
538 |
// 2-byte characters: 110xxxxx 10xxxxxx
|
539 |
+
if ((c & 0xE0) == 0xC0)
|
540 |
+
{
|
541 |
multibyte_pending = 1;
|
542 |
+
// 3-byte characters: 1110xxxx 10xxxxxx 10xxxxxx
|
543 |
+
}
|
544 |
+
else if ((c & 0xF0) == 0xE0)
|
545 |
+
{
|
546 |
multibyte_pending = 2;
|
547 |
+
// 4-byte characters: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
548 |
+
}
|
549 |
+
else if ((c & 0xF8) == 0xF0)
|
550 |
+
{
|
551 |
multibyte_pending = 3;
|
552 |
+
}
|
553 |
+
else
|
554 |
+
{
|
555 |
multibyte_pending = 0;
|
556 |
}
|
557 |
}
|
558 |
|
559 |
+
if (multibyte_pending > 0 && !has_next_token)
|
560 |
+
{
|
561 |
has_next_token = true;
|
562 |
n_remain++;
|
563 |
}
|
564 |
|
565 |
+
if (!has_next_token && n_remain == 0)
|
566 |
+
{
|
567 |
stopped_limit = true;
|
568 |
}
|
569 |
|
570 |
LOG_VERBOSE("next token", {
|
571 |
+
{"token", token_with_probs.tok},
|
572 |
+
{"token_text", tokens_to_output_formatted_string(ctx, token_with_probs.tok)},
|
573 |
+
{"has_next_token", has_next_token},
|
574 |
+
{"n_remain", n_remain},
|
575 |
+
{"num_tokens_predicted", num_tokens_predicted},
|
576 |
+
{"stopped_eos", stopped_eos},
|
577 |
+
{"stopped_word", stopped_word},
|
578 |
+
{"stopped_limit", stopped_limit},
|
579 |
+
{"stopping_word", stopping_word},
|
580 |
+
});
|
581 |
+
|
582 |
+
return token_with_probs;
|
583 |
}
|
584 |
|
585 |
+
std::vector<float> getEmbedding()
|
586 |
+
{
|
587 |
static const int n_embd = llama_n_embd(ctx);
|
588 |
+
if (!params.embedding)
|
589 |
+
{
|
590 |
LOG_WARNING("embedding disabled", {
|
591 |
+
{"params.embedding", params.embedding},
|
592 |
+
});
|
593 |
return std::vector<float>(n_embd, 0.0f);
|
594 |
}
|
595 |
+
const float *data = llama_get_embeddings(ctx);
|
596 |
std::vector<float> embedding(data, data + n_embd);
|
597 |
return embedding;
|
598 |
}
|
599 |
};
|
600 |
|
601 |
+
static void server_print_usage(const char *argv0, const gpt_params ¶ms,
|
602 |
+
const server_params &sparams)
|
603 |
+
{
|
604 |
fprintf(stderr, "usage: %s [options]\n", argv0);
|
605 |
fprintf(stderr, "\n");
|
606 |
fprintf(stderr, "options:\n");
|
|
|
611 |
fprintf(stderr, " -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch);
|
612 |
fprintf(stderr, " --memory-f32 use f32 instead of f16 for memory key+value (default: disabled)\n");
|
613 |
fprintf(stderr, " not recommended: doubles context memory required and no measurable increase in quality\n");
|
614 |
+
if (llama_mlock_supported())
|
615 |
+
{
|
616 |
fprintf(stderr, " --mlock force system to keep model in RAM rather than swapping or compressing\n");
|
617 |
}
|
618 |
+
if (llama_mmap_supported())
|
619 |
+
{
|
620 |
fprintf(stderr, " --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
|
621 |
}
|
622 |
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
|
|
|
636 |
fprintf(stderr, " --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n");
|
637 |
fprintf(stderr, " --host ip address to listen (default (default: %s)\n", sparams.hostname.c_str());
|
638 |
fprintf(stderr, " --port PORT port to listen (default (default: %d)\n", sparams.port);
|
639 |
+
fprintf(stderr, " --path PUBLIC_PATH path from which to serve static files (default %s)\n", sparams.public_path.c_str());
|
640 |
fprintf(stderr, " -to N, --timeout N server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
|
641 |
fprintf(stderr, " --embedding enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
|
642 |
fprintf(stderr, "\n");
|
643 |
}
|
static void server_params_parse(int argc, char **argv, server_params &sparams, gpt_params &params)
{
    gpt_params default_params;
    server_params default_sparams;
    std::string arg;
    bool invalid_param = false;

    for (int i = 1; i < argc; i++) {
        arg = argv[i];
        if (arg == "--port") {
            if (++i >= argc) { invalid_param = true; break; }
            sparams.port = std::stoi(argv[i]);
        } else if (arg == "--host") {
            if (++i >= argc) { invalid_param = true; break; }
            sparams.hostname = argv[i];
        } else if (arg == "--path") {
            if (++i >= argc) { invalid_param = true; break; }
            sparams.public_path = argv[i];
        } else if (arg == "--timeout" || arg == "-to") {
            if (++i >= argc) { invalid_param = true; break; }
            sparams.read_timeout = std::stoi(argv[i]);
            sparams.write_timeout = std::stoi(argv[i]);
        } else if (arg == "-m" || arg == "--model") {
            if (++i >= argc) { invalid_param = true; break; }
            params.model = argv[i];
        } else if (arg == "-a" || arg == "--alias") {
            if (++i >= argc) { invalid_param = true; break; }
            params.model_alias = argv[i];
        } else if (arg == "-h" || arg == "--help") {
            server_print_usage(argv[0], default_params, default_sparams);
            exit(0);
        } else if (arg == "-c" || arg == "--ctx-size" || arg == "--ctx_size") {
            if (++i >= argc) { invalid_param = true; break; }
            params.n_ctx = std::stoi(argv[i]);
        } else if (arg == "--memory-f32" || arg == "--memory_f32") {
            params.memory_f16 = false;
        } else if (arg == "--threads" || arg == "-t") {
            if (++i >= argc) { invalid_param = true; break; }
            params.n_threads = std::stoi(argv[i]);
        } else if (arg == "-b" || arg == "--batch-size") {
            if (++i >= argc) { invalid_param = true; break; }
            params.n_batch = std::stoi(argv[i]);
            params.n_batch = std::min(512, params.n_batch);
        } else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers") {
            if (++i >= argc) { invalid_param = true; break; }
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
            params.n_gpu_layers = std::stoi(argv[i]);
#else
            LOG_WARNING("Not compiled with GPU offload support, --n-gpu-layers option will be ignored. "
                        "See main README.md for information on enabling GPU BLAS support",
                        {{"n_gpu_layers", params.n_gpu_layers}});
#endif
        } else if (arg == "--tensor-split" || arg == "-ts") {
            if (++i >= argc) { invalid_param = true; break; }
            std::string arg_next = argv[i];

            // split string by , and /
            const std::regex regex{R"([,/]+)"};
            std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
            std::vector<std::string> split_arg{it, {}};
            GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);

            for (size_t i_device = 0; i_device < LLAMA_MAX_DEVICES; ++i_device) {
                if (i_device < split_arg.size()) {
                    params.tensor_split[i_device] = std::stof(split_arg[i_device]);
                } else {
                    params.tensor_split[i_device] = 0.0f;
                }
            }
        // ... (lines elided in the diff view; the next visible lines belong to a branch that warns when cuBLAS is unavailable)
            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n");
#endif // GGML_USE_CUBLAS
        } else if (arg == "--main-gpu" || arg == "-mg") {
            if (++i >= argc) { invalid_param = true; break; }
            // ... (lines elided in the diff view)
#else
            LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {});
#endif
        } else if (arg == "--lora") {
            if (++i >= argc) { invalid_param = true; break; }
            params.lora_adapter = argv[i];
            params.use_mmap = false;
        } else if (arg == "--lora-base") {
            if (++i >= argc) { invalid_param = true; break; }
            params.lora_base = argv[i];
        } else if (arg == "-v" || arg == "--verbose") {
#if SERVER_VERBOSE != 1
            LOG_WARNING("server.cpp is not built with verbose logging.", {});
#else
            server_verbose = true;
#endif
        } else if (arg == "--mlock") {
            params.use_mlock = true;
        } else if (arg == "--no-mmap") {
            params.use_mmap = false;
        } else if (arg == "--embedding") {
            params.embedding = true;
        } else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            server_print_usage(argv[0], default_params, default_sparams);
            exit(1);
        }
    }

    if (invalid_param) {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        server_print_usage(argv[0], default_params, default_sparams);
        exit(1);
    }
}
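For readers unfamiliar with the --tensor-split syntax handled above: the value is a list of per-GPU proportions separated by commas or slashes. The following minimal sketch shows the same regex tokenization in isolation; the "3,1" value and the two-device interpretation are illustrative, not taken from the commit.

#include <regex>
#include <string>
#include <vector>
#include <cstdio>

int main() {
    // illustrative input: roughly a 3:1 split of layers across two GPUs
    const std::string arg_next = "3,1";

    // same tokenization as server_params_parse: split on ',' and '/'
    const std::regex regex{R"([,/]+)"};
    std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
    const std::vector<std::string> split_arg{it, {}};

    for (size_t i = 0; i < split_arg.size(); ++i) {
        printf("device %zu gets weight %s\n", i, split_arg[i].c_str());
    }
    return 0;
}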
static json format_generation_settings(llama_server_context &llama)
{
    const auto eos_bias = llama.params.logit_bias.find(llama_token_eos());
    const bool ignore_eos = eos_bias != llama.params.logit_bias.end() &&
                            eos_bias->second < 0.0f && std::isinf(eos_bias->second);

    return json{
        {"n_ctx", llama.params.n_ctx},
        {"model", llama.params.model_alias},
        {"seed", llama.params.seed},
        {"temp", llama.params.temp},
        {"top_k", llama.params.top_k},
        {"top_p", llama.params.top_p},
        {"tfs_z", llama.params.tfs_z},
        {"typical_p", llama.params.typical_p},
        {"repeat_last_n", llama.params.repeat_last_n},
        {"repeat_penalty", llama.params.repeat_penalty},
        {"presence_penalty", llama.params.presence_penalty},
        {"frequency_penalty", llama.params.frequency_penalty},
        {"mirostat", llama.params.mirostat},
        {"mirostat_tau", llama.params.mirostat_tau},
        {"mirostat_eta", llama.params.mirostat_eta},
        {"penalize_nl", llama.params.penalize_nl},
        {"stop", llama.params.antiprompt},
        {"n_predict", llama.params.n_predict},
        {"n_keep", llama.params.n_keep},
        {"ignore_eos", ignore_eos},
        {"stream", llama.stream},
        {"logit_bias", llama.params.logit_bias},
        {"n_probs", llama.params.n_probs},
    };
}

static json format_embedding_response(llama_server_context &llama)
{
    return json{
        {"embedding", llama.getEmbedding()},
    };
}

static json format_timings(llama_server_context &llama)
{
    const auto timings = llama_get_timings(llama.ctx);

    assert(timings.n_eval == llama.num_tokens_predicted);

    return json{
        {"prompt_n", timings.n_eval},
        {"prompt_ms", timings.t_p_eval_ms},
        {"prompt_per_token_ms", timings.t_p_eval_ms / timings.n_p_eval},
        {"prompt_per_second", 1e3 / timings.t_p_eval_ms * timings.n_p_eval},

        {"predicted_n", timings.n_eval},
        {"predicted_ms", timings.t_eval_ms},
        {"predicted_per_token_ms", timings.t_eval_ms / timings.n_eval},
        {"predicted_per_second", 1e3 / timings.t_eval_ms * timings.n_eval},
    };
}

static json format_final_response(llama_server_context &llama, const std::string &content, const std::vector<completion_token_output> &probs)
{
    json res = json{
        {"content", content},
        {"stop", true},
        {"model", llama.params.model_alias},
        {"tokens_predicted", llama.num_tokens_predicted},
        {"tokens_evaluated", llama.num_prompt_tokens},
        {"generation_settings", format_generation_settings(llama)},
        {"prompt", llama.params.prompt},
        {"truncated", llama.truncated},
        {"stopped_eos", llama.stopped_eos},
        {"stopped_word", llama.stopped_word},
        {"stopped_limit", llama.stopped_limit},
        {"stopping_word", llama.stopping_word},
        {"tokens_cached", llama.n_past},
        {"tokens_predicted", llama.num_tokens_predicted},
        {"timings", format_timings(llama)},
    };

    if (llama.params.n_probs > 0) {
        res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
    }

    return res;
}

static json format_partial_response(llama_server_context &llama, const std::string &content, const std::vector<completion_token_output> &probs)
{
    json res = json{
        {"content", content},
        {"stop", false},
    };

    if (llama.params.n_probs > 0) {
        res["completion_probabilities"] = probs_vector_to_json(llama.ctx, probs);
    }

    return res;
}
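The throughput fields in format_timings are derived from the raw millisecond counters returned by llama_get_timings. A minimal sketch of the same arithmetic, with made-up numbers rather than measurements from the commit, may help when reading the JSON it produces.

#include <cstdio>

int main() {
    // illustrative values, not measurements from the commit
    const double t_eval_ms = 2500.0; // total generation time
    const int    n_eval    = 100;    // tokens generated

    const double per_token_ms = t_eval_ms / n_eval;        // 25 ms per token
    const double per_second   = 1e3 / t_eval_ms * n_eval;  // 40 tokens per second

    printf("predicted_per_token_ms: %.1f\n", per_token_ms);
    printf("predicted_per_second:   %.1f\n", per_second);
    return 0;
}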
static json format_tokenizer_response(const std::vector<llama_token> &tokens)
{
    return json{
        {"tokens", tokens}};
}

static void parse_options_completion(const json &body, llama_server_context &llama)
{
    gpt_params default_params;

    llama.stream = body.value("stream", false);
    // ... (the remaining sampling parameters are read from the body the same way; elided in the diff view)
    llama.params.n_keep = body.value("n_keep", default_params.n_keep);
    llama.params.seed = body.value("seed", default_params.seed);
    llama.params.prompt = body.value("prompt", default_params.prompt);
    llama.params.n_probs = body.value("n_probs", default_params.n_probs);

    llama.params.logit_bias.clear();
    if (body.value("ignore_eos", false)) {
        llama.params.logit_bias[llama_token_eos()] = -INFINITY;
    }

    const auto &logit_bias = body.find("logit_bias");
    if (logit_bias != body.end() && logit_bias->is_array()) {
        const int n_vocab = llama_n_vocab(llama.ctx);
        for (const auto &el : *logit_bias) {
            if (el.is_array() && el.size() == 2 && el[0].is_number_integer()) {
                llama_token tok = el[0].get<llama_token>();
                if (tok >= 0 && tok < n_vocab) {
                    if (el[1].is_number()) {
                        llama.params.logit_bias[tok] = el[1].get<float>();
                    } else if (el[1].is_boolean() && !el[1].get<bool>()) {
                        llama.params.logit_bias[tok] = -INFINITY;
                    }
                }
            }
        }
    }

    llama.params.antiprompt.clear();
    const auto &stop = body.find("stop");
    if (stop != body.end() && stop->is_array()) {
        for (const auto &word : *stop) {
            if (!word.empty()) {
                llama.params.antiprompt.push_back(word);
            }
        }
    }

    LOG_VERBOSE("completion parameters parsed", format_generation_settings(llama));
}
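As parsed above, logit_bias is an array of [token_id, bias] pairs where a bias of false bans the token outright, and stop is an array of strings. Below is a hedged sketch of a /completion request body that exercises these options; the token ids and values are purely illustrative and are not part of the commit.

#include <nlohmann/json.hpp>
#include <iostream>

using json = nlohmann::json;

int main() {
    // illustrative /completion request body; token ids 29871 and 2 are placeholders
    json body = {
        {"prompt", "Building a website can be done in 10 simple steps:"},
        {"n_predict", 128},
        {"stream", true},
        {"stop", {"\n### Human:"}},
        {"n_probs", 4},
        {"ignore_eos", false},
        {"logit_bias", json::array({
            json::array({29871, -0.5}),  // nudge a token down
            json::array({2, false})      // ban a token outright (maps to -INFINITY)
        })}
    };
    std::cout << body.dump(2) << std::endl;
    return 0;
}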
static void log_server_request(const Request &req, const Response &res)
{
    LOG_INFO("request", {
        {"remote_addr", req.remote_addr},
        {"remote_port", req.remote_port},
        {"status", res.status},
        {"method", req.method},
        {"path", req.path},
        {"params", req.params},
    });

    LOG_VERBOSE("request", {
        {"request", req.body},
        {"response", res.body},
    });
}
int main(int argc, char **argv)
{
    // own arguments required by this example
    gpt_params params;
    server_params sparams;
    // ... (lines elided in the diff view)

    server_params_parse(argc, argv, sparams, params);

    if (params.model_alias == "unknown") {
        params.model_alias = params.model;
    }

    llama_init_backend(params.numa);

    LOG_INFO("build info", {{"build", BUILD_NUMBER},
                            {"commit", BUILD_COMMIT}});

    LOG_INFO("system info", {
        {"n_threads", params.n_threads},
        {"total_threads", std::thread::hardware_concurrency()},
        {"system_info", llama_print_system_info()},
    });

    // load the model
    if (!llama.loadModel(params)) {
        return 1;
    }

    Server svr;

    svr.set_default_headers({{"Server", "llama.cpp"},
                             {"Access-Control-Allow-Origin", "*"},
                             {"Access-Control-Allow-Headers", "content-type"}});

    // this is only called if no index.html is found in the public --path
    svr.Get("/", [](const Request &, Response &res) {
        res.set_content(reinterpret_cast<const char*>(&index_html), index_html_len, "text/html");
        return false; });

    // this is only called if no index.js is found in the public --path
    svr.Get("/index.js", [](const Request &, Response &res) {
        res.set_content(reinterpret_cast<const char *>(&index_js), index_js_len, "text/javascript");
        return false; });

    // this is only called if no index.html is found in the public --path
    svr.Get("/completion.js", [](const Request &, Response &res) {
        res.set_content(reinterpret_cast<const char*>(&completion_js), completion_js_len, "application/javascript");
        return false; });

    svr.Post("/completion", [&llama](const Request &req, Response &res) {
        auto lock = llama.lock();

        llama.rewind();

        llama_reset_timings(llama.ctx);

        parse_options_completion(json::parse(req.body), llama);
        // ... (lines elided in the diff view: prompt loading and the header of the non-streaming branch)
            size_t stop_pos = std::string::npos;

            while (llama.has_next_token) {
                const completion_token_output token_with_probs = llama.doCompletion();
                const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(llama.ctx, token_with_probs.tok);

                stop_pos = llama.findStoppingStrings(llama.generated_text,
                    token_text.size(), STOP_FULL);
                // ... (stop-string trimming elided in the diff view)
                    llama.generated_text.end());
            }

            const json data = format_final_response(llama, llama.generated_text, llama.generated_token_probs);

            llama_print_timings(llama.ctx);
            // ... (response write elided in the diff view)
        } else {
            const auto chunked_content_provider = [&](size_t, DataSink & sink) {
                size_t sent_count = 0;
                size_t sent_token_probs_index = 0;

                while (llama.has_next_token) {
                    const completion_token_output token_with_probs = llama.doCompletion();
                    const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(llama.ctx, token_with_probs.tok);
                    if (llama.multibyte_pending > 0) {
                        continue;
                    }
                    // ... (stop-string search and send-position bookkeeping elided in the diff view)
                    const std::string to_send = llama.generated_text.substr(pos, stop_pos);
                    sent_count += to_send.size();

                    std::vector<completion_token_output> probs_output = {};

                    if (llama.params.n_probs > 0) {
                        const std::vector<llama_token> to_send_toks = llama_tokenize(llama.ctx, to_send, false);
                        size_t probs_pos = std::min(sent_token_probs_index, llama.generated_token_probs.size());
                        size_t probs_stop_pos = std::min(sent_token_probs_index + to_send_toks.size(), llama.generated_token_probs.size());
                        if (probs_pos < probs_stop_pos) {
                            probs_output = std::vector<completion_token_output>(llama.generated_token_probs.begin() + probs_pos, llama.generated_token_probs.begin() + probs_stop_pos);
                        }
                        sent_token_probs_index = probs_stop_pos;
                    }

                    const json data = llama.has_next_token
                                          ? format_partial_response(llama, to_send, probs_output)
                                          // Generation is done, send extra information.
                                          : format_final_response(llama, to_send, llama.generated_token_probs);

                    const std::string str =
                        "data: " +
                    // ... (server-sent-event serialization and sink writes elided in the diff view)
                }
                return true;
            };
            res.set_chunked_content_provider("text/event-stream", chunked_content_provider);
        } });

    svr.Get("/model.json", [&llama](const Request &, Response &res) {
        const json data = format_generation_settings(llama);
        return res.set_content(data.dump(), "application/json"); });

    svr.Options(R"(/.*)", [](const Request &, Response &res)
                { return res.set_content("", "application/json"); });

    svr.Post("/tokenize", [&llama](const Request &req, Response &res) {
        auto lock = llama.lock();

        const json body = json::parse(req.body);
        const std::string content = body.value("content", "");
        const std::vector<llama_token> tokens = llama_tokenize(llama.ctx, content, false);
        const json data = format_tokenizer_response(tokens);
        return res.set_content(data.dump(), "application/json"); });

    svr.Post("/embedding", [&llama](const Request &req, Response &res) {
        auto lock = llama.lock();

        const json body = json::parse(req.body);

        llama.rewind();
        // ... (prompt setup elided in the diff view)
        llama.doCompletion();

        const json data = format_embedding_response(llama);
        return res.set_content(data.dump(), "application/json"); });

    svr.set_logger(log_server_request);

    svr.set_exception_handler([](const Request &, Response &res, std::exception_ptr ep) {
        const auto * fmt = "500 Internal Server Error\n%s";
        char buf[BUFSIZ];
        try {
            // ... (rethrow and catch clauses elided in the diff view)
            snprintf(buf, sizeof(buf), fmt, "Unknown Exception");
        }
        res.set_content(buf, "text/plain");
        res.status = 500; });

    svr.set_error_handler([](const Request &, Response &res) {
        res.set_content("File Not Found", "text/plain");
        res.status = 404; });

    // set timeouts and change hostname and port
    svr.set_read_timeout(sparams.read_timeout);
    svr.set_write_timeout(sparams.write_timeout);

    if (!svr.bind_to_port(sparams.hostname, sparams.port)) {
        fprintf(stderr, "\ncouldn't bind to server socket: hostname=%s port=%d\n\n", sparams.hostname.c_str(), sparams.port);
        return 1;
    }

    // Set the base directory for serving static files
    svr.set_base_dir(sparams.public_path);

    // to make it ctrl+clickable:
    fprintf(stdout, "\nllama server listening at http://%s:%d\n\n", sparams.hostname.c_str(), sparams.port);

    LOG_INFO("HTTP server listening", {
        {"hostname", sparams.hostname},
        {"port", sparams.port},
    });

    if (!svr.listen_after_bind()) {
        return 1;
    }

    // (the remainder of main() is not shown in this extract)
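Putting the endpoints above together, here is a small client sketch built on the same cpp-httplib and nlohmann::json libraries the server itself uses. The host, port and prompt are illustrative assumptions; this snippet is not part of the commit.

#include <httplib.h>
#include <nlohmann/json.hpp>
#include <iostream>

using json = nlohmann::json;

int main() {
    httplib::Client cli("localhost", 8080);

    // non-streaming completion request
    json req = {{"prompt", "Hello"}, {"n_predict", 16}, {"stream", false}};
    if (auto res = cli.Post("/completion", req.dump(), "application/json")) {
        const json out = json::parse(res->body);
        std::cout << out.value("content", "") << std::endl;
    }

    // tokenize the same text
    json tok_req = {{"content", "Hello"}};
    if (auto res = cli.Post("/tokenize", tok_req.dump(), "application/json")) {
        std::cout << res->body << std::endl; // {"tokens": [...]}
    }
    return 0;
}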
expose.cpp
CHANGED
@@ -47,14 +47,14 @@ extern "C"
     }

     //first digit is whether configured, second is platform, third is devices
-    int ...                                                       (old line truncated in the diff view)
-    std::string usingclblast = "GGML_OPENCL_CONFIGURED="+std::to_string( ...
+    int cl_parseinfo = inputs.clblast_info;
+    std::string usingclblast = "GGML_OPENCL_CONFIGURED="+std::to_string(cl_parseinfo>0?1:0);
     putenv((char*)usingclblast.c_str());

-    int platform = ...
-    int devices = ...
+    cl_parseinfo = cl_parseinfo%100; //keep last 2 digits
+    int platform = cl_parseinfo/10;
+    int devices = cl_parseinfo%10;
     platformenv = "GGML_OPENCL_PLATFORM="+std::to_string(platform);
     deviceenv = "GGML_OPENCL_DEVICE="+std::to_string(devices);
     putenv((char*)platformenv.c_str());
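The clblast_info field is a small packed integer: any positive value marks OpenCL as configured, and the last two decimal digits carry the platform and device indices. A minimal sketch of the same decode, with an illustrative value:

#include <cstdio>

int main() {
    // illustrative packed value: configured, platform 0, device 2
    int cl_parseinfo = 102;

    const int configured = cl_parseinfo > 0 ? 1 : 0;
    cl_parseinfo = cl_parseinfo % 100; // keep last 2 digits
    const int platform = cl_parseinfo / 10;
    const int device   = cl_parseinfo % 10;

    printf("GGML_OPENCL_CONFIGURED=%d\n", configured); // 1
    printf("GGML_OPENCL_PLATFORM=%d\n", platform);     // 0
    printf("GGML_OPENCL_DEVICE=%d\n", device);         // 2
    return 0;
}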
expose.h
CHANGED
@@ -1,6 +1,19 @@
 #pragma once

 const int stop_token_max = 10;
+const int ban_token_max = 10;
+// match kobold's sampler list and order
+enum samplers
+{
+    KCPP_SAMPLER_TOP_K=0,
+    KCPP_SAMPLER_TOP_A=1,
+    KCPP_SAMPLER_TOP_P=2,
+    KCPP_SAMPLER_TFS=3,
+    KCPP_SAMPLER_TYP=4,
+    KCPP_SAMPLER_TEMP=5,
+    KCPP_SAMPLER_REP_PEN=6,
+    KCPP_SAMPLER_MAX
+};
 struct load_model_inputs
 {
     const int threads;
@@ -18,10 +31,12 @@ struct load_model_inputs
     const bool use_smartcontext;
     const bool unban_tokens;
     const int clblast_info = 0;
+    const int cublas_info = 0;
     const int blasbatchsize = 512;
     const int debugmode = 0;
     const int forceversion = 0;
     const int gpulayers = 0;
+    const char * banned_tokens[ban_token_max];
 };
 struct generation_inputs
 {
@@ -40,6 +55,8 @@ struct generation_inputs
     const int mirostat = 0;
     const float mirostat_eta;
     const float mirostat_tau;
+    const samplers sampler_order[KCPP_SAMPLER_MAX];
+    const int sampler_len;
     const char * stop_sequence[stop_token_max];
     const bool stream_sse;
 };
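The new sampler_order array lets the caller rearrange the stages defined in the samplers enum, and sampler_len says how many entries are meaningful. A sketch of filling it follows; the particular order shown is illustrative only, not a documented default.

#include <cstddef>

// mirrors the enum added to expose.h
enum samplers {
    KCPP_SAMPLER_TOP_K = 0,
    KCPP_SAMPLER_TOP_A = 1,
    KCPP_SAMPLER_TOP_P = 2,
    KCPP_SAMPLER_TFS = 3,
    KCPP_SAMPLER_TYP = 4,
    KCPP_SAMPLER_TEMP = 5,
    KCPP_SAMPLER_REP_PEN = 6,
    KCPP_SAMPLER_MAX
};

int main() {
    // illustrative order: repetition penalty first, temperature last
    const samplers sampler_order[KCPP_SAMPLER_MAX] = {
        KCPP_SAMPLER_REP_PEN, KCPP_SAMPLER_TOP_K, KCPP_SAMPLER_TOP_A,
        KCPP_SAMPLER_TFS,     KCPP_SAMPLER_TYP,   KCPP_SAMPLER_TOP_P,
        KCPP_SAMPLER_TEMP
    };
    const int sampler_len = KCPP_SAMPLER_MAX; // all seven stages are used

    (void)sampler_order;
    (void)sampler_len;
    return 0;
}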
ggml-cuda.cu
CHANGED
@@ -70,9 +70,11 @@ typedef void (*ggml_cuda_op_t)(
|
|
70 |
|
71 |
// QK = number of values after dequantization
|
72 |
// QR = QK / number of values before dequantization
|
|
|
73 |
|
74 |
#define QK4_0 32
|
75 |
#define QR4_0 2
|
|
|
76 |
typedef struct {
|
77 |
half d; // delta
|
78 |
uint8_t qs[QK4_0 / 2]; // nibbles / quants
|
@@ -81,6 +83,7 @@ static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0
|
|
81 |
|
82 |
#define QK4_1 32
|
83 |
#define QR4_1 2
|
|
|
84 |
typedef struct {
|
85 |
half d; // delta
|
86 |
half m; // min
|
@@ -90,6 +93,7 @@ static_assert(sizeof(block_q4_1) == sizeof(ggml_fp16_t) * 2 + QK4_1 / 2, "wrong
|
|
90 |
|
91 |
#define QK5_0 32
|
92 |
#define QR5_0 2
|
|
|
93 |
typedef struct {
|
94 |
half d; // delta
|
95 |
uint8_t qh[4]; // 5-th bit of quants
|
@@ -99,6 +103,7 @@ static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5
|
|
99 |
|
100 |
#define QK5_1 32
|
101 |
#define QR5_1 2
|
|
|
102 |
typedef struct {
|
103 |
half d; // delta
|
104 |
half m; // min
|
@@ -109,12 +114,25 @@ static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) +
|
|
109 |
|
110 |
#define QK8_0 32
|
111 |
#define QR8_0 1
|
|
|
112 |
typedef struct {
|
113 |
half d; // delta
|
114 |
int8_t qs[QK8_0]; // quants
|
115 |
} block_q8_0;
|
116 |
static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
|
117 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
118 |
//================================= k-quants
|
119 |
|
120 |
#ifdef GGML_QKK_64
|
@@ -198,14 +216,15 @@ static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_
|
|
198 |
#define CUDA_SCALE_BLOCK_SIZE 256
|
199 |
#define CUDA_ROPE_BLOCK_SIZE 256
|
200 |
#define CUDA_DIAG_MASK_INF_BLOCK_SIZE 32
|
|
|
201 |
#define CUDA_DEQUANTIZE_BLOCK_SIZE 256
|
202 |
|
203 |
// dmmv = dequantize_mul_mat_vec
|
204 |
#ifndef GGML_CUDA_DMMV_X
|
205 |
#define GGML_CUDA_DMMV_X 32
|
206 |
#endif
|
207 |
-
#ifndef
|
208 |
-
#define
|
209 |
#endif
|
210 |
|
211 |
#ifndef K_QUANTS_PER_ITERATION
|
@@ -270,7 +289,6 @@ static __global__ void rms_norm_f32(const float * x, float * dst, const int ncol
|
|
270 |
}
|
271 |
|
272 |
// sum up partial sums
|
273 |
-
__syncthreads();
|
274 |
#pragma unroll
|
275 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
276 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
@@ -714,7 +732,6 @@ static __global__ void dequantize_mul_mat_vec_q2_k(const void * vx, const float
|
|
714 |
#endif
|
715 |
|
716 |
// sum up partial sums and write back result
|
717 |
-
__syncthreads();
|
718 |
#pragma unroll
|
719 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
720 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
@@ -819,7 +836,6 @@ static __global__ void dequantize_mul_mat_vec_q3_k(const void * vx, const float
|
|
819 |
#endif
|
820 |
|
821 |
// sum up partial sums and write back result
|
822 |
-
__syncthreads();
|
823 |
#pragma unroll
|
824 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
825 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
@@ -923,7 +939,6 @@ static __global__ void dequantize_mul_mat_vec_q4_k(const void * vx, const float
|
|
923 |
#endif
|
924 |
|
925 |
// sum up partial sums and write back result
|
926 |
-
__syncthreads();
|
927 |
#pragma unroll
|
928 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
929 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
@@ -1028,7 +1043,6 @@ static __global__ void dequantize_mul_mat_vec_q5_k(const void * vx, const float
|
|
1028 |
#endif
|
1029 |
|
1030 |
// sum up partial sums and write back result
|
1031 |
-
__syncthreads();
|
1032 |
#pragma unroll
|
1033 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
1034 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
@@ -1139,7 +1153,6 @@ static __global__ void dequantize_mul_mat_vec_q6_k(const void * vx, const float
|
|
1139 |
#endif
|
1140 |
|
1141 |
// sum up partial sums and write back result
|
1142 |
-
__syncthreads();
|
1143 |
#pragma unroll
|
1144 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
1145 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
@@ -1158,6 +1171,41 @@ static __device__ void convert_f16(const void * vx, const int ib, const int iqs,
|
|
1158 |
v.y = x[ib + iqs + 1];
|
1159 |
}
|
1160 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1161 |
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
|
1162 |
static __global__ void dequantize_block(const void * vx, float * y, const int k) {
|
1163 |
const int i = blockDim.x*blockIdx.x + 2*threadIdx.x;
|
@@ -1179,6 +1227,182 @@ static __global__ void dequantize_block(const void * vx, float * y, const int k)
|
|
1179 |
y[iybs + iqs + y_offset] = v.y;
|
1180 |
}
|
1181 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1182 |
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
|
1183 |
static __global__ void dequantize_mul_mat_vec(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) {
|
1184 |
// qk = quantized weights per x block
|
@@ -1233,7 +1457,6 @@ static __global__ void dequantize_mul_mat_vec(const void * vx, const dfloat * y,
|
|
1233 |
}
|
1234 |
|
1235 |
// sum up partial sums and write back result
|
1236 |
-
__syncthreads();
|
1237 |
#pragma unroll
|
1238 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
1239 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
@@ -1284,7 +1507,6 @@ static __global__ void mul_mat_p021_f16_f32(const void * vx, const float * y, fl
|
|
1284 |
const int idst = channel*nrows_dst + row_dst;
|
1285 |
|
1286 |
// sum up partial sums and write back result
|
1287 |
-
__syncthreads();
|
1288 |
#pragma unroll
|
1289 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
1290 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
@@ -1330,7 +1552,6 @@ static __global__ void mul_mat_vec_nc_f16_f32( // nc == non-contiguous
|
|
1330 |
}
|
1331 |
|
1332 |
// sum up partial sums and write back result
|
1333 |
-
__syncthreads();
|
1334 |
#pragma unroll
|
1335 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
1336 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
@@ -1440,7 +1661,6 @@ static __global__ void soft_max_f32(const float * x, float * dst, const int ncol
|
|
1440 |
}
|
1441 |
|
1442 |
// sum up partial sums
|
1443 |
-
__syncthreads();
|
1444 |
#pragma unroll
|
1445 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
1446 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
@@ -1494,6 +1714,11 @@ static void rms_norm_f32_cuda(const float * x, float * dst, const int ncols, con
|
|
1494 |
rms_norm_f32<<<nrows, block_dims, 0, stream>>>(x, dst, ncols);
|
1495 |
}
|
1496 |
|
|
|
|
|
|
|
|
|
|
|
1497 |
static void dequantize_row_q4_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
|
1498 |
const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
|
1499 |
dequantize_block<QK4_0, QR4_0, dequantize_q4_0><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
|
@@ -1562,45 +1787,45 @@ static void dequantize_row_q6_K_cuda(const void * vx, float * y, const int k, cu
|
|
1562 |
|
1563 |
static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
|
1564 |
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
|
1565 |
-
const int block_num_y = (nrows +
|
1566 |
const dim3 block_nums(1, block_num_y, 1);
|
1567 |
-
const dim3 block_dims(WARP_SIZE,
|
1568 |
dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>
|
1569 |
<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
|
1570 |
}
|
1571 |
|
1572 |
static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
|
1573 |
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
|
1574 |
-
const int block_num_y = (nrows +
|
1575 |
const dim3 block_nums(1, block_num_y, 1);
|
1576 |
-
const dim3 block_dims(WARP_SIZE,
|
1577 |
dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>
|
1578 |
<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
|
1579 |
}
|
1580 |
|
1581 |
static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
|
1582 |
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
|
1583 |
-
const int block_num_y = (nrows +
|
1584 |
const dim3 block_nums(1, block_num_y, 1);
|
1585 |
-
const dim3 block_dims(WARP_SIZE,
|
1586 |
dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>
|
1587 |
<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
|
1588 |
}
|
1589 |
|
1590 |
static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
|
1591 |
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
|
1592 |
-
const int block_num_y = (nrows +
|
1593 |
const dim3 block_nums(1, block_num_y, 1);
|
1594 |
-
const dim3 block_dims(WARP_SIZE,
|
1595 |
dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>
|
1596 |
<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
|
1597 |
}
|
1598 |
|
1599 |
static void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
|
1600 |
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
|
1601 |
-
const int block_num_y = (nrows +
|
1602 |
const dim3 block_nums(1, block_num_y, 1);
|
1603 |
-
const dim3 block_dims(WARP_SIZE,
|
1604 |
dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>
|
1605 |
<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
|
1606 |
}
|
@@ -1647,6 +1872,51 @@ static void dequantize_mul_mat_vec_q6_K_cuda(const void * vx, const float * y, f
|
|
1647 |
dequantize_mul_mat_vec_q6_k<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
|
1648 |
}
|
1649 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1650 |
static void convert_fp16_to_fp32_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
|
1651 |
const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
|
1652 |
dequantize_block<1, 1, convert_f16><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
|
@@ -1654,9 +1924,9 @@ static void convert_fp16_to_fp32_cuda(const void * vx, float * y, const int k, c
|
|
1654 |
|
1655 |
static void convert_mul_mat_vec_f16_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
|
1656 |
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
|
1657 |
-
const int block_num_y = (nrows +
|
1658 |
const dim3 block_nums(1, block_num_y, 1);
|
1659 |
-
const dim3 block_dims(WARP_SIZE,
|
1660 |
dequantize_mul_mat_vec<1, 1, convert_f16>
|
1661 |
<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
|
1662 |
}
|
@@ -1847,6 +2117,7 @@ static size_t g_scratch_offset = 0;
|
|
1847 |
|
1848 |
static int g_device_count = -1;
|
1849 |
static int g_main_device = 0;
|
|
|
1850 |
static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0};
|
1851 |
|
1852 |
static cublasHandle_t g_cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr};
|
@@ -1864,9 +2135,12 @@ void ggml_init_cublas() {
|
|
1864 |
for (int id = 0; id < g_device_count; ++id) {
|
1865 |
cudaDeviceProp prop;
|
1866 |
CUDA_CHECK(cudaGetDeviceProperties(&prop, id));
|
1867 |
-
fprintf(stderr, " Device %d: %s\n", id, prop.name);
|
|
|
1868 |
g_tensor_split[id] = total_vram;
|
1869 |
total_vram += prop.totalGlobalMem;
|
|
|
|
|
1870 |
}
|
1871 |
for (int id = 0; id < g_device_count; ++id) {
|
1872 |
g_tensor_split[id] /= total_vram;
|
@@ -2082,7 +2356,7 @@ inline void ggml_cuda_op_rms_norm(
|
|
2082 |
(void) i1;
|
2083 |
}
|
2084 |
|
2085 |
-
inline void
|
2086 |
const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
|
2087 |
float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
|
2088 |
cudaStream_t & cudaStream_main){
|
@@ -2094,69 +2368,116 @@ inline void ggml_cuda_op_dequantize_mul_mat_vec(
|
|
2094 |
const int64_t ne00 = src0->ne[0];
|
2095 |
const int64_t nrows = i01_high - i01_low;
|
2096 |
|
2097 |
-
|
2098 |
-
|
2099 |
-
|
2100 |
-
|
|
|
2101 |
|
2102 |
-
bool
|
2103 |
-
src0->type ==
|
2104 |
-
src0->type ==
|
|
|
|
|
2105 |
|
2106 |
-
|
2107 |
-
|
2108 |
-
|
2109 |
-
|
2110 |
-
|
2111 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2112 |
#else
|
2113 |
-
|
2114 |
#endif // GGML_CUDA_DMMV_F16
|
2115 |
|
2116 |
-
|
2117 |
-
|
2118 |
-
|
2119 |
-
|
2120 |
-
|
2121 |
-
|
2122 |
-
|
2123 |
-
|
2124 |
-
|
2125 |
-
|
2126 |
-
|
2127 |
-
|
2128 |
-
|
2129 |
-
|
2130 |
-
|
2131 |
-
|
2132 |
-
|
2133 |
-
|
2134 |
-
|
2135 |
-
|
2136 |
-
|
2137 |
-
|
2138 |
-
|
2139 |
-
|
2140 |
-
|
2141 |
-
|
2142 |
-
|
2143 |
-
|
2144 |
-
|
2145 |
-
|
2146 |
-
|
2147 |
-
|
2148 |
-
|
2149 |
-
|
2150 |
-
|
2151 |
-
|
2152 |
-
|
2153 |
-
|
2154 |
|
2155 |
#ifdef GGML_CUDA_DMMV_F16
|
2156 |
-
|
2157 |
-
|
2158 |
-
|
2159 |
#endif // GGML_CUDA_DMMV_F16
|
|
|
2160 |
|
2161 |
(void) src1;
|
2162 |
(void) dst;
|
@@ -2729,8 +3050,8 @@ void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_
|
|
2729 |
}else if (src0->type == GGML_TYPE_F32) {
|
2730 |
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true, false);
|
2731 |
} else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) {
|
2732 |
-
if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0
|
2733 |
-
ggml_cuda_op(src0, src1, dst,
|
2734 |
} else {
|
2735 |
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true, false);
|
2736 |
}
|
@@ -2863,7 +3184,7 @@ void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) {
|
|
2863 |
}
|
2864 |
|
2865 |
void ggml_cuda_free_data(struct ggml_tensor * tensor) {
|
2866 |
-
if (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) {
|
2867 |
return;
|
2868 |
}
|
2869 |
|
|
|
70 |
|
71 |
// QK = number of values after dequantization
|
72 |
// QR = QK / number of values before dequantization
|
73 |
+
// QI = number of 32 bit integers before dequantization
|
74 |
|
75 |
#define QK4_0 32
|
76 |
#define QR4_0 2
|
77 |
+
#define QI4_0 4
|
78 |
typedef struct {
|
79 |
half d; // delta
|
80 |
uint8_t qs[QK4_0 / 2]; // nibbles / quants
|
|
|
83 |
|
84 |
#define QK4_1 32
|
85 |
#define QR4_1 2
|
86 |
+
#define QI4_1 4
|
87 |
typedef struct {
|
88 |
half d; // delta
|
89 |
half m; // min
|
|
|
93 |
|
94 |
#define QK5_0 32
|
95 |
#define QR5_0 2
|
96 |
+
#define QI5_0 4
|
97 |
typedef struct {
|
98 |
half d; // delta
|
99 |
uint8_t qh[4]; // 5-th bit of quants
|
|
|
103 |
|
104 |
#define QK5_1 32
|
105 |
#define QR5_1 2
|
106 |
+
#define QI5_1 4
|
107 |
typedef struct {
|
108 |
half d; // delta
|
109 |
half m; // min
|
|
|
114 |
|
115 |
#define QK8_0 32
|
116 |
#define QR8_0 1
|
117 |
+
#define QI8_0 8
|
118 |
typedef struct {
|
119 |
half d; // delta
|
120 |
int8_t qs[QK8_0]; // quants
|
121 |
} block_q8_0;
|
122 |
static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
|
123 |
|
124 |
+
#define QK8_1 32
|
125 |
+
#define QR8_1 1
|
126 |
+
#define QI8_1 8
|
127 |
+
typedef struct {
|
128 |
+
half d; // delta
|
129 |
+
half s; // unquantized sum
|
130 |
+
int8_t qs[QK8_0]; // quants
|
131 |
+
} block_q8_1;
|
132 |
+
static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_fp16_t) + QK8_0, "wrong q8_1 block size/padding");
|
133 |
+
|
134 |
+
typedef float (*vec_dot_q_cuda_t)(const void * vbq, const block_q8_1 * bq8_1, const int iqs);
|
135 |
+
|
136 |
//================================= k-quants
|
137 |
|
138 |
#ifdef GGML_QKK_64
|
|
|
216 |
#define CUDA_SCALE_BLOCK_SIZE 256
|
217 |
#define CUDA_ROPE_BLOCK_SIZE 256
|
218 |
#define CUDA_DIAG_MASK_INF_BLOCK_SIZE 32
|
219 |
+
#define CUDA_QUANTIZE_BLOCK_SIZE 256
|
220 |
#define CUDA_DEQUANTIZE_BLOCK_SIZE 256
|
221 |
|
222 |
// dmmv = dequantize_mul_mat_vec
|
223 |
#ifndef GGML_CUDA_DMMV_X
|
224 |
#define GGML_CUDA_DMMV_X 32
|
225 |
#endif
|
226 |
+
#ifndef GGML_CUDA_MMV_Y
|
227 |
+
#define GGML_CUDA_MMV_Y 1
|
228 |
#endif
|
229 |
|
230 |
#ifndef K_QUANTS_PER_ITERATION
|
|
|
289 |
}
|
290 |
|
291 |
// sum up partial sums
|
|
|
292 |
#pragma unroll
|
293 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
294 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
|
|
732 |
#endif
|
733 |
|
734 |
// sum up partial sums and write back result
|
|
|
735 |
#pragma unroll
|
736 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
737 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
|
|
836 |
#endif
|
837 |
|
838 |
// sum up partial sums and write back result
|
|
|
839 |
#pragma unroll
|
840 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
841 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
|
|
939 |
#endif
|
940 |
|
941 |
// sum up partial sums and write back result
|
|
|
942 |
#pragma unroll
|
943 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
944 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
|
|
1043 |
#endif
|
1044 |
|
1045 |
// sum up partial sums and write back result
|
|
|
1046 |
#pragma unroll
|
1047 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
1048 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
|
|
1153 |
#endif
|
1154 |
|
1155 |
// sum up partial sums and write back result
|
|
|
1156 |
#pragma unroll
|
1157 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
1158 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
|
|
1171 |
v.y = x[ib + iqs + 1];
|
1172 |
}
|
1173 |
|
1174 |
+
static __global__ void quantize_q8_1(const float * x, void * vy, const int k) {
|
1175 |
+
const int i = blockDim.x*blockIdx.x + threadIdx.x;
|
1176 |
+
|
1177 |
+
if (i >= k) {
|
1178 |
+
return;
|
1179 |
+
}
|
1180 |
+
|
1181 |
+
block_q8_1 * y = (block_q8_1 *) vy;
|
1182 |
+
|
1183 |
+
const int ib = i / QK8_0; // block index
|
1184 |
+
const int iqs = i % QK8_0; // quant index
|
1185 |
+
|
1186 |
+
const float xi = x[i];
|
1187 |
+
float amax = fabsf(xi);
|
1188 |
+
float sum = xi;
|
1189 |
+
|
1190 |
+
#pragma unroll
|
1191 |
+
for (int mask = 16; mask > 0; mask >>= 1) {
|
1192 |
+
amax = fmaxf(amax, __shfl_xor_sync(0xffffffff, amax, mask, 32));
|
1193 |
+
sum += __shfl_xor_sync(0xffffffff, sum, mask, 32);
|
1194 |
+
}
|
1195 |
+
|
1196 |
+
const float d = amax / 127;
|
1197 |
+
const int8_t q = amax == 0.0f ? 0 : roundf(xi / d);
|
1198 |
+
|
1199 |
+
y[ib].qs[iqs] = q;
|
1200 |
+
|
1201 |
+
if (iqs > 0) {
|
1202 |
+
return;
|
1203 |
+
}
|
1204 |
+
|
1205 |
+
y[ib].d = d;
|
1206 |
+
y[ib].s = sum;
|
1207 |
+
}
|
1208 |
+
|
1209 |
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
|
1210 |
static __global__ void dequantize_block(const void * vx, float * y, const int k) {
|
1211 |
const int i = blockDim.x*blockIdx.x + 2*threadIdx.x;
|
|
|
1227 |
y[iybs + iqs + y_offset] = v.y;
|
1228 |
}
|
1229 |
|
1230 |
+
static __device__ __forceinline__ float vec_dot_q4_0_q8_1(const void * vbq, const block_q8_1 * bq8_1, const int iqs) {
|
1231 |
+
#if __CUDA_ARCH__ >= 600 // lowest compute capability for integer intrinsics
|
1232 |
+
const block_q4_0 * bq4_0 = (const block_q4_0 *) vbq;
|
1233 |
+
|
1234 |
+
int vi;
|
1235 |
+
memcpy(&vi, &bq4_0->qs[sizeof(int) * (iqs + 0)], sizeof(int));
|
1236 |
+
const int ui0 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + 0)]);
|
1237 |
+
const int ui1 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + QI4_0)]);
|
1238 |
+
|
1239 |
+
const float d = __half2float(bq4_0->d) * __half2float(bq8_1->d);
|
1240 |
+
|
1241 |
+
// subtract 8 from each quantized value
|
1242 |
+
const int vi0 = __vsub4((vi >> 0) & 0x0F0F0F0F, 0x08080808);
|
1243 |
+
const int vi1 = __vsub4((vi >> 4) & 0x0F0F0F0F, 0x08080808);
|
1244 |
+
|
1245 |
+
// SIMD dot product of quantized values
|
1246 |
+
int sumi = __dp4a(vi0, ui0, 0);
|
1247 |
+
sumi = __dp4a(vi1, ui1, sumi);
|
1248 |
+
|
1249 |
+
return sumi*d;
|
1250 |
+
#else
|
1251 |
+
return 0.0f; // only to satisfy the compiler
|
1252 |
+
#endif // __CUDA_ARCH__ >= 600
|
1253 |
+
}
|
1254 |
+
|
1255 |
+
static __device__ __forceinline__ float vec_dot_q4_1_q8_1(const void * vbq, const block_q8_1 * bq8_1, const int iqs) {
|
1256 |
+
#if __CUDA_ARCH__ >= 600 // lowest compute capability for integer intrinsics
|
1257 |
+
const block_q4_1 * bq4_1 = (const block_q4_1 *) vbq;
|
1258 |
+
|
1259 |
+
const int vi = *((int *) &bq4_1->qs[sizeof(int) * (iqs + 0)]);
|
1260 |
+
const int ui0 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + 0)]);
|
1261 |
+
const int ui1 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + QI4_1)]);
|
1262 |
+
|
1263 |
+
const float d = __half2float(bq4_1->d) * __half2float(bq8_1->d);
|
1264 |
+
const float m = bq4_1->m;
|
1265 |
+
const float s = bq8_1->s;
|
1266 |
+
|
1267 |
+
const int vi0 = (vi >> 0) & 0x0F0F0F0F;
|
1268 |
+
const int vi1 = (vi >> 4) & 0x0F0F0F0F;
|
1269 |
+
|
1270 |
+
// SIMD dot product of quantized values
|
1271 |
+
int sumi = __dp4a(vi0, ui0, 0);
|
1272 |
+
sumi = __dp4a(vi1, ui1, sumi);
|
1273 |
+
|
1274 |
+
return sumi*d + m*s / QI4_1; // scale sum by QI4_1 because there are QI4_1 threads working on this block
|
1275 |
+
#else
|
1276 |
+
return 0.0f; // only to satisfy the compiler
|
1277 |
+
#endif // __CUDA_ARCH__ >= 600
|
1278 |
+
}
|
1279 |
+
|
1280 |
+
static __device__ __forceinline__ float vec_dot_q5_0_q8_1(const void * vbq, const block_q8_1 * bq8_1, const int iqs) {
|
1281 |
+
#if __CUDA_ARCH__ >= 600 // lowest compute capability for integer intrinsics
|
1282 |
+
const block_q5_0 * bq5_0 = (const block_q5_0 *) vbq;
|
1283 |
+
|
1284 |
+
int qs;
|
1285 |
+
memcpy(&qs, &bq5_0->qs[sizeof(int) * (iqs + 0)], sizeof(int));
|
1286 |
+
const int qh0 = bq5_0->qh[iqs/2 + 0] >> 4*(iqs%2);
|
1287 |
+
const int qh1 = bq5_0->qh[iqs/2 + 2] >> 4*(iqs%2);
|
1288 |
+
const int ui0 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + 0)]);
|
1289 |
+
const int ui1 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + QI5_0)]);
|
1290 |
+
|
1291 |
+
const float d = __half2float(bq5_0->d) * __half2float(bq8_1->d);
|
1292 |
+
|
1293 |
+
int vi0 = (qs >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh0 as 5th bits
|
1294 |
+
vi0 |= (qh0 << 4) & 0x00000010; // 1 -> 5
|
1295 |
+
vi0 |= (qh0 << 11) & 0x00001000; // 2 -> 13
|
1296 |
+
vi0 |= (qh0 << 18) & 0x00100000; // 3 -> 21
|
1297 |
+
vi0 |= (qh0 << 25) & 0x10000000; // 4 -> 29
|
1298 |
+
vi0 = __vsub4(vi0, 0x10101010); // subtract 16 from quantized values
|
1299 |
+
int sumi = __dp4a(vi0, ui0, 0); // SIMD dot product of quantized values
|
1300 |
+
|
1301 |
+
int vi1 = (qs >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh1 as 5th bits
|
1302 |
+
vi1 |= (qh1 << 4) & 0x00000010; // 1 -> 5
|
1303 |
+
vi1 |= (qh1 << 11) & 0x00001000; // 2 -> 13
|
1304 |
+
vi1 |= (qh1 << 18) & 0x00100000; // 3 -> 21
|
1305 |
+
vi1 |= (qh1 << 25) & 0x10000000; // 4 -> 29
|
1306 |
+
vi1 = __vsub4(vi1, 0x10101010); // subtract 16 from quantized values
|
1307 |
+
sumi = __dp4a(vi1, ui1, sumi); // SIMD dot product of quantized values
|
1308 |
+
|
1309 |
+
return sumi*d;
|
1310 |
+
#else
|
1311 |
+
return 0.0f; // only to satisfy the compiler
|
1312 |
+
#endif // __CUDA_ARCH__ >= 600
|
1313 |
+
}
|
1314 |
+
|
1315 |
+
static __device__ __forceinline__ float vec_dot_q5_1_q8_1(const void * vbq, const block_q8_1 * bq8_1, const int iqs) {
|
1316 |
+
#if __CUDA_ARCH__ >= 600 // lowest compute capability for integer intrinsics
|
1317 |
+
const block_q5_1 * bq5_1 = (const block_q5_1 *) vbq;
|
1318 |
+
|
1319 |
+
const int qs = *((int *) &bq5_1->qs[sizeof(int) * (iqs + 0)]);
|
1320 |
+
const int qh0 = bq5_1->qh[iqs/2 + 0] >> 4*(iqs%2);
|
1321 |
+
const int qh1 = bq5_1->qh[iqs/2 + 2] >> 4*(iqs%2);
|
1322 |
+
const int ui0 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + 0)]);
|
1323 |
+
const int ui1 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + QI5_1)]);
|
1324 |
+
|
1325 |
+
const float d = __half2float(bq5_1->d) * __half2float(bq8_1->d);
|
1326 |
+
const float m = bq5_1->m;
|
1327 |
+
const float s = bq8_1->s;
|
1328 |
+
|
1329 |
+
int vi0 = (qs >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh0 as 5th bits
|
1330 |
+
vi0 |= (qh0 << 4) & 0x00000010; // 1 -> 5
|
1331 |
+
vi0 |= (qh0 << 11) & 0x00001000; // 2 -> 13
|
1332 |
+
vi0 |= (qh0 << 18) & 0x00100000; // 3 -> 21
|
1333 |
+
vi0 |= (qh0 << 25) & 0x10000000; // 4 -> 29
|
1334 |
+
int sumi = __dp4a(vi0, ui0, 0); // SIMD dot product of quantized values
|
1335 |
+
|
1336 |
+
int vi1 = (qs >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh1 as 5th bits
|
1337 |
+
vi1 |= (qh1 << 4) & 0x00000010; // 1 -> 5
|
1338 |
+
vi1 |= (qh1 << 11) & 0x00001000; // 2 -> 13
|
1339 |
+
vi1 |= (qh1 << 18) & 0x00100000; // 3 -> 21
|
1340 |
+
vi1 |= (qh1 << 25) & 0x10000000; // 4 -> 29
|
1341 |
+
sumi = __dp4a(vi1, ui1, sumi); // SIMD dot product of quantized values
|
1342 |
+
|
1343 |
+
return sumi*d + m*s / QI5_1; // scale sum by QI5_1 because there are QI5_1 threads working on this block
|
1344 |
+
#else
|
1345 |
+
return 0.0f; // only to satisfy the compiler
|
1346 |
+
#endif // __CUDA_ARCH__ >= 600
|
1347 |
+
}
|
1348 |
+
|
1349 |
+
static __device__ __forceinline__ float vec_dot_q8_0_q8_1(const void * vbq, const block_q8_1 * bq8_1, const int iqs) {
|
1350 |
+
#if __CUDA_ARCH__ >= 600 // lowest compute capability for integer intrinsics
|
1351 |
+
const block_q8_0 * bq8_0 = (const block_q8_0 *) vbq;
|
1352 |
+
|
1353 |
+
int vi;
|
1354 |
+
memcpy(&vi, &bq8_0->qs[sizeof(int) * (iqs + 0)], sizeof(int));
|
1355 |
+
const int ui = *((int *) &bq8_1->qs[sizeof(int) * (iqs + 0)]);
|
1356 |
+
|
1357 |
+
const float d = __half2float(bq8_0->d) * __half2float(bq8_1->d);
|
1358 |
+
|
1359 |
+
// SIMD dot product of quantized values
|
1360 |
+
int sumi = __dp4a(vi, ui, 0);
|
1361 |
+
|
1362 |
+
return sumi*d;
|
1363 |
+
#else
|
1364 |
+
return 0.0f; // only to satisfy the compiler
|
1365 |
+
#endif // __CUDA_ARCH__ >= 600
|
1366 |
+
}
|
1367 |
+
|
1368 |
+
template <int qk, int qi, typename block_q_t, vec_dot_q_cuda_t vec_dot_q_cuda>
|
1369 |
+
static __global__ void mul_mat_vec_q(const void * vx, const void * vy, float * dst, const int ncols, const int nrows) {
|
1370 |
+
const int row = blockIdx.y*blockDim.y + threadIdx.y;
|
1371 |
+
|
1372 |
+
if (row >= nrows) {
|
1373 |
+
return;
|
1374 |
+
}
|
1375 |
+
|
1376 |
+
const int blocks_per_row = ncols / qk;
|
1377 |
+
const int blocks_per_warp = WARP_SIZE / qi;
|
1378 |
+
|
1379 |
+
// partial sum for each thread
|
1380 |
+
float tmp = 0.0f;
|
1381 |
+
|
1382 |
+
const block_q_t * x = (const block_q_t *) vx;
|
1383 |
+
const block_q8_1 * y = (const block_q8_1 *) vy;
|
1384 |
+
|
1385 |
+
for (int i = 0; i < blocks_per_row; i += blocks_per_warp) {
|
1386 |
+
const int ibx = row*blocks_per_row + i + threadIdx.x / qi; // x block index
|
1387 |
+
|
1388 |
+
const int iby = i + threadIdx.x / qi; // y block index
|
1389 |
+
|
1390 |
+
const int iqs = threadIdx.x % qi; // x block quant index when casting the quants to int
|
1391 |
+
|
1392 |
+
tmp += vec_dot_q_cuda(&x[ibx], &y[iby], iqs);
|
1393 |
+
}
|
1394 |
+
|
1395 |
+
// sum up partial sums and write back result
|
1396 |
+
#pragma unroll
|
1397 |
+
for (int mask = 16; mask > 0; mask >>= 1) {
|
1398 |
+
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
1399 |
+
}
|
1400 |
+
|
1401 |
+
if (threadIdx.x == 0) {
|
1402 |
+
dst[row] = tmp;
|
1403 |
+
}
|
1404 |
+
}
|
1405 |
+
|
1406 |
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
|
1407 |
static __global__ void dequantize_mul_mat_vec(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) {
|
1408 |
// qk = quantized weights per x block
|
|
|
1457 |
}
|
1458 |
|
1459 |
// sum up partial sums and write back result
|
|
|
1460 |
#pragma unroll
|
1461 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
1462 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
|
|
1507 |
const int idst = channel*nrows_dst + row_dst;
|
1508 |
|
1509 |
// sum up partial sums and write back result
|
|
|
1510 |
#pragma unroll
|
1511 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
1512 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
|
|
1552 |
}
|
1553 |
|
1554 |
// sum up partial sums and write back result
|
|
|
1555 |
#pragma unroll
|
1556 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
1557 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
|
|
1661 |
}
|
1662 |
|
1663 |
// sum up partial sums
|
|
|
1664 |
#pragma unroll
|
1665 |
for (int mask = 16; mask > 0; mask >>= 1) {
|
1666 |
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
|
|
|
1714 |
rms_norm_f32<<<nrows, block_dims, 0, stream>>>(x, dst, ncols);
|
1715 |
}
|
1716 |
|
1717 |
+
static void quantize_row_q8_1_cuda(const float * x, void * vy, const int k, cudaStream_t stream) {
|
1718 |
+
const int num_blocks = (k + CUDA_QUANTIZE_BLOCK_SIZE - 1) / CUDA_QUANTIZE_BLOCK_SIZE;
|
1719 |
+
quantize_q8_1<<<num_blocks, CUDA_QUANTIZE_BLOCK_SIZE, 0, stream>>>(x, vy, k);
|
1720 |
+
}
|
1721 |
+
|
1722 |
static void dequantize_row_q4_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
|
1723 |
const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
|
1724 |
dequantize_block<QK4_0, QR4_0, dequantize_q4_0><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
|
|
|
1787 |
|
1788 |
static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
|
1789 |
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
|
1790 |
+
const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
|
1791 |
const dim3 block_nums(1, block_num_y, 1);
|
1792 |
+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
|
1793 |
dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>
|
1794 |
<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
|
1795 |
}
|
1796 |
|
1797 |
static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
|
1798 |
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
|
1799 |
+
const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
|
1800 |
const dim3 block_nums(1, block_num_y, 1);
|
1801 |
+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
|
1802 |
dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>
|
1803 |
<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
|
1804 |
}
|
1805 |
|
1806 |
static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
|
1807 |
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
|
1808 |
+
const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
|
1809 |
const dim3 block_nums(1, block_num_y, 1);
|
1810 |
+
const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
|
1811 |
dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>
|
1812 |
<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
|
1813 |
}
|
1814 |
|
1815 |
static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
|
1816 |
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
|
1817 |
+
const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
|
1818 |
     const dim3 block_nums(1, block_num_y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
     dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>
         <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
 }

 static void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
     GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
     const dim3 block_nums(1, block_num_y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
     dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>
         <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
 }

     dequantize_mul_mat_vec_q6_k<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
 }

+static void mul_mat_vec_q4_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
+    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
+    const dim3 block_nums(1, block_num_y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
+    mul_mat_vec_q<QK4_0, QI4_0, block_q4_0, vec_dot_q4_0_q8_1>
+        <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
+}
+
+static void mul_mat_vec_q4_1_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
+    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
+    const dim3 block_nums(1, block_num_y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
+    mul_mat_vec_q<QK4_0, QI4_1, block_q4_1, vec_dot_q4_1_q8_1>
+        <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
+}
+
+static void mul_mat_vec_q5_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
+    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
+    const dim3 block_nums(1, block_num_y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
+    mul_mat_vec_q<QK5_0, QI5_0, block_q5_0, vec_dot_q5_0_q8_1>
+        <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
+}
+
+static void mul_mat_vec_q5_1_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
+    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
+    const dim3 block_nums(1, block_num_y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
+    mul_mat_vec_q<QK5_1, QI5_1, block_q5_1, vec_dot_q5_1_q8_1>
+        <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
+}
+
+static void mul_mat_vec_q8_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
+    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
+    const dim3 block_nums(1, block_num_y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
+    mul_mat_vec_q<QK8_0, QI8_0, block_q8_0, vec_dot_q8_0_q8_1>
+        <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
+}
+
 static void convert_fp16_to_fp32_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
     const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
     dequantize_block<1, 1, convert_f16><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);

 static void convert_mul_mat_vec_f16_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
     GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
     const dim3 block_nums(1, block_num_y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
     dequantize_mul_mat_vec<1, 1, convert_f16>
         <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
 }
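As a standalone illustration (not from this commit), the launch shape every one of these helpers computes is a plain ceiling division; a minimal sketch with made-up constants standing in for WARP_SIZE and GGML_CUDA_MMV_Y:

    #include <cstdio>

    // Stand-ins for the real constants defined in ggml-cuda.cu.
    constexpr int kWarpSize     = 32; // assumed warp width
    constexpr int kRowsPerBlock = 2;  // assumed GGML_CUDA_MMV_Y

    int main() {
        const int nrows = 4097; // example row count, not taken from the diff
        // Ceiling division so the last partial group of rows still gets a block.
        const int block_num_y = (nrows + kRowsPerBlock - 1) / kRowsPerBlock;
        // Grid is 1 x block_num_y x 1; each block is kWarpSize x kRowsPerBlock threads,
        // i.e. one warp per row and kRowsPerBlock rows handled per block.
        std::printf("blocks.y = %d, threads/block = %d\n",
                    block_num_y, kWarpSize * kRowsPerBlock);
        return 0;
    }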
 static int g_device_count = -1;
 static int g_main_device = 0;
+static int g_compute_capabilities[GGML_CUDA_MAX_DEVICES];
 static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0};

 static cublasHandle_t g_cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr};

     for (int id = 0; id < g_device_count; ++id) {
         cudaDeviceProp prop;
         CUDA_CHECK(cudaGetDeviceProperties(&prop, id));
+        fprintf(stderr, "  Device %d: %s, compute capability %d.%d\n", id, prop.name, prop.major, prop.minor);
+
         g_tensor_split[id] = total_vram;
         total_vram += prop.totalGlobalMem;
+
+        g_compute_capabilities[id] = 100*prop.major + 10*prop.minor;
     }
     for (int id = 0; id < g_device_count; ++id) {
         g_tensor_split[id] /= total_vram;
     (void) i1;
 }

+inline void ggml_cuda_op_mul_mat_vec(
     const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
     float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
     cudaStream_t & cudaStream_main){

     const int64_t ne00 = src0->ne[0];
     const int64_t nrows = i01_high - i01_low;

+#ifdef GGML_CUDA_FORCE_DMMV
+    const bool use_mul_mat_vec_q = false;
+#else
+    int id;
+    CUDA_CHECK(cudaGetDevice(&id));

+    const bool mul_mat_vec_q_implemented = src0->type == GGML_TYPE_Q4_0 ||
+        src0->type == GGML_TYPE_Q4_1 ||
+        src0->type == GGML_TYPE_Q5_0 ||
+        src0->type == GGML_TYPE_Q5_1 ||
+        src0->type == GGML_TYPE_Q8_0;

+    // The integer intrinsics used in mul_mat_vec_q are available with compute capability 6.
+    // However, they have bad performance with Pascal cards.
+    // Therefore, in a multi GPU setting decide at runtime which GPUs should use mul_mat_vec_q.
+    const bool use_mul_mat_vec_q = g_compute_capabilities[id] >= 700 && mul_mat_vec_q_implemented;
+#endif
+
+    if (use_mul_mat_vec_q) {
+        size_t as;
+        void * src1_q8_1 = ggml_cuda_pool_malloc(ne00*sizeof(block_q8_1)/QK8_1, &as);
+        quantize_row_q8_1_cuda(src1_ddf_i, src1_q8_1, ne00, cudaStream_main);
+
+        switch (src0->type) {
+            case GGML_TYPE_Q4_0:
+                mul_mat_vec_q4_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q4_1:
+                mul_mat_vec_q4_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q5_0:
+                mul_mat_vec_q5_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q5_1:
+                mul_mat_vec_q5_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q8_0:
+                mul_mat_vec_q8_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            default:
+                GGML_ASSERT(false);
+                break;
+        }
+
+        ggml_cuda_pool_free(src1_q8_1, as);
+    } else {
+        // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics
+#ifdef GGML_CUDA_DMMV_F16
+        size_t ash;
+        dfloat * src1_dfloat = nullptr; // dfloat == half
+
+        bool src1_convert_f16 = src0->type == GGML_TYPE_Q4_0 || src0->type == GGML_TYPE_Q4_1 ||
+            src0->type == GGML_TYPE_Q5_0 || src0->type == GGML_TYPE_Q5_1 ||
+            src0->type == GGML_TYPE_Q8_0 || src0->type == GGML_TYPE_F16;
+
+        if (src1_convert_f16) {
+            src1_dfloat = (half *) ggml_cuda_pool_malloc(ne00*sizeof(half), &ash);
+            ggml_cpy_f32_f16_cuda((char *) src1_ddf_i, (char *) src1_dfloat, ne00,
+                                    ne00, 1, sizeof(float), 0, 0,
+                                    ne00, 1, sizeof(half), 0, 0, cudaStream_main);
+        }
 #else
+        dfloat * src1_dfloat = src1_ddf_i; // dfloat == float, no conversion
 #endif // GGML_CUDA_DMMV_F16

+        switch (src0->type) {
+            case GGML_TYPE_Q4_0:
+                dequantize_mul_mat_vec_q4_0_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q4_1:
+                dequantize_mul_mat_vec_q4_1_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q5_0:
+                dequantize_mul_mat_vec_q5_0_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q5_1:
+                dequantize_mul_mat_vec_q5_1_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q8_0:
+                dequantize_mul_mat_vec_q8_0_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q2_K:
+                dequantize_mul_mat_vec_q2_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q3_K:
+                dequantize_mul_mat_vec_q3_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q4_K:
+                dequantize_mul_mat_vec_q4_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q5_K:
+                dequantize_mul_mat_vec_q5_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q6_K:
+                dequantize_mul_mat_vec_q6_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_F16:
+                convert_mul_mat_vec_f16_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            default:
+                GGML_ASSERT(false);
+                break;
+        }

 #ifdef GGML_CUDA_DMMV_F16
+        if (src1_convert_f16) {
+            ggml_cuda_pool_free(src1_dfloat, ash);
+        }
 #endif // GGML_CUDA_DMMV_F16
+    }

     (void) src1;
     (void) dst;
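As a standalone illustration (not from this commit), the `g_compute_capabilities[id] >= 700` gate above works because capability is stored as 100*major + 10*minor, so 700 corresponds to compute capability 7.0; the helper name below is invented for the sketch:

    #include <cstdio>

    // Mirrors the encoding used in the diff: 100*major + 10*minor.
    static int encode_capability(int major, int minor) {
        return 100 * major + 10 * minor;
    }

    // Quantized mul_mat_vec kernels are only taken on devices encoded >= 700,
    // since Pascal (6.x) performs poorly with the integer intrinsics.
    static bool should_use_mul_mat_vec_q(int major, int minor, bool type_supported) {
        return encode_capability(major, minor) >= 700 && type_supported;
    }

    int main() {
        std::printf("6.1 -> %d\n", should_use_mul_mat_vec_q(6, 1, true)); // 0: falls back to dmmv
        std::printf("8.6 -> %d\n", should_use_mul_mat_vec_q(8, 6, true)); // 1: uses mul_mat_vec_q
        return 0;
    }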
     }else if (src0->type == GGML_TYPE_F32) {
         ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true, false);
     } else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) {
+        if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0) {
+            ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_vec, false, false);
         } else {
             ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true, false);
         }

 }

 void ggml_cuda_free_data(struct ggml_tensor * tensor) {
+    if (!tensor || (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) ) {
         return;
     }
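As a standalone illustration (not from this commit), the dispatch above only takes the matrix-vector path when src1 has a single column and the row length is a multiple of GGML_CUDA_DMMV_X; everything else stays on the cuBLAS path. A tiny sketch of that predicate with an assumed alignment value:

    // Minimal sketch of the routing condition; 32 stands in for GGML_CUDA_DMMV_X.
    constexpr long kDmmvX = 32;

    static bool takes_mat_vec_path(long src1_ne1, long src0_ne0) {
        // one output column (a true mat*vec) and rows aligned to the kernel's step
        return src1_ne1 == 1 && src0_ne0 % kDmmvX == 0;
    }

    int main() { return takes_mat_vec_path(1, 4096) ? 0 : 1; }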
ggml-opencl.cpp
CHANGED
@@ -654,13 +654,17 @@ __kernel void dequantize_mul_mat_vec_q6_K(__global const struct block_q6_K * xx,
     const int im = tid/step;        // 0 or 1. 0 computes 0..., 1 computes 128...
     const int in = tid - step*im;   // 0...15 or 0...7

-#if K_QUANTS_PER_ITERATION == 1
+\n#if K_QUANTS_PER_ITERATION == 1\n
     const int l0 = K_QUANTS_PER_ITERATION*in;   // 0...15
     const int is = 0;
-
+
+\n#else\n
+
     const int l0 = 4 * in;   // 0, 4, 8, ..., 28
     const int is = in / 4;
-
+
+\n#endif\n
+
     const int ql_offset = 64*im + l0;
     const int qh_offset = 32*im + l0;
     const int s_offset  =  8*im + is;
@@ -677,7 +681,7 @@ __kernel void dequantize_mul_mat_vec_q6_K(__global const struct block_q6_K * xx,
     const float d = vload_half(0, &x[i].d);

-#if K_QUANTS_PER_ITERATION == 1
+\n#if K_QUANTS_PER_ITERATION == 1\n
     float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32)
               + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32)
               + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32)
@@ -687,7 +691,7 @@ __kernel void dequantize_mul_mat_vec_q6_K(__global const struct block_q6_K * xx,
               + y[96] * s[6] * d * ((int8_t)((ql[32]  >> 4) | ((qh[ 0] & 0xc0) >> 2)) - 32)
               +y[112] * s[7] * d * ((int8_t)((ql[48]  >> 4) | ((qh[16] & 0xc0) >> 2)) - 32);
     tmp[16 * ix + tid] += sum;
-#else
+\n#else\n
     float sum = 0;
     for (int l = 0; l < 4; ++l) {
         sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32)
@@ -696,7 +700,7 @@ __kernel void dequantize_mul_mat_vec_q6_K(__global const struct block_q6_K * xx,
              + y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32);
     }
     tmp[16 * ix + tid] += sum;
-#endif
+\n#endif\n

 }

@@ -1384,7 +1388,7 @@ static void ggml_cl_mul_f32(const ggml_tensor * src0, const ggml_tensor * src1,
     const int64_t ne00 = src0->ne[0];
     const int64_t ne01 = src0->ne[1];
     const int64_t ne02 = src0->ne[2];
-    const int64_t ne03 = src0->ne[
+    const int64_t ne03 = src0->ne[3];
     const int64_t ne0  = ne00 * ne01 * ne02 * ne03;
     const int64_t ne10 = src1->ne[0];
     const int64_t ne11 = src1->ne[1];
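As a standalone illustration (not from this commit), the `\n#if ...\n` / `\n#else\n` / `\n#endif\n` additions above exist because this kernel text is embedded in a C string before being handed to the OpenCL compiler, and preprocessor directives must land on their own lines in the generated source. A toy kernel string assembled the same way:

    #include <cstdio>

    // Directives carry their own '\n's so they end up on separate lines
    // once the pieces are concatenated into one program source string.
    static const char * kProgram =
        "__kernel void scale(__global float * x, const float s) {\n"
        "    const int i = get_global_id(0);\n"
        "\n#if USE_DOUBLE_SCALE\n"
        "    x[i] *= 2.0f * s;\n"
        "\n#else\n"
        "    x[i] *= s;\n"
        "\n#endif\n"
        "}\n";

    int main() {
        std::fputs(kProgram, stdout); // ggml-opencl.cpp instead passes its string to clCreateProgramWithSource
        return 0;
    }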
ggml.c
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
ggml.h
CHANGED
@@ -201,6 +201,8 @@
 #define GGML_MAX_NAME 48
 #define GGML_DEFAULT_N_THREADS 4

+#define GGML_UNUSED(x) (void)(x)
+
 #define GGML_ASSERT(x) \
     do { \
         if (!(x)) { \
@@ -209,6 +211,30 @@
         } \
     } while (0)

+// used to copy the number of elements and stride in bytes of tensors into local variables.
+// main purpose is to reduce code duplication and improve readability.
+//
+// example:
+//
+//    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
+//    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb);
+//
+#define GGML_TENSOR_LOCALS_1(type, prefix, pointer, array) \
+    const type prefix##0 = (pointer)->array[0]; \
+    GGML_UNUSED(prefix##0);
+#define GGML_TENSOR_LOCALS_2(type, prefix, pointer, array) \
+    GGML_TENSOR_LOCALS_1 (type, prefix, pointer, array) \
+    const type prefix##1 = (pointer)->array[1]; \
+    GGML_UNUSED(prefix##1);
+#define GGML_TENSOR_LOCALS_3(type, prefix, pointer, array) \
+    GGML_TENSOR_LOCALS_2 (type, prefix, pointer, array) \
+    const type prefix##2 = (pointer)->array[2]; \
+    GGML_UNUSED(prefix##2);
+#define GGML_TENSOR_LOCALS(type, prefix, pointer, array) \
+    GGML_TENSOR_LOCALS_3 (type, prefix, pointer, array) \
+    const type prefix##3 = (pointer)->array[3]; \
+    GGML_UNUSED(prefix##3);
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -224,8 +250,8 @@ extern "C" {
     GGML_API float       ggml_fp16_to_fp32(ggml_fp16_t x);
     GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x);

-    GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y,
-    GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y,
+    GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n);
+    GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n);

     struct ggml_object;
     struct ggml_context;
@@ -295,12 +321,15 @@ extern "C" {
         GGML_OP_SUM,
         GGML_OP_SUM_ROWS,
         GGML_OP_MEAN,
+        GGML_OP_ARGMAX,
         GGML_OP_REPEAT,
         GGML_OP_REPEAT_BACK,
         GGML_OP_ABS,
         GGML_OP_SGN,
         GGML_OP_NEG,
         GGML_OP_STEP,
+        GGML_OP_TANH,
+        GGML_OP_ELU,
         GGML_OP_RELU,
         GGML_OP_GELU,
         GGML_OP_GELU_QUICK,
@@ -332,9 +361,8 @@ extern "C" {
         GGML_OP_ROPE_BACK,
         GGML_OP_ALIBI,
         GGML_OP_CLAMP,
-
-
-        GGML_OP_CONV_2D_SK_P0,
+        GGML_OP_CONV_1D,
+        GGML_OP_CONV_2D,

         GGML_OP_FLASH_ATTN,
         GGML_OP_FLASH_FF,
@@ -692,6 +720,11 @@ extern "C" {
         struct ggml_context * ctx,
         struct ggml_tensor  * a);

+    // argmax along rows
+    GGML_API struct ggml_tensor * ggml_argmax(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a);
+
     // if a is the same shape as b, and a is not parameter, return a
     // otherwise, return a new tensor: repeat(a) to fit in b
     GGML_API struct ggml_tensor * ggml_repeat(
@@ -736,6 +769,22 @@ extern "C" {
         struct ggml_context * ctx,
         struct ggml_tensor  * a);

+    GGML_API struct ggml_tensor * ggml_tanh(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a);
+
+    GGML_API struct ggml_tensor * ggml_tanh_inplace(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a);
+
+    GGML_API struct ggml_tensor * ggml_elu(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a);
+
+    GGML_API struct ggml_tensor * ggml_elu_inplace(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a);
+
     GGML_API struct ggml_tensor * ggml_relu(
         struct ggml_context * ctx,
         struct ggml_tensor  * a);
@@ -1086,58 +1135,33 @@ extern "C" {
         float                 min,
         float                 max);

-    // GGML_API struct ggml_tensor * ggml_conv_1d(
-    //     struct ggml_context * ctx,
-    //     struct ggml_tensor  * a,
-    //     struct ggml_tensor  * b,
-    //     int                   s0
-    //     int                   p0,
-    //     int                   d0);
-    //
-    // GGML_API struct ggml_tensor * ggml_conv_2d(
-    //     struct ggml_context * ctx,
-    //     struct ggml_tensor  * a,
-    //     struct ggml_tensor  * b,
-    //     int                   s0,
-    //     int                   s1,
-    //     int                   p0,
-    //     int                   p1,
-    //     int                   d0,
-    //     int                   d1);
-
-    // padding = half
-    // TODO: we don't support extra parameters for now
-    //       that's why we are hard-coding the stride, padding, and dilation
-    //       not great ..
-    // example:
-    // a:      3   80  768    1
-    // b:   3000   80    1    1
-    // res: 3000  768    1    1
-    // used in whisper
-    GGML_API struct ggml_tensor * ggml_conv_1d_s1_ph(
+    GGML_API struct ggml_tensor * ggml_conv_1d(
         struct ggml_context * ctx,
         struct ggml_tensor  * a,
-        struct ggml_tensor  * b
+        struct ggml_tensor  * b,
+        int                   s0,  // stride
+        int                   p0,  // padding
+        int                   d0); // dilation

-    GGML_API struct ggml_tensor * ggml_conv_1d_s2_ph(
+    GGML_API struct ggml_tensor * ggml_conv_2d(
         struct ggml_context * ctx,
         struct ggml_tensor  * a,
-        struct ggml_tensor  * b
+        struct ggml_tensor  * b,
+        int                   s0,
+        int                   s1,
+        int                   p0,
+        int                   p1,
+        int                   d0,
+        int                   d1);

-    //
-    //
-    // example:
-    // a:     16   16    3  768
-    // b:   1024 1024    3    1
-    // res:   64   64  768    1
-    // used in sam
-    GGML_API struct ggml_tensor * ggml_conv_2d_sk_p0(
+    // conv_1d with padding = half
+    // alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d)
+    GGML_API struct ggml_tensor* ggml_conv_1d_ph(
         struct ggml_context * ctx,
         struct ggml_tensor  * a,
-        struct ggml_tensor  * b
+        struct ggml_tensor  * b,
+        int                   s,
+        int                   d);

     GGML_API struct ggml_tensor * ggml_flash_attn(
         struct ggml_context * ctx,
@@ -1493,25 +1517,24 @@ extern "C" {
     //

 #ifdef __cplusplus
-
+    // restrict not standard in C++
 #define GGML_RESTRICT
 #else
 #define GGML_RESTRICT restrict
 #endif
-    typedef void (*
-    typedef void (*
-    typedef void (*
+    typedef void (*ggml_to_float_t)  (const void  * GGML_RESTRICT x, float * GGML_RESTRICT y, int k);
+    typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void  * GGML_RESTRICT y, int k);
+    typedef void (*ggml_vec_dot_t)   (const int n, float * GGML_RESTRICT s, const void * GGML_RESTRICT x, const void * GGML_RESTRICT y);

     typedef struct {
-        … (deleted struct members not rendered in this view)
-    quantize_fns_t ggml_internal_get_quantize_fn(size_t i);
+        ggml_to_float_t   to_float;
+        ggml_from_float_t from_float;
+        ggml_from_float_t from_float_reference;
+        ggml_vec_dot_t    vec_dot;
+        enum ggml_type    vec_dot_type;
+    } ggml_type_traits_t;
+
+    ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type i);

 #ifdef __cplusplus
 }
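As a standalone illustration (not from this commit), the GGML_TENSOR_LOCALS macros added above simply splat a tensor's ne/nb arrays into ne0..ne3 / nb0..nb3 locals and mark them unused; a self-contained sketch of the same expansion against a toy struct (the real macros operate on ggml_tensor):

    #include <cstdint>
    #include <cstdio>

    #define MY_UNUSED(x) (void)(x)
    // Same shape as the macros in the diff, written against a stand-in struct.
    #define MY_TENSOR_LOCALS(type, prefix, pointer, array)                 \
        const type prefix##0 = (pointer)->array[0]; MY_UNUSED(prefix##0); \
        const type prefix##1 = (pointer)->array[1]; MY_UNUSED(prefix##1); \
        const type prefix##2 = (pointer)->array[2]; MY_UNUSED(prefix##2); \
        const type prefix##3 = (pointer)->array[3]; MY_UNUSED(prefix##3);

    struct toy_tensor { int64_t ne[4]; size_t nb[4]; };

    int main() {
        toy_tensor t = { {2, 3, 4, 5}, {4, 8, 24, 96} };
        MY_TENSOR_LOCALS(int64_t, ne, &t, ne); // expands to ne0..ne3 locals
        MY_TENSOR_LOCALS(size_t,  nb, &t, nb); // expands to nb0..nb3 locals
        std::printf("ne1=%lld nb2=%zu\n", (long long)ne1, nb2);
        return 0;
    }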
|
gpttype_adapter.cpp
CHANGED
@@ -76,6 +76,8 @@ static size_t mem_per_token = 0;
 static std::vector<float> logits;
 static std::vector<int> smartcontext;
 static std::vector<std::string> stop_sequence;
+static std::vector<std::string> banned_tokens;
+static std::vector<int> banned_token_ids;
 static std::vector<llama_token_data> top_picks;
 static int remaining_tokens = 0;
 static int stopper_unused_tokens = 0;
@@ -219,8 +221,31 @@ void sample_top_a(llama_token_data_array * candidates, float a, size_t min_keep)
     candidates->size = last_idx;
 }

+void sample_rep_pen(int n_ctx, int rep_pen_range, float rep_pen, llama_token_data_array * candidates_p)
+{
+    auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), rep_pen_range), n_ctx);
+    llama_sample_repetition_penalty(nullptr, candidates_p,
+        last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
+        last_n_repeat, rep_pen);
+}
+
+void sample_temperature(llama_token_data_array * candidates_p, float temp)
+{
+    if (temp <= 0)
+    {
+        // Imitate greedy sampling
+        temp = 0.01f; //cannot be zero else div0
+        llama_sample_temperature(nullptr, candidates_p, temp);
+        llama_sample_top_k(nullptr, candidates_p, 1, 1); //only want first candidate
+    }
+    else
+    {
+        llama_sample_temperature(nullptr, candidates_p, temp);
+    }
+}
+
 int SampleLogits(const float * logits, int n_ctx, int n_vocab, int rep_pen_range, float rep_pen, float top_k, float top_a, float top_p, float typical_p, float tfs, float temp, std::mt19937 & rng,
-int mirostat, float mirostat_tau, float mirostat_eta)
+int mirostat, float mirostat_tau, float mirostat_eta, const std::vector<samplers> & sampler_order)
 {
     int id = 0;
     std::vector<llama_token_data> candidates;
@@ -231,47 +256,54 @@ int mirostat, float mirostat_tau, float mirostat_eta)
     llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

-
-    auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), rep_pen_range), n_ctx);
-    llama_sample_repetition_penalty(nullptr, &candidates_p,
-        last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
-        last_n_repeat, rep_pen);
-
-    // llama_sample_frequency_and_presence_penalties(nullptr, &candidates_p,
-    //    last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
-    //    last_n_repeat, alpha_frequency, alpha_presence);
-
-    if (temp <= 0)
-    {
-        // Greedy sampling
-        id = llama_sample_token_greedy(nullptr, &candidates_p);
-    }
-    else
+    if (mirostat == 1 || mirostat == 2)
     {
+        static float mirostat_mu = 2.0f * mirostat_tau;
+        const int mirostat_m = 100;
+        sample_rep_pen(n_ctx, rep_pen_range, rep_pen, &candidates_p);
+        sample_temperature(&candidates_p, temp);
         if (mirostat == 1)
         {
-            static float mirostat_mu = 2.0f * mirostat_tau;
-            const int mirostat_m = 100;
-            llama_sample_temperature(nullptr, &candidates_p, temp);
             id = sample_token_mirostat(n_vocab, &candidates_p, rng, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
         }
-        else
+        else
         {
-            static float mirostat_mu = 2.0f * mirostat_tau;
-            llama_sample_temperature(nullptr, &candidates_p, temp);
             id = sample_token_mirostat_v2(&candidates_p, rng, mirostat_tau, mirostat_eta, &mirostat_mu);
         }
-
+    }
+    else
+    {
+        for (int i = 0; i < sampler_order.size(); i++)
         {
-            … (deleted sampler chain not rendered in this view)
+            switch (sampler_order[i])
+            {
+                case KCPP_SAMPLER_TOP_K:
+                    llama_sample_top_k(nullptr, &candidates_p, top_k,1);
+                    break;
+                case KCPP_SAMPLER_TOP_A:
+                    sample_top_a(&candidates_p,top_a,1);
+                    break;
+                case KCPP_SAMPLER_TOP_P:
+                    llama_sample_top_p(nullptr, &candidates_p, top_p,1);
+                    break;
+                case KCPP_SAMPLER_TFS:
+                    llama_sample_tail_free(nullptr, &candidates_p, tfs,1);
+                    break;
+                case KCPP_SAMPLER_TYP:
+                    llama_sample_typical(nullptr, &candidates_p, typical_p,1);
+                    break;
+                case KCPP_SAMPLER_TEMP:
+                    sample_temperature(&candidates_p, temp);
+                    break;
+                case KCPP_SAMPLER_REP_PEN:
+                    sample_rep_pen(n_ctx, rep_pen_range, rep_pen, &candidates_p);
+                    break;
+                default:
+                    printf("\nSampleLogits: Unknown Sampler : %d",sampler_order[i]);
+                    break;
+            }
         }
+        id = sample_token(&candidates_p, rng);
     }

     return id;
@@ -314,10 +346,30 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     = gpt2_ctx_v1.hparams.n_ctx = gpt2_ctx_v2.hparams.n_ctx = gpt2_ctx_v3.hparams.n_ctx
     = mpt_ctx_v3.hparams.n_ctx = params.n_ctx;

+    //handle custom token bans
+    banned_tokens.clear();
+    for(int x=0;x<ban_token_max;++x)
+    {
+        std::string word = inputs.banned_tokens[x];
+        if(word!="")
+        {
+            banned_tokens.push_back(word);
+        }
+    }
+
     //this is used for the mem_per_token eval, openblas needs more RAM
     bool use_scratch = ggml_cpu_has_gpublas();

+    int cu_parseinfo_maindevice = inputs.cublas_info<0?0:inputs.cublas_info;
+
     printf("System Info: %s\n", llama_print_system_info());
+    #if defined(GGML_USE_CUBLAS)
+    if(ggml_cpu_has_gpublas() && cu_parseinfo_maindevice>0)
+    {
+        printf("CUBLAS: Set main device to %d\n",cu_parseinfo_maindevice);
+        ggml_cuda_set_main_device(cu_parseinfo_maindevice);
+    }
+    #endif
     SetQuantsUnshuffled(false);
     if(file_format == FileFormat::GGML || file_format == FileFormat::GGHF || file_format == FileFormat::GGJT || file_format == FileFormat::GGJT_2)
     {
@@ -382,6 +434,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     llama_ctx_params.use_mmap = inputs.use_mmap;
     llama_ctx_params.use_mlock = inputs.use_mlock;
     llama_ctx_params.n_gpu_layers = inputs.gpulayers;
+    llama_ctx_params.main_gpu = cu_parseinfo_maindevice;

     llama_ctx_v3 = llama_init_from_file(modelname.c_str(), llama_ctx_params);

@@ -432,10 +485,10 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     {
         rwkv_ctx_v3 = rwkv_init_from_file(modelname.c_str(), n_threads);

-        … (deleted lines not rendered in this view)
+        if(inputs.gpulayers>0)
+        {
+            rwkv_gpu_offload_layers(rwkv_ctx_v3,inputs.gpulayers);
+        }

         const struct rwkv_file_header & header = rwkv_ctx_v3->instance->model.header;
         const size_t n_vocab = header.n_vocab;
@@ -913,6 +966,28 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
     std::mt19937 rng(params.seed);
     concat_output = "";

+    //prepare sampler order
+    std::vector<samplers> sampler_order;
+    if(inputs.sampler_len<=0) //list by value
+    {
+        sampler_order = {
+            KCPP_SAMPLER_REP_PEN,
+            KCPP_SAMPLER_TOP_K,
+            KCPP_SAMPLER_TOP_A,
+            KCPP_SAMPLER_TFS,
+            KCPP_SAMPLER_TYP,
+            KCPP_SAMPLER_TOP_P,
+            KCPP_SAMPLER_TEMP
+        };
+    }
+    else
+    {
+        for(int i=0;i<inputs.sampler_len;++i)
+        {
+            sampler_order.push_back(inputs.sampler_order[i]);
+        }
+    }
+
     bool startedsampling = false;
     bool use_scratch = true; //for normal inference always use scratch
@@ -1002,6 +1077,25 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
         printf("Bad format!");
     }

+    //prepare banned tokens
+    if(banned_token_ids.size()==0 && banned_tokens.size()>0)
+    {
+        printf("\n[First Run] Banning %d token sequences...",banned_tokens.size());
+        for(int v=0;v<n_vocab;++v)
+        {
+            std::string word = FileFormatTokenizeID(v,file_format);
+            for(int i=0;i<banned_tokens.size();++i)
+            {
+                if (word.find(banned_tokens[i]) != std::string::npos)
+                {
+                    banned_token_ids.push_back(v);
+                    break;
+                }
+            }
+        }
+        printf("\nBanned a total of %d tokens.\n",banned_token_ids.size());
+    }
+
     if(debugmode!=-1)
     {
         printf("\n");
@@ -1066,15 +1160,15 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
     }
     else
     {
-        … (deleted lines not rendered in this view)
+        if(embd.size()>1)
+        {
+            evalres = rwkv_eval_sequence(rwkv_ctx_v3, (uint32_t*)embd.data(), embd.size(), rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, rwkv_ctx_v3->logits_out);
+        }
+        else
+        {
         bool ignoreLogits = (!startedsampling && ((int)embd_inp.size() > input_consumed + 2));
         evalres = rwkv_eval(rwkv_ctx_v3, embd[0], rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, ignoreLogits?nullptr:rwkv_ctx_v3->logits_out);
-
+        }

         memcpy(logits.data(), rwkv_ctx_v3->logits_out, sizeof(float) * rwkv_vocab.size());
         rwkv_ctx_v3->state_in = rwkv_ctx_v3->state_out;
@@ -1159,6 +1253,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
     unsigned int eosID = 0;
     float * logitsPtr;
+    int btsize = banned_token_ids.size();
     if(file_format == FileFormat::GGML || file_format == FileFormat::GGHF || file_format == FileFormat::GGJT || file_format == FileFormat::GGJT_2 || file_format == FileFormat::GGJT_3)
     {
         if(file_format == FileFormat::GGJT_3)
@@ -1177,6 +1272,14 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
             // set the logit of the eos token (2) to zero to avoid sampling it
             logitsPtr[eosID] = 0;
         }
+
+        if(btsize>0)
+        {
+            for(int t=0;t<btsize;++t)
+            {
+                logitsPtr[banned_token_ids[t]]=0;
+            }
+        }
     }
     else
     {
@@ -1231,11 +1334,19 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
             }
         }

+        if(btsize>0)
+        {
+            int topid = std::min_element(logits.begin(), logits.end()) - logits.begin();
+            for (int t = 0; t < btsize; ++t)
+            {
+                logits[banned_token_ids[t]] = (logits[topid] < 0 ? logits[topid] : 0);
+            }
+        }
     }

     id = SampleLogits(logitsPtr, nctx, n_vocab, last_n_size, repeat_penalty,
         top_k, top_a, top_p, typical_p, tfs_z, temp, rng,
-        params.mirostat,params.mirostat_tau,params.mirostat_eta);
+        params.mirostat, params.mirostat_tau, params.mirostat_eta, sampler_order);

     last_n_tokens.erase(last_n_tokens.begin());
     last_n_tokens.push_back(id);
klite.embd
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
koboldcpp.py
CHANGED
@@ -1,3 +1,6 @@
|
|
|
|
|
|
|
|
1 |
# A hacky little script from Concedo that exposes llama.cpp function bindings
|
2 |
# allowing it to be used via a simulated kobold api endpoint
|
3 |
# generation delay scales linearly with original prompt length.
|
@@ -9,6 +12,8 @@ import json, sys, http.server, time, asyncio, socket, threading
|
|
9 |
from concurrent.futures import ThreadPoolExecutor
|
10 |
|
11 |
stop_token_max = 10
|
|
|
|
|
12 |
|
13 |
class load_model_inputs(ctypes.Structure):
|
14 |
_fields_ = [("threads", ctypes.c_int),
|
@@ -26,10 +31,12 @@ class load_model_inputs(ctypes.Structure):
|
|
26 |
("use_smartcontext", ctypes.c_bool),
|
27 |
("unban_tokens", ctypes.c_bool),
|
28 |
("clblast_info", ctypes.c_int),
|
|
|
29 |
("blasbatchsize", ctypes.c_int),
|
30 |
("debugmode", ctypes.c_int),
|
31 |
("forceversion", ctypes.c_int),
|
32 |
-
("gpulayers", ctypes.c_int)
|
|
|
33 |
|
34 |
class generation_inputs(ctypes.Structure):
|
35 |
_fields_ = [("seed", ctypes.c_int),
|
@@ -47,6 +54,8 @@ class generation_inputs(ctypes.Structure):
|
|
47 |
("mirostat", ctypes.c_int),
|
48 |
("mirostat_tau", ctypes.c_float),
|
49 |
("mirostat_eta", ctypes.c_float),
|
|
|
|
|
50 |
("stop_sequence", ctypes.c_char_p * stop_token_max),
|
51 |
("stream_sse", ctypes.c_bool)]
|
52 |
|
@@ -105,7 +114,7 @@ def init_library():
|
|
105 |
else:
|
106 |
print("Attempting to use CLBlast library for faster prompt ingestion. A compatible clblast will be required.")
|
107 |
use_clblast = True
|
108 |
-
elif (args.usecublas
|
109 |
if not file_exists(lib_cublas):
|
110 |
print("Warning: CuBLAS library file not found. Non-BLAS library will be used.")
|
111 |
else:
|
@@ -160,7 +169,7 @@ def load_model(model_filename):
|
|
160 |
inputs.batch_size = 8
|
161 |
inputs.max_context_length = maxctx #initial value to use for ctx, can be overwritten
|
162 |
inputs.threads = args.threads
|
163 |
-
inputs.low_vram = (True if args.usecublas
|
164 |
inputs.blasthreads = args.blasthreads
|
165 |
inputs.f16_kv = True
|
166 |
inputs.use_mmap = (not args.nommap)
|
@@ -181,12 +190,23 @@ def load_model(model_filename):
|
|
181 |
if args.useclblast:
|
182 |
clblastids = 100 + int(args.useclblast[0])*10 + int(args.useclblast[1])
|
183 |
inputs.clblast_info = clblastids
|
|
|
|
|
|
|
|
|
|
|
184 |
inputs.executable_path = (getdirpath()+"/").encode("UTF-8")
|
185 |
inputs.debugmode = args.debugmode
|
|
|
|
|
|
|
|
|
|
|
|
|
186 |
ret = handle.load_model(inputs)
|
187 |
return ret
|
188 |
|
189 |
-
def generate(prompt,max_length=20, max_context_length=512,temperature=0.8,top_k=120, top_a=0.0
|
190 |
inputs = generation_inputs()
|
191 |
outputs = ctypes.create_unicode_buffer(ctypes.sizeof(generation_outputs))
|
192 |
inputs.prompt = prompt.encode("UTF-8")
|
@@ -205,8 +225,19 @@ def generate(prompt,max_length=20, max_context_length=512,temperature=0.8,top_k=
|
|
205 |
inputs.mirostat = int(args.usemirostat[0])
|
206 |
inputs.mirostat_tau = float(args.usemirostat[1])
|
207 |
inputs.mirostat_eta = float(args.usemirostat[2])
|
|
|
|
|
|
|
|
|
208 |
else:
|
209 |
inputs.mirostat = inputs.mirostat_tau = inputs.mirostat_eta = 0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
210 |
inputs.seed = seed
|
211 |
for n in range(stop_token_max):
|
212 |
if not stop_sequence or n >= len(stop_sequence):
|
@@ -272,6 +303,10 @@ class ServerRequestHandler(http.server.SimpleHTTPRequestHandler):
|
|
272 |
tfs=genparams.get('tfs', 1.0),
|
273 |
rep_pen=genparams.get('rep_pen', 1.1),
|
274 |
rep_pen_range=genparams.get('rep_pen_range', 128),
|
|
|
|
|
|
|
|
|
275 |
seed=genparams.get('sampler_seed', -1),
|
276 |
stop_sequence=genparams.get('stop_sequence', []),
|
277 |
stream_sse=stream_flag)
|
@@ -288,6 +323,10 @@ class ServerRequestHandler(http.server.SimpleHTTPRequestHandler):
|
|
288 |
tfs=genparams.get('tfs', 1.0),
|
289 |
rep_pen=genparams.get('rep_pen', 1.1),
|
290 |
rep_pen_range=genparams.get('rep_pen_range', 128),
|
|
|
|
|
|
|
|
|
291 |
seed=genparams.get('sampler_seed', -1),
|
292 |
stop_sequence=genparams.get('stop_sequence', []),
|
293 |
stream_sse=stream_flag)
|
@@ -563,10 +602,413 @@ def RunServerMultiThreaded(addr, port, embedded_kailite = None):
|
|
563 |
threadArr[i].stop()
|
564 |
sys.exit(0)
|
565 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
566 |
|
567 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
568 |
import tkinter as tk
|
569 |
from tkinter.filedialog import askopenfilename
|
|
|
570 |
|
571 |
if len(sys.argv) == 1:
|
572 |
#no args passed at all. Show nooby gui
|
@@ -643,8 +1085,8 @@ def show_gui():
|
|
643 |
frameD.grid(row=5,column=0,pady=4)
|
644 |
|
645 |
# Create button, it will change label text
|
646 |
-
tk.Button(
|
647 |
-
tk.Label(root, text = "(Please use the Command Line for more advanced options)",
|
648 |
font = ("Arial", 9)).grid(row=7,column=0)
|
649 |
|
650 |
root.mainloop()
|
@@ -674,7 +1116,7 @@ def show_gui():
|
|
674 |
if selrunchoice==runopts[3]:
|
675 |
args.useclblast = [0,1]
|
676 |
if selrunchoice==runopts[4]:
|
677 |
-
args.usecublas =
|
678 |
if selrunchoice==runopts[5]:
|
679 |
args.noblas = True
|
680 |
if selrunchoice==runopts[6]:
|
@@ -727,14 +1169,22 @@ def main(args):
|
|
727 |
if not args.model_param:
|
728 |
#give them a chance to pick a file
|
729 |
print("For command line arguments, please refer to --help")
|
730 |
-
print("
|
731 |
try:
|
732 |
-
|
733 |
except Exception as ex:
|
734 |
-
print("
|
735 |
-
print("
|
736 |
-
|
737 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
738 |
|
739 |
if args.hordeconfig and args.hordeconfig[0]!="":
|
740 |
global friendlymodelname, maxhordelen, maxhordectx, showdebug
|
@@ -804,7 +1254,8 @@ def main(args):
|
|
804 |
args.blasthreads = args.threads
|
805 |
|
806 |
modelname = os.path.abspath(args.model_param)
|
807 |
-
print(
|
|
|
808 |
loadok = load_model(modelname)
|
809 |
print("Load Model OK: " + str(loadok))
|
810 |
|
@@ -839,7 +1290,7 @@ def main(args):
|
|
839 |
asyncio.run(RunServerMultiThreaded(args.host, args.port, embedded_kailite))
|
840 |
|
841 |
if __name__ == '__main__':
|
842 |
-
print("
|
843 |
# print("Python version: " + sys.version)
|
844 |
parser = argparse.ArgumentParser(description='KoboldCpp Server')
|
845 |
modelgroup = parser.add_mutually_exclusive_group() #we want to be backwards compatible with the unnamed positional args
|
@@ -863,7 +1314,8 @@ if __name__ == '__main__':
|
|
863 |
parser.add_argument("--blasbatchsize", help="Sets the batch size used in BLAS processing (default 512). Setting it to -1 disables BLAS mode, but keeps other benefits like GPU offload.", type=int,choices=[-1,32,64,128,256,512,1024], default=512)
|
864 |
parser.add_argument("--stream", help="Uses streaming when generating tokens. Only for the Kobold Lite UI.", action='store_true')
|
865 |
parser.add_argument("--smartcontext", help="Reserving a portion of context to try processing less frequently.", action='store_true')
|
866 |
-
parser.add_argument("--unbantokens", help="Normally, KoboldAI prevents
|
|
|
867 |
parser.add_argument("--usemirostat", help="Experimental! Replaces your samplers with mirostat. Takes 3 params = [type(0/1/2), tau(5.0), eta(0.1)].",metavar=('[type]', '[tau]', '[eta]'), type=float, nargs=3)
|
868 |
parser.add_argument("--forceversion", help="If the model file format detection fails (e.g. rogue modified model) you can set this to override the detected format (enter desired version, e.g. 401 for GPTNeoX-Type2).",metavar=('[version]'), type=int, default=0)
|
869 |
parser.add_argument("--nommap", help="If set, do not use mmap to load newer models", action='store_true')
|
@@ -875,7 +1327,7 @@ if __name__ == '__main__':
|
|
875 |
compatgroup = parser.add_mutually_exclusive_group()
|
876 |
compatgroup.add_argument("--noblas", help="Do not use OpenBLAS for accelerated prompt ingestion", action='store_true')
|
877 |
compatgroup.add_argument("--useclblast", help="Use CLBlast for GPU Acceleration. Must specify exactly 2 arguments, platform ID and device ID (e.g. --useclblast 1 0).", type=int, choices=range(0,9), nargs=2)
|
878 |
-
compatgroup.add_argument("--usecublas", help="Use CuBLAS for GPU Acceleration. Requires Nvidia GPU. Select lowvram to not allocate VRAM scratch buffer.",
|
879 |
parser.add_argument("--gpulayers", help="Set number of layers to offload to GPU when using GPU. Requires GPU.",metavar=('[GPU layers]'), type=int, default=0)
|
880 |
args = parser.parse_args()
|
881 |
main(args)
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
#-*- coding: utf-8 -*-
|
3 |
+
|
4 |
# A hacky little script from Concedo that exposes llama.cpp function bindings
|
5 |
# allowing it to be used via a simulated kobold api endpoint
|
6 |
# generation delay scales linearly with original prompt length.
|
|
|
12 |
from concurrent.futures import ThreadPoolExecutor
|
13 |
|
14 |
stop_token_max = 10
|
15 |
+
sampler_order_max = 7
|
16 |
+
ban_token_max = 10
|
17 |
|
18 |
class load_model_inputs(ctypes.Structure):
|
19 |
_fields_ = [("threads", ctypes.c_int),
|
|
|
31 |
("use_smartcontext", ctypes.c_bool),
|
32 |
("unban_tokens", ctypes.c_bool),
|
33 |
("clblast_info", ctypes.c_int),
|
34 |
+
("cublas_info", ctypes.c_int),
|
35 |
("blasbatchsize", ctypes.c_int),
|
36 |
("debugmode", ctypes.c_int),
|
37 |
("forceversion", ctypes.c_int),
|
38 |
+
("gpulayers", ctypes.c_int),
|
39 |
+
("banned_tokens", ctypes.c_char_p * ban_token_max)]
|
40 |
|
41 |
class generation_inputs(ctypes.Structure):
|
42 |
_fields_ = [("seed", ctypes.c_int),
|
|
|
54 |
("mirostat", ctypes.c_int),
|
55 |
("mirostat_tau", ctypes.c_float),
|
56 |
("mirostat_eta", ctypes.c_float),
|
57 |
+
("sampler_order", ctypes.c_int * sampler_order_max),
|
58 |
+
("sampler_len", ctypes.c_int),
|
59 |
("stop_sequence", ctypes.c_char_p * stop_token_max),
|
60 |
("stream_sse", ctypes.c_bool)]
|
61 |
|
|
|
114 |
else:
|
115 |
print("Attempting to use CLBlast library for faster prompt ingestion. A compatible clblast will be required.")
|
116 |
use_clblast = True
|
117 |
+
elif (args.usecublas is not None):
|
118 |
if not file_exists(lib_cublas):
|
119 |
print("Warning: CuBLAS library file not found. Non-BLAS library will be used.")
|
120 |
else:
|
|
|
169 |
inputs.batch_size = 8
|
170 |
inputs.max_context_length = maxctx #initial value to use for ctx, can be overwritten
|
171 |
inputs.threads = args.threads
|
172 |
+
inputs.low_vram = (True if (args.usecublas and "lowvram" in args.usecublas) else False)
|
173 |
inputs.blasthreads = args.blasthreads
|
174 |
inputs.f16_kv = True
|
175 |
inputs.use_mmap = (not args.nommap)
|
|
|
190 |
if args.useclblast:
|
191 |
clblastids = 100 + int(args.useclblast[0])*10 + int(args.useclblast[1])
|
192 |
inputs.clblast_info = clblastids
|
193 |
+
inputs.cublas_info = 0
|
194 |
+
if (args.usecublas and "1" in args.usecublas):
|
195 |
+
inputs.cublas_info = 1
|
196 |
+
elif (args.usecublas and "2" in args.usecublas):
|
197 |
+
inputs.cublas_info = 2
|
198 |
inputs.executable_path = (getdirpath()+"/").encode("UTF-8")
|
199 |
inputs.debugmode = args.debugmode
|
200 |
+
banned_tokens = args.bantokens
|
201 |
+
for n in range(ban_token_max):
|
202 |
+
if not banned_tokens or n >= len(banned_tokens):
|
203 |
+
inputs.banned_tokens[n] = "".encode("UTF-8")
|
204 |
+
else:
|
205 |
+
inputs.banned_tokens[n] = banned_tokens[n].encode("UTF-8")
|
206 |
ret = handle.load_model(inputs)
|
207 |
return ret
|
208 |
|
209 |
+
def generate(prompt,max_length=20, max_context_length=512, temperature=0.8, top_k=120, top_a=0.0, top_p=0.85, typical_p=1.0, tfs=1.0, rep_pen=1.1, rep_pen_range=128, mirostat=0, mirostat_tau=5.0, mirostat_eta=0.1, sampler_order=[6,0,1,3,4,2,5], seed=-1, stop_sequence=[], stream_sse=False):
|
210 |
inputs = generation_inputs()
|
211 |
outputs = ctypes.create_unicode_buffer(ctypes.sizeof(generation_outputs))
|
212 |
inputs.prompt = prompt.encode("UTF-8")
|
|
|
225 |
inputs.mirostat = int(args.usemirostat[0])
|
226 |
inputs.mirostat_tau = float(args.usemirostat[1])
|
227 |
inputs.mirostat_eta = float(args.usemirostat[2])
|
228 |
+
elif mirostat in (1, 2):
|
229 |
+
inputs.mirostat = mirostat
|
230 |
+
inputs.mirostat_tau = mirostat_tau
|
231 |
+
inputs.mirostat_eta = mirostat_eta
|
232 |
else:
|
233 |
inputs.mirostat = inputs.mirostat_tau = inputs.mirostat_eta = 0
|
234 |
+
if sampler_order and 0 < len(sampler_order) <= sampler_order_max:
|
235 |
+
try:
|
236 |
+
for i, sampler in enumerate(sampler_order):
|
237 |
+
inputs.sampler_order[i] = sampler
|
238 |
+
inputs.sampler_len = len(sampler_order)
|
239 |
+
except TypeError as e:
|
240 |
+
print("ERROR: sampler_order must be a list of integers: " + str(e))
|
241 |
inputs.seed = seed
|
242 |
for n in range(stop_token_max):
|
243 |
if not stop_sequence or n >= len(stop_sequence):
|
|
|
303 |
tfs=genparams.get('tfs', 1.0),
|
304 |
rep_pen=genparams.get('rep_pen', 1.1),
|
305 |
rep_pen_range=genparams.get('rep_pen_range', 128),
|
306 |
+
mirostat=genparams.get('mirostat', 0),
|
307 |
+
mirostat_tau=genparams.get('mirostat_tau', 5.0),
|
308 |
+
mirostat_eta=genparams.get('mirostat_eta', 0.1),
|
309 |
+
sampler_order=genparams.get('sampler_order', [6,0,1,3,4,2,5]),
|
310 |
seed=genparams.get('sampler_seed', -1),
|
311 |
stop_sequence=genparams.get('stop_sequence', []),
|
312 |
stream_sse=stream_flag)
|
|
|
323 |
tfs=genparams.get('tfs', 1.0),
|
324 |
rep_pen=genparams.get('rep_pen', 1.1),
|
325 |
rep_pen_range=genparams.get('rep_pen_range', 128),
|
326 |
+
mirostat=genparams.get('mirostat', 0),
|
327 |
+
mirostat_tau=genparams.get('mirostat_tau', 5.0),
|
328 |
+
mirostat_eta=genparams.get('mirostat_eta', 0.1),
|
329 |
+
sampler_order=genparams.get('sampler_order', [6,0,1,3,4,2,5]),
|
330 |
seed=genparams.get('sampler_seed', -1),
|
331 |
stop_sequence=genparams.get('stop_sequence', []),
|
332 |
stream_sse=stream_flag)
|
|
|
@@ koboldcpp.py: new customtkinter GUI, show_gui_warning(), and the start of show_old_gui() (new lines 602-1014) @@
         threadArr[i].stop()
     sys.exit(0)

+# note: customtkinter-5.2.0
+def show_new_gui():
+    import customtkinter as ctk
+    from tkinter.filedialog import askopenfilename
+    from tkinter.filedialog import asksaveasfile
+
+    # if args received, launch
+    if len(sys.argv) != 1:
+        root = ctk.CTk()
+        #we dont want the useless window to be visible, but we want it in taskbar
+        root.attributes("-alpha", 0)
+        args.model_param = askopenfilename(title="Select ggml model .bin files")
+        root.destroy()
+        if not args.model_param:
+            print("\nNo ggml model file was selected. Exiting.")
+            time.sleep(2)
+            sys.exit(2)
+        return
+
+    nextstate = 0 #0=exit, 1=launch, 2=oldgui
+    windowwidth = 520
+    windowheight = 500
+    ctk.set_appearance_mode("dark")
+    root = ctk.CTk()
+    root.geometry(str(windowwidth) + "x" + str(windowheight))
+    root.title("KoboldCpp v"+KcppVersion)
+    root.resizable(False,False)
+
+    tabs = ctk.CTkFrame(root, corner_radius = 0, width=windowwidth, height=windowheight-50)
+    tabs.grid(row=0, stick="nsew")
+    tabnames= ["Quick Launch", "Hardware", "Tokens", "Model", "Network"]
+    navbuttons = {}
+    navbuttonframe = ctk.CTkFrame(tabs, width=100, height=int(tabs.cget("height")))
+    navbuttonframe.grid(row=0, column=0, padx=2,pady=2)
+    navbuttonframe.grid_propagate(False)
+
+    tabcontentframe = ctk.CTkFrame(tabs, width=windowwidth - int(navbuttonframe.cget("width")), height=int(tabs.cget("height")))
+    tabcontentframe.grid(row=0, column=1, sticky="nsew", padx=2, pady=2)
+    tabcontentframe.grid_propagate(False)
+
+    tabcontent = {}
+
+    # slider data
+    blasbatchsize_values = ["-1", "32", "64", "128", "256", "512", "1024"]
+    blasbatchsize_text = ["Don't Batch BLAS","32","64","128","256","512","1024"]
+    contextsize_text = ["512", "1024", "2048", "3072", "4096", "6144", "8192"]
+    runopts = ["Use OpenBLAS","Use CLBlast", "Use CuBLAS", "Use No BLAS","Use OpenBLAS (Old CPU, noavx2)","Failsafe Mode (Old CPU, noavx)"]
+
+    def tabbuttonaction(name):
+        for t in tabcontent:
+            if name == t:
+                tabcontent[t].grid(row=0, column=0)
+                navbuttons[t].configure(fg_color="#6f727b")
+            else:
+                tabcontent[t].grid_forget()
+                navbuttons[t].configure(fg_color="transparent")
+
+    # Dynamically create tabs + buttons based on values of [tabnames]
+    for idx, name in enumerate(tabnames):
+        tabcontent[name] = ctk.CTkFrame(tabcontentframe, width=int(tabcontentframe.cget("width")), height=int(tabcontentframe.cget("height")), fg_color="transparent")
+        tabcontent[name].grid_propagate(False)
+        if idx == 0:
+            tabcontent[name].grid(row=idx, sticky="nsew")
+        ctk.CTkLabel(tabcontent[name], text= name, font=ctk.CTkFont(None, 14, 'bold')).grid(row=0, padx=12, pady = 5, stick='nw')
+
+        navbuttons[name] = ctk.CTkButton(navbuttonframe, text=name, width = 100, corner_radius=0 , command = lambda d=name:tabbuttonaction(d), hover_color="#868a94" )
+        navbuttons[name].grid(row=idx)
+
+    tabbuttonaction(tabnames[0])
+
+    # helper functions
+    def makecheckbox(parent, text, variable=None, row=0, column=0, command=None, onvalue=1, offvalue=0):
+        temp = ctk.CTkCheckBox(parent, text=text,variable=variable, onvalue=onvalue, offvalue=offvalue)
+        if command is not None and variable is not None:
+            variable.trace("w", command)
+        temp.grid(row=row,column=column, padx=8, pady=1, stick="nw")
+        return temp
+
+    def makelabel(parent, text, row, column=0):
+        temp = ctk.CTkLabel(parent, text=text)
+        temp.grid(row=row, column=column, padx=8, pady=1, stick="nw")
+        return temp
+
+    def makeslider(parent, label, options, var, from_ , to, row=0, width=160, height=10, set=0):
+        sliderLabel = makelabel(parent, options[set], row + 1, 1)
+        makelabel(parent, label, row)
+
+        def sliderUpdate(a,b,c):
+            sliderLabel.configure(text = options[int(var.get())])
+        var.trace("w", sliderUpdate)
+        slider = ctk.CTkSlider(parent, from_=from_, to=to, variable = var, width = width, height=height, border_width=5,number_of_steps=len(options) - 1)
+        slider.grid(row=row+1, column=0, padx = 8, stick="w")
+        slider.set(set)
+        return slider
+
+
+    def makelabelentry(parent, text, var, row=0, width= 50):
+        label = makelabel(parent, text, row)
+        entry = ctk.CTkEntry(parent, width=width, textvariable=var) #you cannot set placeholder text for SHARED variables
+        entry.grid(row=row, column=1, padx= 8, stick="nw")
+        return entry, label
+
+
+    def makefileentry(parent, text, searchtext, var, row=0, width=250):
+        makelabel(parent, text, row)
+        def getfilename(var, text):
+            var.set(askopenfilename(title=text))
+        entry = ctk.CTkEntry(parent, width, textvariable=var)
+        entry.grid(row=row+1, column=0, padx=8, stick="nw")
+        button = ctk.CTkButton(parent, 50, text="Browse", command= lambda a=var,b=searchtext:getfilename(a,b))
+        button.grid(row=row+1, column=1, stick="nw")
+        return
+
+    # Vars - should be in scope to be used by multiple widgets
+    gpulayers_var = ctk.StringVar(value="0")
+    threads_var = ctk.StringVar(value=str(default_threads))
+    runopts_var = ctk.StringVar()
+    gpu_choice_var = ctk.StringVar(value="1")
+
+    launchbrowser = ctk.IntVar(value=1)
+    highpriority = ctk.IntVar()
+    disablemmap = ctk.IntVar()
+    psutil = ctk.IntVar()
+    usemlock = ctk.IntVar()
+    debugmode = ctk.IntVar()
+
+    lowvram_var = ctk.IntVar()
+
+    blas_threads_var = ctk.StringVar()
+    blas_size_var = ctk.IntVar()
+    version_var =ctk.StringVar(value="0")
+
+    stream = ctk.IntVar()
+    smartcontext = ctk.IntVar()
+    unbantokens = ctk.IntVar()
+    usemirostat = ctk.IntVar()
+    mirostat_var = ctk.StringVar(value="2")
+    mirostat_tau = ctk.StringVar(value="5.0")
+    mirostat_eta = ctk.StringVar(value="0.1")
+
+    context_var = ctk.IntVar()
+
+    model_var = ctk.StringVar()
+    lora_var = ctk.StringVar()
+    lora_base_var = ctk.StringVar()
+
+    port_var = ctk.StringVar(value=defaultport)
+    host_var = ctk.StringVar(value="")
+    horde_name_var = ctk.StringVar(value="koboldcpp")
+    horde_gen_var = ctk.StringVar(value=maxhordelen)
+    horde_context_var = ctk.StringVar(value=maxhordectx)
+    usehorde_var = ctk.IntVar()
+
+    # Quick Launch Tab
+    quick_tab = tabcontent["Quick Launch"]
+
+    # gpu options
+    quick_gpu_layers_entry,quick_gpu_layers_label = makelabelentry(quick_tab,"GPU Layers:", gpulayers_var, 4, 50)
+    quick_gpu_selector_label = makelabel(quick_tab, "GPU ID:", 3)
+    quick_gpu_selector_box = ctk.CTkComboBox(quick_tab, values=["1","2","3"], width=60, variable=gpu_choice_var, state="readonly")
+    quick_lowvram_box = makecheckbox(quick_tab, "Low VRAM", lowvram_var, 5)
+
+    # hides gpu options when CLBlast is not chosen
+    def changerunmode(a,b,c):
+        index = runopts_var.get()
+        if index == "Use CLBlast" or index == "Use CuBLAS":
+            gpu_selector_label.grid(row=3, column=0, padx = 8, pady=1, stick="nw")
+            gpu_selector_box.grid(row=3, column=1, padx=8, pady=1, stick="nw")
+            quick_gpu_selector_label.grid(row=3, column=0, padx = 8, pady=1, stick="nw")
+            quick_gpu_selector_box.grid(row=3, column=1, padx=8, pady=1, stick="nw")
+        else:
+            gpu_selector_label.grid_forget()
+            gpu_selector_box.grid_forget()
+            quick_gpu_selector_label.grid_forget()
+            quick_gpu_selector_box.grid_forget()
+
+        if index == "Use CuBLAS":
+            lowvram_box.grid(row=4, column=0, padx=8, pady=1, stick="nw")
+            quick_lowvram_box.grid(row=4, column=0, padx=8, pady=1, stick="nw")
+        else:
+            lowvram_box.grid_forget()
+            quick_lowvram_box.grid_forget()
+
+        if index == "Use CLBlast" or index == "Use CuBLAS":
+            gpu_layers_label.grid(row=5, column=0, padx = 8, pady=1, stick="nw")
+            gpu_layers_entry.grid(row=5, column=1, padx=8, pady=1, stick="nw")
+            quick_gpu_layers_label.grid(row=5, column=0, padx = 8, pady=1, stick="nw")
+            quick_gpu_layers_entry.grid(row=5, column=1, padx=8, pady=1, stick="nw")
+        else:
+            gpu_layers_label.grid_forget()
+            gpu_layers_entry.grid_forget()
+            quick_gpu_layers_label.grid_forget()
+            quick_gpu_layers_entry.grid_forget()
+
+    # presets selector
+    makelabel(quick_tab, "Presets:", 1)
+
+    runoptbox = ctk.CTkComboBox(quick_tab, values=runopts, width=180,variable=runopts_var, state="readonly")
+    runoptbox.grid(row=1, column=1,padx=8, stick="nw")
+    runoptbox.set("Use OpenBLAS")
+
+    # threads
+    makelabelentry(quick_tab, "Threads:" , threads_var, 8, 50)
+
+    # blas batch size
+    makeslider(quick_tab, "BLAS Batch Size:", blasbatchsize_text, blas_size_var, 0, 6, 12, set=5)
+
+    # quick boxes
+    quick_boxes = {"Launch Browser": launchbrowser , "High Priority" : highpriority, "Streaming Mode":stream, "Use SmartContext":smartcontext, "Unban Tokens":unbantokens, "Disable MMAP":disablemmap,}
+    for idx, name, in enumerate(quick_boxes):
+        makecheckbox(quick_tab, name, quick_boxes[name], int(idx/2) +20, idx%2)
+
+    # context size
+    makeslider(quick_tab, "Context Size:", contextsize_text, context_var, 0, len(contextsize_text)-1, 30, set=2)
+
+    # load model
+    makefileentry(quick_tab, "Model:", "Select GGML Model File", model_var, 40, 170)
+
+    # Hardware Tab
+    hardware_tab = tabcontent["Hardware"]
+
+    # gpu options
+    gpu_layers_entry,gpu_layers_label = makelabelentry(hardware_tab,"GPU Layers:", gpulayers_var, 4, 50)
+    gpu_selector_label = makelabel(hardware_tab, "GPU ID:", 3)
+    gpu_selector_box = ctk.CTkComboBox(hardware_tab, values=["1","2","3"], width=60, variable=gpu_choice_var, state="readonly")
+    lowvram_box = makecheckbox(hardware_tab, "Low VRAM", lowvram_var, 5)
+
+    # presets selector
+    makelabel(hardware_tab, "Presets:", 1)
+    runoptbox = ctk.CTkComboBox(hardware_tab, values=runopts, width=180,variable=runopts_var, state="readonly")
+    runoptbox.grid(row=1, column=1,padx=8, stick="nw")
+    runoptbox.set("Use OpenBLAS")
+    runopts_var.trace('w', changerunmode)
+    changerunmode(1,1,1)
+    # threads
+    makelabelentry(hardware_tab, "Threads:" , threads_var, 8, 50)
+
+    # hardware checkboxes
+    hardware_boxes = {"Launch Browser": launchbrowser , "High Priority" : highpriority, "Disable MMAP":disablemmap, "Use mlock":usemlock, "PSUtil Set Threads":psutil, "Debug Mode":debugmode,}
+
+    for idx, name, in enumerate(hardware_boxes):
+        makecheckbox(hardware_tab, name, hardware_boxes[name], int(idx/2) +30, idx%2)
+
+    # blas thread specifier
+    makelabelentry(hardware_tab, "BLAS threads:" , blas_threads_var, 11, 50)
+    # blas batch size
+    makeslider(hardware_tab, "BLAS Batch Size:", blasbatchsize_text, blas_size_var, 0, 6, 12, set=5)
+    # force version
+    makelabelentry(hardware_tab, "Force Version:" , version_var, 100, 50)
+
+    # Tokens Tab
+    tokens_tab = tabcontent["Tokens"]
+    # tokens checkboxes
+    token_boxes = {"Streaming Mode":stream, "Use SmartContext":smartcontext, "Unban Tokens":unbantokens}
+    for idx, name, in enumerate(token_boxes):
+        makecheckbox(tokens_tab, name, token_boxes[name], idx + 1)
+
+    mirostat_entry, mirostate_label = makelabelentry(tokens_tab, "Mirostat:", mirostat_var)
+    mirostat_tau_entry, mirostat_tau_label = makelabelentry(tokens_tab, "Mirostat Tau:", mirostat_tau)
+    mirostat_eta_entry, mirostat_eta_label = makelabelentry(tokens_tab, "Mirostat Eta:", mirostat_eta)
+    def togglemiro(a,b,c):
+        items = [mirostate_label, mirostat_entry, mirostat_tau_label, mirostat_tau_entry, mirostat_eta_label, mirostat_eta_entry]
+        for idx, item in enumerate(items):
+            if usemirostat.get() == 1:
+                item.grid(row=11 + int(idx/2), column=idx%2, padx=8, stick="nw")
+            else:
+                item.grid_forget()
+
+
+    makecheckbox(tokens_tab, "Use Mirostat", row=10, variable=usemirostat, command=togglemiro)
+    togglemiro(1,1,1)
+
+    # context size
+    makeslider(tokens_tab, "Context Size:",contextsize_text, context_var, 0, 4, 20, set=2)
+
+    # Model Tab
+    model_tab = tabcontent["Model"]
+
+    makefileentry(model_tab, "Model:", "Select GGML Model File", model_var, 1)
+    makefileentry(model_tab, "Lora:", "Select Lora File",lora_var, 3)
+    makefileentry(model_tab, "Lora Base:", "Select Lora Base File", lora_base_var, 5)
+
+    # Network Tab
+    network_tab = tabcontent["Network"]
+
+    # interfaces
+    makelabelentry(network_tab, "Port: ", port_var, 1, 150)
+    makelabelentry(network_tab, "Host: ", host_var, 2, 150)
+
+    # horde
+    makelabel(network_tab, "Horde:", 3).grid(pady=10)
+
+    horde_name_entry, horde_name_label = makelabelentry(network_tab, "Horde Name:", horde_name_var, 5, 200)
+    horde_gen_entry, horde_gen_label = makelabelentry(network_tab, "Gen. Length:", horde_gen_var, 6, 50)
+    horde_context_entry, horde_context_label = makelabelentry(network_tab, "Max Context:",horde_context_var, 7, 50)
+
+    def togglehorde(a,b,c):
+        labels = [horde_name_label, horde_gen_label, horde_context_label]
+        for idx, item in enumerate([horde_name_entry, horde_gen_entry, horde_context_entry]):
+            if usehorde_var.get() == 1:
+                item.grid(row=5 + idx, column = 1, padx=8, pady=1, stick="nw")
+                labels[idx].grid(row=5 + idx, padx=8, pady=1, stick="nw")
+            else:
+                item.grid_forget()
+                labels[idx].grid_forget()
+
+    usehorde_box = makecheckbox(network_tab, "Configure for Horde", usehorde_var, 4, command=togglehorde)
+    togglehorde(1,1,1)
+
+    # launch
+    def guilaunch():
+        if model_var.get() == "":
+            tmp = askopenfilename(title="Select ggml model .bin files")
+            model_var.set(tmp)
+        nonlocal nextstate
+        nextstate = 1
+        root.destroy()
+        pass
+
+    def switch_old_gui():
+        nonlocal nextstate
+        nextstate = 2
+        root.destroy()
+        pass
+
+    ctk.CTkButton(tabs , text = "Launch", fg_color="#2f8d3c", command = guilaunch, width=80, height = 35 ).grid(row=1,column=1, stick="se", padx= 25, pady=5)
+
+    # ctk.CTkButton(tabs , text = "Save", fg_color="#084a66", command = save_config, width=60, height = 35 ).grid(row=1,column=1, stick="sw", padx= 5, pady=5)
+    # ctk.CTkButton(tabs , text = "Load", fg_color="#084a66", command = load_config, width=60, height = 35 ).grid(row=1,column=1, stick="sw", padx= 70, pady=5)
+
+    ctk.CTkButton(tabs , text = "Old GUI", fg_color="#084a66", command = switch_old_gui, width=100, height = 35 ).grid(row=1,column=0, stick="sw", padx= 5, pady=5)
+    # runs main loop until closed or launch clicked
+    root.mainloop()
+
+    if nextstate==0:
+        print("Exiting by user request.")
+        time.sleep(2)
+        sys.exit()
+    elif nextstate==2:
+        time.sleep(0.1)
+        show_old_gui()
+    else:
+        # processing vars
+        args.threads = int(threads_var.get())
+
+        args.usemlock = usemlock.get() == 1
+        args.debugmode = debugmode.get() == 1
+        args.launch = launchbrowser.get()==1
+        args.highpriority = highpriority.get()==1
+        args.nommap = disablemmap.get()==1
+        args.psutil_set_threads = psutil.get()==1
+        args.stream = stream.get()==1
+        args.smartcontext = smartcontext.get()==1
+        args.unbantokens = unbantokens.get()==1
+
+        gpuchoiceidx = int(gpu_choice_var.get())-1
+        if runopts_var.get() == runopts[1]:
+            args.useclblast = [[0,0], [1,0], [0,1]][gpuchoiceidx]
+        if runopts_var.get() == runopts[2]:
+            args.usecublas = ["lowvram",str(gpuchoiceidx)] if lowvram_var.get() == 1 else ["normal",str(gpuchoiceidx)]
+        if gpulayers_var.get():
+            args.gpulayers = int(gpulayers_var.get())
+        if runopts_var.get()==runopts[3]:
+            args.noblas = True
+        if runopts_var.get()==runopts[4]:
+            args.noavx2 = True
+        if runopts_var.get()==runopts[5]:
+            args.noavx2 = True
+            args.noblas = True
+            args.nommap = True
+            print("[Failsafe Mode : mmap is disabled.]")
+
+
+
+        args.blasthreads = None if blas_threads_var.get()=="" else int(blas_threads_var.get())
+
+        args.blasbatchsize = int(blasbatchsize_values[int(blas_size_var.get())])
+        args.forceversion = 0 if version_var.get()=="" else int(version_var.get())
+
+        args.mirostat = [int(mirostat_var.get()), float(mirostat_tau.get()), float(mirostat_eta.get())] if usemirostat.get()==1 else None
+        args.contextsize = int(contextsize_text[context_var.get()])
+
+        args.model_param = None if model_var.get() == "" else model_var.get()
+        args.lora = None if lora_var.get() == "" else ([lora_var.get()] if lora_base_var.get()=="" else [lora_var.get(), lora_base_var.get()])
+
+        args.port_param = defaultport if port_var.get()=="" else int(port_var.get())
+        args.host = host_var.get()
+
+        args.hordeconfig = None if usehorde_var.get() == 0 else [horde_name_var.get(), horde_gen_var.get(), horde_context_var.get()]
+
+        if not args.model_param:
+            print("\nNo ggml model file was selected. Exiting.")
+            time.sleep(2)
+            sys.exit(2)
+
+def show_gui_warning():
+    from tkinter import messagebox
+    import tkinter as tk
+    root = tk.Tk()
+    root.attributes("-alpha", 0)
+    messagebox.showerror(title="New GUI failed, using Old GUI", message="The new GUI failed to load.\n\nTo use new GUI, please install the customtkinter python module.")
+    root.destroy()
+
+def show_old_gui():
     import tkinter as tk
     from tkinter.filedialog import askopenfilename
+    from tkinter import messagebox

     if len(sys.argv) == 1:
         #no args passed at all. Show nooby gui
@@ koboldcpp.py: old tkinter GUI launch button (new lines 1085-1092) @@
     frameD.grid(row=5,column=0,pady=4)

     # Create button, it will change label text
+    tk.Button(root , text = "Launch", font = ("Impact", 18), bg='#54FA9B', command = guilaunch ).grid(row=6,column=0)
+    tk.Label(root, text = "(Please use the Command Line for more advanced options)\nThis GUI is deprecated. Please install customtkinter.",
             font = ("Arial", 9)).grid(row=7,column=0)

     root.mainloop()

@@ koboldcpp.py: old GUI run-mode selection (new lines 1116-1122) @@
     if selrunchoice==runopts[3]:
         args.useclblast = [0,1]
     if selrunchoice==runopts[4]:
+        args.usecublas = ["normal"]
     if selrunchoice==runopts[5]:
         args.noblas = True
     if selrunchoice==runopts[6]:

@@ koboldcpp.py: fall back from the new GUI to the old one (new lines 1169-1190) @@
     if not args.model_param:
         #give them a chance to pick a file
         print("For command line arguments, please refer to --help")
+        print("***")
         try:
+            show_new_gui()
         except Exception as ex:
+            print("Failed to use new GUI. Reason: " + str(ex))
+            print("Make sure customtkinter is installed!!!")
+            print("Attempting to use old GUI...")
+            if not args.model_param:
+                try:
+                    show_gui_warning()
+                    show_old_gui()
+                except Exception as ex2:
+                    print("File selection GUI unsupported. Please check command line: script.py --help")
+                    print("Reason for no GUI: " + str(ex2))
+                    time.sleep(2)
+                    sys.exit(2)

     if args.hordeconfig and args.hordeconfig[0]!="":
         global friendlymodelname, maxhordelen, maxhordectx, showdebug

@@ koboldcpp.py: model loading log (new lines 1254-1261) @@
     args.blasthreads = args.threads

     modelname = os.path.abspath(args.model_param)
+    print(args)
+    print(f"==========\nLoading model: {modelname} \n[Threads: {args.threads}, BlasThreads: {args.blasthreads}, SmartContext: {args.smartcontext}]")
     loadok = load_model(modelname)
     print("Load Model OK: " + str(loadok))

@@ koboldcpp.py: startup banner (new lines 1290-1296) @@
     asyncio.run(RunServerMultiThreaded(args.host, args.port, embedded_kailite))

 if __name__ == '__main__':
+    print("***\nWelcome to KoboldCpp - Version " + KcppVersion) # just update version manually
     # print("Python version: " + sys.version)
     parser = argparse.ArgumentParser(description='KoboldCpp Server')
     modelgroup = parser.add_mutually_exclusive_group() #we want to be backwards compatible with the unnamed positional args

@@ koboldcpp.py: new command-line arguments (new lines 1314-1321) @@
     parser.add_argument("--blasbatchsize", help="Sets the batch size used in BLAS processing (default 512). Setting it to -1 disables BLAS mode, but keeps other benefits like GPU offload.", type=int,choices=[-1,32,64,128,256,512,1024], default=512)
     parser.add_argument("--stream", help="Uses streaming when generating tokens. Only for the Kobold Lite UI.", action='store_true')
     parser.add_argument("--smartcontext", help="Reserving a portion of context to try processing less frequently.", action='store_true')
+    parser.add_argument("--unbantokens", help="Normally, KoboldAI prevents the EOS token from being generated. This flag unbans it.", action='store_true')
+    parser.add_argument("--bantokens", help="You can manually specify a list of token SUBSTRINGS that the AI cannot use. This bans ALL instances of that substring.", metavar=('[token_substrings]'), nargs='+')
     parser.add_argument("--usemirostat", help="Experimental! Replaces your samplers with mirostat. Takes 3 params = [type(0/1/2), tau(5.0), eta(0.1)].",metavar=('[type]', '[tau]', '[eta]'), type=float, nargs=3)
     parser.add_argument("--forceversion", help="If the model file format detection fails (e.g. rogue modified model) you can set this to override the detected format (enter desired version, e.g. 401 for GPTNeoX-Type2).",metavar=('[version]'), type=int, default=0)
     parser.add_argument("--nommap", help="If set, do not use mmap to load newer models", action='store_true')

@@ koboldcpp.py: CuBLAS backend option (new lines 1327-1333) @@
     compatgroup = parser.add_mutually_exclusive_group()
     compatgroup.add_argument("--noblas", help="Do not use OpenBLAS for accelerated prompt ingestion", action='store_true')
     compatgroup.add_argument("--useclblast", help="Use CLBlast for GPU Acceleration. Must specify exactly 2 arguments, platform ID and device ID (e.g. --useclblast 1 0).", type=int, choices=range(0,9), nargs=2)
+    compatgroup.add_argument("--usecublas", help="Use CuBLAS for GPU Acceleration. Requires Nvidia GPU. Select lowvram to not allocate VRAM scratch buffer. Enter a number after to select a different main GPU.", nargs='*',metavar=('[lowvram|normal] [main GPU ID]'), choices=['normal', 'lowvram', '0', '1', '2'])
     parser.add_argument("--gpulayers", help="Set number of layers to offload to GPU when using GPU. Requires GPU.",metavar=('[GPU layers]'), type=int, default=0)
     args = parser.parse_args()
     main(args)
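Usage note (not part of the commit): with the new flags above, a CuBLAS launch that avoids the VRAM scratch buffer and offloads layers to the first GPU could look like `python koboldcpp.py model.bin --usecublas lowvram 0 --gpulayers 32`. The model filename and the layer count are placeholders chosen for illustration, not values taken from this commit.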
llama.cpp
CHANGED
@@ -195,8 +195,8 @@ struct llama_layer {
 };

 struct llama_kv_cache {
-    struct ggml_tensor * k;
-    struct ggml_tensor * v;
+    struct ggml_tensor * k = NULL;
+    struct ggml_tensor * v = NULL;

     struct ggml_context * ctx = NULL;

@@ -482,9 +482,7 @@ struct llama_file_loader {
         std::string word = file.read_string(len);

         float score = 0.0f;
-        ...
-            file.read_raw(&score, sizeof(score));
-        }
+        file.read_raw(&score, sizeof(score));

         vocab.token_to_id[word] = i;

@@ -1160,6 +1158,7 @@ static void llama_model_load_internal(
     }
 }
 #endif // GGML_USE_CUBLAS
+
 #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
     const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));

@@ -1168,6 +1167,10 @@ static void llama_model_load_internal(
         fprintf(stderr, "%s: offloading non-repeating layers to GPU\n", __func__);
     }
     size_t vram_kv_cache = 0;
+
+#ifdef GGML_USE_CUBLAS
+    const int max_backend_supported_layers = hparams.n_layer + 3;
+    const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3;
     if (n_gpu_layers > (int) hparams.n_layer + 1) {
         if (low_vram) {
             fprintf(stderr, "%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);

@@ -1184,14 +1187,18 @@ static void llama_model_load_internal(
             vram_kv_cache += MEM_REQ_KV_SELF().at(model.type) / 2;
         }
     }
-
+#elif defined(GGML_USE_CLBLAST)
+    const int max_backend_supported_layers = hparams.n_layer + 1;
+    const int max_offloadable_layers = hparams.n_layer + 1;
+#endif // GGML_USE_CUBLAS
+
     fprintf(stderr, "%s: offloaded %d/%d layers to GPU\n",
-            __func__, std::min(n_gpu_layers, max_offloadable_layers), ...
+            __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
     fprintf(stderr, "%s: total VRAM used: %zu MB\n",
             __func__, (vram_weights + vram_scratch + vram_kv_cache + MB - 1) / MB); // round up
 #else
     (void) n_gpu_layers;
-#endif
+#endif // defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
 }

     // populate `tensors_by_name`

@@ -1898,10 +1905,10 @@ void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * can
         return;
     }

-    const int64_t t_start_sample_us = ggml_time_us();
-
     llama_sample_softmax(ctx, candidates);

+    const int64_t t_start_sample_us = ggml_time_us();
+
     // Compute the cumulative probabilities
     float cum_sum = 0.0f;
     size_t last_idx = candidates->size;

@@ -1930,9 +1937,8 @@ void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array *
         return;
     }

-    const int64_t t_start_sample_us = ggml_time_us();
-
     llama_sample_softmax(nullptr, candidates);
+    const int64_t t_start_sample_us = ggml_time_us();

     // Compute the first and second derivatives
     std::vector<float> first_derivatives(candidates->size - 1);

@@ -1984,11 +1990,11 @@ void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * c
         return;
     }

-    const int64_t t_start_sample_us = ggml_time_us();
-
     // Compute the softmax of logits and calculate entropy
     llama_sample_softmax(nullptr, candidates);

+    const int64_t t_start_sample_us = ggml_time_us();
+
     float entropy = 0.0f;
     for (size_t i = 0; i < candidates->size; ++i) {
         entropy += -candidates->data[i].p * logf(candidates->data[i].p);

@@ -2157,13 +2163,11 @@ llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_

     if (ctx) {
         ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
-        ctx->n_sample++;
     }
     return X;
 }

 llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) {
-    assert(ctx);
     int64_t t_start_sample_us;
     t_start_sample_us = ggml_time_us();

@@ -2178,13 +2182,14 @@ llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_tok
         candidates->size = 1;
     }

+    if (ctx) {
+        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+    }
+
     // Normalize the probabilities of the remaining words
     llama_sample_softmax(ctx, candidates);

     // Sample the next word X from the remaining words
-    if (ctx) {
-        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
-    }
     llama_token X = llama_sample_token(ctx, candidates);
     t_start_sample_us = ggml_time_us();

@@ -2252,10 +2257,10 @@ static void llama_convert_tensor_internal(const llama_load_tensor & tensor, llam
     }
     float * f32_output = (float *) output.addr;

-    ...
+    ggml_type_traits_t qtype;
     if (ggml_is_quantized(tensor.type)) {
-        qtype = ...
-        if (qtype. ...
+        qtype = ggml_internal_get_type_traits(tensor.type);
+        if (qtype.to_float == NULL) {
             throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor.type)));
         }
     } else if (tensor.type != GGML_TYPE_F16) {

@@ -2266,7 +2271,7 @@ static void llama_convert_tensor_internal(const llama_load_tensor & tensor, llam
     if (tensor.type == GGML_TYPE_F16) {
         ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor.data, f32_output, nelements);
     } else if (ggml_is_quantized(tensor.type)) {
-        qtype. ...
+        qtype.to_float(tensor.data, f32_output, nelements);
     } else {
         LLAMA_ASSERT(false); // unreachable
     }

@@ -2291,7 +2296,7 @@ static void llama_convert_tensor_internal(const llama_load_tensor & tensor, llam
         if (typ == GGML_TYPE_F16) {
             ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
         } else {
-            qtype. ...
+            qtype.to_float(inbuf, outbuf, nels);
         }
     };
     workers.push_back(std::thread(compute, tensor.type, tensor.data + in_buff_offs, f32_output + out_buff_offs, thr_elems));

@@ -2406,9 +2411,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     int ny = tensor.ne.at(1);
     if (nx % QK_K != 0 || ny % QK_K != 0) {
         fprintf(stderr, "\n\n========================= Tensor sizes %d x %d are not divisible by %d\n",nx,ny,QK_K);
-        fprintf(stderr, " ...
+        fprintf(stderr, "Verify before using\n");
         fprintf(stderr, "========================================================================================\n\n");
-        ...
+        // throw std::runtime_error("Unsupported tensor size encountered\n");
     }
 }
 if (tensor.name == "output.weight") {

@@ -3476,23 +3481,35 @@ llama_token llama_token_nl() {
     return 13;
 }

+struct llama_timings llama_get_timings(struct llama_context * ctx) {
+    struct llama_timings result = {
+        /*.t_start_ms  =*/ 1e-3 * ctx->t_start_us,
+        /*.t_end_ms    =*/ 1.00 * ggml_time_ms(),
+        /*.t_load_ms   =*/ 1e-3 * ctx->t_load_us,
+        /*.t_sample_ms =*/ 1e-3 * ctx->t_sample_us,
+        /*.t_p_eval_ms =*/ 1e-3 * ctx->t_p_eval_us,
+        /*.t_eval_ms   =*/ 1e-3 * ctx->t_eval_us,
+
+        /*.n_sample =*/ std::max(1, ctx->n_sample),
+        /*.n_p_eval =*/ std::max(1, ctx->n_p_eval),
+        /*.n_eval   =*/ std::max(1, ctx->n_eval),
+    };
+
+    return result;
+}
+
+void llama_print_timings(struct llama_context * ctx) {
+    const llama_timings timings = llama_get_timings(ctx);

     fprintf(stderr, "\n");
-    fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, ...
+    fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, timings.t_load_ms);
     fprintf(stderr, "%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
-            __func__, ...
+            __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
     fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
-            __func__, ...
+            __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
     fprintf(stderr, "%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
-            __func__, ...
+            __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
-    fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, ( ...
+    fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
 }

 void llama_reset_timings(struct llama_context * ctx) {
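The new llama_get_timings() function returns the same counters that llama_print_timings() formats, so callers can compute throughput programmatically instead of parsing stderr. The snippet below is only a sketch of that idea, not part of the commit; it assumes nothing beyond the struct fields and the std::max(1, ...) clamping visible in the diff.

// Sketch only: derive tokens-per-second figures from llama_get_timings().
#include <cstdio>
#include "llama.h"

static void report_speed(struct llama_context * ctx) {
    const struct llama_timings t = llama_get_timings(ctx);
    // The n_* counters are clamped to at least 1 by llama_get_timings(); the
    // time fields can still be near zero before any evaluation has run, so
    // these numbers are only meaningful after a generation.
    const double prompt_tps = 1e3 / t.t_p_eval_ms * t.n_p_eval;
    const double eval_tps   = 1e3 / t.t_eval_ms   * t.n_eval;
    fprintf(stderr, "prompt: %.2f tok/s, generation: %.2f tok/s, wall time: %.2f ms\n",
            prompt_tps, eval_tps, t.t_end_ms - t.t_start_ms);
}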
llama.h
CHANGED
@@ -134,6 +134,20 @@ extern "C" {
     bool quantize_output_tensor; // quantize output.weight
 } llama_model_quantize_params;

+    // performance timing information
+    struct llama_timings {
+        double t_start_ms;
+        double t_end_ms;
+        double t_load_ms;
+        double t_sample_ms;
+        double t_p_eval_ms;
+        double t_eval_ms;
+
+        int32_t n_sample;
+        int32_t n_p_eval;
+        int32_t n_eval;
+    };
+
     LLAMA_API struct llama_context_params llama_context_default_params();
     LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params();

@@ -331,6 +345,7 @@ extern "C" {
     LLAMA_API llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates);

     // Performance information
+    LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
     LLAMA_API void llama_print_timings(struct llama_context * ctx);
     LLAMA_API void llama_reset_timings(struct llama_context * ctx);
make_old_pyinstaller.bat
CHANGED
@@ -1,4 +1,4 @@
 echo This file is only for my own usage, please do not use it. I am lazy.

 set PATH=d:\\MainApplications\\KoboldAIGPT\\KoboldAI-Horde-Bridge\\python;d:\\MainApplications\\KoboldAIGPT\\KoboldAI-Horde-Bridge\\python\\Scripts;%PATH%
-PyInstaller --noconfirm --onefile --clean --console --icon "./niko.ico" --add-data "./klite.embd;." --add-data "./koboldcpp.dll;." --add-data "./koboldcpp_openblas.dll;." --add-data "./koboldcpp_failsafe.dll;." --add-data "./koboldcpp_openblas_noavx2.dll;." --add-data "./libopenblas.dll;." --add-data "./koboldcpp_clblast.dll;." --add-data "./clblast.dll;." --add-data "./rwkv_vocab.embd;." --add-data "./rwkv_world_vocab.embd;." "./koboldcpp.py" -n "koboldcpp.exe"
+PyInstaller --noconfirm --onefile --clean --console --collect-all customtkinter --icon "./niko.ico" --add-data "./klite.embd;." --add-data "./koboldcpp.dll;." --add-data "./koboldcpp_openblas.dll;." --add-data "./koboldcpp_failsafe.dll;." --add-data "./koboldcpp_openblas_noavx2.dll;." --add-data "./libopenblas.dll;." --add-data "./koboldcpp_clblast.dll;." --add-data "./clblast.dll;." --add-data "./rwkv_vocab.embd;." --add-data "./rwkv_world_vocab.embd;." "./koboldcpp.py" -n "koboldcpp.exe"
make_old_pyinstaller_cuda.bat
CHANGED
@@ -1,4 +1,4 @@
 echo This file is only for my own usage, please do not use it. I am lazy.

 set PATH=d:\\MainApplications\\KoboldAIGPT\\KoboldAI-Horde-Bridge\\python;d:\\MainApplications\\KoboldAIGPT\\KoboldAI-Horde-Bridge\\python\\Scripts;%PATH%
-PyInstaller --noconfirm --onefile --clean --console --icon "./nikogreen.ico" --add-data "./klite.embd;." --add-data "./koboldcpp.dll;." --add-data "./koboldcpp_openblas.dll;." --add-data "./koboldcpp_failsafe.dll;." --add-data "./koboldcpp_openblas_noavx2.dll;." --add-data "./libopenblas.dll;." --add-data "./koboldcpp_clblast.dll;." --add-data "./clblast.dll;." --add-data "./koboldcpp_cublas.dll;." --add-data "./cublas64_11.dll;." --add-data "./cublasLt64_11.dll;." --add-data "./cudart64_110.dll;." --add-data "./msvcp140.dll;." --add-data "./vcruntime140.dll;." --add-data "./vcruntime140_1.dll;." --add-data "./rwkv_vocab.embd;." --add-data "./rwkv_world_vocab.embd;." "./koboldcpp.py" -n "koboldcpp.exe"
+PyInstaller --noconfirm --onefile --clean --console --collect-all customtkinter --icon "./nikogreen.ico" --add-data "./klite.embd;." --add-data "./koboldcpp.dll;." --add-data "./koboldcpp_openblas.dll;." --add-data "./koboldcpp_failsafe.dll;." --add-data "./koboldcpp_openblas_noavx2.dll;." --add-data "./libopenblas.dll;." --add-data "./koboldcpp_clblast.dll;." --add-data "./clblast.dll;." --add-data "./koboldcpp_cublas.dll;." --add-data "./cublas64_11.dll;." --add-data "./cublasLt64_11.dll;." --add-data "./cudart64_110.dll;." --add-data "./msvcp140.dll;." --add-data "./vcruntime140.dll;." --add-data "./vcruntime140_1.dll;." --add-data "./rwkv_vocab.embd;." --add-data "./rwkv_world_vocab.embd;." "./koboldcpp.py" -n "koboldcpp.exe"
make_pyinstaller.bat
CHANGED
@@ -1 +1 @@
-PyInstaller --noconfirm --onefile --clean --console --icon "./niko.ico" --add-data "./klite.embd;." --add-data "./koboldcpp.dll;." --add-data "./koboldcpp_openblas.dll;." --add-data "./koboldcpp_failsafe.dll;." --add-data "./koboldcpp_openblas_noavx2.dll;." --add-data "./libopenblas.dll;." --add-data "./koboldcpp_clblast.dll;." --add-data "./clblast.dll;." --add-data "./rwkv_vocab.embd;." --add-data "./rwkv_world_vocab.embd;." "./koboldcpp.py" -n "koboldcpp.exe"
+PyInstaller --noconfirm --onefile --clean --console --collect-all customtkinter --icon "./niko.ico" --add-data "./klite.embd;." --add-data "./koboldcpp.dll;." --add-data "./koboldcpp_openblas.dll;." --add-data "./koboldcpp_failsafe.dll;." --add-data "./koboldcpp_openblas_noavx2.dll;." --add-data "./libopenblas.dll;." --add-data "./koboldcpp_clblast.dll;." --add-data "./clblast.dll;." --add-data "./rwkv_vocab.embd;." --add-data "./rwkv_world_vocab.embd;." "./koboldcpp.py" -n "koboldcpp.exe"
make_pyinstaller.sh
CHANGED
@@ -1,6 +1,6 @@
 #!/bin/bash

-pyinstaller --noconfirm --onefile --clean --console --icon "./niko.ico" \
+pyinstaller --noconfirm --onefile --clean --console --collect-all customtkinter --icon "./niko.ico" \
 --add-data "./klite.embd:." \
 --add-data "./koboldcpp.so:." \
 --add-data "./koboldcpp_openblas.so:." \
@@ -8,4 +8,5 @@
 --add-data "./koboldcpp_openblas_noavx2.so:." \
 --add-data "./koboldcpp_clblast.so:." \
 --add-data "./rwkv_vocab.embd:." \
+--add-data "./rwkv_world_vocab.embd:." \
 "./koboldcpp.py" -n "koboldcpp"
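The only functional change to these build scripts is the added --collect-all customtkinter (plus the extra rwkv_world_vocab.embd data file in the shell script). Presumably the --collect-all flag is needed because the new GUI imports customtkinter, which ships theme and asset data files that PyInstaller's import analysis would not otherwise bundle into a one-file executable; that rationale is an inference, not stated in the commit.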
otherarch/ggml_v2.c
CHANGED
@@ -472,6 +475,9 @@
 //
 // quantization
 //
+#ifndef MM256_SET_M128I
+#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
+#endif

 #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
 // multiply int8_t, add results pairwise twice

@@ -532,7 +535,7 @@
 static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
 {
     const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
-    const __m256i bytes = ...
+    const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
     const __m256i lowMask = _mm256_set1_epi8( 0xF );
     return _mm256_and_si256(lowMask, bytes);
 }

@@ -605,7 +608,7 @@
     bytesh = _mm_or_si128(bytesh, bit_mask);
     bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
     bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
-    return ...
+    return MM256_SET_M128I(bytesh, bytesl);
 }

 // Unpack 32 4-bit fields into 32 bytes

@@ -618,7 +621,7 @@
     const __m128i lowMask = _mm_set1_epi8(0xF);
     tmpl = _mm_and_si128(lowMask, tmpl);
     tmph = _mm_and_si128(lowMask, tmph);
-    return ...
+    return MM256_SET_M128I(tmph, tmpl);
 }

 // add int16_t pairwise and return as float vector

@@ -626,7 +629,7 @@
     const __m128i ones = _mm_set1_epi16(1);
     const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
     const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
-    const __m256i summed_pairs = ...
+    const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
     return _mm256_cvtepi32_ps(summed_pairs);
 }

@@ -2246,7 +2249,7 @@ ggml_v2_vec_dot_q4_0_q8_0
     const __m128i i32_1 = mul_sum_i8_pairs(bx, by);

     // Convert int32_t to float
-    __m256 p = _mm256_cvtepi32_ps( ...
+    __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));

     // Apply the scale, and accumulate
     acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);

@@ -2727,7 +2730,7 @@ ggml_v2_vec_dot_q5_0_q8_0
     __m128i bxh = _mm256_extractf128_si256(bx, 1);
     bxl = _mm_or_si128(bxl, bxhil);
     bxh = _mm_or_si128(bxh, bxhih);
-    bx = ...
+    bx = MM256_SET_M128I(bxh, bxl);

     const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

@@ -2989,7 +2992,7 @@ ggml_v2_vec_dot_q5_1_q8_1
     __m128i bxh = _mm256_extractf128_si256(bx, 1);
     bxl = _mm_or_si128(bxl, bxhil);
     bxh = _mm_or_si128(bxh, bxhih);
-    bx = ...
+    bx = MM256_SET_M128I(bxh, bxl);

     const __m256 dy = _mm256_broadcast_ss(&y[i].d);
     const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

@@ -17417,7 +17420,7 @@ ggml_v2_vec_dot_q4_0_q8_0_v2
     }

     // Convert int32_t to float
-    __m256 p = _mm256_cvtepi32_ps( ...
+    __m256 p = _mm256_cvtepi32_ps( MM256_SET_M128I( i32[0], i32[1] ));
     // Apply the scale, and accumulate
     acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
 }

@@ -17686,7 +17689,7 @@ ggml_v2_vec_dot_q4_2_q8_0_v2
     __m128i bx0 = bytes_from_nibbles_16(x[2*i + 0].qs);
     __m128i bx1 = bytes_from_nibbles_16(x[2*i + 1].qs);
-    __m256i bx = ...
+    __m256i bx = MM256_SET_M128I(bx1, bx0);

     // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
     const __m256i off = _mm256_set1_epi8(8);

@@ -17819,7 +17822,7 @@ ggml_v2_vec_dot_q4_3_q8_1_v2
     const __m128i bx0 = bytes_from_nibbles_16(x[2*i + 0].qs);
     const __m128i bx1 = bytes_from_nibbles_16(x[2*i + 1].qs);
-    const __m256i bx = ...
+    const __m256i bx = MM256_SET_M128I(bx1, bx0);

     const __m256 dy = _mm256_broadcast_ss(&y[i].d);
     const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
otherarch/gptj_v3.cpp
CHANGED
@@ -348,7 +348,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
     const auto & hparams = model.hparams;
     size_t vram_total = 0;
     const int n_gpu = std::min(gpulayers, int(hparams.n_layer));
-    fprintf(stderr, "%s: [ ...
+    fprintf(stderr, "%s: [GPU] offloading %d layers to GPU\n", __func__, n_gpu);
     for (int i = 0; i < n_gpu; ++i) {
         const auto & layer = model.layers[i];
         layer.c_attn_q_proj_w->backend = GGML_BACKEND_GPU;
@@ -373,7 +373,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
 #endif
     }
-    fprintf(stderr, "%s: [ ...
+    fprintf(stderr, "%s: [GPU] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
 }
 #endif
otherarch/mpt_v3.cpp
CHANGED
@@ -301,7 +301,7 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
     const auto & hparams = model.hparams;
     size_t vram_total = 0;
     const int n_gpu = std::min(gpulayers, int(hparams.n_layers));
-    fprintf(stderr, "%s: [ ...
+    fprintf(stderr, "%s: [GPU] offloading %d layers to GPU\n", __func__, n_gpu);
     for (int i = 0; i < n_gpu; ++i) {
         const auto & layer = model.layers[i];
         layer.ffn_up_proj->backend = GGML_BACKEND_GPU;
@@ -320,7 +320,7 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
         ggml_cuda_transform_tensor(layer.c_attn_out_proj_weight->data,layer.c_attn_out_proj_weight); vram_total += ggml_nbytes(layer.c_attn_out_proj_weight);
 #endif
     }
-    fprintf(stderr, "%s: [ ...
+    fprintf(stderr, "%s: [GPU] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
 }
 #endif
otherarch/neox_v3.cpp
CHANGED
@@ -335,7 +335,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
     const auto & hparams = model.hparams;
     size_t vram_total = 0;
     const int n_gpu = std::min(gpulayers, int(hparams.n_layer));
-    fprintf(stderr, "%s: [ ...
+    fprintf(stderr, "%s: [GPU] offloading %d layers to GPU\n", __func__, n_gpu);
     for (int i = 0; i < n_gpu; ++i) {
         const auto & layer = model.layers[i];
         layer.c_attn_attn_w->backend = GGML_BACKEND_GPU;
@@ -354,7 +354,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
         ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
 #endif
     }
-    fprintf(stderr, "%s: [ ...
+    fprintf(stderr, "%s: [GPU] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
 }
 #endif
otherarch/rwkv_v3.cpp
CHANGED
@@ -6,6 +6,13 @@
 #include "rwkv_v3.h"
 #include "ggml.h"

+#ifdef GGML_USE_CUBLAS
+#include "ggml-cuda.h"
+#endif
+#if defined(GGML_USE_CLBLAST)
+#include "ggml-opencl.h"
+#endif
+
 #include <string>
 #include <vector>
 #include <cstring>
@@ -1058,7 +1065,11 @@ struct rwkv_future_tensor rwkv_future_graph_work(struct rwkv_future_ctx & ctx,
     const size_t n_threads,
     const size_t sequence_len = 1
 ) {
+#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
+    enum ggml_type mul_mat_type = type == GGML_TYPE_F32 ? GGML_TYPE_F32 : GGML_TYPE_F16;
+#else
     enum ggml_type mul_mat_type = ggml_is_quantized(type) ? GGML_TYPE_Q8_1 : type;
+#endif
     return ctx.alloc(GGML_TYPE_I8, rwkv_future_tensor::size(mul_mat_type, ffn_key_height, sequence_len) * n_threads + 64 * (n_threads - 1));
 }

@@ -1545,7 +1556,38 @@ struct rwkv_context * rwkv_clone_context(struct rwkv_context * ctx, const uint32
 }

 bool rwkv_gpu_offload_layers(struct rwkv_context * ctx, const uint32_t n_layers) {
+#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
+    printf("\nOffloading %u (or fewer) layers...",n_layers);
+    const auto offload = [&](struct ggml_tensor * tensor) {
+        // TODO support multi-GPU
+        tensor->backend = GGML_BACKEND_GPU;
+#if defined(GGML_USE_CLBLAST)
+        ggml_cl_transform_tensor(tensor->data, tensor);
+#else
+        ggml_cuda_transform_tensor(tensor->data, tensor);
+#endif
+    };
+
+    const size_t n_gpu = std::min(n_layers, ctx->instance->model.header.n_layer);
+
+    if (ctx->gpu_layers < n_gpu) {
+        for (size_t & i = ctx->gpu_layers; i < n_gpu; i++) {
+            const struct rwkv_layer & layer = ctx->instance->model.layers[i];
+
+            // TODO also offload other operations to GPU with ggml_cuda_assign_buffers
+            offload(layer.att_key);
+            offload(layer.att_value);
+            offload(layer.att_receptance);
+            offload(layer.att_output);
+
+            offload(layer.ffn_key);
+            offload(layer.ffn_value);
+            offload(layer.ffn_receptance);
+        }

+        return true;
+    }
+#endif
     return false;
 }
spm-headers/ggml.h
CHANGED
@@ -201,6 +201,8 @@
 #define GGML_MAX_NAME 48
 #define GGML_DEFAULT_N_THREADS 4

+#define GGML_UNUSED(x) (void)(x)
+
 #define GGML_ASSERT(x) \
     do { \
         if (!(x)) { \
@@ -209,6 +211,30 @@
         } \
     } while (0)

+// used to copy the number of elements and stride in bytes of tensors into local variables.
+// main purpose is to reduce code duplication and improve readability.
+//
+// example:
+//
+//    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
+//    GGML_TENSOR_LOCALS(size_t, nb1, src1, nb);
+//
+#define GGML_TENSOR_LOCALS_1(type, prefix, pointer, array) \
+    const type prefix##0 = (pointer)->array[0]; \
+    GGML_UNUSED(prefix##0);
+#define GGML_TENSOR_LOCALS_2(type, prefix, pointer, array) \
+    GGML_TENSOR_LOCALS_1 (type, prefix, pointer, array) \
+    const type prefix##1 = (pointer)->array[1]; \
+    GGML_UNUSED(prefix##1);
+#define GGML_TENSOR_LOCALS_3(type, prefix, pointer, array) \
+    GGML_TENSOR_LOCALS_2 (type, prefix, pointer, array) \
+    const type prefix##2 = (pointer)->array[2]; \
+    GGML_UNUSED(prefix##2);
+#define GGML_TENSOR_LOCALS(type, prefix, pointer, array) \
+    GGML_TENSOR_LOCALS_3 (type, prefix, pointer, array) \
+    const type prefix##3 = (pointer)->array[3]; \
+    GGML_UNUSED(prefix##3);
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -224,8 +250,8 @@ extern "C" {
     GGML_API float       ggml_fp16_to_fp32(ggml_fp16_t x);
     GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x);

-    GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y,
-    GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y,
+    GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n);
+    GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n);

     struct ggml_object;
     struct ggml_context;
@@ -295,12 +321,15 @@ extern "C" {
         GGML_OP_SUM,
         GGML_OP_SUM_ROWS,
         GGML_OP_MEAN,
+        GGML_OP_ARGMAX,
         GGML_OP_REPEAT,
         GGML_OP_REPEAT_BACK,
         GGML_OP_ABS,
         GGML_OP_SGN,
         GGML_OP_NEG,
         GGML_OP_STEP,
+        GGML_OP_TANH,
+        GGML_OP_ELU,
         GGML_OP_RELU,
         GGML_OP_GELU,
         GGML_OP_GELU_QUICK,
@@ -332,9 +361,8 @@ extern "C" {
         GGML_OP_ROPE_BACK,
         GGML_OP_ALIBI,
         GGML_OP_CLAMP,
-
-
-        GGML_OP_CONV_2D_SK_P0,
+        GGML_OP_CONV_1D,
+        GGML_OP_CONV_2D,

         GGML_OP_FLASH_ATTN,
         GGML_OP_FLASH_FF,
@@ -692,6 +720,11 @@ extern "C" {
             struct ggml_context * ctx,
             struct ggml_tensor  * a);

+    // argmax along rows
+    GGML_API struct ggml_tensor * ggml_argmax(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a);
+
     // if a is the same shape as b, and a is not parameter, return a
     // otherwise, return a new tensor: repeat(a) to fit in b
     GGML_API struct ggml_tensor * ggml_repeat(
@@ -736,6 +769,22 @@ extern "C" {
             struct ggml_context * ctx,
             struct ggml_tensor  * a);

+    GGML_API struct ggml_tensor * ggml_tanh(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a);
+
+    GGML_API struct ggml_tensor * ggml_tanh_inplace(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a);
+
+    GGML_API struct ggml_tensor * ggml_elu(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a);
+
+    GGML_API struct ggml_tensor * ggml_elu_inplace(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a);
+
     GGML_API struct ggml_tensor * ggml_relu(
             struct ggml_context * ctx,
             struct ggml_tensor  * a);
@@ -1086,58 +1135,33 @@ extern "C" {
             float                 min,
             float                 max);

-
-    // GGML_API struct ggml_tensor * ggml_conv_1d(
-    // struct ggml_context * ctx,
-    // struct ggml_tensor * a,
-    // struct ggml_tensor * b,
-    // int s0
-    // int p0,
-    // int d0);
-    //
-    // GGML_API struct ggml_tensor * ggml_conv_2d(
-    // struct ggml_context * ctx,
-    // struct ggml_tensor * a,
-    // struct ggml_tensor * b,
-    // int s0,
-    // int s1,
-    // int p0,
-    // int p1,
-    // int d0,
-    // int d1);
-
-    // padding = half
-    // TODO: we don't support extra parameters for now
-    // that's why we are hard-coding the stride, padding, and dilation
-    // not great ..
-    // example:
-    // a: 3 80 768 1
-    // b: 3000 80 1 1
-    // res: 3000 768 1 1
-    // used in whisper
-    GGML_API struct ggml_tensor * ggml_conv_1d_s1_ph(
+    GGML_API struct ggml_tensor * ggml_conv_1d(
             struct ggml_context * ctx,
             struct ggml_tensor  * a,
-            struct ggml_tensor  * b
+            struct ggml_tensor  * b,
+            int                   s0,  // stride
+            int                   p0,  // padding
+            int                   d0); // dilation

-
-    GGML_API struct ggml_tensor * ggml_conv_1d_s2_ph(
+    GGML_API struct ggml_tensor * ggml_conv_2d(
             struct ggml_context * ctx,
             struct ggml_tensor  * a,
-            struct ggml_tensor  * b
+            struct ggml_tensor  * b,
+            int                   s0,
+            int                   s1,
+            int                   p0,
+            int                   p1,
+            int                   d0,
+            int                   d1);

-    //
-    //
-
-    // example:
-    // a: 16 16 3 768
-    // b: 1024 1024 3 1
-    // res: 64 64 768 1
-    // used in sam
-    GGML_API struct ggml_tensor * ggml_conv_2d_sk_p0(
+    // conv_1d with padding = half
+    // alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d)
+    GGML_API struct ggml_tensor* ggml_conv_1d_ph(
             struct ggml_context * ctx,
             struct ggml_tensor  * a,
-            struct ggml_tensor  * b
+            struct ggml_tensor  * b,
+            int                   s,
+            int                   d);

     GGML_API struct ggml_tensor * ggml_flash_attn(
             struct ggml_context * ctx,
@@ -1493,25 +1517,24 @@ extern "C" {
     //

 #ifdef __cplusplus
-
+// restrict not standard in C++
 #define GGML_RESTRICT
 #else
 #define GGML_RESTRICT restrict
 #endif
-    typedef void (*
-    typedef void (*
-    typedef void (*
+    typedef void (*ggml_to_float_t)  (const void  * GGML_RESTRICT x, float * GGML_RESTRICT y, int k);
+    typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void  * GGML_RESTRICT y, int k);
+    typedef void (*ggml_vec_dot_t)   (const int n, float * GGML_RESTRICT s, const void * GGML_RESTRICT x, const void * GGML_RESTRICT y);

     typedef struct {
-
-
-
-
-
-
-
-
-        quantize_fns_t ggml_internal_get_quantize_fn(size_t i);
+        ggml_to_float_t   to_float;
+        ggml_from_float_t from_float;
+        ggml_from_float_t from_float_reference;
+        ggml_vec_dot_t    vec_dot;
+        enum ggml_type    vec_dot_type;
+    } ggml_type_traits_t;
+
+    ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type i);

 #ifdef __cplusplus
 }
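Among the header changes above, GGML_TENSOR_LOCALS is simply a convenience for pulling a tensor's four ne/nb entries into prefixed locals, with GGML_UNUSED silencing warnings for dimensions an op does not touch. A small sketch of how it reads in practice; the function and tensor names are hypothetical, only the macro and the ggml_tensor fields come from the header.

    #include "ggml.h"
    #include <stdio.h>

    // ne00..ne03 = src0->ne[0..3], ne10..ne13 = src1->ne[0..3], nb10..nb13 = src1->nb[0..3]
    static void print_mul_mat_shape(const struct ggml_tensor * src0, const struct ggml_tensor * src1) {
        GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
        GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
        GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb);
        printf("src0: %lld x %lld, src1: %lld x %lld, src1 row stride: %zu bytes\n",
               (long long) ne00, (long long) ne01,
               (long long) ne10, (long long) ne11, nb11);
    }

The same hunks also consolidate the convolution API: the old fixed-stride helpers (ggml_conv_1d_s1_ph, ggml_conv_1d_s2_ph, ggml_conv_2d_sk_p0) are replaced by ggml_conv_1d/ggml_conv_2d with explicit stride, padding, and dilation, plus the ggml_conv_1d_ph alias noted in the header comment.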