---
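# Configuration for a llama.cpp-based chat app; the GGML model weights are
# fetched from the Hugging Face Hub repo named below.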
repo: TheBloke/stable-vicuna-13B-GGML
file: stable-vicuna-13B.ggml.q5_1.bin
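# q5_1 = 5-bit GGML quantization (slightly larger than q5_0, with lower quantization error)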
llama_cpp:
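  # These keys match llama_cpp.Llama() constructor arguments and are
  # presumably passed straight through to it.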
  n_ctx: 2048 # context window in tokens (prompt + completion); matches the model's 2048-token training context
  n_gpu_layers: 0 # CPU-only; LLaMA 13B has 40 layers, so up to 40 can be offloaded to the GPU
chat:
  stop: # generation halts when any of these strings is emitted
    - "</s>" # end-of-sequence token
    - "<unk>" # unknown token
    - "### USER:" # role prefixes, so the model doesn't write the user's next turn
    - "USER:"
queue:
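  # Likely Gradio queue settings (max_size / concurrency_count mirror its .queue() kwargs)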
  max_size: 16 # pending requests beyond this are rejected
  concurrency_count: 1 # leave this at 1; llama-cpp-python doesn't handle concurrent requests and will crash the entire app
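
# A minimal sketch of how an app might consume this config (the loader code
# and the "config.yml" / "### ASSISTANT:" names are assumptions, not part of
# this repo):
#
#   import yaml
#   from huggingface_hub import hf_hub_download
#   from llama_cpp import Llama
#
#   cfg = yaml.safe_load(open("config.yml"))
#   model_path = hf_hub_download(cfg["repo"], cfg["file"])
#   llm = Llama(model_path=model_path, **cfg["llama_cpp"])
#   reply = llm("### USER: Hello\n### ASSISTANT:", stop=cfg["chat"]["stop"])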