---
repo: TheBloke/stable-vicuna-13B-GGML
file: stable-vicuna-13B.ggml.q5_1.bin

llama_cpp:
  n_ctx: 2048
  n_gpu_layers: 0  # LLaMA 13B has 40 layers; 0 keeps inference entirely on the CPU

chat:
  stop:
    - "</s>"       # end-of-sequence token
    - "### USER:"
    - "USER:"

queue:
  max_size: 16
  concurrency_count: 1  # leave this at 1; llama-cpp-python doesn't handle concurrent requests and will crash the entire app
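
# A minimal sketch of how these values map onto llama-cpp-python, assuming the app
# builds the model roughly like this (the actual loader code is not part of this file,
# and the variable names below are illustrative only):
#
#   from llama_cpp import Llama
#
#   llm = Llama(
#       model_path="stable-vicuna-13B.ggml.q5_1.bin",  # `file` above
#       n_ctx=2048,                                    # llama_cpp.n_ctx
#       n_gpu_layers=0,                                # llama_cpp.n_gpu_layers
#   )
#   output = llm(prompt, stop=["</s>", "### USER:", "USER:"])  # chat.stop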