gguf-my-repo / start.sh
#!/bin/bash

if [ ! -d "llama.cpp" ]; then
    # clone llama.cpp only when it is missing, which only happens in the local dev env
    git clone https://github.com/ggerganov/llama.cpp
fi

if [[ -z "${RUN_LOCALLY}" ]]; then
    # enable the CUDA build flag only when NOT running locally
    export LLAMA_CUDA=1
fi
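
# Example (hypothetical invocation, not part of the Space's deployment): to skip
# the CUDA flag when developing on a machine without a GPU, set RUN_LOCALLY
# before starting the script:
#   RUN_LOCALLY=1 bash start.sh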

# build only the tools the app needs: quantization, GGUF splitting, and
# importance-matrix (imatrix) generation
cd llama.cpp || exit 1
make -j llama-quantize llama-gguf-split llama-imatrix
cd ..

python app.py
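
# Sketches of how the freshly built tools are typically invoked (the real calls
# live in app.py; the model and file names below are hypothetical):
#   ./llama.cpp/llama-quantize model-f16.gguf model-Q4_K_M.gguf Q4_K_M
#   ./llama.cpp/llama-imatrix -m model-f16.gguf -f calibration.txt -o imatrix.dat
#   ./llama.cpp/llama-gguf-split --split-max-size 2G model-Q4_K_M.gguf model-Q4_K_M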