# iris/llama.cpp/build/examples/speculative-simple/CMakeFiles/llama-speculative-simple.dir/flags.make
# CMAKE generated file: DO NOT EDIT!
# Generated by "Unix Makefiles" Generator, CMake Version 3.22

# compile CXX with /usr/bin/c++
CXX_DEFINES = -DGGML_BACKEND_SHARED -DGGML_SHARED -DGGML_USE_CPU -DLLAMA_SHARED

CXX_INCLUDES = -I"/home/mathis-portable/Documents/KTH/Scalable Machine Learning/lab_project/ID2224_Lab2/llama.cpp/examples" -I"/home/mathis-portable/Documents/KTH/Scalable Machine Learning/lab_project/ID2224_Lab2/llama.cpp/common/." -I"/home/mathis-portable/Documents/KTH/Scalable Machine Learning/lab_project/ID2224_Lab2/llama.cpp/src/." -I"/home/mathis-portable/Documents/KTH/Scalable Machine Learning/lab_project/ID2224_Lab2/llama.cpp/src/../include" -I"/home/mathis-portable/Documents/KTH/Scalable Machine Learning/lab_project/ID2224_Lab2/llama.cpp/ggml/src/../include"

CXX_FLAGS = -O3 -DNDEBUG -Wmissing-declarations -Wmissing-noreturn -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-array-bounds -Wextra-semi
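
# How these variables are consumed: build.make in this same directory includes
# flags.make and passes CXX_DEFINES, CXX_INCLUDES and CXX_FLAGS to the compiler
# for each object of this target. The commented rule below is a minimal sketch,
# assuming the standard "Unix Makefiles" generator layout and a source file
# named examples/speculative-simple/speculative-simple.cpp; both are assumptions
# for illustration, not copied from the generated build.make.
#
# CMakeFiles/llama-speculative-simple.dir/speculative-simple.cpp.o:
# 	/usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) \
# 		-o CMakeFiles/llama-speculative-simple.dir/speculative-simple.cpp.o \
# 		-c "/home/mathis-portable/Documents/KTH/Scalable Machine Learning/lab_project/ID2224_Lab2/llama.cpp/examples/speculative-simple/speculative-simple.cpp"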