TobDeBer committed on
Commit
19822a8
1 Parent(s): 52af04b

normal files

Browse files
Files changed (6) hide show
  1. .gitattributes +3 -0
  2. README.md +13 -0
  3. build_diffusion.sh +5 -0
  4. build_gguf.sh +5 -0
  5. diffusion/Dockerfile +6 -0
  6. gguf/Dockerfile +10 -0
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ llama-cli filter=lfs diff=lfs merge=lfs -text
37
+ llama-server filter=lfs diff=lfs merge=lfs -text
38
+ sd_cuda filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Container Repository for CPU adaptations of Inference code
2
+
3
+ ## Variants
4
+
5
+ ### CPUdiffusion
6
+
7
+ - inference diffusion models on CPU
8
+ - includes CUDAonCPU stack
9
+
10
+ ### CPUgguf
11
+
12
+ - inference gguf models on CPU
13
+ - includes GUI libraries
build_diffusion.sh ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
#!/bin/bash
# Build the CPUdiffusion container image, drop dangling layers, and export
# the image as a tarball.
#
# set -e/-u/pipefail: abort immediately if the build fails, so we never
# prune layers or export a stale/missing image afterwards.
set -euo pipefail

podman build --squash-all --tag bookworm:diffusion diffusion
podman image prune -f
podman save localhost/bookworm:diffusion >test.tar
# Optional maximum compression (slow); left disabled as in the original:
#time xz -9e -T0 test.tar
build_gguf.sh ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
#!/bin/bash
# Build the CPUgguf container image, drop dangling layers, export the image
# as a tarball, and compress it.
#
# set -e/-u/pipefail: abort immediately if any step fails, so we never
# export or compress a stale/missing image.
set -euo pipefail

podman build --squash-all --tag bookworm:gguf gguf
podman image prune -f
podman save localhost/bookworm:gguf >gguf.tar
time xz -6 -T0 gguf.tar
diffusion/Dockerfile ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
FROM debian:bookworm-slim

# -y keeps apt non-interactive: without it, apt prompts for confirmation,
# reads EOF in the build (no tty), and aborts the image build.
RUN apt-get update && apt-get upgrade -y
# NOTE(review): Debian's pip is normally packaged as "python3-pip" — verify
# that a bare "pip" package actually resolves on bookworm.
RUN apt-get install -y git git-lfs pip cmake python3

# Keep the container alive until stopped. The argument must be exactly
# "infinity" — a leading space makes sleep fail with
# "invalid time interval ' infinity'", so the container exits immediately.
CMD ["sleep", "infinity"]
gguf/Dockerfile ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
FROM debian:bookworm-slim

# -y keeps apt non-interactive: without it, apt prompts for confirmation,
# reads EOF in the build (no tty), and aborts the image build.
RUN apt-get update && apt-get upgrade -y
# NOTE(review): Debian's pip is normally packaged as "python3-pip" — verify
# that a bare "pip" package actually resolves on bookworm.
RUN apt-get install -y git git-lfs pip cmake python3

# Clone, build, copy, and clean up in ONE layer: split across separate RUN
# steps, the full llama.cpp source tree persists in intermediate layers and
# the later rm -rf cannot shrink the image. -j"$(nproc)" adapts to the
# build host instead of the hard-coded 32 jobs.
RUN git clone https://github.com/ggerganov/llama.cpp.git && \
    make -C llama.cpp -j"$(nproc)" llama-server && \
    cp llama.cpp/llama-server . && \
    rm -rf llama.cpp

# Keep the container alive until stopped. The argument must be exactly
# "infinity" — a leading space makes sleep fail with
# "invalid time interval ' infinity'", so the container exits immediately.
CMD ["sleep", "infinity"]