rrg92 committed
Commit 6912198 · verified · 1 Parent(s): 4631974

removed ZeroGPU

Files changed (1):
  app.py +91 -92
app.py CHANGED
@@ -1,92 +1,91 @@
-from diffusers import AutoPipelineForText2Image
-import torch
-import gradio as gr
-import threading
-import time
-from queue import Queue
-import spaces
-
-
-@spaces.GPU(duration=120)
-def GenerateImage(prompt, steps, progress, model):
-
-    data = []
-
-    queue = Queue()
-
-    def StartThread():
-
-        pipe_txt2img = AutoPipelineForText2Image.from_pretrained(
-            model, torch_dtype=torch.float16, use_safetensors=True
-        ).to("cuda")
-
-        vae = pipe_txt2img.vae
-
-        def latents_callback(i, t, latents):
-            latents = 1 / 0.18215 * latents
-            image = vae.decode(latents).sample[0]
-            image = (image / 2 + 0.5).clamp(0, 1)
-            image = image.cpu().permute(1, 2, 0).numpy()
-            FinalImage = pipe_txt2img.numpy_to_pil(image)
-            queue.put({'type': 'image', 'image': FinalImage[0], 'step': i})
-
-        generator = torch.Generator(device="cpu").manual_seed(37)
-        FinalImage = pipe_txt2img(prompt, generator=generator, num_inference_steps=steps, callback=latents_callback, callback_steps=progress).images[0]
-        queue.put({'type': 'image', 'image': FinalImage, 'step': steps + 1})
-        queue.put({'type': 'end'})
-
-    t = threading.Thread(target=StartThread)
-    t.start()
-
-    while True:
-        print("Waiting next item")
-        nextItem = queue.get()
-
-        if nextItem['type'] == 'end':
-            break
-
-        Image = nextItem['image']
-        Step = nextItem['step']
-        yield [Image, Step]
-
-    print("Waiting thread finish...")
-    t.join()
-
-    print("Finished!")
-
-
-
-with gr.Blocks() as demo:
-    gr.Markdown("""
-    This is a lab demonstrating how to implement text-to-image generation with Gradio and Diffusers, showing the intermediate image produced at each step.
-    Type a prompt, choose the maximum number of steps, and the frequency (in steps) at which progress is shown. You will see the diffusion process live!
-    """)
-
-    with gr.Row():
-        prompt = gr.Text(label="prompt")
-        TotalSteps = gr.Slider(label="Steps", minimum=1, maximum=150, value=10)
-        ProgressSteps = gr.Number(label="Progress steps", value=2)
-        model = gr.Text(label="Model", value="dreamlike-art/dreamlike-photoreal-2.0")
-
-    with gr.Row():
-        with gr.Column():
-            btnRun = gr.Button(value="Run!")
-            btnStop = gr.Button(value="Stop!")
-            status = gr.Text(label="Current Step")
-
-
-    image = gr.Image()
-
-
-    GenerateEvent = btnRun.click(GenerateImage, [prompt, TotalSteps, ProgressSteps, model], [image, status])
-    btnStop.click(None, None, None, cancels=[GenerateEvent])
-
-if __name__ == "__main__":
-    demo.launch(show_api=True)
-
-
-
-
-
-
-
+from diffusers import AutoPipelineForText2Image
+import torch
+import gradio as gr
+import threading
+import time
+from queue import Queue
+
+
+# @spaces.GPU(duration=120)
+def GenerateImage(prompt, steps, progress, model):
+
+    data = []
+
+    queue = Queue()
+
+    def StartThread():
+
+        pipe_txt2img = AutoPipelineForText2Image.from_pretrained(
+            model, torch_dtype=torch.float16, use_safetensors=True
+        ).to("cuda")
+
+        vae = pipe_txt2img.vae
+
+        def latents_callback(i, t, latents):
+            latents = 1 / 0.18215 * latents
+            image = vae.decode(latents).sample[0]
+            image = (image / 2 + 0.5).clamp(0, 1)
+            image = image.cpu().permute(1, 2, 0).numpy()
+            FinalImage = pipe_txt2img.numpy_to_pil(image)
+            queue.put({'type': 'image', 'image': FinalImage[0], 'step': i})
+
+        generator = torch.Generator(device="cpu").manual_seed(37)
+        FinalImage = pipe_txt2img(prompt, generator=generator, num_inference_steps=steps, callback=latents_callback, callback_steps=progress).images[0]
+        queue.put({'type': 'image', 'image': FinalImage, 'step': steps + 1})
+        queue.put({'type': 'end'})
+
+    t = threading.Thread(target=StartThread)
+    t.start()
+
+    while True:
+        print("Waiting next item")
+        nextItem = queue.get()
+
+        if nextItem['type'] == 'end':
+            break
+
+        Image = nextItem['image']
+        Step = nextItem['step']
+        yield [Image, Step]
+
+    print("Waiting thread finish...")
+    t.join()
+
+    print("Finished!")
+
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("""
+    This is a lab demonstrating how to implement text-to-image generation with Gradio and Diffusers, showing the intermediate image produced at each step.
+    Type a prompt, choose the maximum number of steps, and the frequency (in steps) at which progress is shown. You will see the diffusion process live!
+    """)
+
+    with gr.Row():
+        prompt = gr.Text(label="prompt")
+        TotalSteps = gr.Slider(label="Steps", minimum=1, maximum=150, value=10)
+        ProgressSteps = gr.Number(label="Progress steps", value=2)
+        model = gr.Text(label="Model", value="dreamlike-art/dreamlike-photoreal-2.0")
+
+    with gr.Row():
+        with gr.Column():
+            btnRun = gr.Button(value="Run!")
+            btnStop = gr.Button(value="Stop!")
+            status = gr.Text(label="Current Step")
+
+
+    image = gr.Image()
+
+
+    GenerateEvent = btnRun.click(GenerateImage, [prompt, TotalSteps, ProgressSteps, model], [image, status])
+    btnStop.click(None, None, None, cancels=[GenerateEvent])
+
+if __name__ == "__main__":
+    demo.launch(show_api=True)
+
+
+
+
+
+
+
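
The decorator this commit disables comes from Hugging Face's `spaces` package: on a ZeroGPU Space, CUDA is attached only while a function wrapped in `@spaces.GPU` is executing, so any code that touches the GPU has to live inside such a function. A minimal sketch of the pattern, for reference (the function name `generate` is illustrative; `duration` caps the per-call GPU reservation in seconds):

import spaces  # Hugging Face ZeroGPU helper package
import torch

@spaces.GPU(duration=120)  # on ZeroGPU, attach a GPU for up to 120 s per call
def generate(prompt):
    # CUDA is only visible here on ZeroGPU hardware; on a dedicated-GPU or
    # CPU Space the decorator serves no purpose, which is what this commit removes.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"{prompt!r} would run on {device}"

On hardware where the `spaces` package is not installed, the bare `import spaces` fails before the app even starts, which is presumably why the commit drops the import and comments out the decorator together.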
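Unchanged by this commit, but central to how app.py streams previews: `pipe_txt2img(...)` blocks until all steps finish and invokes `callback` synchronously at each step, so the pipeline runs in a worker thread that pushes decoded frames into a `Queue`, while the Gradio generator drains the queue and `yield`s each frame as it arrives. A stripped-down sketch of that producer/consumer shape, with the model replaced by a stub:

import threading
from queue import Queue

def stream_steps(n_steps):
    # Generator mirroring GenerateImage: produce in a thread, consume here.
    q = Queue()

    def worker():
        # Stand-in for pipe_txt2img(...) firing latents_callback every step.
        for i in range(n_steps):
            q.put({'type': 'image', 'step': i})
        q.put({'type': 'end'})  # sentinel telling the consumer to stop

    t = threading.Thread(target=worker)
    t.start()

    while True:
        item = q.get()  # blocks until the worker emits the next item
        if item['type'] == 'end':
            break
        yield item['step']

    t.join()

# Each value is yielded as soon as the worker produces it:
for step in stream_steps(3):
    print("preview at step", step)

Gradio treats a generator event handler exactly like this consumer loop: every `yield` updates the bound outputs, which is what makes the live preview in the demo possible.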