Update app.py
app.py CHANGED
@@ -2,11 +2,15 @@
 ## Happy coding!
 import gradio as gr
 import torch
-#import modin.pandas as pd
-
 import numpy as np
 from diffusers import DiffusionPipeline
 from transformers import pipeline
+import subprocess
+import threading
+
+# FastAPI setup
+def run_fastapi():
+    subprocess.run(["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "7860"])
 
 pipe = pipeline('text-generation', model='daspartho/prompt-extend')
 
@@ -16,8 +20,6 @@ def extend_prompt(prompt):
 def text_it(inputs):
     return extend_prompt(inputs)
 
-
-
 def load_pipeline(use_cuda):
     device = "cuda" if use_cuda and torch.cuda.is_available() else "cpu"
     if device == "cuda":
@@ -32,13 +34,13 @@ def load_pipeline(use_cuda):
     pipe = pipe.to(device)
     return pipe
 
-def genie(prompt="sexy woman",
+def genie(prompt="sexy woman", use_details=False, steps=2, seed=398231747038484200, use_cuda=False):
     pipe = load_pipeline(use_cuda)
     generator = np.random.seed(0) if seed == 0 else torch.manual_seed(seed)
     if use_details:
         extended_prompt = extend_prompt(prompt)
     else:
-        extended_prompt=prompt
+        extended_prompt = prompt
     int_image = pipe(prompt=extended_prompt, generator=generator, num_inference_steps=steps, guidance_scale=0.0).images[0]
     return int_image, extended_prompt
 
@@ -50,28 +52,35 @@ with gr.Blocks() as myface:
     details_checkbox = gr.Checkbox(label="details", info="Generate Details?")
     steps_slider = gr.Slider(1, maximum=5, value=2, step=1, label='Number of Iterations')
     seed_slider = gr.Slider(minimum=0, step=1, maximum=999999999999999999, randomize=True)
-    cuda_checkbox = gr.Checkbox(label="
-
-
+    cuda_checkbox = gr.Checkbox(label="Use CUDA", info="Use GPU for inference.")
+    with gr.Row():
+        generate_button = gr.Button("Generate")
     with gr.Row():
         output_image = gr.Image("./imagen.png")
         output_text = gr.Textbox(label="Generated Text", lines=2)
-    generate_button.click(genie, inputs=[input_text,details_checkbox, steps_slider, seed_slider, cuda_checkbox], outputs=[output_image, output_text], concurrency_limit=10)
+    generate_button.click(genie, inputs=[input_text, details_checkbox, steps_slider, seed_slider, cuda_checkbox], outputs=[output_image, output_text], concurrency_limit=10)
 
 # Define the example
-example = [["sexy woman", True
-    ['''sexy woman, in a black bikini, white bra, highly detailed, d & d, fantasy, highly detailed, digital painting, trending on artstation, concept art, sharp focus, illustration, art by artgerm and greg rutkowski and fuji choko and viktoria gavrilenko''', False
-    ['''sexy woman, D&D, fantasy, portrait, highly detailed, headshot, digital painting, trending on artstation, concept art, sharp focus, illustration, art by artgerm and greg rutkowski and magali villeneuve and wlop, ilya kuvshinov, octane render, 8 ''', False
-    [''' sexy woman, worksafe, light blonde long hair, fully clothed, brown eyes, sitting on a chair, sitting by a reflective pool, in the style of ilya kuvshinov, very dark, cinematic
-    ['''sexy woman, medium shot, candid, red hair, 4 k, high definition, realistic, natural, highly detailed, photo realistic
+example = [["sexy woman", True, 2, 398231747038484200, ""],
+    ['''sexy woman, in a black bikini, white bra, highly detailed, d & d, fantasy, highly detailed, digital painting, trending on artstation, concept art, sharp focus, illustration, art by artgerm and greg rutkowski and fuji choko and viktoria gavrilenko''', False, 2, 304332410412655740, ""],
+    ['''sexy woman, D&D, fantasy, portrait, highly detailed, headshot, digital painting, trending on artstation, concept art, sharp focus, illustration, art by artgerm and greg rutkowski and magali villeneuve and wlop, ilya kuvshinov, octane render, 8 ''', False, 2, 747356768820251800, ""],
+    [''' sexy woman, worksafe, light blonde long hair, fully clothed, brown eyes, sitting on a chair, sitting by a reflective pool, in the style of ilya kuvshinov, very dark, cinematic dramatic atmosphere, artstation, detailed facial ''', False, 2, 398231747038484200, ""],
+    ['''sexy woman, medium shot, candid, red hair, 4 k, high definition, realistic, natural, highly detailed, photo realistic smooth, sharp, unreal engine 5, cinema4d, Blender, render photo-realistic, v-ray ''', False, 2, 398231747038484200, ""],
 ]
 
 with gr.Interface(
     fn=genie,
-    inputs=[input_text,details_checkbox, steps_slider, seed_slider, cuda_checkbox],
+    inputs=[input_text, details_checkbox, steps_slider, seed_slider, cuda_checkbox],
     outputs=[output_image, output_text],
     title="Stable Diffusion Turbo with GPT",
     description="Type your text and lets create an image, check the box details if you want a creative picture",
     examples=example,
 ) as iface:
     iface.launch(inline=True, show_api=False, max_threads=200)
+
+if __name__ == "__main__":
+    # Run FastAPI in a separate thread
+    threading.Thread(target=run_fastapi).start()
+
+    # Run your main Gradio app
+    iface.launch()
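
The new run_fastapi() helper shells out to "uvicorn api:app", which presumes a sibling api.py module exposing a FastAPI instance named app; that file is not part of this diff. Below is a minimal sketch of what such a module might look like; the route names and request model are purely assumptions, not something the commit defines.

# api.py -- hypothetical companion module; nothing in it comes from the diff above
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline

app = FastAPI()

# Load the same prompt-extension model that app.py uses for its Gradio UI.
extender = pipeline('text-generation', model='daspartho/prompt-extend')


class PromptRequest(BaseModel):
    prompt: str


@app.get("/health")
def health():
    # Simple liveness probe for the side server.
    return {"status": "ok"}


@app.post("/extend")
def extend(req: PromptRequest):
    # Run the text-generation pipeline and return the expanded prompt.
    result = extender(req.prompt, num_return_sequences=1)
    return {"extended_prompt": result[0]["generated_text"]}

One practical note on the design: the subprocess binds port 7860, which is also the default port Gradio serves on in a Space, so the two servers would likely contend for that port unless one of them is moved elsewhere.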