nikajoon committed on
Commit 2cab7b7
1 Parent(s): a204c65

Update app.py

Files changed (1)
  1. app.py +9 -64
app.py CHANGED
@@ -1,71 +1,23 @@
- ## Created by ruslanmv.com
- ## Happy coding!
+ import threading
+ from api import app
  import gradio as gr
  import torch
  import numpy as np
  from diffusers import DiffusionPipeline
  from transformers import pipeline
- import subprocess
- import threading
 
- # FastAPI setup
  def run_fastapi():
-     subprocess.run(["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "7860"])
-
- pipe = pipeline('text-generation', model='daspartho/prompt-extend')
-
- def extend_prompt(prompt):
-     return pipe(prompt+',', num_return_sequences=1)[0]["generated_text"]
+     import uvicorn
+     uvicorn.run(app, host="0.0.0.0", port=7860)
 
- def text_it(inputs):
-     return extend_prompt(inputs)
+ # Run FastAPI in a separate thread
+ threading.Thread(target=run_fastapi).start()
 
- def load_pipeline(use_cuda):
-     device = "cuda" if use_cuda and torch.cuda.is_available() else "cpu"
-     if device == "cuda":
-         torch.cuda.max_memory_allocated(device=device)
-         torch.cuda.empty_cache()
-         pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
-         pipe.enable_xformers_memory_efficient_attention()
-         pipe = pipe.to(device)
-         torch.cuda.empty_cache()
-     else:
-         pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
-         pipe = pipe.to(device)
-     return pipe
+ # The rest of the Gradio code continues as before
 
- def genie(prompt="sexy woman", use_details=False, steps=2, seed=398231747038484200, use_cuda=False):
-     pipe = load_pipeline(use_cuda)
-     generator = np.random.seed(0) if seed == 0 else torch.manual_seed(seed)
-     if use_details:
-         extended_prompt = extend_prompt(prompt)
-     else:
-         extended_prompt = prompt
-     int_image = pipe(prompt=extended_prompt, generator=generator, num_inference_steps=steps, guidance_scale=0.0).images[0]
-     return int_image, extended_prompt
-
- with gr.Blocks() as myface:
-     gr.HTML()
-     with gr.Row():
-         with gr.Row():
-             input_text = gr.Textbox(label='Prompt Text.', lines=1)
-             details_checkbox = gr.Checkbox(label="details", info="Generate Details?")
-             steps_slider = gr.Slider(1, maximum=5, value=2, step=1, label='Number of Iterations')
-             seed_slider = gr.Slider(minimum=0, step=1, maximum=999999999999999999, randomize=True)
-             cuda_checkbox = gr.Checkbox(label="Use CUDA", info="Use GPU for inference.")
-     with gr.Row():
-         generate_button = gr.Button("Generate")
-     with gr.Row():
-         output_image = gr.Image("./imagen.png")
-         output_text = gr.Textbox(label="Generated Text", lines=2)
-     generate_button.click(genie, inputs=[input_text, details_checkbox, steps_slider, seed_slider, cuda_checkbox], outputs=[output_image, output_text], concurrency_limit=10)
-
- # Define the example
+ # Define the example
  example = [["sexy woman", True, 2, 398231747038484200, ""],
-            ['''sexy woman, in a black bikini, white bra, highly detailed, d & d, fantasy, highly detailed, digital painting, trending on artstation, concept art, sharp focus, illustration, art by artgerm and greg rutkowski and fuji choko and viktoria gavrilenko''', False, 2, 304332410412655740, ""],
-            ['''sexy woman, D&D, fantasy, portrait, highly detailed, headshot, digital painting, trending on artstation, concept art, sharp focus, illustration, art by artgerm and greg rutkowski and magali villeneuve and wlop, ilya kuvshinov, octane render, 8 ''', False, 2, 747356768820251800, ""],
-            [''' sexy woman, worksafe, light blonde long hair, fully clothed, brown eyes, sitting on a chair, sitting by a reflective pool, in the style of ilya kuvshinov, very dark, cinematic dramatic atmosphere, artstation, detailed facial ''', False, 2, 398231747038484200, ""],
-            ['''sexy woman, medium shot, candid, red hair, 4 k, high definition, realistic, natural, highly detailed, photo realistic smooth, sharp, unreal engine 5, cinema4d, Blender, render photo-realistic, v-ray ''', False, 2, 398231747038484200, ""],
+            # The remaining examples
  ]
 
  with gr.Interface(
@@ -77,10 +29,3 @@ with gr.Interface(
      examples=example,
  ) as iface:
      iface.launch(inline=True, show_api=False, max_threads=200)
-
- if __name__ == "__main__":
-     # Run FastAPI in a separate thread
-     threading.Thread(target=run_fastapi).start()
-
-     # Run your main Gradio app
-     iface.launch()
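
For reference, below is a minimal, self-contained sketch of the pattern this commit adopts: serve the FastAPI application on a background thread with uvicorn while Gradio runs the UI in the main thread. The inline FastAPI app, the /health route, the echo function, the daemon=True flag, and port 8000 (chosen so Gradio can keep its default 7860, whereas the diff binds uvicorn to 7860 itself) are illustrative assumptions, not code from this repository, which instead imports app from its own api.py.

# Illustrative sketch of the "FastAPI on a background thread + Gradio in the
# main thread" pattern used by this commit. Not the repository's actual code.
import threading

import gradio as gr
import uvicorn
from fastapi import FastAPI

# In the repository this object comes from api.py ("from api import app");
# it is defined inline here so the sketch is self-contained.
app = FastAPI()


@app.get("/health")
def health() -> dict:
    # Placeholder endpoint; the real routes in api.py are not shown in the diff.
    return {"status": "ok"}


def run_fastapi() -> None:
    # The commit binds uvicorn to port 7860; this sketch uses 8000 so Gradio
    # can keep its default port 7860 without a clash.
    uvicorn.run(app, host="0.0.0.0", port=8000)


# Start the API server without blocking the Gradio launch below.
# daemon=True (a sketch-level choice) lets the interpreter exit cleanly.
threading.Thread(target=run_fastapi, daemon=True).start()


def echo(prompt: str) -> str:
    # Stand-in for the Space's real generation function.
    return prompt


iface = gr.Interface(fn=echo, inputs="text", outputs="text")
iface.launch(show_api=False)

On platforms that expose a single port, mounting the Gradio UI onto the FastAPI app (gradio.mount_gradio_app) is a common alternative to running two servers side by side.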