File size: 3,294 Bytes
3d5d6c3
e4014ea
 
 
8166181
3d87864
8166181
e4014ea
8166181
3d87864
e4014ea
3d87864
e4014ea
3d87864
e4014ea
 
 
 
e79ed56
e4014ea
 
 
 
 
 
 
 
3d87864
e4014ea
 
 
8166181
3d87864
 
 
 
 
 
 
 
 
 
 
e4014ea
3d87864
db260c7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e4014ea
db260c7
 
 
 
 
e4014ea
db260c7
 
 
 
 
3d87864
 
db260c7
 
 
 
 
 
 
 
 
 
 
8166181
 
db260c7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
import spaces
import gradio as gr
import numpy as np
import random
import torch
from diffusers import AuraFlowPipeline

# Select CUDA when available; the fp16 pipeline below effectively requires a GPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Initialize the AuraFlow v0.3 pipeline.
# NOTE: this downloads/loads model weights at import time (network + disk I/O).
# float16 halves memory; on CPU-only hosts fp16 inference may be slow or unsupported.
pipe = AuraFlowPipeline.from_pretrained(
    "fal/AuraFlow-v0.3",
    torch_dtype=torch.float16
).to(device)

MAX_SEED = np.iinfo(np.int32).max  # largest value the seed slider / RNG accepts (2**31 - 1)
MAX_IMAGE_SIZE = 1024              # upper bound for the width/height sliders

@spaces.GPU
def infer(prompt,
          negative_prompt="",
          seed=42,
          randomize_seed=False,
          width=1024,
          height=1024,
          guidance_scale=5.0,
          num_inference_steps=28,
          progress=gr.Progress(track_tqdm=True)):
    """Generate one image with the AuraFlow pipeline.

    Args:
        prompt: text prompt describing the desired image.
        negative_prompt: concepts to steer away from ("" disables).
        seed: RNG seed; ignored when randomize_seed is True.
        randomize_seed: draw a fresh seed in [0, MAX_SEED] instead of using `seed`.
        width, height: output resolution in pixels.
        guidance_scale: classifier-free guidance strength.
        num_inference_steps: number of denoising steps.
        progress: Gradio progress tracker (mirrors the pipeline's tqdm bar).

    Returns:
        (PIL.Image, int): the generated image and the seed actually used,
        so the UI can display/reproduce it.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Gradio sliders may deliver floats depending on version/client, but
    # manual_seed and the pipeline's size/step arguments require ints.
    seed = int(seed)
    generator = torch.Generator(device=device).manual_seed(seed)

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=int(width),
        height=int(height),
        guidance_scale=guidance_scale,
        num_inference_steps=int(num_inference_steps),
        generator=generator
    ).images[0]

    return image, seed
# --- Gradio UI ---------------------------------------------------------------
# Declarative layout: component creation order inside each context manager
# determines on-screen order, so statement order here is load-bearing.
with gr.Blocks(theme=gr.themes.Default()) as demo:
    # Page title.
    gr.HTML(
        """
        <h1 style='text-align: center'>
        AuraFlow v0.3
        </h1>
        """
    )
    # Author / social links banner.
    gr.HTML(
        """
        <h3 style='text-align: center'>
        Follow me for more!
        <a href='https://twitter.com/kadirnar_ai' target='_blank'>Twitter</a> | <a href='https://github.com/kadirnar' target='_blank'>Github</a> | <a href='https://www.linkedin.com/in/kadir-nar/' target='_blank'>Linkedin</a>  | <a href='https://www.huggingface.co/kadirnar/' target='_blank'>HuggingFace</a>
        </h3>
        """
    )
    
    with gr.Row():
        # Left column: all generation inputs. Defaults mirror infer()'s
        # signature; slider bounds come from the module-level constants.
        with gr.Column(scale=1):
            prompt = gr.Text(label="Prompt", placeholder="Enter your prompt")
            negative_prompt = gr.Text(label="Negative prompt", placeholder="Enter a negative prompt")
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
            height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
            guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=5.0)
            num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=50, step=1, value=28)
            run_button = gr.Button("Generate")

        # Right column: outputs — the image plus the seed actually used
        # (useful for reproducing a result when "Randomize seed" is on).
        with gr.Column(scale=1):
            result = gr.Image(label="Generated Image")
            seed_output = gr.Number(label="Seed used")

    # Wire the button to infer(); input order must match infer()'s
    # positional parameters, and outputs match its (image, seed) return.
    run_button.click(
        fn=infer,
        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result, seed_output]
    )

    # Clickable example prompts that populate the prompt textbox.
    gr.Examples(
        examples=[
            "A photo of a lavender cat",
            "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
            "An astronaut riding a green horse",
            "A delicious ceviche cheesecake slice",
        ],
        inputs=prompt,
    )

# Queue requests (serializes GPU work) and listen on all interfaces without
# creating a public share link.
demo.queue().launch(server_name="0.0.0.0", share=False)