import gradio as gr

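# SDXL checkpoints offered in the dropdown; the list order defines the
# dropdown indices used to look up the matching entry in models2 below.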
models = [
    "yodayo-ai/kivotos-xl-2.0",
    "yodayo-ai/holodayo-xl-2.1",
    "digiplay/NWSJRealMix_SDXL_v1",
    "digiplay/Jellymix_XL_v1",
    "playgroundai/playground-v2-1024px-aesthetic",
    "dataautogpt3/ProteusV0.4",
    "Bakanayatsu/ponyDiffusion-V6-XL-Turbo-DPO",
    "Lykon/dreamshaper-xl-1-0",
    "nerijs/pixel-art-xl",
    "Linaqruf/animagine-xl",
    "dataautogpt3/OpenDalleV1.1",
    "stabilityai/stable-diffusion-xl-base-1.0",
]
current_model = models[0]

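# Prompt-extension helper: loads the MagicPrompt Space as a callable interface.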
text_gen1=gr.Interface.load("spaces/phenomenon1981/MagicPrompt-Stable-Diffusion")

# Load an inference interface for every model above. Building the list from
# `models` keeps it in sync with the dropdown, which selects by index.
models2 = [
    gr.Interface.load(f"models/{model}", live=True, preprocess=False)
    for model in models
]

   
def text_it1(inputs, text_gen1=text_gen1):
    """Extend a short prompt with the MagicPrompt interface."""
    return text_gen1(inputs)

def set_model(current_model):
    """Relabel the output image with the name of the newly selected model."""
    current_model = models[current_model]
    return gr.update(label=current_model)

def send_it1(inputs, model_choice):
    """Generate an image by sending the prompt to the model chosen in the dropdown."""
    proc1 = models2[model_choice]
    return proc1(inputs)

css = ""


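# Build the UI: styled header, model dropdown, prompt box, generate button,
# output image, and a prompt-extension row, wired together at the bottom.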
with gr.Blocks(css=css) as myface:
    gr.HTML("""
     <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
              <div>
                <style>
                    h1 {
                    font-size: 6em;
                    color: #ffffff;
                    margin-top: 30px;
                    margin-bottom: 30px;
                    text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;
                   }
                   h4 {
                    color: #ffaa66 !important;
                   }
                   h3 {
                    color: #ffffff !important;
                   }
                   .gradio-container {
                   background-image: linear-gradient(#8150df, #6d43e4, #000000) !important;
                   color: #ffaa66 !important;
                   font-family: 'IBM Plex Sans', sans-serif !important;
                   }
                   .text-gray-500 {
                   color: #ffaa66 !important;
                   }
                   .gr-box {
    color: #000000 !important;
    background-image: linear-gradient(#563595, #6d43e4, #8150df) !important;
    border-top-color: #000000 !important;
    border-right-color: #ffffff !important;
    border-bottom-color: #ffffff !important;
    border-left-color: #000000 !important;
                   }
                </style>
                <div class="center"><h1>ToyWorld XL</h1></div>
              </div>
              <h3 style="margin-bottom: 10px;">Top 12 SDXL models for your enjoyment!</h3>
              <h4 style="margin-bottom: 10px; font-size: 98%;"><a href="https://huggingface.co/spaces/Yntec/PrintingPress"><u><b>Try out more than 800 Stable Diffusion models at the Printing Press by clicking here!</b></u></a></h4>
            </div>
                                                            """)
    with gr.Row():
        with gr.Column(scale=100):
            # Model selection dropdown; type="index" passes the selected position to handlers
            model_name1 = gr.Dropdown(label="Select Model", choices=list(models), type="index", value=current_model, interactive=True)
    with gr.Row():
        with gr.Column(scale=100):
            magic1=gr.Textbox(label="Your Prompt", lines=4)
  #align-items: center !important;
  #appearance: none !important;
  
  
  #border-style: none !important;
  #box-shadow: rgba(0, 0, 0, .2) 0 3px 5px -1px,rgba(0, 0, 0, .14) 0 6px 10px 0,rgba(0, 0, 0, .12) 0 1px 18px 0 !important;
  #box-sizing: border-box !important;
  
  #cursor: pointer !important;
  #display: inline-flex !important;
  #fill: currentcolor !important;
  #font-family: "Google Sans",Roboto,Arial,sans-serif !important;
  #font-size: 14px !important;
  #font-weight: 500 !important;
  #height: 48px !important;
  #justify-content: center !important;
  #letter-spacing: .25px !important;
  #line-height: normal !important;
  #max-width: 100% !important;
  #overflow: visible !important;
  #padding: 2px 24px !important;
  #position: relative !important;
  #text-align: center !important;
  #text-transform: none !important;
  #transition: box-shadow 280ms cubic-bezier(.4, 0, .2, 1),opacity 15ms linear 30ms,transform 270ms cubic-bezier(0, 0, .2, 1) 0ms !important;
  #user-select: none !important;
  #-webkit-user-select: none !important;
  #touch-action: manipulation !important;
  #width: auto !important;
  #will-change: transform,opacity !important;
  #z-index: 0 !important;
            gr.HTML("""<style>           .gr-button {
            color: white !important;
            text-shadow: 1px 1px 0 rgba(0, 0, 0, 1) !important;
            background-image: linear-gradient(#ff9e0c, #ffd31e) !important;
            border-radius: 24px !important;
            border: solid 1px !important;
            border-top-color: #ffffff !important;
            border-right-color: #000000 !important;
            border-bottom-color: #000000 !important;
            border-left-color: #ffffff !important;
            padding: 6px 30px;
}

.gr-button:active {
            font-size: 98% !important;
            text-shadow: 0px 0px 0 rgba(0, 0, 0, 1) !important;
            border-top-color: #000000 !important;
            border-right-color: #ffffff !important;
            border-bottom-color: #ffffff !important;
            border-left-color: #000000 !important;
}

</style>""")
            run=gr.Button("Generate Image")
    with gr.Row():
        with gr.Column(style="width=800px"):
            output1 = gr.Image(label=current_model)
                
            
    with gr.Row():
        with gr.Column(scale=50):
            input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea",lines=2)
            use_short=gr.Button("Use Short Prompt")
            see_prompts=gr.Button("Extend Idea")
                
     
    # "Use Short Prompt": pass the extension-box text through unchanged.
    def short_prompt(inputs):
        return inputs

    # Relabel the output image whenever a different model is selected.
    model_name1.change(set_model, inputs=model_name1, outputs=[output1])

    # Generate an image with the currently selected model.
    run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])

    # Copy the short prompt into the main prompt box.
    use_short.click(short_prompt, inputs=[input_text], outputs=magic1)

    # Expand the short prompt with MagicPrompt and place it in the prompt box.
    see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
    
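# Enable request queuing and launch the app.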
myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)