jbilcke-hf (HF staff) committed
Commit 9269fd4
1 Parent(s): d15185d

Update app.py

Files changed (1)
  1. app.py +80 -66
app.py CHANGED
@@ -10,7 +10,6 @@ from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = int(os.getenv('MAX_IMAGE_SIZE', '1024'))
-SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')
 
 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
 if torch.cuda.is_available():
@@ -45,12 +44,8 @@ def generate(prompt: str,
              width: int = 1024,
              height: int = 1024,
              guidance_scale: float = 1.0,
-             num_inference_steps: int = 6,
-             secret_token: str = '') -> PIL.Image.Image:
-    if secret_token != SECRET_TOKEN:
-        raise gr.Error(
-            f'Invalid secret token. Please fork the original space if you want to use it for yourself.')
-
+             num_inference_steps: int = 6) -> PIL.Image.Image:
+
     generator = torch.Generator().manual_seed(seed)
 
     if not use_negative_prompt:
@@ -66,68 +61,63 @@ def generate(prompt: str,
         output_type='pil').images[0]
 
 with gr.Blocks() as demo:
-    gr.HTML("""
-    <div style="z-index: 100; position: fixed; top: 0px; right: 0px; left: 0px; bottom: 0px; width: 100%; height: 100%; background: white; display: flex; align-items: center; justify-content: center; color: black;">
-    <div style="text-align: center; color: black;">
-    <p style="color: black;">This space is a REST API to programmatically generate images using LCM-SSD-1B.</p>
-    <p style="color: black;">It is not meant to be directly used through a user interface, but using code and an access key.</p>
-    </div>
-    </div>""")
-    secret_token = gr.Text(
-        label='Secret Token',
-        max_lines=1,
-        placeholder='Enter your secret token',
-    )
-    prompt = gr.Text(
-        label='Prompt',
-        show_label=False,
-        max_lines=1,
-        placeholder='Enter your prompt',
-        container=False,
-    )
-    result = gr.Image(label='Result', show_label=False)
+    with gr.Row():
+        with gr.Row():
+            prompt = gr.Text(
+                label='Prompt',
+                show_label=False,
+                max_lines=1,
+                placeholder='Enter your prompt',
+                container=False,
+            )
+            run_button = gr.Button('Run', scale=0)
+    result = gr.Image(label='Result', show_label=False)
+    with gr.Accordion('Advanced options', open=False):
+        with gr.Row():
+            use_negative_prompt = gr.Checkbox(label='Use negative prompt',
+                                              value=False)
+        negative_prompt = gr.Text(
+            label='Negative prompt',
+            max_lines=1,
+            placeholder='Enter a negative prompt',
+            visible=False,
+        )
 
-    use_negative_prompt = gr.Checkbox(label='Use negative prompt', value=False)
-    negative_prompt = gr.Text(
-        label='Negative prompt',
-        max_lines=1,
-        placeholder='Enter a negative prompt',
-        visible=False,
-    )
-    seed = gr.Slider(label='Seed',
-                minimum=0,
-                maximum=MAX_SEED,
+        seed = gr.Slider(label='Seed',
+                         minimum=0,
+                         maximum=MAX_SEED,
+                         step=1,
+                         value=0)
+        randomize_seed = gr.Checkbox(label='Randomize seed', value=True)
+        with gr.Row():
+            width = gr.Slider(
+                label='Width',
+                minimum=256,
+                maximum=MAX_IMAGE_SIZE,
+                step=32,
+                value=1024,
+            )
+            height = gr.Slider(
+                label='Height',
+                minimum=256,
+                maximum=MAX_IMAGE_SIZE,
+                step=32,
+                value=1024,
+            )
+        with gr.Row():
+            guidance_scale = gr.Slider(
+                label='Guidance scale',
+                minimum=1,
+                maximum=20,
+                step=0.1,
+                value=5.0)
+            num_inference_steps = gr.Slider(
+                label='Number of inference steps',
+                minimum=2,
+                maximum=50,
                 step=1,
-                value=0)
-    randomize_seed = gr.Checkbox(label='Randomize seed', value=True)
+                value=6)
 
-    width = gr.Slider(
-        label='Width',
-        minimum=256,
-        maximum=MAX_IMAGE_SIZE,
-        step=32,
-        value=1024,
-    )
-    height = gr.Slider(
-        label='Height',
-        minimum=256,
-        maximum=MAX_IMAGE_SIZE,
-        step=32,
-        value=1024,
-    )
-    guidance_scale = gr.Slider(
-        label='Guidance scale',
-        minimum=1,
-        maximum=20,
-        step=0.1,
-        value=1.0)
-    num_inference_steps = gr.Slider(
-        label='Number of inference steps',
-        minimum=2,
-        maximum=40,
-        step=1,
-        value=6)
-
     use_negative_prompt.change(
         fn=lambda x: gr.update(visible=x),
         inputs=use_negative_prompt,
@@ -159,5 +149,29 @@ with gr.Blocks() as demo:
         outputs=result,
         api_name='run',
     )
+    negative_prompt.submit(
+        fn=randomize_seed_fn,
+        inputs=[seed, randomize_seed],
+        outputs=seed,
+        queue=False,
+        api_name=False,
+    ).then(
+        fn=generate,
+        inputs=inputs,
+        outputs=result,
+        api_name=False,
+    )
+    run_button.click(
+        fn=randomize_seed_fn,
+        inputs=[seed, randomize_seed],
+        outputs=seed,
+        queue=False,
+        api_name=False,
+    ).then(
+        fn=generate,
+        inputs=inputs,
+        outputs=result,
+        api_name=False,
+    )
 
 demo.queue(max_size=6).launch()
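
With the token check removed, the space behaves like a regular Gradio demo again, and the handler that keeps api_name='run' remains callable programmatically (the new run_button and negative_prompt handlers use api_name=False, so they stay off the API). A minimal client-side sketch using the gradio_client package; the space id shown here is hypothetical, and the argument order is an assumption mirroring the generate() signature visible in this diff:

# Sketch of calling the 'run' endpoint after this commit.
# ASSUMPTIONS: the space id is hypothetical, and the positional argument
# order mirrors generate() as seen in this diff (no secret_token anymore).
from gradio_client import Client

client = Client("jbilcke-hf/some-lcm-ssd-1b-space")  # hypothetical space id

image_path = client.predict(
    "a watercolor painting of a lighthouse",  # prompt
    "",      # negative_prompt
    False,   # use_negative_prompt
    0,       # seed
    1024,    # width
    1024,    # height
    1.0,     # guidance_scale
    6,       # num_inference_steps
    api_name="/run",
)
print(image_path)  # gradio_client downloads the image and returns a local path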