generate images with stable diffusion
- app.py +30 -9
- requirements.txt +2 -1
app.py
CHANGED
@@ -11,9 +11,10 @@ import string
 #image generation stuff
 from PIL import Image
 
-# gradio / hf stuff
+# gradio / hf / image gen stuff
 import gradio as gr
 from openai import OpenAI
+import replicate
 from dotenv import load_dotenv
 
 # stats stuff
@@ -27,6 +28,8 @@ from datetime import datetime, timedelta
 
 load_dotenv()
 
+REPLICATE_API_TOKEN = os.getenv("REPLICATE_API_TOKEN")
+
 openai_key = os.getenv("OPENAI_API_KEY")
 pw_key = os.getenv("PW")
 
@@ -135,23 +138,41 @@ def generate_images(prompts, pw, model):
         users.append(user_initials) # Append user initials to the list
 
         try:
-            openai_client = OpenAI(api_key=openai_key)
+            #openai_client = OpenAI(api_key=openai_key)
             start_time = time.time()
 
             #make a prompt with the challenge and text
             prompt_w_challenge = f"{challenge}: {text}"
 
-            response = openai_client.images.generate(
-                prompt=prompt_w_challenge,
-                model=model, # dall-e-2 or dall-e-3
-                quality="standard", # standard or hd
-                size="512x512" if model == "dall-e-2" else "1024x1024", # varies for dalle-2 and dalle-3
-                n=1, # Number of images to generate
+            # response = openai_client.images.generate(
+            #     prompt=prompt_w_challenge,
+            #     model=model, # dall-e-2 or dall-e-3
+            #     quality="standard", # standard or hd
+            #     size="512x512" if model == "dall-e-2" else "1024x1024", # varies for dalle-2 and dalle-3
+            #     n=1, # Number of images to generate
+            # )
+
+            # stable diffusion
+            response = replicate.run(
+                "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4",
+                input={
+                    "width": 768, # must be a multiple of 64
+                    "height": 768,
+                    "prompt": prompt_w_challenge,
+                    "scheduler": "K_EULER", # controls the diffusion sampling steps, balancing image quality, speed, and resource use - options: DDIM, K_EULER, DPMSolverMultistep, K_EULER_ANCESTRAL, PNDM, KLMS
+                    "num_outputs": 1, # images to generate
+                    "guidance_scale": 7.5, # 0-20, higher values stick closer to the prompt
+                    "num_inference_steps": 50 # 1-500, higher is generally better
+                }
             )
+            print(response)
+
             end_time = time.time()
             gen_time = end_time - start_time # total generation time
 
-            image_url = response.data[0].url
+            #image_url = response.data[0].url
+            image_url = response[0]
+
             # conditionally render the user to the label with the prompt
             image_label = f"{i}: {text}" if user_initials == "" else f"{i}: {user_initials}-{text}, "
 
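For reference, a minimal standalone sketch of the new Replicate call (not part of the commit): it reuses the same model version and parameters as the change above, but the prompt string here is hypothetical, and it assumes the replicate package is installed and REPLICATE_API_TOKEN is exported in the environment.

import replicate

# Same model version and inputs as the app.py diff above.
output = replicate.run(
    "stability-ai/stable-diffusion:ac732df83cea7fff18b8472768c88ad041fa750ff7682a21affe81863cbe77e4",
    input={
        "prompt": "a watercolor fox in a snowy forest",  # hypothetical prompt
        "width": 768,
        "height": 768,
        "num_outputs": 1,
        "guidance_scale": 7.5,
        "num_inference_steps": 50,
        "scheduler": "K_EULER",
    },
)
print(output[0])  # this model version returns a list of image URLs

With num_outputs set to 1 the list holds a single URL, which is why the app now reads response[0] instead of response.data[0].url.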
requirements.txt
CHANGED
@@ -1,4 +1,5 @@
 gradio==4.2.0
 openai==1.2.3
 python-dotenv==1.0.0
-pymongo
+pymongo
+replicate==0.24.0
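Note on the new dependency: the replicate client authenticates via the REPLICATE_API_TOKEN environment variable (the same variable app.py now reads with os.getenv), so the Space needs that secret set. A small optional guard that fails early when the token is missing, sketched here as an assumption rather than something in the commit:

import os

# Hypothetical guard: replicate.run() cannot authenticate without REPLICATE_API_TOKEN,
# set e.g. as a Space secret, so check for it before generating images.
if not os.getenv("REPLICATE_API_TOKEN"):
    raise RuntimeError("REPLICATE_API_TOKEN is not set; add it before generating images.")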