LordFarquaad42 committed on
Commit b5701d5 · verified
1 Parent(s): 37647bd
Files changed (1)
  1. app.py +6 -22
app.py CHANGED
@@ -7,36 +7,20 @@ from dotenv import load_dotenv
 load_dotenv()
 
 API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-cascade-prior"
-# Corrected the format of the Authorization header here
-HEADERS = {"Authorization": f"Bearer {os.getenv('bearer_token')}"}
+HEADERS = {"Authorization": f"Bearer ${os.getenv('bearer_token')}"}
 
 st.title("Image Generator with Diffusers")
 
 def query(payload):
     response = requests.post(API_URL, headers=HEADERS, json=payload)
-    # Check if the request was successful
-    if response.status_code == 200:
-        return response.content
-    else:
-        # If the request failed, print the response and return None
-        st.write("Failed to get the image:", response.text)
-        return None
+    return response.content
 
 prompt = st.text_input("Enter a prompt:", "batman hitting the griddy in gotham")
 
 image_bytes = query({
     "inputs": prompt,
 })
-
-# Ensure that image_bytes is not None and is not empty
-if image_bytes:
-    try:
-        image = Image.open(io.BytesIO(image_bytes))
-        st.image(image, caption="Image generated by the model", use_column_width=True)
-    except IOError:
-        # If an error occurs, print the error and the image_bytes
-        st.write("Cannot identify image file. The response may not be an image or is corrupted.")
-        # To debug, you might want to see the first few bytes to determine if it's an image file
-        st.write(image_bytes[:100])
-else:
-    st.write("No image data was returned.")
+st.text(image_bytes)
+# if(image_bytes):
+#     image = Image.open(io.BytesIO(image_bytes))
+#     st.image(image, caption="Image generated by the model", use_column_width=True)