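# Streamlit Space: answers user queries in the context of the Bhagavad Gita via the Groq API.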
import os

import streamlit as st
from groq import Groq

def make_call(api):
    """Calls the Groq API with the given API key and handles potential errors."""
    try:
        client = Groq(
            api_key=api,
        )  # Configure the client with the API key
        query = st.text_input("Enter your query")
        prompt_query = f"Give the answer to the given query in the context of the Bhagavad Gita: {query}"
        chat_completion = client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": prompt_query,
                }
            ],
            model="mixtral-8x7b-32768",
        )
        return chat_completion.choices[0].message.content  # Return the response for further processing
    except Exception as e:
        print(f"API call failed: {e}")
        return None  # Indicate failure
api1 = os.environ.get("GROQ_API_KEY")
apis = [
    api1,
]
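# Hypothetical helper (not part of the original Space): gather the base key plus any
# numbered variants (e.g. GROQ_API_KEY_1, GROQ_API_KEY_2, ...) from the environment so
# the loop below could rotate through several keys; the naming scheme is an assumption.
def load_api_keys(prefix="GROQ_API_KEY", max_keys=10):
    """Return every configured key: the base variable plus any numbered variants."""
    keys = [k for k in [os.environ.get(prefix)] if k]
    for i in range(1, max_keys + 1):
        extra = os.environ.get(f"{prefix}_{i}")
        if extra:
            keys.append(extra)
    return keys
# If several keys were configured this way, the hard-coded list above could instead be: apis = load_api_keys()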
# Try each API key in turn until one returns a response
data = None
for api in apis:
    data = make_call(api)
    if data:  # Successful response
        st.write(data)
        break  # Stop after the first key that works
    else:
        st.write("Failed to retrieve data with the current API key.")
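# To run this app locally (assuming the file is saved as app.py and a Groq key is set
# in the environment):
#   export GROQ_API_KEY="..."   # or set it as a secret in the Space settings
#   streamlit run app.py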
# # Text to 3D
# import streamlit as st
# import torch
# from diffusers import ShapEPipeline
# from diffusers.utils import export_to_gif

# # Model loading (ideally done once at the start for efficiency)
# ckpt_id = "openai/shap-e"

# @st.cache_resource  # Caches the model for faster subsequent runs
# def load_model():
#     return ShapEPipeline.from_pretrained(ckpt_id).to("cuda")

# pipe = load_model()

# # App Title
# st.title("Shark 3D Image Generator")

# # User Inputs
# prompt = st.text_input("Enter your prompt:", "a shark")
# guidance_scale = st.slider("Guidance Scale", 0.0, 20.0, 15.0, step=0.5)

# # Generate and Display Images
# if st.button("Generate"):
#     with st.spinner("Generating images..."):
#         images = pipe(
#             prompt,
#             guidance_scale=guidance_scale,
#             num_inference_steps=64,
#             size=256,
#         ).images
#         gif_path = export_to_gif(images, "shark_3d.gif")
#     st.image(images[0])  # Display the first image
#     st.success("GIF saved as shark_3d.gif")
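# Note: enabling the block above would require the torch and diffusers packages and a
# CUDA-capable GPU, since the pipeline is moved to "cuda".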