Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,35 +1,82 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Minimal Groq chat app: read a user query and print the model's reply.

Requires the GROQ_API_KEY environment variable; run under Streamlit
(`streamlit run app.py`) so the text-input widget renders.
"""

import os

import streamlit as st  # was missing in the original, yet `st.` was used below
from groq import Groq

# API key comes from the environment -- never hard-code credentials.
client = Groq(
    api_key=os.environ.get("GROQ_API_KEY"),
)

# Fix: the original called st.input_text(), which does not exist in
# Streamlit; the correct widget function is st.text_input().
query = st.text_input("Enter your query")

chat_completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": query,
        }
    ],
    # NOTE(review): confirm this model id is still served by Groq;
    # mixtral-8x7b-32768 has been slated for deprecation.
    model="mixtral-8x7b-32768",
)

# Only one completion is requested (default n=1), so choices[0] is the
# sole valid entry. The original also read choices[1], which raised
# IndexError at runtime; that line is removed.
print(chat_completion.choices[0].message.content)
print(chat_completion)
|
23 |
+
|
24 |
+
|
25 |
+
|
26 |
+
|
27 |
+
|
28 |
+
|
29 |
+
|
30 |
+
|
31 |
+
|
32 |
+
|
33 |
+
|
34 |
+
|
35 |
+
|
36 |
+
|
37 |
+
|
38 |
+
|
39 |
+
|
40 |
+
|
41 |
+
|
42 |
+
|
43 |
+
|
44 |
+
|
45 |
+
|
46 |
+
|
47 |
+
|
48 |
+
# # Text to 3D
|
49 |
+
|
50 |
+
# import streamlit as st
|
51 |
+
# import torch
|
52 |
+
# from diffusers import ShapEPipeline
|
53 |
+
# from diffusers.utils import export_to_gif
|
54 |
+
|
55 |
+
# # Model loading (Ideally done once at the start for efficiency)
|
56 |
+
# ckpt_id = "openai/shap-e"
|
57 |
+
# @st.cache_resource # Caches the model for faster subsequent runs
|
58 |
+
# def load_model():
|
59 |
+
# return ShapEPipeline.from_pretrained(ckpt_id).to("cuda")
|
60 |
+
|
61 |
+
# pipe = load_model()
|
62 |
+
|
63 |
+
# # App Title
|
64 |
+
# st.title("Shark 3D Image Generator")
|
65 |
+
|
66 |
+
# # User Inputs
|
67 |
+
# prompt = st.text_input("Enter your prompt:", "a shark")
|
68 |
+
# guidance_scale = st.slider("Guidance Scale", 0.0, 20.0, 15.0, step=0.5)
|
69 |
+
|
70 |
+
# # Generate and Display Images
|
71 |
+
# if st.button("Generate"):
|
72 |
+
# with st.spinner("Generating images..."):
|
73 |
+
# images = pipe(
|
74 |
+
# prompt,
|
75 |
+
# guidance_scale=guidance_scale,
|
76 |
+
# num_inference_steps=64,
|
77 |
+
# size=256,
|
78 |
+
# ).images
|
79 |
+
# gif_path = export_to_gif(images, "shark_3d.gif")
|
80 |
+
|
81 |
+
# st.image(images[0]) # Display the first image
|
82 |
+
# st.success("GIF saved as shark_3d.gif")
|