Upload 7 files
README.md CHANGED
@@ -1,10 +1,10 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Chatbot Using Gemini
+emoji: π
+colorFrom: red
+colorTo: blue
 sdk: streamlit
-sdk_version: 1.
+sdk_version: 1.29.0
 app_file: app.py
 pinned: false
 ---
app.py ADDED
@@ -0,0 +1,35 @@
import streamlit as st
from streamlit_option_menu import option_menu
from page1 import text
from page2 import image
from page3 import details
from page4 import sdxl

def main():

    st.title("Chat With Gemini AI")

    with st.sidebar:
        selection = option_menu(
            menu_title="Main Menu",
            options=["Home", "Vanilla Chat", "Chat with Image", "Image Creator"],
            icons=["home", "pencil", "bird", "image"],
            menu_icon="cast",
            default_index=1
        )

    if selection == "Vanilla Chat":
        text()

    elif selection == "Chat with Image":
        image()

    elif selection == "Image Creator":
        sdxl()

    elif selection == "Home":
        details()


if __name__ == '__main__':
    main()
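app.py only dispatches to the page modules; each page then reads the Gemini key from the `api_key` environment variable. Below is a minimal, hedged sketch of a guard that could sit at the top of main(), assuming the Space secret is named `api_key` as in page1.py, page2.py and page3.py; the error text and the st.stop() call are illustrative and not part of this commit.

import os
import streamlit as st

# Hypothetical guard (not in the commit): stop early if the `api_key` secret
# that page1.py, page2.py and page3.py expect has not been configured.
if not os.environ.get("api_key"):
    st.error("The `api_key` secret is not set; configure it in the Space settings before using the chat pages.")
    st.stop()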
page1.py ADDED
@@ -0,0 +1,195 @@
import streamlit as st
from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from streamlit_chat import message
import time
import random
import os

api = os.environ.get("api_key")

def text():
    st.markdown("""
        <style>
        .anim-typewriter {
            animation: typewriter 3s steps(40) 1s 1 normal both, blinkTextCursor 800ms steps(40) infinite normal;
            overflow: hidden;
            white-space: nowrap;
            border-right: 3px solid;
            font-family: serif;
            font-size: 0.9em;
        }
        @keyframes typewriter {
            from {
                width: 0;
            }
            to {
                width: 100%;
                height: 100%
            }
        }
        @keyframes blinkTextCursor {
            from {
                border-right-color: rgba(255, 255, 255, 0.75);
            }
            to {
                border-right-color: transparent;
            }
        }
        </style>
    """, unsafe_allow_html=True)
    text = "Hello π, how may I assist you today?"
    animated_output = f'<div class="line-1 anim-typewriter">{text}</div>'

    with st.chat_message("assistant").markdown(animated_output, unsafe_allow_html=True):
        st.markdown(animated_output, unsafe_allow_html=True)
    apiKey = api
    msgs = StreamlitChatMessageHistory(key="special_app_key")

    memory = ConversationBufferMemory(memory_key="history", chat_memory=msgs)
    if len(msgs.messages) == 0:
        msgs.add_ai_message("How can I help you?")
    template = """You are an AI chatbot having a conversation with a human.

    {history}
    Human: {human_input}
    AI: """
    prompt = PromptTemplate(input_variables=["history", "human_input"], template=template)
    llm_chain = LLMChain(llm=ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=apiKey), prompt=prompt, memory=memory)

    if 'messages' not in st.session_state:
        st.session_state['messages'] = []

    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    prompt = st.chat_input("Say something")

    if prompt:
        with st.chat_message("user").markdown(prompt):
            st.session_state.messages.append(
                {
                    "role": "user",
                    "content": prompt
                }
            )
        # Custom HTML and CSS for three-dot animation
        spinner_html = """
        <div class="col-3">
            <div class="snippet" data-title="dot-pulse">
                <div class="stage">
                    <div class="dot-pulse"></div>
                </div>
            </div>
        </div>
        """

        spinner_css = """
        .dot-pulse {
            position: relative;
            left: -9999px;

            width: 10px;
            height: 10px;
            border-radius: 5px;
            background-color: #9880ff;
            color: #9880ff;
            box-shadow: 9999px 0 0 -5px;
            animation: dot-pulse 1.5s infinite linear;
            animation-delay: 0.25s;
        }
        .dot-pulse::before, .dot-pulse::after {
            content: "";
            display: inline-block;
            position: absolute;
            top: 0;
            width: 10px;
            height: 10px;
            border-radius: 5px;
            background-color: #9880ff;
            color: #9880ff;
        }
        .dot-pulse::before {
            box-shadow: 9984px 0 0 -5px;
            animation: dot-pulse-before 1.5s infinite linear;
            animation-delay: 0s;
        }
        .dot-pulse::after {
            box-shadow: 10014px 0 0 -5px;
            animation: dot-pulse-after 1.5s infinite linear;
            animation-delay: 0.5s;
        }

        @keyframes dot-pulse-before {
            0% {
                box-shadow: 9984px 0 0 -5px;
            }
            30% {
                box-shadow: 9984px 0 0 2px;
            }
            60%, 100% {
                box-shadow: 9984px 0 0 -5px;
            }
        }
        @keyframes dot-pulse {
            0% {
                box-shadow: 9999px 0 0 -5px;
            }
            30% {
                box-shadow: 9999px 0 0 2px;
            }
            60%, 100% {
                box-shadow: 9999px 0 0 -5px;
            }
        }
        @keyframes dot-pulse-after {
            0% {
                box-shadow: 10014px 0 0 -5px;
            }
            30% {
                box-shadow: 10014px 0 0 2px;
            }
            60%, 100% {
                box-shadow: 10014px 0 0 -5px;
            }
        }
        """

        st.markdown(f'<style>{spinner_css}</style>', unsafe_allow_html=True)
        st.markdown(spinner_html, unsafe_allow_html=True)

        for chunk in llm_chain.stream(prompt):
            text_output = chunk.get("text", "")
        st.markdown('<style>.dot-pulse { visibility: hidden; }</style>', unsafe_allow_html=True)

        with st.chat_message("assistant").markdown(text_output):
            st.session_state.messages.append(
                {
                    "role": "assistant",
                    "content": text_output
                }
            )

        # with st.chat_message("assistant"):
        #     message_placeholder = st.empty()
        #     full_response = ""
        #     assistant_response = random.choice(
        #         [
        #             "Hello there! How can I assist you today?",
        #             "Hi, human! Is there anything I can help you with?",
        #             "Do you need help?",
        #         ]
        #     )
        #     # Simulate stream of response with milliseconds delay
        #     for chunk in text_output.split():
        #         full_response += chunk + " "
        #         time.sleep(0.05)
        #         # Add a blinking cursor to simulate typing
        #         message_placeholder.markdown(full_response + "β")
        #     message_placeholder.markdown(full_response)
        #     # Add assistant response to chat history
        #     st.session_state.messages.append({"role": "assistant", "content": full_response})
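In the streaming loop above, `text_output` is overwritten on every iteration, so only the final chunk from `llm_chain.stream(prompt)` is rendered and saved to `st.session_state.messages`. A hedged sketch of one way to accumulate the streamed chunks instead, reusing this commit's `llm_chain`, its chunk format (`chunk.get("text", "")`) and its session-state layout; the placeholder pattern mirrors the commented-out block at the end of page1.py.

# Sketch only: accumulate streamed chunks so the full reply is displayed and stored.
full_response = ""
with st.chat_message("assistant"):
    placeholder = st.empty()
    for chunk in llm_chain.stream(prompt):
        full_response += chunk.get("text", "")
        placeholder.markdown(full_response)  # progressively render the growing reply
st.session_state.messages.append({"role": "assistant", "content": full_response})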
page2.py ADDED
@@ -0,0 +1,194 @@
import streamlit as st
from langchain_core.messages import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from streamlit_chat import message
from PIL import Image
import base64
import io
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
import os

# Streamlit app
def image():
    key = os.environ.get("api_key")
    st.markdown("""
        <style>
        .anim-typewriter {
            animation: typewriter 3s steps(40) 1s 1 normal both, blinkTextCursor 800ms steps(40) infinite normal;
            overflow: hidden;
            white-space: nowrap;
            border-right: 3px solid;
            font-family: serif;
            font-size: 0.8em;
        }
        @keyframes typewriter {
            from {
                width: 0;
            }
            to {
                width: 100%;
                height: 100%
            }
        }
        @keyframes blinkTextCursor {
            from {
                border-right-color: rgba(255, 255, 255, 0.75);
            }
            to {
                border-right-color: transparent;
            }
        }
        </style>
    """, unsafe_allow_html=True)
    text1 = "Hello π, upload an image and ask questions related to it!"
    animated = f'<div class="line-1 anim-typewriter">{text1}</div>'
    with st.chat_message("assistant").markdown(animated, unsafe_allow_html=True):
        st.markdown(animated, unsafe_allow_html=True)
    def process_image(uploaded_file):
        # Display the uploaded image
        image = Image.open(uploaded_file)
        st.image(image, caption='Uploaded Image', use_column_width=True)

        # Process the image and return the URL or other information
        # For demonstration purposes, convert the image to base64 and return a data URL
        buffered = io.BytesIO()
        image.save(buffered, format="JPEG")
        image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
        image_url = f"data:image/jpeg;base64,{image_base64}"

        return image_url
    apiKey = key

    llm = ChatGoogleGenerativeAI(model="gemini-pro-vision", google_api_key=apiKey)

    image_url = None  # Initialize image_url outside the if statement
    with st.sidebar:
        uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
        if uploaded_file is not None:
            image_url = process_image(uploaded_file)


    if 'messages' not in st.session_state:
        st.session_state['messages'] = []

    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    prompt = st.chat_input("Say something")
    message = HumanMessage(
        content=[
            {
                "type": "text",
                "text": prompt,
            },  # You can optionally provide text parts
            {"type": "image_url", "image_url": image_url},
        ]
    )

    if prompt:
        with st.chat_message("user").markdown(prompt):
            st.session_state.messages.append(
                {
                    "role": "user",
                    "content": prompt
                }
            )
        spinner_html = """
        <div class="col-3">
            <div class="snippet" data-title="dot-pulse">
                <div class="stage">
                    <div class="dot-pulse"></div>
                </div>
            </div>
        </div>
        """

        spinner_css = """
        .dot-pulse {
            position: relative;
            left: -9999px;

            width: 10px;
            height: 10px;
            border-radius: 5px;
            background-color: #9880ff;
            color: #9880ff;
            box-shadow: 9999px 0 0 -5px;
            animation: dot-pulse 1.5s infinite linear;
            animation-delay: 0.25s;
        }
        .dot-pulse::before, .dot-pulse::after {
            content: "";
            display: inline-block;
            position: absolute;
            top: 0;
            width: 10px;
            height: 10px;
            border-radius: 5px;
            background-color: #9880ff;
            color: #9880ff;
        }
        .dot-pulse::before {
            box-shadow: 9984px 0 0 -5px;
            animation: dot-pulse-before 1.5s infinite linear;
            animation-delay: 0s;
        }
        .dot-pulse::after {
            box-shadow: 10014px 0 0 -5px;
            animation: dot-pulse-after 1.5s infinite linear;
            animation-delay: 0.5s;
        }

        @keyframes dot-pulse-before {
            0% {
                box-shadow: 9984px 0 0 -5px;
            }
            30% {
                box-shadow: 9984px 0 0 2px;
            }
            60%, 100% {
                box-shadow: 9984px 0 0 -5px;
            }
        }
        @keyframes dot-pulse {
            0% {
                box-shadow: 9999px 0 0 -5px;
            }
            30% {
                box-shadow: 9999px 0 0 2px;
            }
            60%, 100% {
                box-shadow: 9999px 0 0 -5px;
            }
        }
        @keyframes dot-pulse-after {
            0% {
                box-shadow: 10014px 0 0 -5px;
            }
            30% {
                box-shadow: 10014px 0 0 2px;
            }
            60%, 100% {
                box-shadow: 10014px 0 0 -5px;
            }
        }
        """

        st.markdown(f'<style>{spinner_css}</style>', unsafe_allow_html=True)
        st.markdown(spinner_html, unsafe_allow_html=True)
        response = llm.invoke([message])
        text_output = response.content
        st.markdown('<style>.dot-pulse { visibility: hidden; }</style>', unsafe_allow_html=True)

        with st.chat_message("assistant").markdown(text_output):
            st.session_state.messages.append(
                {
                    "role": "assistant",
                    "content": text_output
                }
            )
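page2.py builds the `HumanMessage` right after `st.chat_input`, before checking whether `prompt` (or an uploaded image) exists, so on a first render the message content can contain `None` values. A hedged sketch of constructing the multimodal message only when both inputs are present, reusing the content format and the `llm` / `image_url` names from this file; the warning branch is illustrative and not part of the commit.

# Sketch only: defer building the multimodal message until both inputs exist.
if prompt and image_url:
    msg = HumanMessage(
        content=[
            {"type": "text", "text": prompt},
            {"type": "image_url", "image_url": image_url},
        ]
    )
    response = llm.invoke([msg])
    with st.chat_message("assistant"):
        st.markdown(response.content)
elif prompt:
    st.warning("Upload an image in the sidebar before asking about it.")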
page3.py ADDED
@@ -0,0 +1,15 @@
import streamlit as st

from langchain_google_genai import ChatGoogleGenerativeAI

import time
import os

def details():
    api = os.environ.get("api_key")
    apiKey = api
    llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=apiKey)
    st.header("Introducing Gemini")
    with st.chat_message("assistant"):
        for chunk in llm.stream("Tell me about google gemini ai model"):
            st.write(chunk.content)
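Because `st.write(chunk.content)` runs once per streamed chunk, the model's description is rendered as a series of separate fragments. A hedged sketch that joins the chunks into a single, progressively updated message, assuming the same `llm` defined in details().

# Sketch only: render the streamed description as one growing message.
with st.chat_message("assistant"):
    placeholder = st.empty()
    buffer = ""
    for chunk in llm.stream("Tell me about google gemini ai model"):
        buffer += chunk.content
        placeholder.markdown(buffer)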
requirements.txt ADDED
@@ -0,0 +1,6 @@
pillow
langchain-google-genai
streamlit==1.29.0
streamlit-chat==0.1.1
streamlit_option_menu==0.3.6
langchain