Doux Thibault committed • Commit 9aff9bb • 1 Parent(s): d105aff
add router to smalltalk chain and rag chain
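The new code below imports router_chain from Modules/router.py, a module that is not part of this file's diff. As a rough, hypothetical sketch only (module contents, prompt wording, and model choice are assumptions), the router is presumably a small classification chain that returns one of the plain-string labels app.py branches on: 'fitness_advices', 'smalltalk', or the still-commented-out 'movement_analysis'.

# Hypothetical sketch of Modules/router.py -- not shown in this commit.
import os

from dotenv import load_dotenv
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_mistralai import ChatMistralAI

load_dotenv()  # load .env api keys

router_prompt = ChatPromptTemplate.from_template(
    """Classify the user question into exactly one of these categories:
fitness_advices, smalltalk, movement_analysis.
Answer with the category name only, nothing else.
User question: {question}
Category:"""
)

router_llm = ChatMistralAI(
    model="mistral-large-latest",
    mistral_api_key=os.getenv("MISTRAL_API_KEY"),
    temperature=0,
)

# StrOutputParser makes invoke() return a bare string label, which is why
# app.py can compare the result with == in the last hunk below.
router_chain = router_prompt | router_llm | StrOutputParser()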
app.py CHANGED

@@ -3,11 +3,14 @@ from st_audiorec import st_audiorec
 from Modules.Speech2Text.transcribe import transcribe
 import base64
 from langchain_mistralai import ChatMistralAI
+from langchain_core.prompts import ChatPromptTemplate
 from dotenv import load_dotenv
 load_dotenv() # load .env api keys
 import os
 
 from Modules.rag import rag_chain
+from Modules.router import router_chain
+# from Modules.PoseEstimation.pose_agent import agent_executor
 
 mistral_api_key = os.getenv("MISTRAL_API_KEY")
 from Modules.PoseEstimation import pose_estimator
@@ -26,6 +29,15 @@ st.set_page_config(layout="wide", initial_sidebar_state="collapsed")
 col1, col2 = st.columns(2)
 video_uploaded = None
 llm = ChatMistralAI(model="mistral-large-latest", mistral_api_key=mistral_api_key, temperature=0)
+prompt = ChatPromptTemplate.from_template(
+    template="""You are a personal AI sports coach with expertise in nutrition and fitness.
+    You are having a conversation with your client, who is either a beginner or an advanced athlete.
+    You must be gentle, kind, and motivating.
+    Always try to answer the queries concisely.
+    User: {question}
+    AI Coach:"""
+)
+base_chain = prompt | llm
 
 # First column containers
 with col1:
@@ -56,9 +68,19 @@ with col1:
 
     with st.chat_message("assistant"):
        # Build answer from LLM
-
-
-
+        direction = router_chain.invoke({"question": instruction})
+        if direction == 'fitness_advices':
+            response = rag_chain.invoke(
+                instruction
+            )
+        elif direction == 'smalltalk':
+            response = base_chain.invoke(
+                {"question": instruction}
+            ).content
+        # elif direction == 'movement_analysis':
+        #     response = agent_executor.invoke(
+        #         {"input": instruction}
+        #     )["output"]
         print(type(response))
         st.session_state.messages.append({"role": "assistant", "content": response})
         st.markdown(response)
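A minimal, standalone way to exercise the two active branches added in the last hunk, as a sketch only: it assumes rag_chain returns a plain string (app.py renders the result directly with st.markdown) and that base_chain returns an AIMessage, hence .content.

# Hypothetical smoke test for the routing logic; not part of the committed code.
from Modules.rag import rag_chain
from Modules.router import router_chain

def route_and_answer(instruction: str, base_chain) -> str:
    """Mirror of the dispatch added in the last hunk of app.py."""
    direction = router_chain.invoke({"question": instruction})
    if direction == 'fitness_advices':
        # retrieval-augmented answer for fitness questions
        return rag_chain.invoke(instruction)
    elif direction == 'smalltalk':
        # plain coach persona, no retrieval; .content unwraps the AIMessage
        return base_chain.invoke({"question": instruction}).content
    # 'movement_analysis' is still commented out in this commit, so any other
    # label falls through to this placeholder.
    return ""

Calling route_and_answer("How was your day?", base_chain) with the base_chain defined in app.py should route to the smalltalk branch.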