import os
import time

import streamlit as st
from dotenv import load_dotenv, find_dotenv
from langchain.chains import ConversationChain, LLMChain
from langchain.memory import (
    ChatMessageHistory,
    ConversationBufferMemory,
    ConversationSummaryBufferMemory,
    ConversationSummaryMemory,
)
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain_community.llms import HuggingFaceEndpoint

load_dotenv(find_dotenv())  # expects HUGGINGFACEHUB_API_TOKEN in a .env file
HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]

# Default model; choose_model1() switches between the base and the fine-tuned variant.
repo_id = "mistralai/Mistral-7B-Instruct-v0.2"


def choose_model1(model):
    """Set the active model from the option picked in the front-end selector."""
    global repo_id
    if model == "Venilla Model":  # label as emitted by the UI
        repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
    else:
        repo_id = "GRMenon/mental-health-mistral-7b-instructv0.2-finetuned-V2"
    print("model chosen from chat:", repo_id)


# Running log of user queries, appended to on every convo() call.
query2 = " "


def main():
    """(Re)build the LLM, prompt, memory, and conversation chain."""
    global conversation, memory

    llm = HuggingFaceEndpoint(
        repo_id=repo_id,
        max_new_tokens=512,  # HuggingFaceEndpoint takes max_new_tokens rather than max_length
        temperature=0.5,
        huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
    )
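
    # Note: HuggingFaceEndpoint sends requests to the Hugging Face Inference
    # API, so the token must have access to the selected repo_id.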
template = """ Act as an expert mental health therapist, and conduct therapy sessions with the user. You are an expert Mental Health therapist who is asking the user questions to learn what professional mental health well-being advice could help the user.
|
|
Your goal is to analyse their mental health problem, based following input:{input}. You will always ask questions to the user to get them to explain more about whatever mental health condition is ailing them.
|
|
DO NOT give the user any mental health advice or medical advice, ONLY ask for more information about their symptoms.
|
|
Do not show your thought process, only output a single question. Your output should contain consolation related to the query and a single question.
|
|
Only ask one question a time.
|
|
|
|
Current conversation:
|
|
{history}
|
|
|
|
Human: {input}
|
|
AI Assistant:"""
|
|
|
|
|
|
PROMPT = PromptTemplate(input_variables=["history","input"], template=template)

    # ConversationBufferMemory keeps the raw transcript; unlike the summary
    # memories, it does not take an llm.
    memory = ConversationBufferMemory()

    conversation = ConversationChain(
        prompt=PROMPT,
        llm=llm,
        memory=memory,
    )
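
    # On each predict() call, ConversationChain fills {history} from this memory,
    # then appends the new Human/AI turns back into it, so context accumulates.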


def convo(query):
    """Run one chat turn; return the model's reply and the accumulated query log."""
    global conversation, memory, query2

    response = conversation.predict(input=query)

    query2 = query2 + "," + query  # append this turn's query to the running log
    print("\n query2----------", query2)
    print("\n chat_agent.py----------", memory.chat_memory)
    summary = query2
    return response, summary


def delete_all_variables():
    """Reset the accumulated query log and rebuild the conversation chain."""
    global query2
    query2 = " "
    main()
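

# A minimal front-end sketch (assumption: this module is driven by a Streamlit
# page roughly like the one below; the widget labels and layout are hypothetical,
# while st.chat_input, st.chat_message, and st.session_state are real Streamlit APIs).
if __name__ == "__main__":
    choice = st.sidebar.selectbox("Model", ["Venilla Model", "Mental Health Model"])
    choose_model1(choice)

    # Build the chain once, not on every Streamlit rerun.
    if "chain_ready" not in st.session_state:
        main()
        st.session_state["chain_ready"] = True

    user_msg = st.chat_input("How are you feeling today?")
    if user_msg:
        reply, _summary = convo(user_msg)
        with st.chat_message("assistant"):
            st.write(reply)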