ShivanshMathur007 committed
Commit 06ac17f
1 Parent(s): de102ee

Update app.py

Files changed (1)
  1. app.py +27 -28
app.py CHANGED
@@ -1,5 +1,6 @@
  from langchain.vectorstores import FAISS
- from langchain.chains import ConversationalRetrievalChain
+ # from langchain.chains import ConversationalRetrievalChain
+ from langchain.chains import RetrievalQA
  from langchain.llms import HuggingFaceHub
  import gradio as gr
  import os
@@ -19,39 +20,37 @@ vector_store= FAISS.load_local("vector_db/", embeddings)
  repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1"
  llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.01, "max_new_tokens": 2048})

- memory = ConversationSummaryBufferMemory(
-     llm=llm,
-     output_key='answer',
-     memory_key='chat_history',
-     max_token_limit=300,
-     return_messages=True)
+ # memory = ConversationSummaryBufferMemory(
+ #     llm=llm,
+ #     output_key='answer',
+ #     memory_key='chat_history',
+ #     max_token_limit=300,
+ #     return_messages=True)

- retriever = vector_store.as_retriever(
-     search_type="similarity",
-     search_kwargs={"k": 10, "include_metadata": True})
+ # retriever = vector_store.as_retriever(
+ #     search_type="similarity",
+ #     search_kwargs={"k": 10, "include_metadata": True})

- qa = ConversationalRetrievalChain.from_llm(
-     llm=llm,
-     memory=memory,
-     chain_type="stuff",
-     retriever=retriever,
-     return_source_documents=True,
-     get_chat_history=lambda h : h,
-     verbose=True)
+ # qa = ConversationalRetrievalChain.from_llm(
+ #     llm=llm,
+ #     memory=memory,
+ #     chain_type="stuff",
+ #     retriever=retriever,
+ #     return_source_documents=True,
+ #     get_chat_history=lambda h : h,
+ #     verbose=True)

  agent=create_csv_agent(llm,['data/Gretel_Data.csv','data/RAN_Data _T.csv'],verbose=True)

  def echo(message, history):
-     context_prompt = """Assume the role of Clara, a seasoned senior telecom network engineer with access to troubleshooting tickets data and various technical and product documentation.
-     Rely on information derived from these tickets to address queries. Feel free to seek clarification through relevant questions."""
-     csv_context_prompt = """Assume access to multiple files, including a CSV with ticket details (ticket ID as primary key, columns like network element, location, RCA, status, severity, etc.).
-     The model should refer to the CSV file specifically when users inquire about tickets or related network issues. For other queries, the model can utilize information from the broader context or available data sources as needed."""
-
-     message= context_prompt + csv_context_prompt + "User Query: "+ message + "If asked about Root cause analysis (rca), give only one possible awnser and then give the steps to resolve in the exact format which is :- The steps to resolve could be -"
-
-     result=qa({"question":message})
-     bold_answer= "<b>" + result['answer'].split('Helpful Answer:')[-1] + "</b>"
-     return bold_answer + "<br></br>" +'1. ' + str(result["source_documents"][0]) +"<br>" + '2. ' + str(result["source_documents"][1]) + "<br>" + "3. " + str(result["source_documents"][2])
+     try:
+         qa=RetrievalQA.from_chain_type(llm=llm, retriever=retriever,return_source_documents=True)
+         message= "Your name is Clara. You are a senior telecom network engineer having access to troubleshooting tickets data and other technical and product documentation.Stick to the knowledge from these tickets. Ask clarification questions if needed. "+message
+         result=qa({"query":message})
+         bold_answer= "<b>" + result['result'] + "</b>"
+         return bold_answer + "<br></br>" +'1. ' + str(result["source_documents"][0]) +"<br>" + '2. ' + str(result["source_documents"][1]) + "<br>" + "3. " + str(result["source_documents"][2])
+     except Exception as e:
+         error_message = f"An error occurred: {e}"+str(e.with_traceback) + str(e.args)

  def echo_agent(message, history):
      message="There are 2 df's. If you find a KeyError check for the same in the other df." + "<br>" + message