from langchain.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
from langchain.llms import HuggingFaceHub
import gradio as gr
import os
from langchain.embeddings import HuggingFaceEmbeddings
from langchain_experimental.agents.agent_toolkits.csv.base import create_csv_agent
from langchain.document_loaders import PyPDFDirectoryLoader
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.memory import ConversationSummaryBufferMemory
import io
import contextlib
import re
import traceback


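# Load the sentence-transformer embeddings and the pre-built FAISS vector store from the local vector_db/ directory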
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L12-v2")
vector_store = FAISS.load_local("vector_db/", embeddings)

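# Mixtral-8x7B-Instruct served through the Hugging Face Hub inference API; low temperature for more deterministic answers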
repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.1, "max_new_tokens": 4096})

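# Conversation memory that summarises older turns once the 500-token buffer limit is exceeded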
memory = ConversationSummaryBufferMemory(
    llm=llm,
    output_key='answer',
    memory_key='chat_history',
    max_token_limit=500,
    return_messages=True)

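# Similarity-search retriever returning the top 10 chunks (with metadata) for each query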
retriever = vector_store.as_retriever(
    search_type="similarity",
    search_kwargs={"k": 10, "include_metadata": True})

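# RAG chain: retrieved documents are stuffed into the prompt, and source documents are returned for citation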
qa = ConversationalRetrievalChain.from_llm(
    llm=llm,
    memory=memory,
    chain_type="stuff",
    retriever=retriever,
    return_source_documents=True,
    get_chat_history=lambda h : h,
    verbose=True)

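# Pandas-based agent over the two CSV datasets, used for the analytics tab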
agent = create_csv_agent(llm, ['data/Gretel_Data.csv', 'data/RAN_Data _T.csv'], verbose=True)

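# RAG chat handler: wrap the user query in Clara's persona prompt and return the answer with the top three source tickets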
def echo(message, history):
  context_prompt = """Assume the role of Clara, a seasoned senior telecom network engineer with access to troubleshooting ticket data and various technical and product documentation.
                      Rely on information derived from these tickets to address queries. Feel free to seek clarification through relevant questions."""
  message = (context_prompt + " User Query: " + message +
             " If asked about root cause analysis (RCA), give only one possible answer and then give the steps to resolve in the exact format: 'The steps to resolve could be:'")
  result = qa({"question": message})
  bold_answer = "<b>" + result['answer'].split('Helpful Answer:')[-1] + "</b>"
  return (bold_answer + "<br></br>" +
          "1. " + str(result["source_documents"][0]) + "<br>" +
          "2. " + str(result["source_documents"][1]) + "<br>" +
          "3. " + str(result["source_documents"][2]))

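# Agent chat handler: run the CSV agent, capture its verbose reasoning from stdout, and return it alongside the final answer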
def echo_agent(message, history):
  message = "There are 2 dfs. If you find a KeyError, check for the same key in the other df." + "<br>" + message
  try:
    with io.StringIO() as buffer:
      with contextlib.redirect_stdout(buffer):
        result= agent.run(message)
      verbose_output = buffer.getvalue()
      # Strip the ANSI colour codes and prompt markers that the agent's verbose logging emits
      verbose_output = re.sub(r"\x1b\[[0-9;]*m", "", verbose_output)
      verbose_output = verbose_output.replace("> ", "")
      result = "<b>" + verbose_output + "<br>" + result + "</b>"
    return result
  except Exception as e:
    error_message = f"An error occurred: {e}\n{traceback.format_exc()}"
    return error_message

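# Gradio chat UI for the RAG assistant (Clara)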
demo = gr.ChatInterface(
        fn=echo,
        chatbot=gr.Chatbot(height=300, label="Hi I am Clara!", show_label=True),
        textbox=gr.Textbox(placeholder="Ask me a question", container=True, autofocus=True, scale=7),
        title="Network Ticket Knowledge Management",
        description="<span style='font-size: 16px;'>Welcome to the Verizon Network Operations Center! I am here to help the Verizon Field Operations team with technical queries & escalations. I am trained on thousands of RAN, Backhaul, Core network & End user equipment trouble tickets. Ask me!&nbsp;☺</span>",
        theme=gr.themes.Soft(),
        examples=["wifi connected but no internet showing", "internet stopped working after primary link down", "internet stopped working link not shifted to secondary after primary link down"],
        cache_examples=False,
        retry_btn=None,
        undo_btn="Delete Previous",
        clear_btn="Clear",
        stop_btn="Stop",
    )


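# Gradio chat UI for the CSV/analytics agent (Sam)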
demo1 = gr.ChatInterface(
        fn=echo_agent,
        chatbot=gr.Chatbot(height=300, label="Hi I am Sam!", show_label=True),
        textbox=gr.Textbox(placeholder="Ask me a question", container=True, autofocus=True, scale=7),
        title="LLM Powered Agent",
        description="<span style='font-size: 16px;'>Welcome to Verizon RAN Visualization & Analytics powered by GEN AI. I have access to 100s of metrics generated by a RAN base station and can help in visualizing, correlating and generating insights using the power of Conversational AI.&nbsp;☺</span>",
        theme=gr.themes.Soft(),
        retry_btn=None,
        undo_btn="Delete Previous",
        clear_btn="Clear",
        stop_btn="Stop",
    )
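# Combine both chat UIs into a tabbed app served behind basic authentication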
demo2 = gr.TabbedInterface([demo, demo1], ["RAG", "AGENT"], title='INCEDO', theme=gr.themes.Soft())
demo2.launch(auth=("admin", "Sam&Clara"))