Siddartha10 commited on
Commit
0b37f80
1 Parent(s): 7a08649

Upload 2 files

Browse files
Files changed (2) hide show
  1. data.json +0 -0
  2. level2.py +155 -0
data.json ADDED
The diff for this file is too large to render. See raw diff
 
level2.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_experimental.agents import create_csv_agent
2
+ from dotenv import load_dotenv
3
+ from langchain_openai import AzureChatOpenAI
4
+ import os
5
+ load_dotenv()
6
+ import streamlit as st
7
+ import pandas as pd
8
+ from langchain_community.document_loaders import JSONLoader
9
+ import requests
10
+ from langchain_openai import OpenAIEmbeddings
11
+ from langchain.vectorstores import FAISS
12
+ from langchain.chains import RetrievalQA
13
+ from langchain.prompts import PromptTemplate
14
+ from langchain.memory import ConversationSummaryMemory
15
+
16
# Azure OpenAI chat model used for summarization and final answer generation.
# Each setting is read from the environment, falling back to the hard-coded
# defaults when the corresponding variable is absent.
_api_version = os.environ.get("AZURE_OPENAI_VERSION", "2023-07-01-preview")
_deployment = os.environ.get("AZURE_OPENAI_DEPLOYMENT", "gpt4chat")
_endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT", "https://gpt-4-trails.openai.azure.com/")

llm = AzureChatOpenAI(
    openai_api_version=_api_version,
    azure_deployment=_deployment,
    azure_endpoint=_endpoint,
    api_key=os.environ.get("AZURE_OPENAI_KEY"),
)
20
+
21
+
22
def metadata_func(record: str, metadata: dict) -> dict:
    """Extract locality and price-range metadata from one flattened record.

    The record is the "key: value" text produced by the loader's jq_schema;
    line index 10 holds the locality and line index 12 the price range
    (fixed positions in data.json — TODO confirm against the data file).

    Args:
        record: Newline-separated "key: value" lines for one listing.
        metadata: Loader-supplied metadata dict, mutated in place.

    Returns:
        The same metadata dict with "location" and "price_range" added.

    Raises:
        IndexError: If the record has fewer than 13 lines or a target line
            lacks a ": " separator.
    """
    lines = record.split('\n')
    # Fix: split on the FIRST ": " only, so values that themselves contain
    # ": " (e.g. "locality: Mumbai: Central") are not truncated — the
    # original `split(': ')[1]` silently dropped everything after a second ": ".
    metadata["location"] = lines[10].split(': ', 1)[1]
    metadata["price_range"] = lines[12].split(': ', 1)[1]

    return metadata
32
+
33
# Instantiate the JSONLoader with the metadata_func.
# The jq program flattens each record under the top-level "parser" array into
# newline-separated "key: value" text (one Document per record) — the exact
# format metadata_func expects.
jq_schema = '.parser[] | to_entries | map("\(.key): \(.value)") | join("\n")'
loader = JSONLoader(
    jq_schema=jq_schema,
    file_path='data.json',
    metadata_func=metadata_func,
)

# Load the JSON file and extract metadata.
# NOTE(review): runs at import time and requires data.json to be present in
# the working directory — the script fails immediately if it is missing.
documents = loader.load()
43
+
44
+
45
def get_vectorstore(text_chunks):
    """Return a FAISS vector store for *text_chunks*, cached on disk.

    Loads the index from ./faiss_index when it already exists; otherwise
    embeds text_chunks with OpenAIEmbeddings, builds a new index, and
    saves it locally for reuse on the next run.

    Args:
        text_chunks: Iterable of langchain Documents to embed on a cache miss.

    Returns:
        A FAISS vector store instance.
    """
    # Fix: instantiate the embeddings once — the original created a second,
    # redundant OpenAIEmbeddings() inside the else branch.
    embeddings = OpenAIEmbeddings()
    if os.path.exists("faiss_index"):
        # Reuse the previously saved index to avoid re-embedding everything.
        # NOTE(review): newer langchain releases require
        # allow_dangerous_deserialization=True here — confirm the pinned version.
        vectorstore = FAISS.load_local("faiss_index", embeddings=embeddings)
        print("Loaded existing FAISS index.")
    else:
        # Build a fresh index and persist it for subsequent runs.
        vectorstore = FAISS.from_documents(documents=text_chunks, embedding=embeddings)
        vectorstore.save_local("faiss_index")
        print("Created and saved new FAISS index.")
    return vectorstore
60
+
61
# (Removed dead example comment `new_db.similarity_search(query)` — neither
# name exists in this module.)

# Build or load the FAISS index over the loaded documents at import time.
vector = get_vectorstore(documents)
64
+
65
+
66
def api_call(text):
    """Query the Traversaal Ares live-search API and return its answer.

    Posts *text* to the Ares predict endpoint, optionally summarizes an
    overly long response with the Azure LLM, and returns the answer text
    together with the supporting web URLs.

    Args:
        text: The user's query string.

    Returns:
        str: "<answer> My list is: <urls>" on success, or an error message
        when the API response does not have the expected shape.
    """
    url = "https://api-ares.traversaal.ai/live/predict"

    payload = {"query": [text]}
    headers = {
        # SECURITY fix: the API key was hard-coded in source (and committed).
        # Read it from the environment instead; the leaked key must be rotated.
        "x-api-key": os.environ.get("ARES_API_KEY", ""),
        "content-type": "application/json",
    }

    response = requests.post(url, json=payload, headers=headers)

    # Use the LLM to summarize the response received from the Ares API.
    response_data = response.json()
    try:
        response_text = response_data['data']['response_text']
        web_urls = response_data['data']['web_url']
    except KeyError:
        # Fix: the original fell through with response_text/web_urls left
        # undefined, which raised NameError below. Return a clear message.
        print("Error: Unexpected response from the API. Please try again or contact the api owner.")
        return "Error: Unexpected response from the API."

    if len(response_text) > 10000:
        # Trim and summarize very long answers so the follow-up LLM call in
        # main() stays within the context window.
        trimmed = response_text[:8000]
        summarize_prompt = (
            "Summarize the following text in 500-1000 words. Just summarize "
            f"what you see and do not add anything else: {trimmed}"
        )
        summary = llm.invoke(summarize_prompt)
        print(summary)
        # Fix: actually use the summary — the original computed it and then
        # formatted the full response_text anyway.
        answer = summary.content
    else:
        answer = response_text

    result = "{} My list is: {}".format(answer, web_urls)

    # Convert the result to a string
    return str(result)
103
+
104
+
105
# Few-shot prompt for the retrieval QA chain: one worked example of a
# grounded, budget-aware recommendation, followed by a grounding constraint.
# Fix: corrected garbled prompt text ("anser", "Instanbul", stray quote) —
# typos in the instruction text degrade the model's output quality.
template = """

context:- I have a low budget, what is the best hotel in Istanbul?
answer:- The other hotels in Istanbul are costly and are not in your budget, so the best hotel in Istanbul for you is hotel xyz.

Don't give information not mentioned in the CONTEXT INFORMATION.
The system should take into account various factors such as location, amenities, user reviews, and other relevant criteria to
generate informative and personalized explanations.
{context}
Question: {question}
Answer:"""

prompt = PromptTemplate(template=template, input_variables=["context", "question"])

# "stuff" chain type: all retrieved documents are inserted into one prompt.
chain_type_kwargs = {"prompt": prompt}
chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vector.as_retriever(),
    chain_type_kwargs=chain_type_kwargs,
)
126
+
127
+
128
+
129
# System preamble prepended to every final LLM call in main(). It asks the
# model to compose an answer from the RAG chain result and the live-search
# result, and includes one refusal example for off-topic queries.
# Fix: repaired garbled instruction text — the first sentence trailed off
# ("make sure you are"), "api_resource" was a typo for "api_response", and
# the refusal example read "I can you assist you".
# NOTE(review): this module-level `prompt` shadows the PromptTemplate
# assigned above; the chain already holds its own reference, so behavior
# is unaffected, but consider renaming one of them.
prompt = """Please write the response to the user query using the final_response and api_response.
The system should take into account various factors such as location, amenities, user reviews, and other relevant criteria to
generate informative and personalized explanations. Do not add any information that is not mentioned in the context,
and make sure the answer is up to the point and not too long.

question: when did sachin hit his 100th century?
final_response: I can assist you with hotels, travel, or food, but cannot help with anything else.

"""
138
+
139
+
140
def main():
    """Streamlit entry point: collect a query, answer it, render the reply."""
    st.title("Travel Assistant Chatbot JR")
    st.write("Welcome to the Travel Assistant Chatbot!")
    user_input = st.text_input("User Input:")

    if st.button("Submit"):
        # Combine the RAG chain's grounded answer with the live web-search
        # result, then let the LLM compose the final reply from both.
        rag_answer = chain.run(user_input)
        web_answer = api_call(user_input)
        combined = "".join([prompt, user_input, rag_answer, web_answer])
        final = llm.invoke(combined)
        st.text_area("Chatbot Response:", value=final.content)

    if st.button("Exit"):
        st.stop()


if __name__ == "__main__":
    main()