eaglelandsonce committed
Commit 15eb325 · 1 parent: bc5e07b
Update app.py
app.py CHANGED
@@ -97,96 +97,3 @@ if st.button("Query Vectara"):
     else:
         st.write("No results found.")
 
-
-
-
-
-"""
-import streamlit as st
-import requests
-import json
-import os
-import pandas as pd
-
-# Assuming the environment variables are already set, we directly use them.
-# However, in a Streamlit app, you might want to set them up within the script for demonstration purposes
-# or securely use secrets management for API keys and other sensitive information.
-
-# Define the Vectara query function
-def vectara_query(query: str, config: dict):
-    # Query Vectara and return the results.
-    corpus_key = [{
-        "customerId": config["customer_id"],
-        "corpusId": config["corpus_id"],
-        "lexicalInterpolationConfig": {"lambda": config.get("lambda_val", 0.5)},
-    }]
-    data = {
-        "query": [{
-            "query": query,
-            "start": 0,
-            "numResults": config.get("top_k", 10),
-            "contextConfig": {
-                "sentencesBefore": 2,
-                "sentencesAfter": 2,
-            },
-            "corpusKey": corpus_key,
-            "summary": [{
-                "responseLang": "eng",
-                "maxSummarizedResults": 5,
-            }]
-        }]
-    }
-
-    headers = {
-        "x-api-key": config["api_key"],
-        "customer-id": config["customer_id"],
-        "Content-Type": "application/json",
-    }
-    response = requests.post(
-        headers=headers,
-        url="https://api.vectara.io/v1/query",
-        data=json.dumps(data),
-    )
-    if response.status_code != 200:
-        st.error(f"Query failed (code {response.status_code}, reason {response.reason}, details {response.text})")
-        return [], ""
-
-    result = response.json()
-    responses = result["responseSet"][0]["response"]
-    summary = result["responseSet"][0]["summary"][0]["text"]
-
-    res = [[r['text'], r['score']] for r in responses]
-    return res, summary
-
-# Streamlit interface
-st.title("Vectara Content Query Interface")
-
-# User inputs
-query = st.text_input("Enter your query here", "What does Vectara do?")
-lambda_val = st.slider("Lambda Value", min_value=0.0, max_value=1.0, value=0.5)
-top_k = st.number_input("Top K Results", min_value=1, max_value=50, value=10)
-
-if st.button("Query Vectara"):
-    config = {
-        "api_key": os.environ.get("VECTARA_API_KEY", ""),
-        "customer_id": os.environ.get("VECTARA_CUSTOMER_ID", ""),
-        "corpus_id": os.environ.get("VECTARA_CORPUS_ID", ""),
-        "lambda_val": lambda_val,
-        "top_k": top_k,
-    }
-
-    results, summary = vectara_query(query, config)
-
-    if results:
-        st.subheader("Summary")
-        st.write(summary)
-
-        st.subheader("Top Results")
-        df = pd.DataFrame(results, columns=['Text', 'Score'])
-        st.dataframe(df)
-    else:
-        st.write("No results found.")
-
-# Note: The integration of the model for HHEM scores is omitted as it requires the specific model details and implementation.
-
-"""
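The deleted block was an inactive, triple-quoted copy of the app that duplicated the live code above this hunk. For reference, a minimal standalone sketch of the Vectara v1 /query request that the removed vectara_query() helper assembled could look like the following; the endpoint, header names, and payload fields are taken from the removed code, while the hard-coded query text and defaults (lambda 0.5, top 10 results) stand in for the Streamlit inputs and are only illustrative.

import json
import os

import requests

# Credentials come from the same environment variables the removed code read.
api_key = os.environ.get("VECTARA_API_KEY", "")
customer_id = os.environ.get("VECTARA_CUSTOMER_ID", "")
corpus_id = os.environ.get("VECTARA_CORPUS_ID", "")

# Request body mirroring the removed vectara_query() helper, with fixed inputs.
payload = {
    "query": [{
        "query": "What does Vectara do?",
        "start": 0,
        "numResults": 10,
        "contextConfig": {"sentencesBefore": 2, "sentencesAfter": 2},
        "corpusKey": [{
            "customerId": customer_id,
            "corpusId": corpus_id,
            "lexicalInterpolationConfig": {"lambda": 0.5},
        }],
        "summary": [{"responseLang": "eng", "maxSummarizedResults": 5}],
    }]
}

response = requests.post(
    "https://api.vectara.io/v1/query",
    headers={
        "x-api-key": api_key,
        "customer-id": customer_id,
        "Content-Type": "application/json",
    },
    data=json.dumps(payload),
)
response.raise_for_status()

# Print the summary and the scored snippets instead of rendering them in Streamlit.
result = response.json()
print(result["responseSet"][0]["summary"][0]["text"])
for r in result["responseSet"][0]["response"]:
    print(round(r["score"], 3), r["text"][:80])

Exporting the three environment variables and running the script prints the generated summary followed by each hit's score and a text snippet, the same data the Streamlit version showed as a summary plus a results table.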