RAHMAN00700 committed
Commit: c742a90
Parent(s): dea6e74
Update app.py

app.py CHANGED
@@ -8,7 +8,6 @@ import yaml
 from bs4 import BeautifulSoup
 from pptx import Presentation
 from docx import Document
-from dotenv import load_dotenv
 
 from langchain.document_loaders import PyPDFLoader, TextLoader
 from langchain.indexes import VectorstoreIndexCreator
@@ -23,9 +22,6 @@ from ibm_watson_machine_learning.foundation_models.extensions.langchain import W
 from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
 from ibm_watson_machine_learning.foundation_models.utils.enums import DecodingMethods
 
-# Load environment variables from .env file
-load_dotenv()
-
 # Initialize index and chain to None
 index = None
 rag_chain = None
@@ -106,6 +102,8 @@ def load_file(file_name, file_type):
         text = load_yaml(file_name)
     elif file_type == "html":
         text = load_html(file_name)
+    elif file_type == "htm":
+        text = load_html(file_name)
     else:
         st.error("Unsupported file type.")
         return None
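The new branch routes ".htm" uploads through the same load_html helper already used for ".html". The helper's body is not part of this commit; a minimal sketch of what it plausibly looks like, assuming it extracts visible text with BeautifulSoup (imported at the top of app.py) and matches the load_html(file_name) call shape used above:

from bs4 import BeautifulSoup

def load_html(file_name):
    # Read the raw markup; tolerate odd encodings in uploaded files.
    with open(file_name, "r", encoding="utf-8", errors="ignore") as f:
        soup = BeautifulSoup(f.read(), "html.parser")
    # Collapse the page to its visible text, one block element per line.
    return soup.get_text(separator="\n", strip=True)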
@@ -126,7 +124,7 @@ def load_file(file_name, file_type):
         return None
 
 # Watsonx API setup
-watsonx_api_key =
+watsonx_api_key = os.getenv("WATSONX_API_KEY")
 watsonx_project_id = os.getenv("WATSONX_PROJECT_ID")
 
 if not watsonx_api_key or not watsonx_project_id:
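Since load_dotenv() and the python-dotenv import were removed above, the app no longer reads a local .env file: WATSONX_API_KEY and WATSONX_PROJECT_ID must already be set in the environment that launches Streamlit. A minimal sketch of how the two values are consumed, assuming the credentials dictionary shape used by ibm_watson_machine_learning and a hypothetical WATSONX_URL variable with a default region; only the two os.getenv calls are confirmed by the diff:

import os

watsonx_api_key = os.getenv("WATSONX_API_KEY")
watsonx_project_id = os.getenv("WATSONX_PROJECT_ID")

if not watsonx_api_key or not watsonx_project_id:
    # app.py reports this through st.error; raising keeps the sketch standalone.
    raise RuntimeError("Export WATSONX_API_KEY and WATSONX_PROJECT_ID before starting the app.")

# Credentials dictionary in the shape the ibm_watson_machine_learning SDK expects.
credentials = {
    "url": os.getenv("WATSONX_URL", "https://us-south.ml.cloud.ibm.com"),  # assumed default endpoint
    "apikey": watsonx_api_key,
}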
@@ -145,7 +143,7 @@ I am a helpful assistant.
 )
 
 with st.sidebar:
-    st.title("Watsonx RAG
+    st.title("Watsonx RAG: Multi-Document Retrieval")
     watsonx_model = st.selectbox("Model", ["meta-llama/llama-3-405b-instruct", "codellama/codellama-34b-instruct-hf", "ibm/granite-20b-multilingual"])
     max_new_tokens = st.slider("Max output tokens", min_value=100, max_value=4000, value=600, step=100)
    decoding_method = st.radio("Decoding", (DecodingMethods.GREEDY.value, DecodingMethods.SAMPLE.value))
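The sidebar exposes a model id, a token budget, and a decoding method. How those three values reach the model is outside this hunk; a sketch of the typical wiring, assuming the Model and WatsonxLLM classes behind the truncated import in the second hunk and the credentials dict sketched earlier; treat the exact constructor arguments as an assumption rather than something this commit shows:

from ibm_watson_machine_learning.foundation_models import Model
from ibm_watson_machine_learning.foundation_models.extensions.langchain import WatsonxLLM
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams

# Map the sidebar selections onto watsonx.ai generation parameters.
parameters = {
    GenParams.DECODING_METHOD: decoding_method,   # "greedy" or "sample" from the radio button
    GenParams.MAX_NEW_TOKENS: max_new_tokens,     # value of the "Max output tokens" slider
}

model = Model(
    model_id=watsonx_model,        # value of the "Model" selectbox
    params=parameters,
    credentials=credentials,       # assumed dict with "url" and "apikey"
    project_id=watsonx_project_id,
)
llm = WatsonxLLM(model=model)      # LangChain-compatible wrapper consumed by the chains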
@@ -212,4 +210,11 @@ prompt = st.chat_input("Ask your question here", disabled=False if chain else Tr
 if prompt:
     st.chat_message("user").markdown(prompt)
     if rag_chain:
-
+        response_text = rag_chain.run(prompt).strip()
+    else:
+        response_text = chain.run(question=prompt, context="").strip()
+
+    st.session_state.messages.append({'role': 'User', 'content': prompt})
+    st.chat_message("assistant").markdown(response_text)
+    st.session_state.messages.append({'role': 'Assistant', 'content': response_text})
+
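The new chat handler answers through rag_chain when documents have been indexed, falls back to the plain prompt chain otherwise, and appends both turns to st.session_state.messages. The diff does not show where that history is created or re-rendered; a minimal sketch of the usual Streamlit pattern it implies, labeled as an assumption:

import streamlit as st

# Assumed initialization, done once per session before the chat input is drawn.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Assumed replay loop so earlier turns stay visible across Streamlit reruns.
for message in st.session_state.messages:
    role = "user" if message["role"] == "User" else "assistant"
    st.chat_message(role).markdown(message["content"])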