RAHMAN00700 committed on
Commit d2e92de · unverified · 1 Parent(s): 0fd54d0

Update app.py

Files changed (1)
  1. app.py +60 -48
app.py CHANGED
@@ -4,9 +4,9 @@ from ibm_watson import DiscoveryV2
 from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
 from ibm_watson_machine_learning.foundation_models import Model
 from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
-from ibm_watson_machine_learning.foundation_models.utils.enums import DecodingMethods
+from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes, DecodingMethods
 
-# Hardcoded IBM Watson Discovery and Watsonx Credentials
+# IBM Watson Discovery and Watsonx Credentials (replace with your keys)
 authenticator = IAMAuthenticator('5sSmoI6y0ZHP7D3a6Iu80neypsbK3tsUZR_VdRAb7ed2')
 discovery = DiscoveryV2(
     version='2020-08-30',
@@ -14,15 +14,34 @@ discovery = DiscoveryV2(
 )
 discovery.set_service_url('https://api.us-south.discovery.watson.cloud.ibm.com/instances/62dc0387-6c6f-4128-b479-00cf5dea09ef')
 
-# Watsonx API Setup
+# Watsonx Model Setup
 url = "https://us-south.ml.cloud.ibm.com"
 api_key = "zf-5qgRvW-_RMBGb0bQw5JPPGGj5wdYpLVypdjQxBGJz"
 watsonx_project_id = "32a4b026-a46a-48df-aae3-31e16caabc3b"
 model_type = "meta-llama/llama-3-1-70b-instruct"
-max_tokens = 4000 # Hardcoded as requested
-min_tokens = 50
-decoding = DecodingMethods.GREEDY
-temperature = 0.7 # Hardcoded as requested
+
+# Streamlit UI setup
+st.set_page_config(page_title="Watsonx AI and Discovery Integration", layout="wide")
+st.title("Watsonx AI and Discovery Integration")
+
+# Sidebar for Model Parameters
+st.sidebar.header("Watsonx Model Settings")
+max_tokens = st.sidebar.slider("Max Tokens", 50, 200, 100)
+min_tokens = st.sidebar.slider("Min Tokens", 0, 50, 50)
+decoding = st.sidebar.selectbox("Decoding Method", [DecodingMethods.GREEDY, DecodingMethods.SAMPLE])
+temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.7)
+
+# Clear Messages button in Sidebar
+if st.sidebar.button("Clear Messages"):
+    st.session_state.history = []
+
+# Display History in Sidebar
+st.sidebar.header("Chat History")
+if "history" not in st.session_state:
+    st.session_state.history = []
+for i, (q, a) in enumerate(st.session_state.history):
+    st.sidebar.write(f"Q{i+1}: {q}")
+    st.sidebar.write(f"A{i+1}: {a}")
 
 # Define the model generator function
 def get_model(model_type, max_tokens, min_tokens, decoding, temperature):
@@ -40,57 +59,50 @@ def get_model(model_type, max_tokens, min_tokens, decoding, temperature):
     )
     return model
 
-# Streamlit UI setup
-st.title("Watsonx AI and Discovery Integration")
-st.write("Ask your question below to get an answer from Watson Discovery and Watsonx AI.")
+st.write("This app allows you to ask questions, which will be answered by a combination of Watson Discovery and the Watsonx model.")
 
 # Input for the question
 question = st.text_input("Enter your question:")
 
 if st.button('Get Answer'):
     if question:
-        try:
-            # Query IBM Watson Discovery
-            response = discovery.query(
-                project_id='016da9fc-26f5-464a-a0b8-c9b0b9da83c7',
-                collection_ids=['1d91d603-cd71-5cf5-0000-019325bcd328'],
-                passages={'enabled': True, 'max_per_document': 5, 'find_answers': True},
-                natural_language_query=question
-            ).get_result()
-
-            # Process the Discovery response
-            if response.get('results'):
-                passages = response['results'][0].get('document_passages', [])
-                # Extracting and formatting passages
-                passages = [
-                    p['passage_text'].replace('<em>', '').replace('</em>', '').replace('\n', '')
-                    for p in passages
-                ]
-                context = ' '.join(passages)
+        # Query Watson Discovery
+        response = discovery.query(
+            project_id='016da9fc-26f5-464a-a0b8-c9b0b9da83c7',
+            collection_ids=['1d91d603-cd71-5cf5-0000-019325bcd328'],
+            passages={'enabled': True, 'max_per_document': 5, 'find_answers': True},
+            natural_language_query=question
+        ).get_result()
 
-                # Prepare prompt for Watsonx
-                prompt = (
-                    "<s>[INST] <<SYS>> "
-                    "Please answer the following question in one sentence using this text. "
-                    "If the question is unanswerable, say 'unanswerable'. "
-                    "Do not include irrelevant information. "
-                    "Question: " + question +
-                    " <</SYS>>" + context + " [/INST]"
-                )
+        # Process the Discovery response
+        passages = response['results'][0]['document_passages']
+        passages = [p['passage_text'].replace('<em>', '').replace('</em>', '').replace('\n', '') for p in passages]
+        context = '\n '.join(passages)
 
-                # Generate response using Watsonx
-                model = get_model(model_type, max_tokens, min_tokens, decoding, temperature)
-                generated_response = model.generate(prompt)
-                response_text = generated_response['results'][0]['generated_text']
+        # Prepare the prompt for Watsonx
+        prompt = (
+            "<s>[INST] <<SYS>> "
+            "Please answer the following question in one sentence using this text. "
+            "If the question is unanswerable, say 'unanswerable'. "
+            "If you responded to the question, don't say 'unanswerable'. "
+            "Do not include information that's not relevant to the question. "
+            "Do not answer other questions. "
+            "Make sure the language used is English. "
+            "Do not use repetitions. "
+            "Question: " + question +
+            " <</SYS>> " + context + " [/INST]"
        )

-                # Display the answer
-                st.subheader("Generated Answer:")
-                st.write(response_text)
-            else:
-                st.write("No relevant results found in Discovery.")
+        # Generate the answer using Watsonx
+        model = get_model(model_type, max_tokens, min_tokens, decoding, temperature)
+        generated_response = model.generate(prompt)
+        response_text = generated_response['results'][0]['generated_text']
 
-        except Exception as e:
-            st.error(f"An error occurred: {str(e)}")
+        # Display the generated response
+        st.subheader("Generated Answer:")
+        st.write(response_text)
 
+        # Add question and answer to history
+        st.session_state.history.append((question, response_text))
     else:
         st.error("Please enter a question!")
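Note on the untouched helper: both versions of app.py call get_model(model_type, max_tokens, min_tokens, decoding, temperature), but the hunks only show the closing parenthesis and the return model line of that function, so the diff never displays how the sidebar values reach watsonx. The sketch below is an illustration only, assuming the standard ibm_watson_machine_learning Model/GenParams API and the module-level url, api_key, and watsonx_project_id defined earlier in app.py; the repository's actual get_model body may differ.

# Illustrative sketch only: the real get_model body is not shown in this diff.
def get_model(model_type, max_tokens, min_tokens, decoding, temperature):
    # Map the sidebar values onto watsonx text-generation parameters
    generate_params = {
        GenParams.MAX_NEW_TOKENS: max_tokens,
        GenParams.MIN_NEW_TOKENS: min_tokens,
        GenParams.DECODING_METHOD: decoding,
        GenParams.TEMPERATURE: temperature,
    }
    # Instantiate the foundation model against the watsonx.ai project
    model = Model(
        model_id=model_type,
        params=generate_params,
        credentials={"apikey": api_key, "url": url},
        project_id=watsonx_project_id,
    )
    return model

With a helper along these lines in place, the app is started the usual way (streamlit run app.py), and a changed slider value takes effect on the next "Get Answer" click, since get_model is called inside the button handler.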