File size: 4,154 Bytes
a551d6c
 
 
 
 
 
f4748d8
82a4984
a551d6c
 
f221c4b
a551d6c
08066aa
a551d6c
d2e92de
d07bb27
82a4984
 
ec96b5c
f4748d8
 
 
 
 
92173e2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d2e92de
92173e2
 
a551d6c
f4748d8
 
 
 
92173e2
 
 
 
ec96b5c
92173e2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
08066aa
 
92173e2
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import os

import streamlit as st
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import DiscoveryV2
from ibm_watson_machine_learning.foundation_models import Model
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams

# IBM Watson Discovery credentials.
# SECURITY: API keys and instance URLs were hard-coded in source. They are now
# read from environment variables, with the original values kept as fallbacks
# so existing deployments don't break. Rotate these secrets and delete the
# fallbacks as soon as possible — they have been committed to version control.
authenticator = IAMAuthenticator(
    os.environ.get("WD_API_KEY", "5sSmoI6y0ZHP7D3a6Iu80neypsbK3tsUZR_VdRAb7ed2")
)
discovery = DiscoveryV2(
    version='2020-08-30',
    authenticator=authenticator
)
discovery.set_service_url(
    os.environ.get(
        "WD_SERVICE_URL",
        "https://api.us-south.discovery.watson.cloud.ibm.com/instances/62dc0387-6c6f-4128-b479-00cf5dea09ef",
    )
)

# Watsonx foundation-model configuration (consumed by get_model below).
url = os.environ.get("WATSONX_URL", "https://us-south.ml.cloud.ibm.com")
api_key = os.environ.get("WATSONX_API_KEY", "zf-5qgRvW-_RMBGb0bQw5JPPGGj5wdYpLVypdjQxBGJz")
watsonx_project_id = os.environ.get("WATSONX_PROJECT_ID", "32a4b026-a46a-48df-aae3-31e16caabc3b")
model_type = "meta-llama/llama-3-1-70b-instruct"

# Streamlit page setup.
st.set_page_config(page_title="Watsonx AI and Discovery Integration", layout="wide")
st.title("Watsonx AI and Discovery Integration")

# Sidebar: mode selection, optional RAG upload, and LLM tuning parameters.
with st.sidebar:
    st.header("Document Uploader and Mode Selection")
    mode = st.radio("Select Mode", ["Watson Discovery", "LLM"], index=0)

    # File upload for document retrieval in LLM mode.
    # NOTE(review): `uploaded_file` is never read anywhere in this script —
    # the RAG wiring appears to be unfinished. Confirm before removing.
    uploaded_file = st.file_uploader("Upload file for RAG", accept_multiple_files=False, type=["pdf", "docx", "txt", "pptx", "csv", "json", "xml", "yaml", "html"])

    # Model settings (and get_model itself) only exist while in LLM mode;
    # the chat handler below calls get_model only when mode == "LLM".
    if mode == "LLM":
        st.header("Watsonx Model Settings")
        max_tokens = st.slider("Max Output Tokens", 100, 4000, 600)
        decoding = st.radio("Decoding Method", ["greedy", "sample"])
        temperature = st.slider("Temperature", 0.0, 1.0, 0.7)

        def get_model(model_type, max_tokens, temperature, decoding_method=None):
            """Build a Watsonx Model configured with the given generation params.

            Fix: the original accepted model/max_tokens/temperature as
            parameters but read `decoding` implicitly from the enclosing
            scope. `decoding_method` is now an explicit keyword argument;
            its default (None) falls back to the sidebar selection, so the
            existing call site is unchanged.
            """
            if decoding_method is None:
                decoding_method = decoding
            generate_params = {
                GenParams.MAX_NEW_TOKENS: max_tokens,
                GenParams.DECODING_METHOD: decoding_method,
                GenParams.TEMPERATURE: temperature,
            }
            # Credentials come from the module-level watsonx configuration.
            return Model(
                model_id=model_type,
                params=generate_params,
                credentials={"apikey": api_key, "url": url},
                project_id=watsonx_project_id
            )

# Main chat section.
st.header("Chat with Watsonx AI or Discovery")

# Initialize chat history once per browser session.
if "history" not in st.session_state:
    st.session_state.history = []

# Replay prior messages (blue avatar for the user, yellow for the assistant).
for message in st.session_state.history:
    avatar = "🟦" if message["role"] == "user" else "🟨"
    st.chat_message(message["role"], avatar=avatar).markdown(message["content"])

# Chat input. The original `False if mode == "LLM" or mode == "Watson
# Discovery" else True` was always False — `mode` can only be one of those
# two radio values — so the equivalent membership test is used for clarity.
prompt = st.chat_input("Ask your question here", disabled=mode not in ("LLM", "Watson Discovery"))

# Handle a newly submitted question.
if prompt:
    st.chat_message("user", avatar="🟦").markdown(prompt)
    st.session_state.history.append({"role": "user", "content": prompt})

    # Defensive default: the original left response_text unbound when neither
    # branch matched, which would raise NameError at the append below.
    response_text = "Unsupported mode selected."

    if mode == "LLM":
        model = get_model(model_type, max_tokens, temperature)
        # Llama-instruct style wrapper around the raw question.
        prompt_text = f"<s>[INST] <<SYS>> Please answer the question: {prompt}<</SYS>>[/INST]"
        response = model.generate(prompt_text)
        response_text = response['results'][0]['generated_text']

    elif mode == "Watson Discovery":
        query_response = discovery.query(
            project_id='016da9fc-26f5-464a-a0b8-c9b0b9da83c7',  # project_id from notebook
            collection_ids=['1d91d603-cd71-5cf5-0000-019325bcd328'],  # collection_id from notebook
            natural_language_query=prompt,
            count=1
        ).get_result()
        results = query_response.get('results') or []
        if results:
            # Guard against documents without a plain 'text' field instead of
            # letting a KeyError crash the app mid-conversation.
            response_text = results[0].get('text') or "A document matched, but it contains no text field."
        else:
            response_text = "No relevant documents found."

    st.session_state.history.append({"role": "assistant", "content": response_text})
    st.chat_message("assistant", avatar="🟨").markdown(response_text)

# Button to clear chat history.
if st.sidebar.button("Clear Messages"):
    st.session_state.history = []
    # The history replay above already ran this script execution, so without
    # an immediate rerun the old messages stay on screen until the next
    # interaction. Rerun now so the clear takes effect visually on this click.
    st.rerun()