"""Streamlit chatbot that suggests the best HF task for a user's use case.

Sends the user's question plus a fixed task-description context to the
Hugging Face Inference API (extractive QA) and replies with the extracted
answer wrapped in a random response template.
"""
import os
import random

import requests
import streamlit as st
from streamlit_chat import message


@st.cache
def query(payload):
    """POST *payload* to the QA model on the HF Inference API.

    Args:
        payload: dict of the form {"inputs": {"question": ..., "context": ...}}.

    Returns:
        (parsed_json, raw_response) — the decoded JSON body and the
        `requests.Response` so the caller can check `status_code`.
    """
    api_token = os.getenv("api_token")  # HF API token from the environment
    model_id = "deepset/roberta-base-squad2"
    headers = {"Authorization": f"Bearer {api_token}"}
    api_url = f"https://api-inference.huggingface.co/models/{model_id}"
    response = requests.post(api_url, headers=headers, json=payload)
    return response.json(), response


# Fixed context the QA model extracts its answer from.
context = "To extract information from documents, use sentence similarity task. To do sentiment analysis from tweets, use text classification task. To detect masks from images, use object detection task. To extract information from invoices, use named entity recognition from token classification task."

message_history = ["Let's find out the best task for your use case! Tell me about your use case :)"]

for msg in message_history:
    message(msg)  # display all the previous messages

placeholder = st.empty()  # placeholder for the latest exchange
# NOTE: renamed from `input` — the original shadowed the builtin.
user_input = st.text_input("Ask me 🤗")
message_history.append(user_input)

with placeholder.container():
    if message_history[-1] != "":
        message(message_history[-1])  # display the latest message
        message(user_input, is_user=True)  # aligns the message to the right
        data, resp = query(
            {
                "inputs": {
                    "question": user_input,
                    "context": context,
                }
            }
        )
        if resp.status_code == 200:
            model_answer = data["answer"]
            response_templates = [
                f"{model_answer} is the best task for this 🤩",
                f"I think you should use {model_answer} 🪄",
                f"I think {model_answer} should work for you 🤓",
            ]
            # FIX: `random` was used without being imported (NameError at runtime).
            bot_answer = random.choice(response_templates)
            message_history.append(bot_answer)