from src.llamaindex_palm import LlamaIndexPaLM, LlamaIndexPaLMText

import gradio as gr
from typing import List
import time
import logging

# import dotenv
# dotenv.load_dotenv(".env")

# Llama-Index LLM
llm_backend = LlamaIndexPaLMText(model_kwargs={'temperature': 0.8})
llm = LlamaIndexPaLM(model=llm_backend)
llm.get_index_from_pinecone()
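
# The LLM plumbing lives in the custom src.llamaindex_palm module: a PaLM text
# backend (temperature 0.8) wrapped by LlamaIndexPaLM, which also pulls its
# vector index from Pinecone via get_index_from_pinecone().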

# Gradio
chat_history = []

def clear_chat() -> None:
    global chat_history
    chat_history = []
    return None
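
# chat_history is a flat list of alternating user/bot turns
# ([user, bot, user, bot, ...]). get_chat_history renders it as a plain-text
# transcript; generate_text passes it to the LLM wrapper with each new prompt.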
def get_chat_history(chat_history: List[str]) -> str:
    formatted_chat_history = ""
    for ind, message in enumerate(chat_history):
        formatted_chat_history += f"User: \n{message}\n" if ind % 2 == 0 else f"Bot: \n{message}\n"
    return formatted_chat_history
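
# Core request/response step: ask the LlamaIndex/PaLM wrapper for a reply and
# record both sides of the exchange. Relies on the global `logger` configured
# in the __main__ block below.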
def generate_text(prompt: str, llamaindex_llm: LlamaIndexPaLM):
    global chat_history
    logger.info("Generating Message...")
    logger.info(f"User Message:\n{prompt}\n")
    result = llamaindex_llm.generate_text(prompt, chat_history)
    chat_history.append(prompt)
    chat_history.append(result)
    logger.info(f"Replied Message:\n{result}\n")
    return result
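
# Gradio UI: a Chatbot pane, a message Textbox, Clear/Send buttons, and the
# callbacks that wire them together.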
if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %I:%M:%S %p', level=logging.INFO)
    logger = logging.getLogger('app')

    try:
        with gr.Blocks(css=".input textarea {font-size: 16px !important}") as app:
            chatbot = gr.Chatbot(
                bubble_full_width=False,
                container=True,
                show_share_button=False,
                avatar_images=[None, './asset/akag-g-only.png']
            )
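
            # avatar_images is [user_avatar, bot_avatar]; only the bot gets one here.
            # The textbox carries the "input" CSS class so the Blocks-level css rule
            # above can enlarge its font.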
            msg = gr.Textbox(
                show_label=False,
                label="Type your message...",
                placeholder="Hi Gerard, can you introduce yourself?",
                container=False,
                elem_classes="input"
            )

            with gr.Row():
                clear = gr.Button("Clear", scale=1)
                send = gr.Button(
                    value="",
                    variant="primary",
                    icon="./asset/send-message.png",
                    scale=1
                )
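
            # Two-step event pattern: `user` echoes the message into the chat window
            # (clearing the textbox), then `bot` streams the reply back character by
            # character by yielding partial histories.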
            def user(user_message, history):
                return "", history + [[user_message, None]]

            def bot(history):
                bot_message = generate_text(history[-1][0], llm)
                history[-1][1] = ""
                for character in bot_message:
                    history[-1][1] += character
                    time.sleep(0.01)
                    yield history
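
            # Both Enter-to-submit and the Send button trigger the same user -> bot
            # chain; the user step runs with queue=False so the message shows up
            # immediately.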
            msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
                bot, chatbot, chatbot
            )
            send.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
                bot, chatbot, chatbot
            )
            clear.click(clear_chat, None, chatbot, queue=False)

            gr.HTML("""
            <p><center><i>Disclaimer: This RAG app is for demonstration only. LLM hallucination might occur.</i></center></p>
            <p><center>Hosted on 🤗 Spaces</center></p>
            """)

        app.queue()
        app.launch()
    except Exception as e:
        logger.exception(e)