import os
import time
import datetime
import logging

import gradio as gr
import google.generativeai as genai
import wandb
from wandb.sdk.data_types.trace_tree import Trace

from src.llamaindex_palm import LlamaIndexPaLM

logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %I:%M:%S %p', level=logging.INFO)
logger = logging.getLogger('llm')

# Llama-Index LLM
llm = LlamaIndexPaLM()
llm.set_index_from_pinecone()

# Credentials
genai.configure(api_key=os.getenv('PALM_API_KEY'))

# W&B
wandb.init(project=os.getenv('WANDB_PROJECT'))

# Gradio
chat_history = []

def clear_chat() -> None:
    """Reset the conversation state."""
    global chat_history
    chat_history = []
    return None

def get_chat_history(chat_history) -> str:
    """Format alternating user/bot messages into a single string."""
    ind = 0
    formatted_chat_history = ""
    for message in chat_history:
        formatted_chat_history += f"User: \n{message}\n" if ind % 2 == 0 else f"Bot: \n{message}\n"
        ind += 1
    return formatted_chat_history

def generate_chat(prompt: str, llamaindex_llm: LlamaIndexPaLM):
    """Retrieve context from the Llama-Index vector store, call PaLM, and log the trace to W&B."""
    global chat_history

    # get chat history
    context_chat_history = "\n".join(list(filter(None, chat_history)))

    logger.info("Generating Message...")
    logger.info(f"User Message:\n{prompt}\n")
    chat_history.append(prompt)

    # w&b trace start
    start_time_ms = round(datetime.datetime.now().timestamp() * 1000)
    root_span = Trace(
        name="LLMChain",
        kind="chain",
        start_time_ms=start_time_ms,
        metadata={"user": "Gradio"},
    )

    # get context
    context_from_index = llamaindex_llm.generate_response(prompt)
    logger.info(f"Context from Llama-Index:\n{context_from_index}\n")

    # w&b trace agent
    agent_end_time_ms = round(datetime.datetime.now().timestamp() * 1000)
    agent_span = Trace(
        name="Agent",
        kind="agent",
        status_code="success",
        metadata={
            "framework": "Llama-Index",
            "index_type": "VectorStoreIndex",
            "vector_store": "Pinecone",
            "vector_store_index": llamaindex_llm._index_name,
            "vector_store_namespace": llamaindex_llm._index_namespace,
            "model_name": llamaindex_llm.llm._model_name,
            # "temperature": 0.7,
            # "top_k": 40,
            # "top_p": 0.95,
            "custom_kwargs": llamaindex_llm.llm._model_kwargs,
        },
        start_time_ms=start_time_ms,
        end_time_ms=agent_end_time_ms,
        inputs={"query": prompt},
        outputs={"response": context_from_index},
    )
    root_span.add_child(agent_span)

    prompt_with_context = f"""
[System]
You are in a role play of Gerard Lee and you need to pretend to be him to answer questions from people who are interested in Gerard's background.
Respond to the User Query below in no more than 5 complete sentences, unless specifically asked by the user to elaborate on something.
Use only the History and Context to inform your answers.

[History]
{context_chat_history}

[Context]
{context_from_index}

[User Query]
{prompt}
"""

    try:
        response = genai.generate_text(
            prompt=prompt_with_context,
            safety_settings=[
                {
                    'category': genai.types.HarmCategory.HARM_CATEGORY_UNSPECIFIED,
                    'threshold': genai.types.HarmBlockThreshold.BLOCK_NONE,
                },
            ],
            temperature=0.9,
        )
        result = response.result
        success_flag = "success"
        if result is None:
            result = "Seems something went wrong. Please try again later."
            logger.error("Result with 'None' received\n")
            success_flag = "fail"
    except Exception as e:
        result = "Seems something went wrong. Please try again later."
        logger.error(f"Exception {e} occurred\n")
        success_flag = "fail"

    chat_history.append(result)
    logger.info(f"Bot Message:\n{result}\n")

    # w&b trace llm
    llm_end_time_ms = round(datetime.datetime.now().timestamp() * 1000)
    llm_span = Trace(
        name="LLM",
        kind="llm",
        status_code=success_flag,
        start_time_ms=agent_end_time_ms,
        end_time_ms=llm_end_time_ms,
        inputs={"input": prompt_with_context},
        outputs={"result": result},
    )
    root_span.add_child(llm_span)

    # w&b finalize trace
    root_span.add_inputs_and_outputs(
        inputs={"query": prompt},
        outputs={"result": result}
    )
    root_span._span.end_time_ms = llm_end_time_ms
    root_span.log(name="llm_app_trace")

    return result

with gr.Blocks() as app:
    chatbot = gr.Chatbot(
        bubble_full_width=False,
        container=True,
        show_share_button=False,
        avatar_images=[None, './asset/akag-g-only.png']
    )
    msg = gr.Textbox(
        show_label=False,
        label="Type your message...",
        placeholder="Hi Gerard, can you introduce yourself?",
        container=False,
    )
    with gr.Row():
        clear = gr.Button("Clear", scale=1)
        send = gr.Button(
            value="",
            variant="primary",
            icon="./asset/send-message.png",
            scale=1
        )

    def user(user_message, history):
        # append the user turn to the chat and clear the textbox
        return "", history + [[user_message, None]]

    def bot(history):
        # stream the bot reply character by character for a typing effect
        bot_message = generate_chat(history[-1][0], llm)
        history[-1][1] = ""
        for character in bot_message:
            history[-1][1] += character
            time.sleep(0.01)
            yield history

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    send.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    clear.click(clear_chat, None, chatbot, queue=False)

    gr.HTML("""

        <p>Disclaimer: This is a RAG app for demonstration purposes. LLM hallucination might occur.</p>
        <p>Hosted on 🤗 Spaces. Powered by Google PaLM 🌴</p>
    """)

app.queue()
app.launch()