File size: 3,321 Bytes
ecbd714
9afac3f
3dbf89f
9afac3f
ecbd714
 
1871bfe
3dbf89f
ecbd714
 
3dbf89f
ecbd714
 
 
 
9afac3f
3dbf89f
a218b95
 
1871bfe
 
4bde294
 
 
ecbd714
9afac3f
 
 
 
 
 
 
ecbd714
1871bfe
 
 
 
 
ecbd714
 
 
9afac3f
ecbd714
 
1871bfe
ecbd714
 
 
1871bfe
ecbd714
7c24283
ecbd714
 
 
 
 
 
 
 
 
 
 
7c24283
ecbd714
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dffd056
00cd19f
ecbd714
 
 
 
c19830e
ecbd714
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
from src.llamaindex_palm import LlamaIndexPaLM, LlamaIndexPaLMText

import gradio as gr

from typing import List
import time
import logging

# import dotenv
# dotenv.load_dotenv(".env")

# Llama-Index LLM
# Build the PaLM text backend (temperature 0.8 per model_kwargs) and wrap it
# in the project's LlamaIndex adaptor, then load the pre-built vector index
# from Pinecone. NOTE(review): runs at import time — presumably requires
# Pinecone/PaLM credentials in the environment; confirm against src module.
llm_backend = LlamaIndexPaLMText(model_kwargs={'temperature': 0.8})
llm = LlamaIndexPaLM(model=llm_backend)
llm.get_index_from_pinecone()

# Gradio
# Flat alternating conversation history used by the handlers below:
# even indices hold user messages, odd indices hold bot replies.
chat_history = []

def clear_chat() -> None:
    """Reset the conversation by rebinding the module-level history to a
    fresh empty list.

    Returns ``None`` (implicitly), which — when wired as a Gradio callback
    with the chatbot as output — clears the chatbot component as well.
    """
    global chat_history
    chat_history = []

def get_chat_history(chat_history: List[str]) -> str:
    """Format an alternating message list into a readable transcript.

    Messages at even indices are user turns, odd indices are bot turns
    (matching how the module-level history is appended to).

    Args:
        chat_history: Flat list of messages, alternating user/bot.

    Returns:
        One string with each message prefixed by "User: \\n" or "Bot: \\n"
        and followed by a newline; empty string for an empty history.
    """
    # enumerate + join instead of a manual counter and quadratic `+=`.
    parts = []
    for ind, message in enumerate(chat_history):
        speaker = "User" if ind % 2 == 0 else "Bot"
        parts.append(f"{speaker}: \n{message}\n")
    return "".join(parts)

def generate_text(prompt: str, llamaindex_llm: LlamaIndexPaLM):
    """Generate a reply to `prompt` and record both turns in the history.

    Args:
        prompt: The user's message.
        llamaindex_llm: LLM wrapper exposing ``generate_text(prompt, history)``.

    Returns:
        The generated reply string. Side effect: appends the prompt and the
        reply to the module-level ``chat_history``.
    """
    global chat_history

    # Bug fix: `logger` was only bound inside the __main__ guard, so calling
    # this function from an importing module raised NameError. Resolve the
    # same named logger here; logging.getLogger returns the shared instance.
    logger = logging.getLogger('app')

    logger.info("Generating Message...")
    logger.info(f"User Message:\n{prompt}\n")

    result = llamaindex_llm.generate_text(prompt, chat_history)
    chat_history.append(prompt)
    chat_history.append(result)

    logger.info(f"Replied Message:\n{result}\n")
    return result

if __name__ == "__main__":
    # Console logging for the app process. NOTE(review): `logger` is bound
    # only here, so module-level functions that reference it work when the
    # file runs as a script but would fail if imported — confirm intent.
    logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %I:%M:%S %p', level=logging.INFO)
    logger = logging.getLogger('app')

    try:
        # Build the Gradio UI: chatbot pane, message textbox, Clear/Send row.
        with gr.Blocks(css=".input textarea {font-size: 16px !important}") as app:
            chatbot = gr.Chatbot(
                bubble_full_width=False, 
                container=True, 
                show_share_button=False, 
                avatar_images=[None, './asset/akag-g-only.png']
            )
            msg = gr.Textbox(
                show_label=False,
                label="Type your message...", 
                placeholder="Hi Gerard, can you introduce yourself?",
                container=False, 
                elem_classes="input"
            )
            with gr.Row():
                clear = gr.Button("Clear", scale=1)
                send = gr.Button(
                    value="", 
                    variant="primary",
                    icon="./asset/send-message.png", 
                    scale=1
                )

            def user(user_message, history):
                # Append the user's turn with an empty bot slot (None) and
                # return "" to clear the textbox.
                return "", history + [[user_message, None]]

            def bot(history):
                # Generate a reply to the latest user message, then stream it
                # back one character at a time (10 ms apart) for a typing
                # effect; each yield re-renders the chatbot.
                bot_message = generate_text(history[-1][0], llm)
                history[-1][1] = ""
                for character in bot_message:
                    history[-1][1] += character
                    time.sleep(0.01)
                    yield history

            # Enter key and Send button trigger the same user -> bot chain;
            # `user` runs unqueued, then `bot` streams via the queue.
            msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
                bot, chatbot, chatbot
            )
            send.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
                bot, chatbot, chatbot
            )
            # clear_chat resets the global history and returns None, which
            # also empties the chatbot component.
            clear.click(clear_chat, None, chatbot, queue=False)

            gr.HTML("""
                <p><center><i>Disclaimer: This RAG app is for demostration only. LLM hallucination might occur.</i></center></p>
                <p><center>Hosted on 🤗 Spaces | Built with 🌴 Google PaLM & 🦙 LlamaIndex | Last updated on Oct 30, 2023</center></p>
            """)
            
        # Queueing is required for the streaming (generator) bot handler.
        app.queue()
        app.launch()
    except Exception as e:
        # Top-level boundary: log the full traceback rather than crash silently.
        logger.exception(e)