File size: 5,970 Bytes
edda4ff
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
import os
import subprocess

# Clone the model repository on first run so its Python sources are importable.
repo_url = "https://huggingface.co/piyush2102020/veronica_model"
if not os.path.exists("veronica_model"):
    # check=True: fail loudly now rather than with an ImportError later.
    subprocess.run(["git", "clone", repo_url], check=True)

import sys
sys.path.append("veronica_model")  # Add the repo to the Python path

import gradio as gr
from transformers import GPT2Tokenizer
from veronica import Veronica, VeronicaConfig
import firebase_admin
import json
from firebase_admin import credentials, db
import time


# Load tokenizer and model (inference only, so switch to eval mode).
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = Veronica(VeronicaConfig())
model.eval()

# Firebase setup: both the database URL and the service-account credentials
# are injected through environment variables.
firebase_db_url = os.getenv("firebase_db_url")
# BUG FIX: os.getenv returns a str; json.load expects a file-like object and
# raised AttributeError here. json.loads parses the JSON string directly.
cred_dict = json.loads(os.getenv("firebase_credentials"))
cred = credentials.Certificate(cred_dict)
firebase_app = firebase_admin.initialize_app(cred, {
    'databaseURL': firebase_db_url
})

# All RLHF feedback records are pushed under this database node.
ref = db.reference('/rlhf')

# Build the raw prompt string the model was trained on.
def build(question):
    """Wrap *question* in the user/bot delimiter tokens and lowercase it."""
    return "###user ###{}###bot ###".format(question).lower()

# Chat function
def veronica_chat(history=None, question=None, max_new_tokens=10):
    """Handle one chat turn: generate two candidate answers for RLHF voting.

    Args:
        history: Gradio "messages"-style history (list of role/content dicts),
            or None for a fresh conversation.
        question: The user's prompt text from the input textbox.
        max_new_tokens: Generation budget. Comes from a gr.Slider, which
            delivers a float, so it is coerced to int before use.

    Returns:
        The updated history list (mutated in place and returned for Gradio).
    """
    # The two candidates are stashed in globals so feedback() can read them.
    global generated_answer1, generated_answer2
    if history is None:
        history = []

    if len(history) == 0:
        history.append({"role": "assistant", "content": "Hey, I am Veronica. How can I assist you today?"})
    else:
        history.append({"role": "user", "content": question})
        processing_message = "<i>Processing your question...</i>"
        history.append({"role": "assistant", "content": processing_message})

        # Generate two candidate responses for side-by-side comparison.
        # BUG FIX: the slider value is a float; coerce to int for generate().
        prompt = build(question)
        responses, _ = model.generate(prompt, num_samples=2, max_new_tokens=int(max_new_tokens))

        # Strip the prompt and every chat-delimiter token from each sample.
        answers = []
        for r in responses:
            for item in [prompt, "###user ###", "###bot ###", "###", "###end", "end"]:
                r = r.replace(item, "")
            answers.append(r)

        # BUG FIX: the original bare tuple unpack raised ValueError whenever
        # generate() returned anything other than exactly two samples; pad
        # with empty strings so the UI still renders.
        while len(answers) < 2:
            answers.append("")
        generated_answer1, generated_answer2 = answers[0], answers[1]

        # Replace the "Processing" placeholder with both candidates rendered
        # side by side in a two-pane HTML layout.
        history[-1] = {
            "role": "assistant",
            "content": f"<div style='display: flex; justify-content: space-between; width: 100%;'>"
                       f"<div style='flex: 1; padding: 10px; background-color: #f9f9f9;'>"
                       f"<b>Response 1:</b><br>{generated_answer1}<br></div>"
                       f"<div style='width: 4px; background-color: black;'></div>"
                       f"<div style='flex: 1; padding: 10px; background-color: #f9f9f9;'>"
                       f"<b>Response 2:</b><br>{generated_answer2}<br></div></div>"
        }
    return history

# Clear textboxes
def clear_textboxes(*args):
    """Reset both input textboxes; *args* (their current values) are ignored."""
    return ("", "")

# Feedback function
def feedback(prompt="", flagged_answer="", decision="", human_response="", history=None):
    """Persist one RLHF feedback record to Firebase and acknowledge in chat.

    Args:
        prompt: The question the user asked (from the question textbox).
        flagged_answer: Preferred candidate, "Response 1" or "Response 2".
        decision: Function/category chosen in the dropdown.
        human_response: Optional human-written gold answer.
        history: Chat history to append the acknowledgment to (or None).

    Returns:
        The updated history list.
    """
    # BUG FIX: if feedback is submitted before any generation happened, the
    # globals generated_answer1/2 do not exist yet and the original raised
    # NameError; fall back to an empty string instead of crashing the callback.
    g = globals()
    if flagged_answer == "Response 1":
        selected_answer = g.get("generated_answer1", "")
    else:
        selected_answer = g.get("generated_answer2", "")

    # Record pushed under /rlhf for later preference tuning.
    payload = {
        "prompt": prompt,
        "selected_answer_content": selected_answer,
        "decision": decision,
        "human_response": human_response,
        "time_stamp": time.time()
    }
    print(payload)
    ref.push(payload)

    # Update chat history with acknowledgment
    if history is None:
        history = []
    history.append({"role": "assistant", "content": "Thanks for your feedback! How can I assist you further?"})
    return history

# Gradio Interface
with gr.Blocks(theme=gr.themes.Monochrome()) as interface:
    # Inline CSS: strip default spacing and shrink chat/textbox fonts.
    interface.css = """
    .gradio-container { margin: 0; padding: 0; }
    .gradio-row, .gradio-column { margin: 0; padding: 0; }
    .gradio-chatbot .message { font-size: 12px !important; }
    .gradio-textbox textarea { font-size: 12px !important; }
    """

    gr.Markdown("# Veronica AI")
    gr.Markdown("""
    **Chat with Veronica**, an AI model that is currently in the **fine-tuning stage**. During this stage, Veronica is learning from user interactions and adapting to provide better, more context-aware answers.
    """)

    with gr.Row():
        # Left column: RLHF controls (token budget, function, preference, gold answer).
        with gr.Column():
            gr.Markdown("## RLHF Instructions")
            gr.Markdown("""
            - Ask a question to see two responses.
            - Select your preferred response and provide feedback if needed.
            """)
            with gr.Row():
                # Forwarded to veronica_chat as max_new_tokens (Slider yields a float).
                slider=gr.Slider(minimum=10,maximum=500,label="Token Length")
            # Function/category choices come from the model config.
            # NOTE(review): default is functions_list[3] — assumes the config
            # always has at least 4 entries; confirm against VeronicaConfig.
            drop_down = gr.Dropdown(
                choices=VeronicaConfig().functions_list,
                label="Select Function",
                value=VeronicaConfig().functions_list[3]
            )
            # Which of the two generated candidates the user prefers.
            answer_flag = gr.Dropdown(choices=["Response 1", "Response 2"], label="Flag Response", value="Response 1")
            answer_input_human = gr.Textbox(placeholder="Write your answer here...", label="Human Answer")
            submit_feedback = gr.Button("Submit Feedback")
        # Right column: the chat window, pre-seeded with the greeting message.
        with gr.Column(scale=8):
            chatbot = gr.Chatbot(
                label="Chat",
                height=420,
                type="messages",
                value=[{"role": "assistant", "content": "Hey, I am Veronica. How can I assist you today?"}]
            )
            with gr.Row():
                with gr.Column(scale=6):
                    question_input = gr.Textbox(placeholder="Chat with Veronica...", label="")
                with gr.Column():
                    send_button = gr.Button("Send")

    # Link components to functions
    send_button.click(fn=veronica_chat, inputs=[chatbot, question_input,slider], outputs=[chatbot])
    # Two handlers fire on the same button click: the first records the
    # feedback, the second clears both textboxes.
    submit_feedback.click(fn=feedback, inputs=[question_input, answer_flag, drop_down, answer_input_human, chatbot], outputs=[chatbot])
    submit_feedback.click(fn=clear_textboxes, inputs=[question_input, answer_input_human], outputs=[question_input, answer_input_human])

# Launch interface
interface.launch(share=True)