piyush2102020
committed on
Create app.py
app.py
ADDED
@@ -0,0 +1,153 @@
import os
import subprocess
import sys

# Clone the model repository on first start so the Veronica package is importable.
repo_url = "https://huggingface.co/piyush2102020/veronica_model"
if not os.path.exists("veronica_model"):
    subprocess.run(["git", "clone", repo_url])

sys.path.append("veronica_model")  # Add the repo to the Python path

import gradio as gr
from transformers import GPT2Tokenizer
from veronica import Veronica, VeronicaConfig
import firebase_admin
import json
from firebase_admin import credentials, db
import time


# Load tokenizer and model
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = Veronica(VeronicaConfig())
model.eval()

# Firebase setup: both values come from environment variables (Space secrets).
firebase_key_path = os.getenv("firebase_db_url")  # Realtime Database URL (despite the variable name)
# The credentials secret is assumed to hold the service-account JSON as a string,
# so it is parsed with json.loads (json.load expects a file object, not a string).
cred = json.loads(os.getenv("firebase_credentials"))
cred = credentials.Certificate(cred)
firebase_app = firebase_admin.initialize_app(cred, {
    'databaseURL': firebase_key_path
})

ref = db.reference('/rlhf')

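# Optional sanity check (a sketch, assuming the secrets above are configured): a
# shallow read of the /rlhf node surfaces credential or URL problems at startup
# rather than at feedback time. Uncomment to enable.
# try:
#     ref.get(shallow=True)
#     print("Firebase /rlhf node reachable.")
# except Exception as exc:
#     print(f"Warning: could not reach /rlhf: {exc}")
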
# Define the build function
def build(question):
    user_token = "###user ###"
    bot_token = "###bot ###"
    return f"{user_token}{question}{bot_token}".lower()

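# Example of the prompt this produces (derived directly from the tokens above):
#   build("What is AI?") -> "###user ###what is ai?###bot ###"
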
# Chat function
def veronica_chat(history=None, question=None, max_new_tokens=10):
    global generated_answer1, generated_answer2
    if history is None:
        history = []

    if len(history) == 0:
        history.append({"role": "assistant", "content": "Hey, I am Veronica. How can I assist you today?"})
    else:
        history.append({"role": "user", "content": question})
    processing_message = "<i>Processing your question...</i>"
    history.append({"role": "assistant", "content": processing_message})

    # Generate two candidate responses. model.generate is the Veronica repo's own
    # method (assumed to return the decoded texts first), not transformers' generate.
    prompt = build(question)
    responses, _ = model.generate(prompt, num_samples=2, max_new_tokens=max_new_tokens)
    answers = []

    # Strip the prompt and the special tokens from each sample
    for r in responses:
        for item in [prompt, "###user ###", "###bot ###", "###", "###end", "end"]:
            r = r.replace(item, "")
        answers.append(r)

    generated_answer1, generated_answer2 = answers

    # Update history with generated responses
    history[-1] = {
        "role": "assistant",
        "content": f"<div style='display: flex; justify-content: space-between; width: 100%;'>"
                   f"<div style='flex: 1; padding: 10px; background-color: #f9f9f9;'>"
                   f"<b>Response 1:</b><br>{generated_answer1}<br></div>"
                   f"<div style='width: 4px; background-color: black;'></div>"
                   f"<div style='flex: 1; padding: 10px; background-color: #f9f9f9;'>"
                   f"<b>Response 2:</b><br>{generated_answer2}<br></div></div>"
    }
    return history

# Clear textboxes
def clear_textboxes(*args):
    return "", ""

# Feedback function
def feedback(prompt="", flagged_answer="", decision="", human_response="", history=None):
    global generated_answer1, generated_answer2
    selected_answer = generated_answer1 if flagged_answer == "Response 1" else generated_answer2

    # Prepare Firebase payload
    payload = {
        "prompt": prompt,
        "selected_answer_content": selected_answer,
        "decision": decision,
        "human_response": human_response,
        "time_stamp": time.time()
    }
    print(payload)
    ref.push(payload)

    # Update chat history with acknowledgment
    if history is None:
        history = []
    history.append({"role": "assistant", "content": "Thanks for your feedback! How can I assist you further?"})
    return history

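# Example of a record as it lands under /rlhf (field names from the payload above,
# values illustrative):
#   {"prompt": "what is ai?", "selected_answer_content": "...", "decision": "...",
#    "human_response": "...", "time_stamp": 1712345678.9}
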
# Gradio Interface
custom_css = """
.gradio-container { margin: 0; padding: 0; }
.gradio-row, .gradio-column { margin: 0; padding: 0; }
.gradio-chatbot .message { font-size: 12px !important; }
.gradio-textbox textarea { font-size: 12px !important; }
"""

# The CSS is passed through the css= argument so it is applied when the app renders.
with gr.Blocks(theme=gr.themes.Monochrome(), css=custom_css) as interface:
    gr.Markdown("# Veronica AI")
    gr.Markdown("""
    **Chat with Veronica**, an AI model that is currently in the **fine-tuning stage**. During this stage, Veronica is learning from user interactions and adapting to provide better, more context-aware answers.
    """)

    with gr.Row():
        with gr.Column():
            gr.Markdown("## RLHF Instructions")
            gr.Markdown("""
            - Ask a question to see two responses.
            - Select your preferred response and provide feedback if needed.
            """)
            with gr.Row():
                slider = gr.Slider(minimum=10, maximum=500, label="Token Length")
                drop_down = gr.Dropdown(
                    choices=VeronicaConfig().functions_list,
                    label="Select Function",
                    value=VeronicaConfig().functions_list[3]
                )
            answer_flag = gr.Dropdown(choices=["Response 1", "Response 2"], label="Flag Response", value="Response 1")
            answer_input_human = gr.Textbox(placeholder="Write your answer here...", label="Human Answer")
            submit_feedback = gr.Button("Submit Feedback")
        with gr.Column(scale=8):
            chatbot = gr.Chatbot(
                label="Chat",
                height=420,
                type="messages",
                value=[{"role": "assistant", "content": "Hey, I am Veronica. How can I assist you today?"}]
            )
            with gr.Row():
                with gr.Column(scale=6):
                    question_input = gr.Textbox(placeholder="Chat with Veronica...", label="")
                with gr.Column():
                    send_button = gr.Button("Send")

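    # Note on the wiring below: Gradio passes inputs= to the callback positionally,
    # so [chatbot, question_input, slider] maps to veronica_chat(history, question,
    # max_new_tokens), and [question_input, answer_flag, drop_down,
    # answer_input_human, chatbot] maps to feedback(prompt, flagged_answer,
    # decision, human_response, history).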
    # Link components to functions
    send_button.click(fn=veronica_chat, inputs=[chatbot, question_input, slider], outputs=[chatbot])
    submit_feedback.click(fn=feedback, inputs=[question_input, answer_flag, drop_down, answer_input_human, chatbot], outputs=[chatbot])
    submit_feedback.click(fn=clear_textboxes, inputs=[question_input, answer_input_human], outputs=[question_input, answer_input_human])

# Launch interface
interface.launch(share=True)
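
# Running outside Spaces (a sketch; assumes the same two secrets are exported as
# environment variables before launching with `python app.py`):
#   export firebase_db_url="https://<project-id>-default-rtdb.firebaseio.com/"
#   export firebase_credentials='<service-account JSON as a single string>'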