import os
import gradio as gr
from openai import OpenAI
from optillm.cot_reflection import cot_reflection
from optillm.rto import round_trip_optimization
from optillm.z3_solver import Z3SymPySolverSystem
from optillm.self_consistency import advanced_self_consistency_approach
from optillm.plansearch import plansearch
from optillm.leap import leap
from optillm.reread import re2_approach
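
# Gradio demo for optillm (https://github.com/codelion/optillm): a chat UI that
# can route requests through inference-time optimization techniques on top of
# models served via the OpenRouter API. The API key is read from the
# environment; requests will fail without it.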
API_KEY = os.environ.get("OPENROUTER_API_KEY")
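
# Run one prompt through two (model, approach) pairs for side-by-side comparison.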
def compare_responses(message, model1, approach1, model2, approach2, system_message, max_tokens, temperature, top_p):
    response1 = respond(message, [], model1, approach1, system_message, max_tokens, temperature, top_p)
    response2 = respond(message, [], model2, approach2, system_message, max_tokens, temperature, top_p)
    return response1, response2
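
# Flatten an OpenAI-style message list into a system prompt plus a plain-text
# transcript, the input format the optillm approach functions expect.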
def parse_conversation(messages):
system_prompt = ""
conversation = []
for message in messages:
role = message['role']
content = message['content']
if role == 'system':
system_prompt = content
elif role in ['user', 'assistant']:
conversation.append(f"{role.capitalize()}: {content}")
initial_query = "\n".join(conversation)
return system_prompt, initial_query
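
# Core handler: with approach "none" the prompt goes straight to the model;
# otherwise it is routed through the selected optillm technique.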
def respond(message, history, model, approach, system_message, max_tokens, temperature, top_p):
    try:
        client = OpenAI(api_key=API_KEY, base_url="https://openrouter.ai/api/v1")
        # Rebuild an OpenAI-style message list from the Gradio chat history.
        messages = [{"role": "system", "content": system_message}]
        for val in history:
            if val[0]:
                messages.append({"role": "user", "content": val[0]})
            if val[1]:
                messages.append({"role": "assistant", "content": val[1]})
        messages.append({"role": "user", "content": message})
        if approach == "none":
            # No optimization technique: plain chat completion via OpenRouter.
            response = client.chat.completions.create(
                extra_headers={
                    "HTTP-Referer": "https://github.com/codelion/optillm",
                    "X-Title": "optillm"
                },
                model=model,
                messages=messages,
                max_tokens=max_tokens,
                temperature=temperature,
                top_p=top_p,
            )
            return response.choices[0].message.content
        else:
            system_prompt, initial_query = parse_conversation(messages)
            if approach == 'rto':
                final_response, _ = round_trip_optimization(system_prompt, initial_query, client, model)
            elif approach == 'z3':
                z3_solver = Z3SymPySolverSystem(system_prompt, client, model)
                final_response, _ = z3_solver.process_query(initial_query)
            elif approach == "self_consistency":
                final_response, _ = advanced_self_consistency_approach(system_prompt, initial_query, client, model)
            elif approach == "cot_reflection":
                final_response, _ = cot_reflection(system_prompt, initial_query, client, model)
            elif approach == 'plansearch':
                # plansearch returns a list of candidate responses; use the first one.
                response, _ = plansearch(system_prompt, initial_query, client, model)
                final_response = response[0]
            elif approach == 'leap':
                final_response, _ = leap(system_prompt, initial_query, client, model)
            elif approach == 're2':
                final_response, _ = re2_approach(system_prompt, initial_query, client, model)
            return final_response
    except Exception as e:
        error_message = f"Error in respond function: {str(e)}\nType: {type(e).__name__}"
        print(error_message)
        # Return the error so it is shown in the UI instead of a silent None.
        return error_message
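
# Dropdown of free model slugs available on OpenRouter.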
def create_model_dropdown():
    return gr.Dropdown(
        ["meta-llama/llama-3.1-8b-instruct:free", "nousresearch/hermes-3-llama-3.1-405b:free", "meta-llama/llama-3.2-1b-instruct:free",
         "mistralai/mistral-7b-instruct:free", "mistralai/pixtral-12b:free", "meta-llama/llama-3.1-70b-instruct:free",
         "qwen/qwen-2-7b-instruct:free", "qwen/qwen-2-vl-7b-instruct:free", "google/gemma-2-9b-it:free", "liquid/lfm-40b:free",
         "meta-llama/llama-3.1-405b-instruct:free", "openchat/openchat-7b:free", "meta-llama/llama-3.2-90b-vision-instruct:free",
         "meta-llama/llama-3.2-11b-vision-instruct:free", "meta-llama/llama-3-8b-instruct:free", "meta-llama/llama-3.2-3b-instruct:free",
         "microsoft/phi-3-medium-128k-instruct:free", "microsoft/phi-3-mini-128k-instruct:free", "huggingfaceh4/zephyr-7b-beta:free"],
        value="meta-llama/llama-3.2-1b-instruct:free", label="Model"
    )
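
# Dropdown of the optimization approaches bundled with optillm.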
def create_approach_dropdown():
    return gr.Dropdown(
        ["none", "leap", "plansearch", "cot_reflection", "rto", "self_consistency", "z3", "re2"],
        value="none", label="Approach"
    )
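
# GitHub star button for the optillm repo, shown at the top of the page.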
html = """<iframe src="https://ghbtns.com/github-btn.html?user=codelion&repo=optillm&type=star&count=true&size=large" frameborder="0" scrolling="0" width="170" height="30" title="GitHub"></iframe>
"""
with gr.Blocks() as demo:
gr.Markdown("# optillm - Optimizing LLM Inference")
gr.HTML(html)
with gr.Row():
system_message = gr.Textbox(value="", label="System message")
max_tokens = gr.Slider(minimum=1, maximum=4096, value=1024, step=1, label="Max new tokens")
temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
    with gr.Tabs():
        with gr.TabItem("Chat"):
            model = create_model_dropdown()
            approach = create_approach_dropdown()
            chatbot = gr.Chatbot()
            msg = gr.Textbox()
            with gr.Row():
                submit = gr.Button("Submit")
                clear = gr.Button("Clear")
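
            # Two-step event flow: `user` appends the message to the history and
            # clears the textbox, then `bot` generates the assistant reply.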
            def user(user_message, history):
                return "", history + [[user_message, None]]

            def bot(history, model, approach, system_message, max_tokens, temperature, top_p):
                user_message = history[-1][0]
                bot_message = respond(user_message, history[:-1], model, approach, system_message, max_tokens, temperature, top_p)
                history[-1][1] = bot_message
                return history

            msg.submit(user, [msg, chatbot], [msg, chatbot]).then(
                bot, [chatbot, model, approach, system_message, max_tokens, temperature, top_p], chatbot
            )
            submit.click(user, [msg, chatbot], [msg, chatbot]).then(
                bot, [chatbot, model, approach, system_message, max_tokens, temperature, top_p], chatbot
            )
            clear.click(lambda: None, None, chatbot, queue=False)
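
        # Compare tab: run one prompt through two model/approach pairs at once.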
with gr.TabItem("Compare"):
with gr.Row():
model1 = create_model_dropdown()
approach1 = create_approach_dropdown()
model2 = create_model_dropdown()
approach2 = create_approach_dropdown()
compare_input = gr.Textbox(label="Enter your message for comparison")
compare_button = gr.Button("Compare")
with gr.Row():
output1 = gr.Textbox(label="Response 1")
output2 = gr.Textbox(label="Response 2")
compare_button.click(
compare_responses,
inputs=[compare_input, model1, approach1, model2, approach2, system_message, max_tokens, temperature, top_p],
outputs=[output1, output2]
)
if __name__ == "__main__":
    demo.launch()
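
# To run locally (assuming gradio, openai, and optillm are installed):
#   export OPENROUTER_API_KEY=<your key>
#   python app.py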