Update app.py
app.py CHANGED
@@ -45,23 +45,23 @@ def respond(message, history, model, approach, system_message, max_tokens, tempe
     system_prompt = system_message
     initial_query = message
     if approach == 'rto':
-        final_response = round_trip_optimization(system_prompt, initial_query, client, model)
+        final_response, _ = round_trip_optimization(system_prompt, initial_query, client, model)
     elif approach == 'z3':
         z3_solver = Z3SolverSystem(system_prompt, client, model)
-        final_response = z3_solver.process_query(initial_query)
+        final_response, _ = z3_solver.process_query(initial_query)
     elif approach == "self_consistency":
-        final_response = advanced_self_consistency_approach(system_prompt, initial_query, client, model)
+        final_response, _ = advanced_self_consistency_approach(system_prompt, initial_query, client, model)
     elif approach == "rstar":
         rstar = RStar(system_prompt, client, model)
-        final_response = rstar.solve(initial_query)
+        final_response, _ = rstar.solve(initial_query)
     elif approach == "cot_reflection":
-        final_response = cot_reflection(system_prompt, initial_query, client, model)
+        final_response, _ = cot_reflection(system_prompt, initial_query, client, model)
     elif approach == 'plansearch':
-        final_response = plansearch(system_prompt, initial_query, client, model)[0]
+        final_response, _ = plansearch(system_prompt, initial_query, client, model)[0]
     elif approach == 'leap':
-        final_response = leap(system_prompt, initial_query, client, model)
+        final_response, _ = leap(system_prompt, initial_query, client, model)
     elif approach == 're2':
-        final_response = re2_approach(system_prompt, initial_query, client, model)
+        final_response, _ = re2_approach(system_prompt, initial_query, client, model)

     return final_response

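The functional change in this hunk is that every approach helper is now expected to return a tuple, and respond() unpacks it and keeps only the first element. A minimal sketch of that calling convention, using an entirely hypothetical stand-in function (the real optillm approach implementations are not shown in this diff, and the second tuple element is assumed to be metadata such as a completion-token count):

# Hypothetical stand-in for an approach function; illustrates the assumed
# (response, metadata) return shape that the new "final_response, _ = ..."
# call sites rely on. Not the actual optillm implementation.
def fake_approach(system_prompt, initial_query, client=None, model="demo-model"):
    response = f"[{model}] answer to: {initial_query}"
    completion_tokens = len(response.split())  # assumed metadata (e.g. token count)
    return response, completion_tokens

# Old call site: final_response = fake_approach(...)    -> would now bind the whole tuple
# New call site: final_response, _ = fake_approach(...) -> unpacks, discards metadata
final_response, _ = fake_approach("You are helpful.", "What is 2+2?")
print(final_response)
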
@@ -79,11 +79,11 @@ def respond(message, history, model, approach, system_message, max_tokens, tempe

 def create_model_dropdown():
     return gr.Dropdown(
-        ["…
-        …
-        …
+        [ "meta-llama/llama-3.1-8b-instruct:free", "nousresearch/hermes-3-llama-3.1-405b:free",
+         "mistralai/mistral-7b-instruct:free","mistralai/pixtral-12b:free",
+         "qwen/qwen-2-7b-instruct:free", "qwen/qwen-2-vl-7b-instruct:free", "google/gemma-2-9b-it:free", "google/gemini-flash-8b-1.5-exp",
         "google/gemini-flash-1.5-exp", "google/gemini-pro-1.5-exp"],
-        value="…
+        value="meta-llama/llama-3.1-8b-instruct:free", label="Model"
     )

 def create_approach_dropdown():
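The updated dropdown lists free OpenRouter model IDs and pre-selects meta-llama/llama-3.1-8b-instruct:free. A minimal usage sketch, assuming the dropdown is wired into a gr.ChatInterface as an additional input (the Space's actual layout, and the other inputs such as the approach dropdown, are not shown here):

import gradio as gr

def create_model_dropdown():
    # Shortened choice list for illustration; the commit above registers the full set.
    return gr.Dropdown(
        ["meta-llama/llama-3.1-8b-instruct:free", "google/gemma-2-9b-it:free"],
        value="meta-llama/llama-3.1-8b-instruct:free", label="Model",
    )

def respond(message, history, model):
    # Placeholder echo; the real app dispatches to one of the optimization approaches.
    return f"({model}) {message}"

demo = gr.ChatInterface(respond, additional_inputs=[create_model_dropdown()])

if __name__ == "__main__":
    demo.launch()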