Updating Ranker Agent
Agents/rankerAgent.py  +78 -100
@@ -2,84 +2,86 @@ import json
 import os
 from together import Together
 
-def rerank_best_answer(json_files, config_file='config.json', model="meta-llama/Llama-3-
-    # Load API key from configuration file
-    together_ai_key = os.getenv("TOGETHER_AI")
-    if not together_ai_key:
-        raise ValueError("TOGETHER_AI environment variable not found. Please set it before running the script.")
-
-    # Initialize Together client
-    client = Together(api_key=together_ai_key)
-
-    # Combine all JSON files into a single structure
-    combined_prompts = {}
-    for json_file in json_files:
-        with open(json_file, 'r') as file:
-            data = json.load(file)
-
-        # Format the input for the prompt
-        for item in data:
-            query_id = item['query_id']
-            if query_id not in combined_prompts:
-                combined_prompts[query_id] = {
-                    "question": item['input'],
-                    "answers": {}
-                }
-            combined_prompts[query_id]["answers"][json_file] = item['response']
-
-    responses = []
-
-    for query_id, prompt in combined_prompts.items():
-        # Generate the prompt text
-        prompt_text = f"""Input JSON:
-{json.dumps(prompt, indent=4)}
-
-For the above question, identify which model gave the best response based on accuracy. Ensure the chosen response is an answer and not a follow-up question. Provide the output in the format:
-{{
-    "best_model": "<model_name>",
-    "best_answer": "<answer>"
-}}
-Just output this JSON and nothing else.
-"""
-
-        # Generate response from Together API
-        response = client.chat.completions.create(
-            model=model,
-            messages=[{"role": "user", "content": prompt_text}],
-        )
-        response_content = response.choices[0].message.content
-        # print(response_content)
-
-        prompt_text_extract_bestModel = f"""Content:
-{response_content}
-
-Whats the best_model from above?
-"""
-        prompt_text_extract_bestAnswer = f"""Content:
-{response_content}
-
-Whats the best_answer from above?
-"""
-        print(prompt_text_extract_bestModel)
-        print(prompt_text_extract_bestAnswer)
-        response_bestModel = client.chat.completions.create(
-            model=model,
-            messages=[{"role": "user", "content": prompt_text_extract_bestModel}],
-        )
-        response_bestAnswer = client.chat.completions.create(
-            model=model,
-            messages=[{"role": "user", "content": prompt_text_extract_bestAnswer}],
-        )
-
-        # print({"query_id": query_id, "question": prompt["question"], "Ranker_Output": response.choices[0].message.content})
-        responses.append({"query_id": query_id, "question": prompt["question"], "best_model": response_bestModel.choices[0].message.content, "best_answer": response_bestAnswer.choices[0].message.content})
-
-        print(response_bestModel.choices[0].message.content)
-
-    return responses
-
-def rankerAgent(prompt, config_file='config.json', model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"):
+# def rerank_best_answer(json_files, config_file='config.json', model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"):
+#     # Load API key from configuration file
+#     together_ai_key = os.getenv("TOGETHER_AI")
+#     if not together_ai_key:
+#         raise ValueError("TOGETHER_AI environment variable not found. Please set it before running the script.")
+
+#     # Initialize Together client
+#     client = Together(api_key=together_ai_key)
+
+#     # Combine all JSON files into a single structure
+#     combined_prompts = {}
+#     for json_file in json_files:
+#         with open(json_file, 'r') as file:
+#             data = json.load(file)
+
+#         # Format the input for the prompt
+#         for item in data:
+#             query_id = item['query_id']
+#             if query_id not in combined_prompts:
+#                 combined_prompts[query_id] = {
+#                     "question": item['input'],
+#                     "answers": {}
+#                 }
+#             combined_prompts[query_id]["answers"][json_file] = item['response']
+
+#     responses = []
+
+#     for query_id, prompt in combined_prompts.items():
+#         # Generate the prompt text
+#         prompt_text = f"""Input JSON:
+# {json.dumps(prompt, indent=4)}
+
+# For the above question, identify which model gave the best response based on accuracy. Ensure the chosen response is an answer and not a follow-up question. Provide the output in the format:
+# {{
+#     "best_model": "<model_name>",
+#     "best_answer": "<answer>"
+# }}
+# Just output this JSON and nothing else.
+# """
+
+#         # Generate response from Together API
+#         response = client.chat.completions.create(
+#             model=model,
+#             messages=[{"role": "user", "content": prompt_text}],
+#         )
+#         response_content = response.choices[0].message.content
+#         # # print(response_content)
+
+#         prompt_text_extract_bestModel = f"""Content:
+# {response_content}
+
+# Whats the best_model from above?
+# """
+#         prompt_text_extract_bestAnswer = f"""Content:
+# {response_content}
+
+# Whats the best_answer from above?
+# """
+#         print(prompt_text_extract_bestModel)
+#         print(prompt_text_extract_bestAnswer)
+#         response_bestModel = client.chat.completions.create(
+#             model=model,
+#             messages=[{"role": "user", "content": prompt_text_extract_bestModel}],
+#         )
+#         response_bestAnswer = client.chat.completions.create(
+#             model=model,
+#             messages=[{"role": "user", "content": prompt_text_extract_bestAnswer}],
+#         )
+
+#         # # print({"query_id": query_id, "question": prompt["question"], "Ranker_Output": response.choices[0].message.content})
+#         responses.append({"query_id": query_id, "question": prompt["question"], "best_model": response_bestModel.choices[0].message.content, "best_answer": response_bestAnswer.choices[0].message.content})
+
+#         print(response_bestModel.choices[0].message.content)
+
+#     return responses
+
+
+def rankerAgent(prompt, model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"):
     # Load API key from configuration file
     together_ai_key = os.getenv("TOGETHER_AI")
     if not together_ai_key:
@@ -92,7 +94,7 @@ def rankerAgent(prompt, config_file='config.json', model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"):
     prompt_text = f"""Input JSON:
 {json.dumps(prompt, indent=4)}
 
-For the above question, identify which model gave the best response based on accuracy. Ensure the chosen response is an answer and not a follow-up question. Provide the output in the format:
+For the above question, identify which model gave the best response based on accuracy. Ensure the chosen response is an answer and not a follow-up question. The best_answer should be from the best_model only, as given in the above content. Provide the output in the format:
 {{
     "best_model": "<model_name>",
     "best_answer": "<answer>"
@@ -100,6 +102,7 @@ For the above question, identify which model gave the best response based on accuracy. Ensure the chosen response is an answer and not a follow-up question. Provide the output in the format:
 Just output this JSON and nothing else.
 """
 
+
     # Generate response from Together API
     response = client.chat.completions.create(
         model=model,
@@ -108,15 +111,15 @@ Just output this JSON and nothing else.
     response_content = response.choices[0].message.content
     # print(response_content)
 
-    prompt_text_extract_bestModel = f"""
-{
+    prompt_text_extract_bestModel = f"""Content:
+{response_content}
 
-Just Output the best_model from above JSON and nothing else.
+Whats the best_model from above?
 """
-    prompt_text_extract_bestAnswer = f"""
-{
+    prompt_text_extract_bestAnswer = f"""Content:
+{response_content}
 
-Just Output the best_answer from above JSON and nothing else.
+Whats the best_answer from above?
 """
     response_bestModel = client.chat.completions.create(
         model=model,
@@ -129,28 +132,3 @@ Just Output the best_answer from above JSON and nothing else.
 
     return response_bestModel.choices[0].message.content, response_bestAnswer.choices[0].message.content
 
-
-# # Usage example
-# json_files = ["../QnA_Eval/Responses/BOW_1_2_top_100_response.json",
-#               "../QnA_Eval/Responses/BOW_1_2_top_100_modified_response.json",
-#               "../QnA_Eval/Responses/tf-idf_1_2_top_100_response.json",
-#               "../QnA_Eval/Responses/tf-idf_1_2_top_100_modified_response.json",
-#               "../QnA_Eval/Responses/bm25_1_2_top_100_response.json",
-#               "../QnA_Eval/Responses/bm25_1_2_top_100_modified_response.json",
-#               "../QnA_Eval/Responses/open_source_1_2_top_100_response.json",
-#               "../QnA_Eval/Responses/open_source_1_2_top_100_modified_response.json",
-#               "../QnA_Eval/Responses/vision_1_2_top_100_response.json",
-#               "../QnA_Eval/Responses/vision_1_2_top_100_modified_response.json",
-#               "../QnA_Eval/Responses/ZeroShot_response.json",
-#               "../QnA_Eval/Responses/WikiAgent_response.json",
-#               "../QnA_Eval/Responses/WikiAgent_response_modified.json",
-#               "../QnA_Eval/Responses/LlamaAgent_response.json",
-#               "../QnA_Eval/Responses/LlamaAgent_response_modified.json",
-#               "../QnA_Eval/Responses/tf_idf_bm25_open_1_2_top_100_combined_response.json", "../QnA_Eval/Responses/tf_idf_bm25_open_1_2_top_100_combined_modified_response.json", "../QnA_Eval/Responses/tf_idf_bm25_open_1_2_top_100_combined_both_response.json"]
-
-# config_file = "../config.json"
-
-# result = rerank_best_answer(json_files, config_file)
-
-# with open("reranked_best_answers_1_2.json", 'w') as file:
-#     json.dump(result, file, indent=4, ensure_ascii=False)
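Usage note: after this change, rankerAgent no longer takes a config_file argument; it needs only the TOGETHER_AI environment variable and a caller-assembled prompt. Below is a minimal, hypothetical sketch of calling the updated function. The {"question": ..., "answers": {...}} shape mirrors what the now-commented rerank_best_answer built per query_id; the import path and the example question and answers are assumptions for illustration, not part of this commit.

import json

from Agents.rankerAgent import rankerAgent  # assumed import path for this repo layout

# rankerAgent reads the Together API key from the TOGETHER_AI environment
# variable; set it before running, e.g. `export TOGETHER_AI=<your-key>`.

# Hypothetical input: one question plus candidate answers keyed by the system
# that produced them, mirroring the {"question": ..., "answers": {...}}
# structure the commented-out rerank_best_answer used to build per query_id.
prompt = {
    "question": "Who wrote The Selfish Gene?",
    "answers": {
        "bm25_response": "Richard Dawkins wrote The Selfish Gene (1976).",
        "zero_shot_response": "Do you mean the 1976 book or a later edition?",
    },
}

# Returns the extracted best_model and best_answer strings as a tuple.
best_model, best_answer = rankerAgent(prompt)
print(json.dumps({"best_model": best_model, "best_answer": best_answer}, indent=4))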