IliaLarchenko committed
Commit: 269ec40
Parent(s): 572f802

Added more functions for analytics

tests/analysis.py CHANGED (+149 -1)
@@ -1,13 +1,20 @@
+# This file contains some functions I use for automated analysis and evaluation
+# It is not used in the main functionality of the service
+# It is quite messy so far
 # to use analytics tools you need to install some extra libraries
 # !pip install pandas
 
 from tests.candidate import complete_interview
 from tests.grader import grade
 import pandas as pd
+import numpy as np
 from functools import partial
 import concurrent.futures
 import os
-from IPython.display import display
+from IPython.display import Markdown, display
+from openai import OpenAI
+from tests.testing_prompts import feedback_analyzer
+from resources.prompts import prompts, base_prompts
 
 
 def complete_and_grade(interview_params, exp_name="GPT4", grader_model="gpt-4-turbo", candidate_model="gpt-3.5-turbo"):
@@ -145,3 +152,144 @@ def generate_and_display_tables(df):
         display(table)
 
     return tables_dict
+
+
+def filter_df(df, prefixes=["problem", "interviewer", "feedback"]):
+    # Identify all columns starting with any of the prefixes
+    columns_to_check = [col for col in df.columns if any(col.startswith(prefix) for prefix in prefixes)]
+
+    # Check whether a value is a boolean, None/NaN, or a string representation of one
+    def is_valid_value(val):
+        return isinstance(val, bool) or val is None or val is np.nan or val in {"True", "False", "None", "NaN"}
+
+    # Convert string representations to actual booleans
+    def to_bool(val):
+        if val == "True":
+            return True
+        elif val == "False":
+            return False
+        elif val == "None":
+            return None
+        return val
+
+    # Check if all values in the specified columns are valid
+    def all_values_valid(row):
+        return all(is_valid_value(row[col]) for col in columns_to_check)
+
+    # Apply filtering to keep only rows with valid values
+    valid_df = df[df.apply(all_values_valid, axis=1)].copy()
+
+    # Convert string representations to booleans
+    for col in columns_to_check:
+        valid_df[col] = valid_df[col].apply(to_bool)
+
+    # Identify removed rows
+    removed_rows = df[~df.index.isin(valid_df.index)]
+
+    # Print the number of rows removed
+    num_removed = len(removed_rows)
+    print(f"Number of rows removed: {num_removed}")
+
+    # Print the "file_name" value for each removed row, or None if the column is absent
+    if "file_name" in removed_rows.columns:
+        for value in removed_rows["file_name"].tolist():
+            print(f"Removed row file_name: {value}")
+    else:
+        print("Removed row file_name: None")
+
+    return valid_df
+
+
+def generate_analysis_report(df, folder, focus=None, model="gpt-4-turbo"):
+
+    client = OpenAI(base_url="https://api.openai.com/v1")
+
+    all_comments = "\n\n".join([f"Interview type: {t}. Feedback: {str(f)}" for t, f in zip(df["type"].values, df["comments"].values)])
+
+    messages = [
+        {"role": "system", "content": feedback_analyzer},
+        {"role": "user", "content": f"Interview feedback: {all_comments}"},
+    ]
+
+    if focus:
+        messages.append({"role": "user", "content": f"Focus only on comments about {focus} part of the interview"})
+
+    response = client.chat.completions.create(model=model, messages=messages, temperature=1)
+
+    comments_analysis = response.choices[0].message.content
+    display(Markdown(comments_analysis))
+
+    if folder is not None:
+        with open(os.path.join(folder, "analysis.md"), "w") as f:
+            f.write(comments_analysis)
+            f.write("\n\n")
+            for t in np.unique(df["type"]):
+                f.write(f"Type: {t}\n")
+                f.write(df[[c for c in df.columns if c != "comments"]][df["type"] == t].T.to_markdown())
+                f.write("\n\n")
+            f.write(f"Type: all\n")
+            f.write("\n\n")
+            f.write("Feedback:\n")
+            f.write(all_comments)
+
+    return comments_analysis
+
+
+def analyze_and_improve_segment(df, segment_to_improve=None):
+
+    sorted_stages = df[["problem", "interviewer", "feedback"]].mean().sort_values()
+    if not segment_to_improve:
+        segment_to_improve = sorted_stages.index[0]
+    th_score = sorted_stages.iloc[0] + 0.1
+
+    print(f"Let's try to improve {segment_to_improve}")
+    print(f"Quality threshold {th_score}")
+
+    # Identifying types that need improvement
+    type_stage_scores = df.groupby("type")[segment_to_improve].mean()
+    types_to_improve = []
+    for t, s in type_stage_scores.items():
+        if s < th_score:
+            types_to_improve.append(t)
+
+    print(f"We will focus on {types_to_improve}")
+
+    # Filtering the DataFrame based on identified types and scoring criteria
+    filtered_df = df[df["type"].apply(lambda x: x in types_to_improve)]
+    prefix_columns = [col for col in df.columns if col.startswith(segment_to_improve)]
+    filtered_df = filtered_df[filtered_df[prefix_columns].mean(axis=1) < th_score]
+
+    # Generating an analysis report
+    comments_analysis = generate_analysis_report(filtered_df, None, focus=segment_to_improve, model="gpt-4-turbo")
+
+    # Constructing the improvement prompt
+    improvement_prompt = """You want to improve the prompts for an LLM interviewer.
+Below you will see some of the prompts that are used right now.
+As well as a summary of mistakes that the interviewer makes.
+You can add 1-3 lines to each of the prompts if needed, but you can't change or remove anything.
+"""
+
+    # Selecting the base prompt for the segment to improve
+    base_prompt = base_prompts.get(f"base_{segment_to_improve}", "Base prompt not found for the segment")
+
+    # Constructing the current prompts display
+    current_prompts = "The current prompts are below. \n"
+    current_prompts += "BASE PROMPT (applied to all interview types): \n"
+    current_prompts += base_prompt + "\n"
+
+    for k, v in prompts.items():
+        if segment_to_improve in k:
+            current_prompts += f"{k}: {v[len(base_prompt):]} \n\n"
+
+    # Making the API call to OpenAI
+    client = OpenAI(base_url="https://api.openai.com/v1")
+    model = "gpt-4-turbo"
+    messages = [
+        {"role": "system", "content": improvement_prompt},
+        {"role": "user", "content": current_prompts},
+        {"role": "user", "content": f"Interview feedback: {comments_analysis}"},
+        {"role": "user", "content": "Please return any additional instructions you would like to add to any of the prompts."},
+    ]
+
+    response = client.chat.completions.create(model=model, messages=messages, temperature=1).choices[0].message.content
+    print(response)
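
`filter_df` keeps only rows whose grading columns (by default, any column starting with `problem`, `interviewer`, or `feedback`) contain booleans, None/NaN, or their string forms, converts the string forms back to real values, and prints what it dropped. A minimal sketch of how it behaves on toy data (hypothetical values; assumes the repo root is on the import path so `tests.analysis` resolves):

```python
# Toy data only; real DataFrames come from graded interview runs.
import pandas as pd

from tests.analysis import filter_df

df = pd.DataFrame(
    {
        "file_name": ["a.json", "b.json", "c.json"],
        "problem_clear": [True, "False", "maybe"],  # "maybe" is invalid, so c.json is dropped
        "feedback_useful": ["True", None, False],
        "type": ["coding", "coding", "system_design"],
    }
)

valid_df = filter_df(df)  # prints "Number of rows removed: 1" and "Removed row file_name: c.json"
print(valid_df)           # the surviving string values are now real booleans/None
```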
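
`generate_analysis_report` joins every feedback comment into one message, asks an OpenAI chat model (seeded with the `feedback_analyzer` system prompt) to summarize recurring problems, renders the summary as Markdown, and, when a folder is passed, also writes it to `analysis.md` along with per-type tables. A hedged driver sketch, assuming `OPENAI_API_KEY` is set in the environment and using made-up comments:

```python
# Illustrative only: toy comments, and the API call costs tokens.
import pandas as pd

from tests.analysis import generate_analysis_report

df = pd.DataFrame(
    {
        "type": ["coding", "system_design"],
        "comments": [
            "Interviewer hinted at the full solution too early.",
            "Feedback skipped the complexity analysis entirely.",
        ],
    }
)

# Returns the model's summary as a string; pass a folder path instead of
# None to also save analysis.md with the per-type tables appended.
analysis = generate_analysis_report(df, folder=None, focus="feedback")
```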
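
`analyze_and_improve_segment` ties the pieces together: it picks the stage (`problem`, `interviewer`, or `feedback`) with the lowest mean score, keeps only interview types scoring below that minimum plus 0.1, runs a focused `generate_analysis_report` over them, and then asks the model for 1-3 lines to append to the matching prompts from `resources.prompts`. A toy end-to-end sketch under the same assumptions (the scores and comments are invented):

```python
# Invented scores/comments; a real run uses the graded results DataFrame.
import pandas as pd

from tests.analysis import analyze_and_improve_segment

df = pd.DataFrame(
    {
        "type": ["coding", "coding", "system_design"],
        "problem": [1.0, 0.8, 0.9],
        "interviewer": [0.4, 0.5, 0.9],  # lowest mean, so this stage gets improved
        "feedback": [0.9, 0.9, 0.8],
        "comments": [
            "Interviewer kept interrupting the candidate.",
            "Hints were far too direct.",
            "Good pacing overall.",
        ],
    }
)

# Prints the chosen stage ("interviewer"), the 0.7 quality threshold,
# the focused analysis, and the model's suggested prompt additions.
analyze_and_improve_segment(df)
```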