IliaLarchenko commited on
Commit
56bfed7
1 Parent(s): 1aaf3fc

Added some analytics functions

Browse files
Files changed (1) hide show
  1. tests/analysis.py +142 -0
tests/analysis.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # to use analytics tools you need to install some extra libraries
2
+ # !pip install pandas
3
+
4
+ from tests.candidate import complete_interview
5
+ from tests.grader import grade
6
+ import pandas as pd
7
+ from functools import partial
8
+ import concurrent.futures
9
+ import os
10
+ from IPython.display import display
11
+
12
+
13
def complete_and_grade(interview_params, exp_name="GPT4", grader_model="gpt-4-turbo", candidate_model="gpt-3.5-turbo"):
    """Run one interview attempt and grade it, returning the grading feedback.

    Args:
        interview_params: tuple of (interview_type, attempt_num) identifying the task.
        exp_name: experiment name used to locate/store the interview record.
        grader_model: model name passed to the grader.
        candidate_model: model name used to simulate the candidate.

    Returns:
        dict: the feedback produced by `grade`, or an empty dict if the
        interview or the grading failed in any way. Callers rely on an empty
        dict (falsy) to detect failures.
    """
    interview_type, attempt_num = interview_params
    feedback = {}

    try:
        file_path, _ = complete_interview(interview_type, exp_name, model=candidate_model)
        feedback = grade(file_path, grader_model)

        # Just a heuristic check of the JSON format TODO: add a proper check
        if "problem_statement_topic" not in feedback:
            raise ValueError("Grading failed")

        print(f"Attempt {attempt_num + 1} of {interview_type} completed successfully")
        print(f"Overall score: {feedback['overall_score']}")

    except Exception as e:
        # Best-effort batch runner: report the failure and continue.
        print(f"Attempt {attempt_num + 1} of {interview_type} failed with error: {e}")
        # Bug fix: reset feedback so a malformed grading dict (assigned above
        # before the heuristic check raised) is not returned as a "success" —
        # run_evaluation filters results with a truthiness check.
        feedback = {}

    return feedback
32
+
33
+
34
def run_evaluation(
    exp_name,
    num=5,
    interview_types=("ml_design", "math", "ml_theory", "system_design", "sql", "coding"),
    grader_model="gpt-4-turbo",
    candidate_model="gpt-3.5-turbo",
    num_workers=3,
):
    """Run `num` graded interview attempts per interview type, in parallel.

    Results are written to `records/<exp_name>_<timestamp>/results.csv`.

    Args:
        exp_name: base experiment name; a timestamp suffix is appended so
            repeated runs never collide on disk.
        num: number of attempts per interview type.
        interview_types: iterable of interview type names to evaluate.
            (Default is a tuple — a mutable default list was a bug-prone idiom.)
        grader_model: model name passed to the grader.
        candidate_model: model name used to simulate the candidate.
        num_workers: thread pool size for concurrent interview runs.

    Returns:
        str: the timestamped experiment name (the directory under `records/`).
    """
    exp_name = f"{exp_name}_{pd.Timestamp.now().strftime('%Y-%m-%d_%H-%M-%S')}"
    os.makedirs(f"records/{exp_name}", exist_ok=True)
    tasks = [(interview_type, i) for i in range(num) for interview_type in interview_types]
    complete_f = partial(complete_and_grade, exp_name=exp_name, grader_model=grader_model, candidate_model=candidate_model)
    # Interviews are I/O-bound (API calls), so threads overlap the waits.
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
        results = list(executor.map(complete_f, tasks))

    # Filter out empty results (failed attempts) and count them
    non_empty_results = [res for res in results if res]
    empty_count = len(results) - len(non_empty_results)

    print(f"Number of empty results (errors or failed grading): {empty_count}")

    # Store non-empty results in a DataFrame
    df = pd.DataFrame(non_empty_results)
    df.to_csv(os.path.join("records", exp_name, "results.csv"), index=False)

    return exp_name
60
+
61
+
62
def highlight_color(val):
    """Map a score to a CSS `color:` style string for pandas Styler cells."""
    # Thresholds ascending: < 0.7 red, < 0.9 orange, < 0.95 lightgreen, else green.
    if val < 0.7:
        shade = "red"
    elif val < 0.9:
        shade = "orange"
    elif val < 0.95:
        shade = "lightgreen"
    else:
        shade = "green"
    return f"color: {shade}"
65
+
66
+
67
def generate_and_display_tables(df):
    """Build, display, and return a set of styled summary tables for evaluation results.

    Args:
        df: DataFrame of grading results. Expected to contain an "agent_llm"
            column, a "type" column, an "overall_score" column, and numeric
            score columns whose names start with "problem", "interviewer",
            or "feedback" (one column per grading criterion).
            TODO confirm exact schema against the grader output.

    Returns:
        dict: name -> pandas Styler for each generated table; every table is
        also rendered with IPython `display`.
    """
    # Bug fix: work on a copy — the original mutated the caller's DataFrame
    # by adding per-stage mean columns (df[prefix] = ...) as a side effect.
    df = df.copy()

    # Grouping by prefix: each prefix is one grading "stage".
    prefixes = ["problem", "interviewer", "feedback"]
    prefix_columns = [col for col in df.columns if any(col.startswith(prefix) for prefix in prefixes)]

    # Aggregated scores per stage (mean of per-row means across that stage's columns)
    grouped_scores = {}
    for prefix in prefixes:
        prefix_cols = [col for col in df.columns if col.startswith(prefix)]
        grouped_scores[prefix] = df[prefix_cols].mean(axis=1).mean()

    grouped_scores_df = pd.DataFrame([grouped_scores]).T
    grouped_scores_df.columns = ["avg score"]
    grouped_scores_styled = grouped_scores_df.style.map(highlight_color)
    grouped_scores_styled.set_caption("Aggregated Scores per Stage")

    # Grouped by unique type
    grouped_by_type = pd.DataFrame(df.groupby("type")[prefix_columns].mean().mean(axis=1), columns=["avg score"])
    grouped_by_type_styled = grouped_by_type.style.map(highlight_color)
    grouped_by_type_styled.set_caption("Scores Grouped by Unique Type")

    # Grouped by unique interviewer model and sorted by descending total score
    total_llm_scores = df.groupby("agent_llm")[prefix_columns].mean().mean(axis=1).sort_values(ascending=False)
    grouped_by_interviewer = pd.DataFrame(total_llm_scores, columns=["avg score"])
    grouped_by_interviewer_styled = grouped_by_interviewer.style.map(highlight_color)
    grouped_by_interviewer_styled.set_caption("Scores Grouped by Unique Interviewer Model")

    # Collapse each stage's columns into a single per-row mean column (on the copy)
    for prefix in prefixes:
        prefix_cols = [col for col in prefix_columns if col.startswith(prefix)]
        df[prefix] = df[prefix_cols].mean(axis=1)

    # Pivot table: Agent model vs Stage (rows ordered by descending total score)
    pivot1 = pd.pivot_table(df, values=prefixes, index="agent_llm", aggfunc="mean").reindex(total_llm_scores.index)
    pivot1_styled = pivot1.style.map(highlight_color)
    pivot1_styled.set_caption("Pivot Table: Agent Model vs Stage")

    # Pivot table: Agent model vs Type (single aggregated score per type)
    pivot2 = pd.pivot_table(df, values="overall_score", index="agent_llm", columns="type", aggfunc="mean").reindex(total_llm_scores.index)
    pivot2_styled = pivot2.style.map(highlight_color)
    pivot2_styled.set_caption("Pivot Table: Agent Model vs Type")

    # Pivot table: Type vs Stage
    pivot3 = pd.pivot_table(df, values=prefixes, index="type", aggfunc="mean")
    pivot3_styled = pivot3.style.map(highlight_color)
    pivot3_styled.set_caption("Pivot Table: Type vs Stage")

    # Pivot table: Agent Model x Stage vs Type (MultiIndex rows)
    multi_index_data = [(llm, stage) for llm in total_llm_scores.index for stage in prefixes]
    multi_index = pd.MultiIndex.from_tuples(multi_index_data, names=["agent_llm", "stage"])
    types = df["type"].unique()
    pivot4_df = pd.DataFrame(index=multi_index, columns=types)

    # Fill the DataFrame with the aggregated scores grouped by type
    for llm in total_llm_scores.index:
        for stage in prefixes:
            mask = df["agent_llm"] == llm
            stage_values = df.loc[mask, ["type", stage]].groupby("type").mean()[stage]
            pivot4_df.loc[(llm, stage), :] = stage_values

    pivot4_styled = pivot4_df.style.map(highlight_color)
    pivot4_styled.set_caption("Pivot Table: Agent Model x Stage vs Type")

    tables_dict = {
        "grouped_scores_styled": grouped_scores_styled,
        "grouped_by_type_styled": grouped_by_type_styled,
        "grouped_by_interviewer_styled": grouped_by_interviewer_styled,
        "pivot1_styled": pivot1_styled,
        "pivot2_styled": pivot2_styled,
        "pivot3_styled": pivot3_styled,
        "pivot4_styled": pivot4_styled,
    }

    for table in tables_dict.values():
        display(table)

    return tables_dict