Shiv1143 committed
Commit 25f445e · verified · 1 Parent(s): b4871fc

Upload evaluation.py

Files changed (1)
  1. src/evaluation.py +28 -0
src/evaluation.py ADDED
@@ -0,0 +1,28 @@
+ from src.model import compare_candidates
+
+ def evaluate_model(openai, data):
+     correct_predictions = 0
+     count = 0
+
+     for index, row in data.iterrows():
+         count += 1
+         candidateA = {
+             'resume': row['candidateAResume'],
+             'transcript': row['candidateATranscript']
+         }
+         candidateB = {
+             'resume': row['candidateBResume'],
+             'transcript': row['candidateBTranscript']
+         }
+         role = row['role']
+
+         prediction = compare_candidates(openai, candidateA, candidateB, role)
+
+         if prediction:
+             if (prediction == 'Candidate A' and row['winnerId'] == row['candidateAId']) or \
+                (prediction == 'Candidate B' and row['winnerId'] == row['candidateBId']):
+                 correct_predictions += 1
+         print('current_accuracy:', correct_predictions / count)
+
+     accuracy = correct_predictions / len(data)
+     return accuracy
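
For context, a minimal sketch of how this evaluator might be driven, assuming `data` is a pandas DataFrame whose columns match the names used above and that `openai` is an OpenAI v1 client instance. The client type, the example row, and this driver script are illustrative assumptions, not part of this commit; src/model.py (which defines compare_candidates) is also not included here.

    # Hypothetical driver (not part of this commit). Assumes src/model.py
    # provides compare_candidates and that it returns 'Candidate A',
    # 'Candidate B', or a falsy value when no decision could be parsed.
    import pandas as pd
    from openai import OpenAI
    from src.evaluation import evaluate_model

    data = pd.DataFrame([{
        'candidateAId': 'a1',
        'candidateAResume': '...',
        'candidateATranscript': '...',
        'candidateBId': 'b1',
        'candidateBResume': '...',
        'candidateBTranscript': '...',
        'role': 'Backend Engineer',
        'winnerId': 'a1',  # ground truth: id of the candidate who actually won
    }])

    client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
    print('final accuracy:', evaluate_model(client, data))

Note the design choice in the loop: a falsy prediction counts as incorrect rather than being skipped, and a running accuracy is printed after every row, so the printed figure divides by the rows seen so far while the returned value divides by len(data).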