File size: 945 Bytes
25f445e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
from src.model import compare_candidates

def evaluate_model(openai, data):
    """Score compare_candidates against a labelled pairwise dataset.

    For each row of *data*, asks the model to pick the stronger of two
    candidates and checks the pick against the recorded winner.

    Args:
        openai: Client object forwarded unchanged to ``compare_candidates``.
        data: pandas DataFrame with columns 'candidateAResume',
            'candidateATranscript', 'candidateBResume',
            'candidateBTranscript', 'role', 'winnerId', 'candidateAId',
            and 'candidateBId'.

    Returns:
        Fraction of rows (0.0–1.0) where the prediction matched the
        recorded winner. Returns 0.0 for an empty dataset instead of
        raising ZeroDivisionError.
    """
    if len(data) == 0:
        # Fix: the final division below raised ZeroDivisionError on empty input.
        return 0.0

    correct_predictions = 0

    # enumerate(start=1) replaces the hand-incremented `count` variable.
    for count, (_, row) in enumerate(data.iterrows(), start=1):
        candidate_a = {
            'resume': row['candidateAResume'],
            'transcript': row['candidateATranscript'],
        }
        candidate_b = {
            'resume': row['candidateBResume'],
            'transcript': row['candidateBTranscript'],
        }

        prediction = compare_candidates(openai, candidate_a, candidate_b, row['role'])

        # A falsy prediction (e.g. None on model/API failure) counts as incorrect.
        if prediction:
            picked_a = prediction == 'Candidate A' and row['winnerId'] == row['candidateAId']
            picked_b = prediction == 'Candidate B' and row['winnerId'] == row['candidateBId']
            if picked_a or picked_b:
                correct_predictions += 1

        # Running accuracy so long evaluation runs show progress.
        print('current_accuracy:', correct_predictions / count)

    return correct_predictions / len(data)