# Candidate_Selection/src/evaluation.py
from src.model import compare_candidates


def evaluate_model(openai, data):
    """Score pairwise candidate predictions against the labeled winners."""
    correct_predictions = 0
    count = 0
    for index, row in data.iterrows():
        count += 1
        candidateA = {
            'resume': row['candidateAResume'],
            'transcript': row['candidateATranscript']
        }
        candidateB = {
            'resume': row['candidateBResume'],
            'transcript': row['candidateBTranscript']
        }
        role = row['role']
        prediction = compare_candidates(openai, candidateA, candidateB, role)
        # Count a hit only when the model's pick matches the labeled winner.
        if prediction:
            if (prediction == 'Candidate A' and row['winnerId'] == row['candidateAId']) or \
               (prediction == 'Candidate B' and row['winnerId'] == row['candidateBId']):
                correct_predictions += 1
        # Running accuracy over the rows processed so far.
        print('current_accuracy:', correct_predictions / count)
    # Final accuracy over the full dataset; rows with no prediction count as misses.
    accuracy = correct_predictions / len(data)
    return accuracy
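

# A minimal usage sketch, assuming the labeled pairs live in a CSV with the
# columns read above and that compare_candidates expects an OpenAI client
# object. The file path and client setup here are illustrative assumptions,
# not part of the original pipeline.
if __name__ == '__main__':
    import pandas as pd
    from openai import OpenAI

    client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
    pairs = pd.read_csv('data/candidate_pairs.csv')  # hypothetical path
    print('final accuracy:', evaluate_model(client, pairs))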