File size: 2,934 Bytes
06da8b9
 
 
 
59d0bec
06da8b9
34250bf
06da8b9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34250bf
06da8b9
34250bf
06da8b9
59d0bec
 
 
 
 
 
ce0412b
06da8b9
 
ce0412b
06da8b9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59d0bec
06da8b9
 
 
 
 
 
59d0bec
 
 
34250bf
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import json

def recommend_jobs_for_input_skills(input_hard_skills, input_soft_skills, input_major, jobs, tfidf_vectorizer_skills, tfidf_vectorizer_majors, companies_skills_vec, companies_majors_vec):
    """Return the top-3 'Major' values of the companies most similar to the input.

    The applicant's hard- and soft-skill texts are transformed with the
    fitted skills vectorizer and averaged into a single query vector; the
    major text is transformed separately. Cosine similarity against the
    company matrices is blended with equal weight (skills vs. major) and
    companies are ranked by the blended score.
    """
    hard_vec = tfidf_vectorizer_skills.transform([input_hard_skills])
    soft_vec = tfidf_vectorizer_skills.transform([input_soft_skills])
    major_vec = tfidf_vectorizer_majors.transform([input_major])

    # Equal-weight blend of the two skill vectors into one query vector.
    skills_query = (hard_vec + soft_vec) / 2

    sim_skills = cosine_similarity(skills_query, companies_skills_vec)
    sim_major = cosine_similarity(major_vec, companies_majors_vec)

    # Guard: if the two company matrices disagree on company count, score
    # only the overlapping prefix so the element-wise average stays aligned.
    n_skills, n_major = sim_skills.shape[1], sim_major.shape[1]
    if n_skills != n_major:
        shared = min(n_skills, n_major)
        sim_skills = sim_skills[:, :shared]
        sim_major = sim_major[:, :shared]

    combined = (sim_skills + sim_major) / 2

    # Rank companies best-first; negate so argsort yields descending order.
    ranking = np.argsort(-combined[0])
    top_majors = jobs.iloc[ranking]['Major'].values[:3]
    return top_majors.tolist()

def handler(event, context):
    """AWS Lambda entry point: recommend up to three job majors for an applicant.

    Expects ``event['body']`` to be a JSON string with the keys
    ``input_hard_skills``, ``input_soft_skills``, and ``input_major``.
    Returns a dict with ``statusCode`` 200 and a JSON-encoded list of
    recommendations in ``body``.
    """
    input_data = json.loads(event['body'])
    input_hard_skills = input_data["input_hard_skills"]
    input_soft_skills = input_data["input_soft_skills"]
    input_major = input_data["input_major"]

    # NOTE(review): relative paths assume both CSVs are bundled alongside
    # the handler in the deployment package — confirm deployment layout.
    applicants = pd.read_csv("1st_train.csv")
    companies = pd.read_csv("jobs_data.csv")

    tfidf_vectorizer_skills = TfidfVectorizer()
    tfidf_vectorizer_majors = TfidfVectorizer()

    # Fit the vocabularies on applicant AND company text so query-time
    # transforms share the same feature space.
    all_skills = pd.concat([applicants['final_hard_skill'], applicants['final_soft_skill'],
                            companies['final_hard_skill'], companies['final_soft_skill']])
    all_majors = pd.concat([applicants['candidate_field'], companies['Major']])
    tfidf_vectorizer_skills.fit(all_skills)
    tfidf_vectorizer_majors.fit(all_majors)

    # BUG FIX: the old slicing (all_skills_vectorized[num_applicants*2:])
    # left the companies skills matrix with 2*len(companies) stacked rows
    # (all hard-skill rows, then all soft-skill rows). That no longer lined
    # up with the len(companies)-row majors matrix, forcing the silent
    # truncation in recommend_jobs_for_input_skills and discarding the
    # companies' soft-skill information. Build one vector per company
    # instead, averaging hard and soft skills exactly like the input query.
    companies_hard_vec = tfidf_vectorizer_skills.transform(companies['final_hard_skill'])
    companies_soft_vec = tfidf_vectorizer_skills.transform(companies['final_soft_skill'])
    companies_skills_vectorized = (companies_hard_vec + companies_soft_vec) / 2

    companies_majors_vectorized = tfidf_vectorizer_majors.transform(companies['Major'])

    recommended_jobs = recommend_jobs_for_input_skills(
        input_hard_skills, input_soft_skills, input_major, companies,
        tfidf_vectorizer_skills, tfidf_vectorizer_majors,
        companies_skills_vectorized, companies_majors_vectorized)

    return {
        'statusCode': 200,
        'body': json.dumps(recommended_jobs)
    }