import gradio as gr
from huggingface_hub import InferenceClient
import os
import fitz # PyMuPDF for PDF handling
from collections import Counter
from docx import Document # For handling MS Word documents

# Initialize Hugging Face Inference Client
try:
    client = InferenceClient(
        model="meta-llama/Meta-Llama-3.1-8B-Instruct",
        token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
    )
except Exception as e:
    print(f"Error initializing InferenceClient: {e}")

# Function to optimize resume based on job title
def optimize_resume(resume_text, job_title):
    prompt = f"Optimize the following resume for the job title '{job_title}':\n\n{resume_text}"
    responses = []
    try:
        for message in client.chat_completion(
            messages=[{"role": "user", "content": prompt}],
            max_tokens=1000,
            stream=True,
        ):
            # The final streamed chunk may carry no content, so guard against None
            delta = message.choices[0].delta.content
            if delta:
                responses.append(delta)
    except Exception as e:
        return f"Error during model inference: {e}"
    return "".join(responses)
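
# A minimal non-streaming alternative (a sketch, reusing the same `client` and
# `prompt`); collecting the full reply in one call avoids handling partial deltas:
#
#     response = client.chat_completion(
#         messages=[{"role": "user", "content": prompt}],
#         max_tokens=1000,
#     )
#     optimized = response.choices[0].message.content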

# Function to calculate an ATS score based on keyword matching
def calculate_ats_score(resume_text, job_title):
    job_keywords = Counter(job_title.lower().split())
    resume_keywords = Counter(resume_text.lower().split())
    match_score = sum(min(resume_keywords[word], job_keywords[word]) for word in job_keywords)
    max_score = sum(job_keywords.values())
    if max_score == 0:
        return 0.0  # Avoid division by zero when the job title is empty
    return round((match_score / max_score) * 100, 2)  # Score as a percentage
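
# Worked example of the scoring (hypothetical inputs):
# job_title = "data engineer" -> Counter({"data": 1, "engineer": 1});
# a resume containing "data" twice and "engineer" once matches
# min(2, 1) + min(1, 1) = 2 out of a maximum of 2, i.e. a score of 100.0.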

# Function to extract text from a PDF file
def extract_text_from_pdf(pdf_file_path):
    text = ""
    try:
        # Use a context manager so the PDF is closed after extraction
        with fitz.open(pdf_file_path) as pdf_document:
            for page_num in range(len(pdf_document)):
                page = pdf_document.load_page(page_num)
                text += page.get_text()
    except Exception as e:
        return f"Error extracting text from PDF: {e}"
    return text

# Function to extract text from a Word document
def extract_text_from_word(docx_file_path):
    text = ""
    try:
        doc = Document(docx_file_path)
        for paragraph in doc.paragraphs:
            text += paragraph.text + "\n"
    except Exception as e:
        return f"Error extracting text from Word document: {e}"
    return text

# Function to process the resume and job title inputs
def process_resume(file, job_title):
    try:
        # gr.File may hand back a path string or a tempfile-like object,
        # depending on the Gradio version
        file_name = file if isinstance(file, str) else file.name
        if file_name.endswith(".pdf"):
            # Extract text if the file is a PDF
            resume_text = extract_text_from_pdf(file_name)
        elif file_name.endswith(".docx"):
            # Extract text if the file is a Word document
            resume_text = extract_text_from_word(file_name)
        else:
            # Assume the file is plain text and read it from disk
            with open(file_name, "r", encoding="utf-8") as f:
                resume_text = f.read()

        # Optimize the resume
        optimized_resume = optimize_resume(resume_text, job_title)

        # Calculate ATS score against the optimized text
        ats_score = calculate_ats_score(optimized_resume, job_title)

        result = f"ATS Score: {ats_score}%\n\nOptimized Resume:\n{optimized_resume}"
        return result
    except Exception as e:
        return f"Error processing resume: {e}"

# Gradio Interface
interface = gr.Interface(
    fn=process_resume,
    inputs=[
        gr.File(label="Upload your resume (PDF, Word, or plain text)"),
        gr.Textbox(lines=1, placeholder="Enter the job title...", label="Job Title"),
    ],
    outputs=gr.Textbox(label="Optimized Resume and ATS Score", lines=20),
    title="AI Resume Optimizer with ATS Scoring",
    description="Upload your resume and specify a job title to optimize your resume for that position. The system will also calculate an ATS score. Try out other models [here](https://huggingface.co/tchans123).",
)

# Launch the Gradio app
interface.launch(share=True)