import gradio as gr
from huggingface_hub import InferenceClient
import os
import fitz  # PyMuPDF for PDF handling
from collections import Counter
from docx import Document  # For handling MS Word documents

# Initialize Hugging Face Inference Client
try:
    client = InferenceClient(
        model="meta-llama/Meta-Llama-3.1-8B-Instruct",
        token=os.getenv("HUGGINGFACEHUB_API_TOKEN")
    )
except Exception as e:
    print(f"Error initializing InferenceClient: {e}")


# Function to optimize the resume based on the job title
def optimize_resume(resume_text, job_title):
    prompt = f"Optimize the following resume for the job title '{job_title}':\n\n{resume_text}"
    responses = []
    try:
        for message in client.chat_completion(
            messages=[{"role": "user", "content": prompt}],
            max_tokens=1000,
            stream=True,
        ):
            # Streamed chunks can carry an empty delta; skip those so the join never sees None
            content = message.choices[0].delta.content
            if content:
                responses.append(content)
    except Exception as e:
        return f"Error during model inference: {e}"
    return ''.join(responses)


# Function to calculate an ATS score based on keyword matching
def calculate_ats_score(resume_text, job_title):
    job_keywords = Counter(job_title.lower().split())
    resume_keywords = Counter(resume_text.lower().split())
    match_score = sum(min(resume_keywords[word], job_keywords[word]) for word in job_keywords)
    max_score = sum(job_keywords.values())
    if max_score == 0:  # Guard against an empty job title
        return 0.0
    return round((match_score / max_score) * 100, 2)  # Score as a percentage


# Function to extract text from a PDF file
def extract_text_from_pdf(pdf_file_path):
    text = ""
    try:
        pdf_document = fitz.open(pdf_file_path)
        for page_num in range(len(pdf_document)):
            page = pdf_document.load_page(page_num)
            text += page.get_text()
        pdf_document.close()
    except Exception as e:
        return f"Error extracting text from PDF: {e}"
    return text


# Function to extract text from a Word document
def extract_text_from_word(docx_file_path):
    text = ""
    try:
        doc = Document(docx_file_path)
        for paragraph in doc.paragraphs:
            text += paragraph.text + "\n"
    except Exception as e:
        return f"Error extracting text from Word document: {e}"
    return text


# Function to process the resume and job title inputs
def process_resume(file, job_title):
    try:
        # gr.File may return a file wrapper (older Gradio) or a plain path string (newer Gradio)
        file_name = file.name if hasattr(file, "name") else file
        if file_name.endswith(".pdf"):
            # Extract text if the file is a PDF
            resume_text = extract_text_from_pdf(file_name)
        elif file_name.endswith(".docx"):
            # Extract text if the file is a Word document
            resume_text = extract_text_from_word(file_name)
        else:
            # Assume the file is a plain-text file and read it directly
            with open(file_name, "r", encoding="utf-8") as f:
                resume_text = f.read()

        # Optimize the resume
        optimized_resume = optimize_resume(resume_text, job_title)

        # Calculate the ATS score
        ats_score = calculate_ats_score(optimized_resume, job_title)

        result = f"ATS Score: {ats_score}%\n\nOptimized Resume:\n{optimized_resume}"
        return result
    except Exception as e:
        return f"Error processing resume: {e}"


# Gradio interface
interface = gr.Interface(
    fn=process_resume,
    inputs=[
        gr.File(label="Upload your resume (PDF, DOCX, or TXT)"),
        gr.Textbox(lines=1, placeholder="Enter the job title...", label="Job Title"),
    ],
    outputs=gr.Textbox(label="Optimized Resume and ATS Score", lines=20),
    title="AI Resume Optimizer with ATS Scoring",
    description="Upload your resume and specify a job title to optimize your resume for that position. The system will also calculate an ATS score. Try out other models [here](https://huggingface.co/tchans123)."
)

# Launch the Gradio app
interface.launch(share=True)