import os
import openai
import PyPDF2
import gradio as gr
import docx
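
# NOTE: assumed dependencies for this app (not declared in this file):
# openai<1.0 (the legacy Completion API used below), PyPDF2 (>=2.0 for the
# PdfReader API), python-docx, and gradio.
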
class QuestionsGenerator:
    def __init__(self):
        # Read the OpenAI API key from the environment
        openai.api_key = os.getenv("OPENAI_API_KEY")
    def extract_text_from_file(self, file_path):
        # Get the file extension
        file_extension = os.path.splitext(file_path)[1]
        if file_extension == '.pdf':
            with open(file_path, 'rb') as file:
                # Create a PDF reader object
                reader = PyPDF2.PdfReader(file)
                # Loop through each page in the PDF and collect the text
                extracted_text = ""
                for page in reader.pages:
                    extracted_text += page.extract_text()
                return extracted_text
        elif file_extension == '.txt':
            with open(file_path, 'r') as file:
                # Just read the entire contents of the text file
                return file.read()
        elif file_extension == '.docx':
            # Collect the text of every paragraph in the Word document
            doc = docx.Document(file_path)
            text = []
            for paragraph in doc.paragraphs:
                text.append(paragraph.text)
            return '\n'.join(text)
        else:
            return "Unsupported file type"
    def response(self, job_description_path):
        # Gradio's File component passes a file object; .name holds the path on disk
        job_description_path = job_description_path.name
        job_description = self.extract_text_from_file(job_description_path)
        # Build the prompt for the model
        prompt = f"""Generate interview questions for screening the following job description, delimited by triple backticks. Generate at most ten questions.
```{job_description}```
"""
        # Generate a response from the GPT-3 completion model
        response = openai.Completion.create(
            engine='text-davinci-003',  # GPT-3 engine to use
            prompt=prompt,
            max_tokens=200,  # Maximum number of tokens in the generated response
            temperature=0,   # Controls randomness: higher = more random, lower = more focused
            n=1,             # Generate a single response
            stop=None,       # Optional stop sequence to end the response early
        )
        # Extract the generated text from the API response
        generated_text = response.choices[0].text.strip()
        return generated_text
    def gradio_interface(self):
        with gr.Blocks(css="style.css", theme=gr.themes.Soft()) as app:
            gr.HTML("""<img class="leftimage" align="left" src="https://templates.images.credential.net/1612472097627370951721412474196.png" alt="Image" width="210" height="210">
            <img class="rightimage" align="right" src="https://companieslogo.com/img/orig/RAND.AS_BIG-0f1935a4.png?t=1651813778" alt="Image" width="210" height="210">""")
            with gr.Row(elem_id="col-container"):
                with gr.Column():
                    gr.HTML("<br>")
                    gr.HTML(
                        """<h1 style="text-align:center; color:white">Randstad Questions For Screening</h1>"""
                    )
                    gr.HTML("<br>")
                with gr.Column():
                    jobDescription = gr.File(label="Job Description")
                with gr.Column():
                    analyse = gr.Button("Generate")
                with gr.Column():
                    result = gr.Textbox(label="Questions For Screening", lines=8)
            # Wire the button to the question-generation method
            analyse.click(self.response, [jobDescription], result)
        app.launch()
ques = QuestionsGenerator()
ques.gradio_interface()
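
# Usage sketch (assumptions: this file is saved as app.py and the API key
# is exported in the environment before launching):
#   export OPENAI_API_KEY="sk-..."
#   python app.py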