Upload 6 files
- .env +1 -0
- chains.py +60 -0
- main.py +50 -0
- portfolio.py +21 -0
- requirements.txt +10 -0
- utils.py +16 -0
.env
ADDED
@@ -0,0 +1 @@
+GROQ_API_KEY=<your_groq_api_key>
chains.py
ADDED
@@ -0,0 +1,60 @@
+import os
+from langchain_groq import ChatGroq
+from langchain_core.prompts import PromptTemplate
+from langchain_core.output_parsers import JsonOutputParser
+from langchain_core.exceptions import OutputParserException
+from dotenv import load_dotenv
+
+load_dotenv()  # pull GROQ_API_KEY from the .env file
+
+class Chain:
+    def __init__(self):
+        self.llm = ChatGroq(temperature=0, groq_api_key=os.getenv("GROQ_API_KEY"), model_name="llama-3.1-70b-versatile", max_tokens=512)
+
+    def extract_jobs(self, cleaned_text):
+        prompt_extract = PromptTemplate.from_template(
+            """
+            ### SCRAPED TEXT FROM WEBSITE:
+            {page_data}
+            ### INSTRUCTION:
+            The scraped text is from the careers page of a website.
+            Your job is to extract the job postings and return them in JSON format containing the following keys: `role`, `experience`, `skills` and `description`.
+            Only return the valid JSON.
+            ### VALID JSON (NO PREAMBLE):
+            """
+        )
+        chain_extract = prompt_extract | self.llm  # pipe the prompt into the LLM
+        res = chain_extract.invoke(input={"page_data": cleaned_text})
+        try:
+            json_parser = JsonOutputParser()
+            res = json_parser.parse(res.content)
+        except OutputParserException:
+            raise OutputParserException("Context too big. Unable to parse jobs.")
+        return res if isinstance(res, list) else [res]  # always return a list of jobs
+
+    def write_mail(self, job, links):
+        prompt_email = PromptTemplate.from_template(
+            """
+            ### JOB DESCRIPTION:
+            {job_description}
+
+            ### INSTRUCTION:
+            You are Hassaan, a business development executive at EziLine. EziLine is an AI & Software Consulting company dedicated to facilitating
+            the seamless integration of business processes through automated tools.
+            Over the course of our experience, we have empowered numerous enterprises with tailored solutions, fostering scalability,
+            process optimization, cost reduction, and heightened overall efficiency.
+            Your job is to write a cold email to the client regarding the job mentioned above, describing the capability of EziLine
+            in fulfilling their needs.
+            Also add the most relevant ones from the following links to showcase EziLine's portfolio: {link_list}
+            Remember you are Hassaan, BDE at EziLine.
+            Do not provide a preamble.
+            ### EMAIL (NO PREAMBLE):
+
+            """
+        )
+        chain_email = prompt_email | self.llm
+        res = chain_email.invoke({"job_description": str(job), "link_list": links})
+        return res.content
+
+if __name__ == "__main__":
+    print(os.getenv("GROQ_API_KEY"))  # quick sanity check that the key loaded
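
For reference, a minimal sketch of how the Chain class might be exercised on its own, assuming a valid GROQ_API_KEY in .env; the sample text and portfolio link below are made up for illustration:

from chains import Chain

chain = Chain()
# Hypothetical cleaned careers-page text; in the app this comes from WebBaseLoader + clean_text.
sample_text = "Senior Python Developer 5 years experience Python Django REST APIs remote"
jobs = chain.extract_jobs(sample_text)  # list of dicts with role/experience/skills/description keys
email = chain.write_mail(jobs[0], ["https://example.com/python-portfolio"])  # placeholder link
print(email)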
main.py
ADDED
@@ -0,0 +1,50 @@
+from flask import Flask, request, jsonify, render_template
+from chains import Chain
+from portfolio import Portfolio
+from utils import clean_text
+from langchain_community.document_loaders import WebBaseLoader
+
+
+app = Flask(__name__)
+
+chain = Chain()
+portfolio = Portfolio()
+
+@app.route('/')
+def index():
+    return render_template('index.html')
+
+@app.route('/generate-email', methods=['POST'])
+def generate_email():
+    url = request.form.get('url')
+    if not url:
+        return jsonify({"error": "URL is required"}), 400
+
+    try:
+        # Load the webpage content
+        loader = WebBaseLoader([url])
+        data = clean_text(loader.load().pop().page_content)
+
+        # Load the portfolio into the vector database
+        portfolio.load_portfolio()
+
+        # Extract jobs from the cleaned text (use the first job found)
+        jobs = chain.extract_jobs(data)
+        if not jobs:
+            return jsonify({"error": "No jobs found on the provided URL"}), 404
+
+        # Generate a single email for the first job
+        job = jobs[0]  # Take the first job if multiple are found
+        skills = job.get('skills', [])
+        links = portfolio.query_links(skills)
+        if not links:
+            links = "No relevant portfolio links found."
+        email = chain.write_mail(job, links)
+
+        return jsonify({"email": email})
+
+    except Exception as e:
+        return jsonify({"error": str(e)}), 500
+
+if __name__ == '__main__':
+    app.run(debug=True)
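
Once the app is running (python main.py, which serves on Flask's default http://127.0.0.1:5000), the endpoint can be exercised with a small client sketch; requests is an assumption here and is not listed in requirements.txt, and the careers URL is a placeholder:

import requests

resp = requests.post(
    "http://127.0.0.1:5000/generate-email",
    data={"url": "https://example.com/careers/python-developer"},  # hypothetical job page
)
print(resp.status_code)
print(resp.json())  # {"email": "..."} on success, {"error": "..."} otherwise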
portfolio.py
ADDED
@@ -0,0 +1,21 @@
+import pandas as pd
+import chromadb
+import uuid
+
+
+class Portfolio:
+    def __init__(self, file_path="Resource/my_portfolio.csv"):
+        self.file_path = file_path
+        self.data = pd.read_csv(file_path)
+        self.chroma_client = chromadb.PersistentClient('vectorstore')  # on-disk vector store
+        self.collection = self.chroma_client.get_or_create_collection(name="portfolio")
+
+    def load_portfolio(self):
+        if not self.collection.count():  # index the CSV only once
+            for _, row in self.data.iterrows():
+                self.collection.add(documents=[row["Techstack"]],
+                                    metadatas=[{"links": row["Links"]}],
+                                    ids=[str(uuid.uuid4())])
+
+    def query_links(self, skills):
+        return self.collection.query(query_texts=skills, n_results=2).get('metadatas', [])
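
Portfolio assumes Resource/my_portfolio.csv exists with Techstack and Links columns. A minimal sketch that generates a compatible file; the rows are illustrative, not real portfolio entries:

import os
import pandas as pd

os.makedirs("Resource", exist_ok=True)  # the directory Portfolio reads from
pd.DataFrame({
    "Techstack": ["Python, Django, REST APIs", "React, Node.js, MongoDB"],
    "Links": ["https://example.com/python-portfolio", "https://example.com/mern-portfolio"],
}).to_csv("Resource/my_portfolio.csv", index=False)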
requirements.txt
ADDED
@@ -0,0 +1,10 @@
+langchain==0.2.14
+langchain-community==0.2.12
+langchain-groq==0.1.9
+unstructured==0.14.6
+selenium==4.21.0
+chromadb==0.5.0
+streamlit==1.35.0
+pandas==2.0.2
+python-dotenv==1.0.0
+flask==3.0.3  # imported by main.py; pin is an approximation
utils.py
ADDED
@@ -0,0 +1,16 @@
+import re
+
+def clean_text(text):
+    # Remove HTML tags
+    text = re.sub(r'<[^>]*?>', '', text)
+    # Remove URLs
+    text = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', text)
+    # Remove special characters
+    text = re.sub(r'[^a-zA-Z0-9 ]', '', text)
+    # Replace multiple spaces with a single space
+    text = re.sub(r'\s{2,}', ' ', text)
+    # Trim leading and trailing whitespace
+    text = text.strip()
+    # Collapse any remaining whitespace runs
+    text = ' '.join(text.split())
+    return text
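
A quick demonstration of what clean_text strips, using a made-up HTML fragment:

from utils import clean_text

raw = '<div>Senior   Python Developer</div>  Apply: https://example.com/apply (5+ years)'
print(clean_text(raw))
# Prints: Senior Python Developer Apply 5 years
# (tags, the URL, punctuation, and repeated spaces are removed)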