Jobanpreet committed • Commit 46290fc
1 Parent(s): 790499e
Upload 4 files

- advance_post.py +102 -0
- app.py +133 -0
- paraphrase_post.py +99 -0
- scrap_post.py +33 -0
advance_post.py
ADDED
@@ -0,0 +1,102 @@
import requests
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
from langchain.prompts import PromptTemplate
from langchain_community.document_loaders import WebBaseLoader
from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
import nest_asyncio


def google_search(linkedin_post, model, google_api_key, search_engine_id, num_results_per_query=[3, 2, 1]):
    # Ask the model for the top three questions raised by the post, parsed
    # into a list via a structured output schema.
    response_schemas = [
        ResponseSchema(name="answer", description="These are the top three relevant questions from the LinkedIn post", type="list")]
    output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
    format_instructions = output_parser.get_format_instructions()

    template = """
    You are a helpful question extractor bot. You are provided with a LinkedIn post, and your task is to extract the top three relevant questions from the post which are related to the topics of the post only.
    LinkedIn post: {post}
    {format_instructions}
    """

    prompt = PromptTemplate(
        template=template,
        input_variables=["post"],
        partial_variables={"format_instructions": format_instructions},
    )

    chain = prompt | model | output_parser
    result = chain.invoke({"post": linkedin_post})
    questions = result['answer']
    # print(questions)

    # Query Google Custom Search (news results) for each question and collect the links.
    all_links = []
    for query, num_results in zip(questions, num_results_per_query):
        url = f"https://www.googleapis.com/customsearch/v1?key={google_api_key}&cx={search_engine_id}&q={query}&tbm=nws&num={num_results}"
        headers = {'Cookie': 'NID=513=KqMRZpKGj6WedOM42XZfrWSUunISFtrQ1twN2s6GEO_lIwb4SzNBCoRHw1Z6lmrRjuSHMxW2wIm1kL20piObJbroQQR5Sr3YSuCTXqH9UstqwzvSaUgS6P40fPvq9OKeDxWg3O8UGTYX_7g8xR76ox80aUZ4oy14DCjgwNInLDc'}
        response = requests.get(url, headers=headers)
        search_results = response.json()
        links = [item['link'] for item in search_results.get('items', [])]
        all_links.extend(links)

    return all_links


# nest_asyncio.apply()
# Earlier single-shot version: feed all loaded documents to the model at once.
# def advanced_post(all_links, model, linkedinpost):
#     loader = WebBaseLoader(all_links, encoding="utf-8")
#     loader.requests_per_second = 1
#     docs = loader.load()
#     template = """You are a helpful LinkedIn post creator. You are provided with a LinkedIn post and documents related to the post extracted from different articles from the internet.
#     Your task is to create a new LinkedIn post, but the content should be taken from the documents according to the semantic similarity of the post content with the document content.
#
#     LinkedIn post: {post}
#     Documents: {content}"""
#     prompt = ChatPromptTemplate.from_template(template)
#     chain = prompt | model | StrOutputParser()
#     result = chain.invoke({'post': linkedinpost, 'content': docs})
#     return result, docs


def advanced_post(all_links, model, linkedinpost):
    # Load the pages behind the collected links, throttled to one request per second.
    loader = WebBaseLoader(all_links, encoding="utf-8")
    loader.requests_per_second = 1
    docs = loader.load()

    # Map step: from each document, extract only the content similar to the post.
    template1 = """You are provided with a LinkedIn post and a document which is related to the post. Your task is to extract the relevant information from the document which has similarity with the LinkedIn post.
    Do not add LinkedIn post content. It should come only from the document.
    LinkedIn post: {post}
    Document: {content}"""

    prompt = ChatPromptTemplate.from_template(template1)
    chain = prompt | model | StrOutputParser()
    relevant_content = ""
    for doc in docs:
        relevant_content += chain.invoke({'post': linkedinpost, 'content': doc.page_content})

    # Reduce step: write a new post from the accumulated relevant content only.
    template2 = """You are provided with a document. Your task is to create a new LinkedIn post. Take content from the document only. Choose the topic of the post wisely. Do not add anything outside of the provided document content.
    The format should be professional. It should include a topic, headings, key points, stickers, and emojis.

    The length of the post should be between 400 and 500 words.
    Document: {content}"""
    prompt2 = ChatPromptTemplate.from_template(template2)
    chain2 = prompt2 | model | StrOutputParser()
    result = chain2.invoke({'content': relevant_content})
    return result
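Taken together, these two functions form the search-augmented half of the pipeline: google_search turns the post into questions and collects article links, and advanced_post condenses those articles into a fresh post. A minimal usage sketch follows; the ChatGroq model, the placeholder keys, and the sample post text are illustrative assumptions, not part of this file:

    from langchain_groq import ChatGroq
    from advance_post import google_search, advanced_post

    # Placeholder credentials and post text -- substitute real values.
    model = ChatGroq(temperature=0.2, groq_api_key="YOUR_GROQ_API_KEY", model_name="mixtral-8x7b-32768")
    post = "Excited to share what we learned deploying Whisper-based ASR in production..."

    links = google_search(post, model, "YOUR_GOOGLE_API_KEY", "YOUR_SEARCH_ENGINE_ID")
    new_post = advanced_post(links, model, post)
    print(new_post)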
app.py
ADDED
@@ -0,0 +1,133 @@
import streamlit as st
import re
import openai
from paraphrase_post import get_original_url, paraphrased_post
from advance_post import google_search, advanced_post
from langchain.chat_models import ChatOpenAI
from langchain_groq import ChatGroq
# from langchain import HuggingFaceHub


def main():
    st.title("LinkedIn Post Creator")

    # Initialize session state so generated content survives Streamlit reruns.
    session_state = st.session_state
    if 'paraphrase' not in session_state:
        session_state.paraphrase = ""
    if 'keywords' not in session_state:
        session_state.keywords = ""
    if 'take_aways' not in session_state:
        session_state.take_aways = ""
    if 'highlights' not in session_state:
        session_state.highlights = ""
    if 'advancepost' not in session_state:
        session_state.advancepost = ""

    url = st.sidebar.text_input("Enter URL:", placeholder="Enter URL here...")
    option = st.sidebar.selectbox('Select Model:', ('GPT-4', "Mixtral-8x7b", "Gemma-7b"))
    temperature = st.sidebar.select_slider(
        'How accurate should the post be?',
        options=['Less accuracy', 9, 8, 7, 6, 5, 4, 3, 2, 1, 'High accuracy'])
    # Map the slider to a model temperature in [0, 1]:
    # 'High accuracy' -> 0.0, 'Less accuracy' -> 1.0, numeric stops -> value / 10.
    if temperature == 'Less accuracy':
        temperature = 10
    elif temperature == "High accuracy":
        temperature = 0
    temperature = temperature / 10

    if option == "GPT-4":
        api_key = st.sidebar.text_input("API Key:", placeholder="Enter OpenAI API Key...")
        if api_key:
            model = ChatOpenAI(model="gpt-4-turbo-preview", temperature=temperature, api_key=api_key)
    elif option == "Mixtral-8x7b":
        api_key = st.sidebar.text_input("API Key:", placeholder="Enter Groq API Key...")
        if api_key:
            model = ChatGroq(temperature=temperature, groq_api_key=api_key, model_name="mixtral-8x7b-32768")
    elif option == "Gemma-7b":
        api_key = st.sidebar.text_input("API Key:", placeholder="Enter Groq API Key...")
        if api_key:
            model = ChatGroq(temperature=temperature, groq_api_key=api_key, model_name="Gemma-7b-It")

    # elif option == "Llama-3":
    #     api_key = st.sidebar.text_input("API Key:", placeholder="Enter HuggingFace API Token...")
    #     if api_key:
    #         model = HuggingFaceHub(repo_id="mistralai/Mixtral-8x22B-Instruct-v0.1", huggingfacehub_api_token=api_key, model_kwargs={"temperature": temperature})

    if st.sidebar.button("Submit"):
        if url:
            if api_key:
                original_url = get_original_url(url)
                # Check the domain and path: the URL must be a LinkedIn post page,
                # not a login page or anything else.
                match = re.match(r"https?://(?:www\.)?linkedin\.com/(posts|feed|pulse)/.*", original_url)

                if match:
                    try:
                        session_state.paraphrase, session_state.keywords, session_state.take_aways, session_state.highlights = paraphrased_post(url, model)
                    except openai.AuthenticationError:
                        st.sidebar.error("Enter your valid API key")
                else:
                    st.sidebar.error("Put a valid LinkedIn post URL only")
            else:
                st.sidebar.error("Please enter API Key")
        else:
            st.sidebar.error("Please enter URL")

    paraphrase_text = st.text_area("Generated LinkedIn post", value=session_state.paraphrase, height=400)
    # import pyperclip
    # if st.button('Copy'):
    #     pyperclip.copy(paraphrase_text)
    #     st.success('Text copied successfully!')

    if st.sidebar.toggle("Show Details") and session_state.keywords:
        st.write("Keywords:")
        for i, statement in enumerate(session_state.keywords, start=1):
            st.write(f"{i}. {statement}")

        st.write("Take Aways:")
        for i, statement in enumerate(session_state.take_aways, start=1):
            st.write(f"{i}. {statement}")

        st.write("Highlights:")
        for i, statement in enumerate(session_state.highlights, start=1):
            st.write(f"{i}. {statement}")

    # ---------------------- Advance LinkedIn post code below ----------------------

    if st.sidebar.toggle("Advance LinkedIn Post"):
        google_api_key = st.sidebar.text_input("Google API Key:", placeholder="Enter Google Search API Key...")
        search_engine_id = st.sidebar.text_input("Search Engine ID:", placeholder="Enter Search Engine ID...")
        # NOTE: these hardcoded values override the two sidebar inputs above.
        google_api_key = "AIzaSyDh-lkJh2Zef0t6UVqSu_w3njpucx40mDc"
        search_engine_id = "44bbd32a2b2fc4418"
        if st.sidebar.button("Generate Advance Post"):
            if google_api_key:
                if search_engine_id:
                    all_links = google_search(session_state.paraphrase, model, google_api_key, search_engine_id)
                    session_state.advancepost = advanced_post(all_links, model, session_state.paraphrase)
                    # if len(docs) == 0:
                    #     st.sidebar.error("Please check both credentials carefully")
                else:
                    st.sidebar.error("Please enter Search Engine ID")
            else:
                st.sidebar.error("Please enter Google API Key")
        advance_post = st.text_area("Advance LinkedIn post", value=session_state.advancepost, height=400)

        # if st.button('Copy Advanced Post'):
        #     pyperclip.copy(advance_post)
        #     st.success('Text copied successfully!')
    # -------------------------------------------------------------------------------


if __name__ == "__main__":
    main()
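The Submit handler gates generation on the URL pattern above, so only LinkedIn post, feed, or pulse pages pass (and not, say, a login page). A quick illustration of what the pattern admits, using made-up URLs:

    import re

    pattern = r"https?://(?:www\.)?linkedin\.com/(posts|feed|pulse)/.*"
    assert re.match(pattern, "https://www.linkedin.com/posts/jane-doe_ai-activity-1234-abcd")
    assert re.match(pattern, "https://linkedin.com/pulse/some-article")
    assert not re.match(pattern, "https://www.linkedin.com/login")
    assert not re.match(pattern, "https://example.com/posts/not-linkedin")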
paraphrase_post.py
ADDED
@@ -0,0 +1,99 @@
from langchain_community.document_loaders import WebBaseLoader
from langchain.prompts import ChatPromptTemplate
from langchain.output_parsers import ResponseSchema
from langchain.output_parsers import StructuredOutputParser
from langchain_core.output_parsers import StrOutputParser
from scrap_post import scrappost
import requests


def is_shortened_url(url):
    # Check whether this is a shortened URL (it redirects elsewhere) or a regular website URL.
    try:
        response = requests.head(url, allow_redirects=True)
        final_url = response.url
        if final_url != url:
            return True
        return False
    except requests.exceptions.RequestException as e:
        print("Error:", e)
        return False


def expand_short_url(short_url):
    # Resolve a shortened URL to the regular URL it redirects to.
    try:
        response = requests.head(short_url, allow_redirects=True)
        if response.status_code == 200:
            return response.url
        else:
            print("Error: Short URL couldn't be expanded.")
            return None
    except requests.exceptions.RequestException as e:
        print("Error:", e)
        return None


def get_original_url(url):
    if is_shortened_url(url):
        return expand_short_url(url)
    else:
        return url


# The function below extracts the post-only content from the full web page
# content and paraphrases the extracted post.
def paraphrased_post(url, model):
    post = scrappost(url)

    template = """You are a helpful paraphraser tool. You are provided with a content and your task is to paraphrase it.
    {data}"""

    prompt = ChatPromptTemplate.from_template(template)
    chain = prompt | model | StrOutputParser()
    phrased_post = chain.invoke({"data": post})

    data2 = extract_data(phrased_post, model)
    keywords = data2['Keywords'][:3]
    take_aways = data2['Take Aways'][:3]
    highlights = data2['Highlights'][:3]
    return phrased_post, keywords, take_aways, highlights


# The function below extracts details such as keywords, take aways, and highlights.
def extract_data(post_data, model):
    keywords = ResponseSchema(name="Keywords",
                              description="These are the keywords extracted from LinkedIn post", type="list")
    Take_aways = ResponseSchema(name="Take Aways",
                                description="These are the take aways extracted from LinkedIn post", type="list")
    Highlights = ResponseSchema(name="Highlights",
                                description="These are the highlights extracted from LinkedIn post", type="list")

    response_schema = [
        keywords,
        Take_aways,
        Highlights
    ]
    output_parser = StructuredOutputParser.from_response_schemas(response_schema)
    format_instructions = output_parser.get_format_instructions()

    template = """
    You are a helpful bot that extracts keywords, take aways, and highlights from a LinkedIn post. Your task is to extract the relevant keywords, take aways, and highlights as lists in descending order of relevance, meaning the most relevant items come first.
    From the following text message, extract the following information:

    text message: {content}
    {format_instructions}
    """

    prompt_template = ChatPromptTemplate.from_template(template)
    messages = prompt_template.format_messages(content=post_data, format_instructions=format_instructions)
    response = model(messages)
    output_dict = output_parser.parse(response.content)
    return output_dict
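End to end, this module resolves a possibly shortened link, scrapes the post, paraphrases it, and pulls out structured details. A minimal sketch, assuming a Groq-hosted model and an illustrative shortened URL:

    from langchain_groq import ChatGroq
    from paraphrase_post import get_original_url, paraphrased_post

    model = ChatGroq(temperature=0.5, groq_api_key="YOUR_GROQ_API_KEY", model_name="mixtral-8x7b-32768")

    url = get_original_url("https://lnkd.in/abc123")  # expands the link if it is shortened
    post, keywords, take_aways, highlights = paraphrased_post(url, model)
    print(post)
    print(keywords)  # top three keywords, as parsed by extract_data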
scrap_post.py
ADDED
@@ -0,0 +1,33 @@
from unstructured.partition.html import partition_html
# source = 'https://www.linkedin.com/posts/jobanpreet-singh-392581207_asr-whisper-speechrecognition-activity-7172803455718158336-MC-j?utm_source=share&utm_medium=member_desktop'


def scrappost(url):
    all_groups = []
    group = {'page_content': ''}

    # Ingest and preprocess the web page into Unstructured elements.
    glossary_page = partition_html(url=url)

    # Iterate over the document elements and group texts by title.
    for element in glossary_page:
        if 'unstructured.documents.html.HTMLTitle' in str(type(element)):
            # If there's already content in the group, add it to all_groups
            # and start a new group at this title.
            if group['page_content']:
                all_groups.append(group)
                group = {'page_content': ''}
            group['page_content'] += element.text
        if 'unstructured.documents.html.HTMLNarrativeText' in str(type(element)):
            group['page_content'] += element.text
        if "unstructured.documents.html.HTMLListItem" in str(type(element)):
            group['page_content'] += element.text

    # Add the last group if it exists.
    if group['page_content']:
        all_groups.append(group)

    # Return the first group, which holds the post content.
    for group in all_groups[:1]:
        return group["page_content"]