subashdvorak
committed on
Update brain_strom_with_influencer_input.py
brain_strom_with_influencer_input.py +280 -281
brain_strom_with_influencer_input.py
CHANGED
@@ -1,281 +1,280 @@
[Previous version of the file: 281 lines removed and replaced by the version below.]
import os

import gradio as gr
import torch
import re
# from transformers import AutoTokenizer, AutoModelForCausalLM
from sentence_transformers import SentenceTransformer
from datasets import load_dataset, Dataset
from shared_resources import shared_resources
from phi.agent import Agent, RunResponse
from phi.tools.duckduckgo import DuckDuckGo
from phi.model.huggingface import HuggingFaceChat


class ResponseGenerator:
    def __init__(self):
        self.ST = shared_resources.sentence_transformer
        self.data = shared_resources.data
        self.original_query = ""

    # Define the search function
    def search(self, query: str, usernames=None, k: int = 3):
        """Retrieve recommended video captions based on the user input."""
        self.embedded_query = self.ST.encode(query)  # Embed the user input
        self.all_retrievals = []
        if usernames:
            dataset = self.data.to_pandas()
            for username in usernames:
                filtered_df = dataset[dataset['username'].isin([username])]
                self.temp_data = Dataset.from_pandas(filtered_df)
                self.temp_data = self.temp_data.add_faiss_index("embeddings")
                # Search for the top k results within this username's posts
                self.scores, self.retrieved_examples = self.temp_data.get_nearest_examples("embeddings", self.embedded_query, k=k)
                self.all_retrievals.append(str(self.retrieved_examples['Caption'][0]))
                self.temp_data = None
            print('All retrievals are:', self.all_retrievals)
            return self.all_retrievals

        # Search the whole dataset for the top k results
        self.scores, self.retrieved_examples = self.data.get_nearest_examples("embeddings", self.embedded_query, k=k)
        return self.scores, self.retrieved_examples

    def generate_response(self, query, username=None, additional_focus=None):
        """
        Generates the storyline using the Meta-Llama-3-8B-Instruct model.
        """
        # print('The usernames are:', username)
        self.original_query = query  # Save the original query for future focus

        # If we are going deeper, add the additional focus to the prompt
        if additional_focus:
            # prompt = f"Explain the Given topic:\n{self.original_query}. Also focus on: {additional_focus}\n1."
            prompt = f"""
            I want to create a detailed storyline for a video primarily focusing on the sentence: **{additional_focus}**, keeping it under 500 words. Please provide the storyline in 6 concise paragraphs that must incorporate the following key features every time:

            1. **Story:** How to introduce the scene and set the tone. What is happening in the scene? Describe key visuals and actions.
            2. **Narration or Voiceover:** Suggestions for narration or voiceover that complements the visuals.
            3. **Text in the Video:** Propose important text overlays for key moments.
            4. **Transitions:** Smooth transitions between scenes to maintain flow.
            5. **Emotional Tone:** The mood and energy of the scenes (e.g., excitement, calm, tension, joy).
            6. **Key Visuals & Sounds:** Important props, locations, sound effects, or background music to enhance the video.

            The storyline should flow naturally, without repeating the same information or listing individual features. Ensure the output is engaging and cohesive.

            Also, suggest **5 relevant hashtags** for the video that reflect its content and themes commonly used on social media for similar videos.
            """
        else:
            # prompt = f"Explain the Given topic:\n{query}\n1."
            prompt = f"""
            I want to create a detailed storyline for a video in any domain, keeping it under 500 words. Please provide the storyline in 6 concise paragraphs that must incorporate the following key features every time:

            1. **Story:** How to introduce the scene and set the tone. What is happening in the scene? Describe key visuals and actions.
            2. **Narration or Voiceover:** Suggestions for narration or voiceover that complements the visuals.
            3. **Text in the Video:** Propose important text overlays for key moments.
            4. **Transitions:** Smooth transitions between scenes to maintain flow.
            5. **Emotional Tone:** The mood and energy of the scenes (e.g., excitement, calm, tension, joy).
            6. **Key Visuals & Sounds:** Important props, locations, sound effects, or background music to enhance the video.

            The storyline should flow naturally, without repeating the same information or listing individual features. Ensure the output is engaging and cohesive.

            Also, suggest **5 relevant hashtags** for the video that reflect its content and themes commonly used on social media for similar videos.
            """

        # prefix = f"The question is: {self.original_query}"
        # print('The data is:', self.data)
        if username:
            retrieved_list = self.search(query, username, 1)
            retrieved_context = "\n".join(retrieved_list)
            prompt = prompt + f"\nHere is a sample video story from the dataset. Use it only for analysis, not to generate something similar. This is the story:\n{retrieved_context}"

        prompt = prompt + f"\nNow, finally, I am providing the question for which to create a detailed storyline for a video. The question is: **{self.original_query}**"

        agent = Agent(
            model=HuggingFaceChat(
                id="meta-llama/Meta-Llama-3-8B-Instruct",
                max_tokens=4096,
            ),
            # tools=[DuckDuckGo()],
            markdown=True
        )

        # Get the response in a variable
        run: RunResponse = agent.run(prompt)
        return run.content

    def extract_topics(self, story):
        """
        Extracts five key sentences from the generated storyline using the LLM agent.
        """
        prompt = f'''I want to brainstorm ways to diversify or improve a storyline in exactly 5 sentences. No more than 5, no fewer than 5.
        The goal is to generate creative and actionable ideas, not already present in the storyline, on how it can be expanded or modified for better engagement.
        For example: if the storyline is about creating a promotional video for a restaurant, the new suggestions might include:
        - I want to showcase the chef preparing a signature dish.
        - I want to add a sequence of customers sharing their experiences at the restaurant.
        - I want to highlight the farm-to-table sourcing of ingredients with a short segment showing local farms.
        - I want to include a time-lapse of the restaurant transforming from day to night, capturing its unique ambiance.
        - I want to feature a quick interview with the owner sharing the story behind the restaurant.

        Now, I will provide you with the storyline. The storyline is:\n{story}
        Please remember, don't give any introduction or explanations. Just generate 5 sentences directly, focusing on creative suggestions for diversifying or modifying the storyline.'''

        agent = Agent(
            model=HuggingFaceChat(
                id="meta-llama/Meta-Llama-3-8B-Instruct",
                max_tokens=4096,
            ),
            # tools=[DuckDuckGo()],
            markdown=True
        )

        # Get the response in a variable
        run: RunResponse = agent.run(prompt)
        generated_text = run.content
        # Split the text into sentences and strip each one
        sentences = [sentence.strip() for sentence in re.split(r'[.?]', generated_text) if sentence.strip()]
        print('The sentences are:', sentences)

        return sentences[-4:]

    def on_select_topic(self, selected_topic, history_stack, current_state):
        """
        Generates new points for the selected topic and updates history.
        """
        # Save current state in history
        history_stack.append(current_state)

        # Generate new outputs with the selected topic as additional focus
        new_response = self.generate_response(self.original_query, additional_focus=selected_topic)
        new_topics = self.extract_topics(new_response)

        # Prepare new state
        new_state = {
            "response": new_response,
            "topics": new_topics,
            "key_topics": new_topics
        }

        return new_state, history_stack, gr.update(value=new_response), gr.update(choices=new_topics)

    def on_back(self, history_stack):
        """
        Restores the previous state for all outputs.
        """
        if history_stack:
            # Pop the last state from history
            previous_state = history_stack.pop()

            return history_stack, gr.update(value=previous_state["response"]), \
                gr.update(choices=previous_state["key_topics"])

        # If no history, clear outputs
        return history_stack, gr.update(value=""), gr.update(choices=[])


class ResponseGeneratorApp:
    def __init__(self):
        self.point_generator = ResponseGenerator()

    def build_ui(self):
        with gr.Blocks() as demo:
            gr.Markdown(
                """
                # Brainstorming App
                Enter a query to generate a detailed response and start brainstorming for further exploration.
                """
            )

            query_input = gr.Textbox(
                label="Enter your query",
                placeholder="Type a query, e.g., 'I want to create a promotional video of Begnas Lake.'",
                lines=2,
            )

            usernames = [
                "_travelwithsapana", "givina_9", "rajen.rb", "wh0z.khu5h1", "palam061",
                "prettiest_sky", "explorepokhara", "ggkaam610", "anjana_dhl1"
            ]

            # username_inputs = gr.Radio(label="Select the username whose stories you want as reference:", choices=usernames, type="value")
            username_inputs = gr.CheckboxGroup(
                choices=usernames,
                label="Choose one or more usernames whose stories you want as reference:",
                type="value"
            )

            generate_btn = gr.Button(value="Generate")

            # Output box for the generated text
            response_output = gr.Textbox(
                label="Generated Response",
                lines=10,
                interactive=False
            )

            # Dynamic radio buttons area for the extracted topics
            topics_radio = gr.Radio(
                label="Brainstorming Areas",
                choices=[],
                type="value",
                interactive=True
            )

            back_btn = gr.Button(value="Back")

            # State for managing current topics and history
            current_state = gr.State({})  # Stores response, topics, and key_topics
            history_stack = gr.State([])  # Stack of previous states

            # Link the generate button to the processing function
            generate_btn.click(
                fn=lambda query, usernames: self.generate_handler(query, usernames),
                inputs=[query_input, username_inputs],
                outputs=[current_state, response_output, topics_radio],
            )

            # Handle selection of a topic (generate new stage)
            topics_radio.change(
                fn=self.point_generator.on_select_topic,
                inputs=[topics_radio, history_stack, current_state],
                outputs=[current_state, history_stack, response_output, topics_radio]
            )

            # Handle back button
            back_btn.click(
                fn=self.point_generator.on_back,
                inputs=[history_stack],
                outputs=[history_stack, response_output, topics_radio]
            )

        return demo

    def generate_handler(self, query, usernames):
        """
        Handles the generation of the response and topics.
        """
        response = self.point_generator.generate_response(query, usernames)
        topics = self.point_generator.extract_topics(response)

        # Prepare the current state
        current_state = {
            "response": response,
            "topics": topics,
            "key_topics": topics
        }

        return current_state, gr.update(value=response), gr.update(choices=topics)
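Note: this commit does not include the `shared_resources` module that the file imports, nor an entry point that launches the Gradio UI. The sketch below shows one plausible way to provide both, assuming `shared_resources` exposes `sentence_transformer` (a SentenceTransformer) and `data` (a datasets.Dataset with "username", "Caption", and "embeddings" columns, FAISS-indexed on "embeddings"), which is what `ResponseGenerator.search()` relies on. The model name and data file used here are placeholders, not part of the repository.

# shared_resources.py (illustrative sketch only; not part of this commit)
from sentence_transformers import SentenceTransformer
from datasets import load_dataset


class SharedResources:
    def __init__(self):
        # Placeholder embedding model and data file; substitute the real ones.
        self.sentence_transformer = SentenceTransformer("all-MiniLM-L6-v2")
        data = load_dataset("csv", data_files="influencer_captions.csv")["train"]
        # Embed each caption and build the FAISS index that search() queries.
        data = data.map(lambda row: {"embeddings": self.sentence_transformer.encode(row["Caption"])})
        self.data = data.add_faiss_index(column="embeddings")


shared_resources = SharedResources()

With that in place, the app would typically be started with something like the following (again an assumption, since no launch code appears in this diff):

# main.py (assumed entry point; not shown in the committed file)
from brain_strom_with_influencer_input import ResponseGeneratorApp

if __name__ == "__main__":
    demo = ResponseGeneratorApp().build_ui()
    demo.launch()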