subashdvorak committed on
Commit
5188e99
·
verified ·
1 Parent(s): 956a6ec

Update brain_strom_with_influencer_input.py

Browse files
Files changed (1) hide show
  1. brain_strom_with_influencer_input.py +280 -281
brain_strom_with_influencer_input.py CHANGED
@@ -1,281 +1,280 @@
1
- import os
2
-
3
- import gradio as gr
4
- import torch
5
- import re
6
- # from transformers import AutoTokenizer, AutoModelForCausalLM
7
- from keybert import KeyBERT
8
- from sentence_transformers import SentenceTransformer
9
- from datasets import load_dataset,Dataset
10
- from shared_resources import shared_resources
11
- from phi.agent import Agent
12
- from phi.tools.duckduckgo import DuckDuckGo
13
- from phi.agent import Agent, RunResponse
14
- from phi.model.huggingface import HuggingFaceChat
15
-
16
-
17
-
18
- class ResponseGenerator:
19
- def __init__(self):
20
-
21
- self.ST = shared_resources.sentence_transformer
22
- self.data = shared_resources.data
23
- self.original_query = ""
24
-
25
- # Define the search function
26
- def search(self, query: str, usernames=None, k: int = 3):
27
- """Function to get recommended videos based on user input"""
28
- self.embedded_query = self.ST.encode(query) # Embed the user input
29
- self.all_retrievals=[]
30
- if usernames:
31
- dataset=self.data.to_pandas()
32
- for username in usernames:
33
- username = [username]
34
- filtered_df = dataset[dataset['username'].isin(username)]
35
- self.temp_data = Dataset.from_pandas(filtered_df)
36
- self.temp_data=self.temp_data.add_faiss_index("embeddings")
37
- self.scores, self.retrieved_examples = self.temp_data.get_nearest_examples("embeddings", self.embedded_query, k=k) # Search for top k results
38
- self.all_retrievals.append(str(self.retrieved_examples['Caption'][0]))
39
- self.temp_data=None
40
- print('All retrievals are:',self.all_retrievals)
41
- return self.all_retrievals
42
-
43
- self.scores, self.retrieved_examples = self.data.get_nearest_examples("embeddings", self.embedded_query, k=k) # Search for top k results
44
- return self.scores, self.retrieved_examples
45
-
46
-
47
- def generate_response(self, query, username=None, additional_focus=None):
48
- # print('The usernames are:',username)
49
- """
50
- Generates text using the Llama 3.1 model.
51
- """
52
- self.original_query = query # Save the original query for future focus
53
-
54
- # If we are going deeper, add the additional focus to the prompt
55
- if additional_focus:
56
- # prompt = f"Explain the Given topic:\n{self.original_query}. Also focus on: {additional_focus}\n1."
57
- prompt = f"""
58
- I want to create a detailed storyline for a video primarily focusing on the sentence: **{additional_focus}**, keeping it under 500 words. Please provide the storyline in 6 concise paragraphs that must incorporate the following key features everytime:
59
-
60
- 1. **Story:** How to introduce the scene and set the tone. What is happening in the scence? Describe key visuals and actions.
61
- 2. **Narration or Voiceover:** Suggestions for narration or voiceover that complements the visuals.
62
- 3. **Text in the Video:** Propose important text overlays for key moments.
63
- 4. **Transitions:** Smooth transitions between scenes to maintain flow.
64
- 5. **Emotional Tone:** The mood and energy of the scenes (e.g., excitement, calm, tension, joy).
65
- 6. **Key Visuals & Sounds:** Important props, locations, sound effects, or background music to enhance the video.
66
-
67
- The storyline should flow naturally, without repeating the same information or listing individual features. Ensure the output is engaging and cohesive.
68
-
69
- Also, suggest **5 relevant hashtags** for the video that reflect its content and themes commonly used on social media for similar videos.
70
-
71
-
72
- """
73
-
74
- else:
75
- # prompt = f"Explain the Given topic:\n{query}\n1."
76
- prompt = f"""
77
- I want to create a detailed storyline for a video in any domain, keeping it under 500 words. Please provide the storyline in 6 concise paragraphs that must incorporate the following key features everytime:
78
-
79
- 1. **Story:** How to introduce the scene and set the tone. What is happening in the scene? Describe key visuals and actions.
80
- 2. **Narration or Voiceover:** Suggestions for narration or voiceover that complements the visuals.
81
- 3. **Text in the Video:** Propose important text overlays for key moments.
82
- 4. **Transitions:** Smooth transitions between scenes to maintain flow.
83
- 5. **Emotional Tone:** The mood and energy of the scenes (e.g., excitement, calm, tension, joy).
84
- 6. **Key Visuals & Sounds:** Important props, locations, sound effects, or background music to enhance the video.
85
-
86
- The storyline should flow naturally, without repeating the same information or listing individual features. Ensure the output is engaging and cohesive.
87
-
88
- Also, suggest **5 relevant hashtags** for the video that reflect its content and themes commonly used on social media for similar videos.
89
-
90
- """
91
-
92
-
93
- # prefix = f"The question is:{self.original_query}"
94
- # print('The data is:',self.data)
95
- if username:
96
- retrieved_list = self.search(query,username,1)
97
- retrieved_context = "\n".join(retrieved_list)
98
- prompt = prompt + f"\n Here is the random video story from the dataset for you. You can use it just for analysing purpose, not for similar generation. This is the story:\n{retrieved_context}"
99
- prompt = prompt + f"\nNow finally i am providing you a question to create a detailed story line for a video. The question is: **{self.original_query}**"
100
-
101
- else:
102
- prompt = prompt + f"\nNow finally i am providing you a question to create a detailed story line for a video. The question is: **{self.original_query}**"
103
- agent = Agent(
104
- model=HuggingFaceChat(
105
- id="meta-llama/Meta-Llama-3-8B-Instruct",
106
- max_tokens=4096,
107
- ),
108
- # tools=[DuckDuckGo()],
109
- markdown=True
110
- )
111
-
112
- # Get the response in a variable
113
- run: RunResponse = agent.run(prompt)
114
- return run.content
115
-
116
-
117
-
118
- def extract_topics(self, story):
119
- """
120
- Extracts 5 key sentences from the generated text using KeyBERT.
121
- """
122
-
123
- prompt = f'''I want to brainstorm ways to diversify or improve a storyline in exactly 5 sentences. No more than 5 nor less than 5.
124
- The goal is to generate creative and actionable ideas that are not on the storyline on how the storyline can be expanded or modified for better engagement.
125
- For example: If the storyline is about creating a promotional video for a restaurant, the new suggestions might include:
126
- - I want to showcase the chef preparing a signature dish.
127
- - I want to add a sequence of customers sharing their experiences at the restaurant.
128
- - I want to highlight the farm-to-table sourcing of ingredients with a short segment showing local farms.
129
- - I want to include a time-lapse of the restaurant transforming from day to night, capturing its unique ambiance.
130
- - I want to feature a quick interview with the owner sharing the story behind the restaurant.
131
-
132
- Now, I will provide you with the storyline. The storyline is:\n{story}
133
- Please remember, don't give any introduction or explanations. Just generate 5 sentences directly, focusing on creative suggestions for diversifying or modifying the storyline. '''
134
-
135
-
136
- agent = Agent(
137
- model=HuggingFaceChat(
138
- id="meta-llama/Meta-Llama-3-8B-Instruct",
139
- max_tokens=4096,
140
- ),
141
- # tools=[DuckDuckGo()],
142
- markdown=True
143
- )
144
-
145
- # Get the response in a variable
146
- run: RunResponse = agent.run(prompt)
147
- generated_text=run.content
148
- # Split the text into sentences and strip each one
149
- sentences = [sentence.strip() for sentence in re.split(r'[.?]', generated_text) if sentence.strip()]
150
- print('The sentences are:',sentences)
151
-
152
- return sentences[-4:]
153
-
154
-
155
- def on_select_topic(self, selected_topic, history_stack, current_state):
156
- """
157
- Generates new points for the selected topic and updates history.
158
- """
159
- # Save current state in history
160
- history_stack.append(current_state)
161
-
162
- # Generate new outputs with the selected topic as additional focus
163
- new_response = self.generate_response(self.original_query, additional_focus=selected_topic)
164
- new_topics = self.extract_topics(new_response)
165
-
166
- # Prepare new state
167
- new_state = {
168
- "response": new_response,
169
- "topics": new_topics,
170
- "key_topics": new_topics
171
- }
172
-
173
- return new_state, history_stack, gr.update(value=new_response), gr.update(choices=new_topics)
174
-
175
- def on_back(self, history_stack):
176
- """
177
- Restores the previous state for all outputs.
178
- """
179
- if history_stack:
180
- # Pop the last state from history
181
- previous_state = history_stack.pop()
182
-
183
- return history_stack, gr.update(value=previous_state["response"]), \
184
- gr.update(choices=previous_state["key_topics"])
185
-
186
- # If no history, clear outputs
187
- return history_stack, gr.update(value=""), gr.update(choices=[])
188
-
189
- class ResponseGeneratorApp:
190
- def __init__(self):
191
- self.point_generator = ResponseGenerator()
192
-
193
- def build_ui(self):
194
- with gr.Blocks() as demo:
195
- gr.Markdown(
196
- """
197
- #Brainstorming App
198
- Enter a query to generate a detailed response and start brainstroming for further exploration.
199
- """
200
- )
201
-
202
- query_input = gr.Textbox(
203
- label="Enter your query",
204
- placeholder="Type a query, e.g., 'I want to create a promotional video of Begnas Lake.'",
205
- lines=2,
206
- )
207
-
208
- usernames = [
209
- "_travelwithsapana", "givina_9", "rajen.rb", "wh0z.khu5h1", "palam061",
210
- "prettiest_sky", "explorepokhara", "ggkaam610", "anjana_dhl1"
211
- ]
212
-
213
- # username_inputs =gr.Radio(label="Select Username of whose you want similar story::", choices=usernames, type="value")
214
-
215
- username_inputs = gr.CheckboxGroup(choices=usernames,label="Choose one or more username of whose you want similar story::",type="value")
216
-
217
-
218
-
219
- generate_btn = gr.Button(value="Generate")
220
-
221
- # Output box for the generated text
222
- response_output = gr.Textbox(
223
- label="Generated Response",
224
- lines=10,
225
- interactive=False
226
- )
227
-
228
- # Dynamic radio buttons area for the extracted topics
229
- topics_radio = gr.Radio(
230
- label="Brain Stroming Areas....",
231
- choices=[],
232
- type="value",
233
- interactive=True
234
- )
235
-
236
- back_btn = gr.Button(value="Back")
237
-
238
- # State for managing current topics and history
239
- current_state = gr.State({}) # Store response, topics, and key_topics
240
- history_stack = gr.State([]) # Stack of previous states
241
-
242
- # Link the generate button to the processing function
243
- generate_btn.click(
244
- fn=lambda query,usernames: self.generate_handler(query,usernames),
245
- inputs=[query_input,username_inputs],
246
- outputs=[current_state, response_output, topics_radio],
247
- )
248
-
249
- # Handle selection of a topic (generate new stage)
250
- topics_radio.change(
251
- fn=self.point_generator.on_select_topic,
252
- inputs=[topics_radio, history_stack, current_state],
253
- outputs=[current_state, history_stack, response_output, topics_radio]
254
- )
255
-
256
- # Handle back button
257
- back_btn.click(
258
- fn=self.point_generator.on_back,
259
- inputs=[history_stack],
260
- outputs=[history_stack, response_output, topics_radio]
261
- )
262
-
263
- return demo
264
-
265
- def generate_handler(self, query,usernames):
266
- """
267
- Handles the generation of the response and topics.
268
- """
269
- response = self.point_generator.generate_response(query,usernames)
270
- topics = self.point_generator.extract_topics(response)
271
-
272
- # Prepare the current state
273
- current_state = {
274
- "response": response,
275
- "topics": topics,
276
- "key_topics": topics
277
- }
278
-
279
- return current_state, gr.update(value=response), gr.update(choices=topics)
280
-
281
-
 
1
+ import os
2
+
3
+ import gradio as gr
4
+ import torch
5
+ import re
6
+ # from transformers import AutoTokenizer, AutoModelForCausalLM
7
+ from sentence_transformers import SentenceTransformer
8
+ from datasets import load_dataset,Dataset
9
+ from shared_resources import shared_resources
10
+ from phi.agent import Agent
11
+ from phi.tools.duckduckgo import DuckDuckGo
12
+ from phi.agent import Agent, RunResponse
13
+ from phi.model.huggingface import HuggingFaceChat
14
+
15
+
16
+
17
class ResponseGenerator:
    """Creates video storylines and brainstorming suggestions with an LLM
    agent, optionally grounding the prompt in stories retrieved from a
    FAISS-indexed dataset of influencer captions."""

    # Single source of truth for the chat model used by every agent call.
    _MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"

    def __init__(self):
        # Shared, pre-loaded resources: the sentence embedder and the dataset
        # (presumably already carrying an "embeddings" FAISS index for the
        # no-username search path — TODO confirm against shared_resources).
        self.ST = shared_resources.sentence_transformer
        self.data = shared_resources.data
        self.original_query = ""

    def _build_agent(self):
        """Construct a fresh chat agent (previously duplicated inline in
        generate_response and extract_topics)."""
        return Agent(
            model=HuggingFaceChat(
                id=self._MODEL_ID,
                max_tokens=4096,
            ),
            # tools=[DuckDuckGo()],
            markdown=True,
        )

    def search(self, query: str, usernames=None, k: int = 3):
        """Retrieve stories similar to *query*.

        With *usernames*: returns a list holding the top caption for each
        username, searched over a per-user FAISS index.  Without: returns
        ``(scores, retrieved_examples)`` for the top *k* matches over the
        whole dataset.  NOTE(review): the two branches return different
        shapes — callers must know which form they invoked.
        """
        self.embedded_query = self.ST.encode(query)  # embed the user input
        self.all_retrievals = []
        if usernames:
            dataset = self.data.to_pandas()
            for username in usernames:
                # Restrict to one username and index just those rows so the
                # nearest-neighbour search is per-user.
                filtered_df = dataset[dataset['username'].isin([username])]
                self.temp_data = Dataset.from_pandas(filtered_df)
                self.temp_data = self.temp_data.add_faiss_index("embeddings")
                self.scores, self.retrieved_examples = self.temp_data.get_nearest_examples(
                    "embeddings", self.embedded_query, k=k
                )
                self.all_retrievals.append(str(self.retrieved_examples['Caption'][0]))
                self.temp_data = None  # release the per-user index
            print('All retrievals are:', self.all_retrievals)
            return self.all_retrievals

        self.scores, self.retrieved_examples = self.data.get_nearest_examples(
            "embeddings", self.embedded_query, k=k
        )
        return self.scores, self.retrieved_examples

    def generate_response(self, query, username=None, additional_focus=None):
        """Generate a detailed video storyline via the chat agent.

        Args:
            query: the user's request; saved as ``self.original_query`` so
                later drill-downs can reuse it.
            username: optional list of usernames whose top stories are
                appended to the prompt as analysis-only context.
            additional_focus: a brainstorming sentence to centre the new
                storyline on (set when the user drills into a topic).

        Returns:
            The agent's generated storyline text.
        """
        self.original_query = query  # save for future focus rounds

        # The two original prompts differed only in this opening clause (and
        # a "scence" typo, fixed here), so they are folded into one template.
        if additional_focus:
            focus_clause = (
                f"primarily focusing on the sentence: **{additional_focus}**"
            )
        else:
            focus_clause = "in any domain"

        prompt = f"""
        I want to create a detailed storyline for a video {focus_clause}, keeping it under 500 words. Please provide the storyline in 6 concise paragraphs that must incorporate the following key features everytime:

        1. **Story:** How to introduce the scene and set the tone. What is happening in the scene? Describe key visuals and actions.
        2. **Narration or Voiceover:** Suggestions for narration or voiceover that complements the visuals.
        3. **Text in the Video:** Propose important text overlays for key moments.
        4. **Transitions:** Smooth transitions between scenes to maintain flow.
        5. **Emotional Tone:** The mood and energy of the scenes (e.g., excitement, calm, tension, joy).
        6. **Key Visuals & Sounds:** Important props, locations, sound effects, or background music to enhance the video.

        The storyline should flow naturally, without repeating the same information or listing individual features. Ensure the output is engaging and cohesive.

        Also, suggest **5 relevant hashtags** for the video that reflect its content and themes commonly used on social media for similar videos.

        """

        if username:
            # Pull one story per selected username as analysis-only context.
            retrieved_list = self.search(query, username, 1)
            retrieved_context = "\n".join(retrieved_list)
            prompt += (
                "\n Here is the random video story from the dataset for you. "
                "You can use it just for analysing purpose, not for similar "
                f"generation. This is the story:\n{retrieved_context}"
            )
        # The closing question was appended identically in both branches of
        # the original if/else, so it is hoisted out.
        prompt += (
            "\nNow finally i am providing you a question to create a detailed "
            f"story line for a video. The question is: **{self.original_query}**"
        )

        run: RunResponse = self._build_agent().run(prompt)
        return run.content

    def extract_topics(self, story):
        """Ask the agent for exactly 5 brainstorming sentences about *story*.

        (The original docstring credited KeyBERT; extraction is actually done
        by the LLM agent below.)  Returns the trailing sentences of the model
        output, since the model may prepend preamble text despite being told
        not to.
        """
        prompt = f'''I want to brainstorm ways to diversify or improve a storyline in exactly 5 sentences. No more than 5 nor less than 5.
        The goal is to generate creative and actionable ideas that are not on the storyline on how the storyline can be expanded or modified for better engagement.
        For example: If the storyline is about creating a promotional video for a restaurant, the new suggestions might include:
        - I want to showcase the chef preparing a signature dish.
        - I want to add a sequence of customers sharing their experiences at the restaurant.
        - I want to highlight the farm-to-table sourcing of ingredients with a short segment showing local farms.
        - I want to include a time-lapse of the restaurant transforming from day to night, capturing its unique ambiance.
        - I want to feature a quick interview with the owner sharing the story behind the restaurant.

        Now, I will provide you with the storyline. The storyline is:\n{story}
        Please remember, don't give any introduction or explanations. Just generate 5 sentences directly, focusing on creative suggestions for diversifying or modifying the storyline. '''

        run: RunResponse = self._build_agent().run(prompt)
        generated_text = run.content
        # Split on sentence terminators and drop empty fragments.
        sentences = [s.strip() for s in re.split(r'[.?]', generated_text) if s.strip()]
        print('The sentences are:', sentences)
        # Keep the last 5 (was [-4:], an off-by-one against the prompt's
        # "exactly 5 sentences" contract).
        return sentences[-5:]

    def on_select_topic(self, selected_topic, history_stack, current_state):
        """Drill into *selected_topic*: push the current state onto the
        history stack, regenerate the storyline with the topic as the focus,
        and return the new state plus Gradio component updates."""
        history_stack.append(current_state)

        # Regenerate with the selected topic as the additional focus.
        new_response = self.generate_response(
            self.original_query, additional_focus=selected_topic
        )
        new_topics = self.extract_topics(new_response)

        new_state = {
            "response": new_response,
            "topics": new_topics,
            "key_topics": new_topics,
        }

        return (
            new_state,
            history_stack,
            gr.update(value=new_response),
            gr.update(choices=new_topics),
        )

    def on_back(self, history_stack):
        """Pop the previous state off *history_stack* and restore the
        response box and topic choices; clears both when history is empty."""
        if history_stack:
            previous_state = history_stack.pop()
            return (
                history_stack,
                gr.update(value=previous_state["response"]),
                gr.update(choices=previous_state["key_topics"]),
            )

        # No history left: clear outputs.
        return history_stack, gr.update(value=""), gr.update(choices=[])
187
+
188
class ResponseGeneratorApp:
    """Gradio UI around ResponseGenerator: a query box, an optional username
    filter, the generated storyline, brainstorming topics to drill into, and
    a back button for history navigation."""

    def __init__(self):
        self.point_generator = ResponseGenerator()

    def build_ui(self):
        """Assemble and return the Gradio Blocks demo (not launched here)."""
        with gr.Blocks() as demo:
            # Markdown headers need a space after '#' to render; the original
            # "#Brainstorming" displayed as literal text.
            gr.Markdown(
                """
                # Brainstorming App
                Enter a query to generate a detailed response and start brainstorming for further exploration.
                """
            )

            query_input = gr.Textbox(
                label="Enter your query",
                placeholder="Type a query, e.g., 'I want to create a promotional video of Begnas Lake.'",
                lines=2,
            )

            # Usernames whose stories can be retrieved as prompt context.
            usernames = [
                "_travelwithsapana", "givina_9", "rajen.rb", "wh0z.khu5h1", "palam061",
                "prettiest_sky", "explorepokhara", "ggkaam610", "anjana_dhl1",
            ]

            username_inputs = gr.CheckboxGroup(
                choices=usernames,
                label="Choose one or more usernames whose similar story you want:",
                type="value",
            )

            generate_btn = gr.Button(value="Generate")

            # Output box for the generated storyline.
            response_output = gr.Textbox(
                label="Generated Response",
                lines=10,
                interactive=False,
            )

            # Dynamically populated radio buttons for the extracted topics.
            topics_radio = gr.Radio(
                label="Brainstorming Areas...",
                choices=[],
                type="value",
                interactive=True,
            )

            back_btn = gr.Button(value="Back")

            # State: the current {response, topics, key_topics} dict and a
            # stack of previous states for the Back button.
            current_state = gr.State({})
            history_stack = gr.State([])

            # The original wrapped generate_handler in a pass-through lambda;
            # binding the bound method directly is equivalent.
            generate_btn.click(
                fn=self.generate_handler,
                inputs=[query_input, username_inputs],
                outputs=[current_state, response_output, topics_radio],
            )

            # Selecting a topic drills one stage deeper.
            topics_radio.change(
                fn=self.point_generator.on_select_topic,
                inputs=[topics_radio, history_stack, current_state],
                outputs=[current_state, history_stack, response_output, topics_radio],
            )

            # Back restores the previous stage.
            back_btn.click(
                fn=self.point_generator.on_back,
                inputs=[history_stack],
                outputs=[history_stack, response_output, topics_radio],
            )

        return demo

    def generate_handler(self, query, usernames):
        """Generate a storyline + its brainstorming topics and package them
        as the (state, response update, topics update) triple Gradio expects."""
        response = self.point_generator.generate_response(query, usernames)
        topics = self.point_generator.extract_topics(response)

        current_state = {
            "response": response,
            "topics": topics,
            "key_topics": topics,
        }

        return current_state, gr.update(value=response), gr.update(choices=topics)
279
+
280
+