import gradio as gr
import asyncio
import time
from typing import List, Dict
from openai import OpenAI, AsyncOpenAI
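
# Upper bound on the subconcept slider; also the number of pre-built (hidden) lesson tabs.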
MAX_SUBCONCEPTS = 25

async def call_openai_api(sn_api_key, prompt: str) -> Dict:
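    """Request a detailed explanation for one subconcept prompt.

    A fresh AsyncOpenAI client is created per call; reusing a single shared
    client would be cheaper, but this keeps each task self-contained.
    """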
    sn_dev_client_async = AsyncOpenAI(
        base_url="https://api.sambanova.ai/v1",
        api_key=sn_api_key
    )
    response = await sn_dev_client_async.chat.completions.create(
        model="Meta-Llama-3.2-3B-Instruct",
        messages=[
            {
                "role": "system",
                "content": "You are a knowledge graph generator. Generate detailed answers to questions posed about subconcepts and give an educated response as if you were a professor explaining to a student. Where applicable, add code examples to solidify the concept for the student."
            },
            {
                "role": "user",
                "content": prompt
            }
        ],
    )
    return response
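
# Fan the subconcept prompts out concurrently; asyncio.gather returns results
# in the same order as the input tasks, so responses stay aligned with prompts.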
async def make_multiple_openai_calls(sn_api_key, prompts: List[str]) -> List[Dict]:
    tasks = [call_openai_api(sn_api_key, prompt) for prompt in prompts]
    return await asyncio.gather(*tasks)

def process_concept(sn_api_key, concept, num_subconcepts, progress=gr.Progress()) -> tuple:
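    """Gradio handler: break a concept into subconcepts, explain each one in
    parallel, then summarize everything into a lesson-plan introduction."""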
    start_time = time.time()
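    # Synchronous client for the two one-off calls (concept breakdown and final summary).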
    sn_dev_client = OpenAI(
        base_url="https://api.sambanova.ai/v1",
        api_key=sn_api_key
    )
    progress(0, "Identifying subconcepts")
    # Single API call to break down the concept
    response = sn_dev_client.chat.completions.create(
        model="Meta-Llama-3.2-3B-Instruct",
        messages=[
            {
                "role": "user",
                "content": f"""Create a set of subconcepts from this concept: {concept}.
                Do this by breaking the concept down into multiple subconcepts, each on a new line, so the output is easy to parse in the following way. Note that in the example below there is no additional text except for the subconcepts and new lines.
                Example (for the concept "machine learning"):
                gradient descent
                neuron training
                loss function
                optimization functions
                backpropagation
                """
            }
        ],
    )
    result = response.choices[0].message.content
    # Clean up the response: drop blank lines, then dedupe while preserving
    # order (a plain set() would scramble the model's ordering before slicing)
    subconcepts = [s.strip() for s in result.strip().split('\n') if s.strip()]
    subconcepts = list(dict.fromkeys(subconcepts))
    num_total_subconcepts = len(subconcepts)
    progress(0.2, "Preparing subconcepts")
    # Cap the run at the number of subconcepts the user requested
    lmt = min(num_subconcepts, num_total_subconcepts)
    subconcepts = subconcepts[:lmt]
    prompts = [
        f"Please give a detailed explanation of this subconcept: {subconcept} in the context of this original concept: {concept}" for subconcept in subconcepts
    ]
    progress(0.3, f"Generating explanations for {len(subconcepts)} subconcepts in parallel")
    # Run the async fan-out from this synchronous handler; asyncio.run creates
    # and tears down the event loop for us
    results = asyncio.run(make_multiple_openai_calls(sn_api_key, prompts))
    # Extract strings from the results
    responses = [result.choices[0].message.content for result in results]
    progress(0.6, f"Summarizing explanations to create an intro for {len(subconcepts)} subconcepts")
    # Summarize results - using a synchronous call
    content_to_summarize = ""
    for subconcept, response in zip(subconcepts, responses):
        content_to_summarize += f"## {subconcept.title()}\n\n{response}\n\n---\n\n"
    response = sn_dev_client.chat.completions.create(
        model="Meta-Llama-3.1-8B-Instruct",  # need longer context
        messages=[
            {
                "role": "user",
                "content": f"""Summarize the results for {concept} by creating an introduction for the class that incorporates
                the subconcepts: {" ".join(subconcepts)}. Here is all of the information you want to summarize:
                {content_to_summarize}
                Please present this as an introduction to a class on
                {concept}.
                """
            }
        ],
    )
    intro_summary = response.choices[0].message.content
    end_time = time.time()
    total_time = end_time - start_time
    progress(0.9, "Formatting output")
    # Format the output in Markdown; report the actual number of calls made
    # (the model may return fewer subconcepts than requested)
    perf_markdown = "### Performance\n\n"
    perf_markdown += f"**# of Llama 3.2 3B calls made to SambaNova's API:** {len(subconcepts) + 1}\n\n"
    perf_markdown += "**# of Llama 3.1 8B calls made to SambaNova's API:** 1 (summary)\n\n"
    perf_markdown += f"**Total time taken:** {total_time:.2f} seconds\n\n"
    markdown_intro = f"# Lesson Plan: {concept.title()}\n\n"
    markdown_intro += intro_summary
    subconcept_markdowns = []
    for subconcept, response in zip(subconcepts, responses):
        subconcept_markdowns.append(f"## {subconcept.title()}\n\n{response}\n\n")
    progress(1.0, "Complete")
    # Update the tabs (and their corresponding contents) with content for each lesson
    new_tabs = []
    new_tab_contents = []
    for i in range(len(subconcept_markdowns)):
        new_tabs.append(gr.update(label=f"Lesson {i+1}: {subconcepts[i].title()}", visible=True, render=True))
        new_tab_contents.append(gr.update(value=subconcept_markdowns[i], visible=True))
    # Hide the unused tabs and their contents
    new_tabs.extend([gr.update(visible=False, render=False) for _ in range(MAX_SUBCONCEPTS - len(subconcept_markdowns))])
    new_tab_contents.extend([gr.update(visible=False) for _ in range(MAX_SUBCONCEPTS - len(subconcept_markdowns))])
    return "Process complete!", perf_markdown, markdown_intro, *new_tabs, *new_tab_contents
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Lesson Plan Generator
        **Ever wanted to learn something new but struggled to break the concept down into more digestible subconcepts? In this demo, we use <a href="https://cloud.sambanova.ai">SambaNova's</a> superfast Llama 3.2 3B and Llama 3.1 8B models to expound on the concept and its subconcepts, provide a detailed lesson for each subconcept, and summarize the lesson plan.**
        *To use this, follow the instructions below:*
        1. Navigate to <a href="https://cloud.sambanova.ai">https://cloud.sambanova.ai</a>, log in, and copy your API key
        2. Paste it in the SambaNova API Key box below
        3. Enter a concept you are interested in (e.g. Variational Autoencoders)
        4. Choose the number of subconcepts you want to break your lessons into
        5. Click 'Generate Lesson Plan'
        6. Wait a few seconds for the multiple Llama 3B and 8B calls to finish
        7. Read through and enjoy your lesson plans (navigate to the tabs at the bottom)
        """,
        elem_id="header", container=True
    )
    with gr.Column():
        sn_api_key_input = gr.Textbox(label="Enter your SambaNova API Key (https://cloud.sambanova.ai)", type="password")
        with gr.Row():
            concept_input = gr.Textbox(label="Enter a concept", placeholder="e.g., Artificial Intelligence")
            slider = gr.Slider(minimum=1, maximum=MAX_SUBCONCEPTS, value=5, label="Number of subconcepts", step=1)
        generate_btn = gr.Button("Generate Lesson Plan", variant="primary", size="lg")
    with gr.Column():
        progress_output = gr.Textbox(label="Progress", interactive=False)
        performance_output = gr.Markdown(label="Performance", container=True, value="Performance")
        lesson_intro = gr.Markdown(label="Lesson Intro", container=True, value="Lesson Intro")
    tab_contents = []
    with gr.Tabs() as tabs:
        for i in range(MAX_SUBCONCEPTS):  # Initial set of tabs, hidden until populated
            with gr.Tab(f"Lesson {i+1}", visible=False):
                tab_md = gr.Markdown(f"This is content for Lesson {i+1}", container=True)
                tab_contents.append(tab_md)
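
    # Output order must match process_concept's return tuple:
    # status text, performance markdown, intro, then all tabs, then all tab contents.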
    generate_btn.click(
        process_concept,
        inputs=[sn_api_key_input, concept_input, slider],
        outputs=[progress_output, performance_output, lesson_intro] + [tabs.children[i] for i in range(MAX_SUBCONCEPTS)] + tab_contents
    )

demo.launch()