import gradio as gr
import os
import spaces
import json
import re
from gradio_client import Client, handle_file
kosmos2_token = os.environ.get("KOSMOS2_TOKEN")
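# The Kosmos-2 call below assumes this token is available in the environment
# (e.g. set as a KOSMOS2_TOKEN secret on the Space); without it the request
# may be rejected if the Kosmos-2 API Space requires authentication.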
def get_caption_from_kosmos(image_in):
    # Ask the Kosmos-2 API Space for a detailed caption of the uploaded image.
    kosmos2_client = Client("fffiloni/Kosmos-2-API", hf_token=kosmos2_token)
    kosmos2_result = kosmos2_client.predict(
        image_input=handle_file(image_in),
        text_input="Detailed",
        api_name="/generate_predictions"
    )
    print(f"KOSMOS2 RETURNS: {kosmos2_result}")

    data = kosmos2_result[1]

    # Extract and combine the caption tokens, skipping the first element
    sentence = ''.join(item['token'] for item in data[1:])

    return sentence
def get_caption_from_MD(image_in):
    client = Client("https://vikhyatk-moondream1.hf.space/")
    result = client.predict(
        image_in,                                        # filepath in 'image' Image component
        "Describe character like if it was fictional",   # str in 'Question' Textbox component
        api_name="/answer_question"
    )
    print(result)
    return result
import torch
from transformers import pipeline

pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", torch_dtype=torch.bfloat16, device_map="auto")
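# Note: a "text-generation" pipeline call returns a list of dicts such as
# [{"generated_text": "<prompt + completion>"}], which is why infer() below
# reads outputs[0]["generated_text"] and then strips the prompt tags.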
@spaces.GPU(enable_queue=True)
def get_llm_idea(user_prompt):
    agent_maker_sys = f"""
You are an AI whose job is to help users create their own chatbot whose personality will reflect the character and scene atmosphere from an image described by users.
In particular, you need to respond succinctly in a friendly tone, write a system prompt for an LLM, a catchy title for the chatbot, and a very short example user input. Make sure each part is included.
The system prompt must not mention any provided image, but you can include the provided details about the character in the system prompt if it makes sense for a more sophisticated LLM persona.
For example, if a user says, "a picture of a man in a black suit and tie riding a black dragon", first give a friendly response, then add the title, system prompt, and example user input.
Immediately STOP after the example input. It should be EXACTLY in this format:

"Sure, I'd be happy to help you build a bot! I'm generating a title, system prompt, and an example input. How do they sound?
\n Title: Dragon Trainer
\n System prompt: Let's say You are a Dragon trainer and your job is to provide guidance and tips on mastering dragons. Use a friendly and informative tone.
\n Example input: How can I train a dragon to breathe fire?"

Here's another example. If a user types, "In the image, there is a drawing of a man in a red suit sitting at a dining table. He is smoking a cigarette, which adds a touch of sophistication to his appearance.", respond:

"Sure, I'd be happy to help you build a bot! I'm generating a title, system prompt, and an example input. How do they sound?
\n Title: Gentleman's Companion
\n System prompt: Let's say You are a sophisticated old man, also known as the Gentleman's Companion. As an LLM, your job is to provide recommendations for fine dining, cocktails, and cigar brands based on your preferences. Use a sophisticated and refined tone.
\n Example input: Can you suggest a good cigar brand for a man who enjoys smoking while dining in style?"
"""

    instruction = f"""
<|system|>
{agent_maker_sys}</s>
<|user|>
"""

    prompt = f"{instruction.strip()}\n{user_prompt}</s>"
    # print(f"PROMPT: {prompt}")

    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
    return outputs
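
# Optional sketch (not part of the original app): the system prompt above asks the
# model to answer with "Title:", "System prompt:" and "Example input:" sections, so
# a small helper like the hypothetical one below could split the generated reply
# into those parts if structured output is ever needed instead of raw text.
# The function name and regex are assumptions, not an existing API.
def parse_llm_sections(generated_reply):
    sections = {}
    for key in ("Title", "System prompt", "Example input"):
        # Capture everything after "<key>:" up to the next section header or end of text.
        match = re.search(
            rf"{key}\s*:\s*(.*?)(?=\n\s*(?:Title|System prompt|Example input)\s*:|\Z)",
            generated_reply,
            flags=re.DOTALL | re.IGNORECASE,
        )
        if match:
            sections[key] = match.group(1).strip()
    return sections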
def infer(image_in, cap_type):
    gr.Info("Getting image description...")
    # Caption-type switch is currently disabled; the Kosmos-2 caption is always used.
    """
    if cap_type == "Fictional":
        user_prompt = get_caption_from_MD(image_in)
    elif cap_type == "Literal":
        user_prompt = get_caption_from_kosmos(image_in)
    """
    user_prompt = get_caption_from_kosmos(image_in)

    gr.Info("Building a system prompt according to the image caption ...")
    outputs = get_llm_idea(user_prompt)

    # Strip everything from the <|system|> tag through the <|assistant|> tag so
    # only the model's reply (title, system prompt and example input) remains.
    pattern = r'\<\|system\|\>(.*?)\<\|assistant\|\>'
    cleaned_text = re.sub(pattern, '', outputs[0]["generated_text"], flags=re.DOTALL)
    print(f"SUGGESTED LLM: {cleaned_text}")

    return user_prompt, cleaned_text.lstrip("\n")
title = f"LLM Agent from a Picture",
description = f"Get a LLM system prompt idea from a picture so you can use it as a kickstarter for your new <a href='https://huggingface.co/chat/assistants'>Hugging Chat Assistant</a>."
css = """
#col-container{
margin: 0 auto;
max-width: 780px;
text-align: left;
}
/* fix examples gallery width on mobile */
div#component-14 > .gallery > .gallery-item > .container > img {
width: auto!important;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML(f"""
        <h2 style="text-align: center;">LLM Agent from a Picture</h2>
        <p style="text-align: center;">{description}</p>
        """)

        with gr.Row():
            with gr.Column():
                image_in = gr.Image(
                    label = "Image reference",
                    type = "filepath",
                    elem_id = "image-in"
                )
                cap_type = gr.Radio(
                    label = "Caption type",
                    choices = [
                        "Literal",
                        "Fictional"
                    ],
                    value = "Fictional"
                )
                submit_btn = gr.Button("Make LLM system from my pic!")
            with gr.Column():
                caption = gr.Textbox(
                    label = "Image caption",
                    elem_id = "image-caption"
                )
                result = gr.Textbox(
                    label = "Suggested System",
                    lines = 6,
                    max_lines = 30,
                    elem_id = "suggested-system-prompt"
                )

        with gr.Row():
            gr.Examples(
                examples = [
                    ["examples/monalisa.png"],
                    ["examples/santa.png"],
                    ["examples/ocean_poet.jpeg"],
                    ["examples/winter_hiking.png"],
                    ["examples/teatime.jpeg"],
                    ["examples/news_experts.jpeg"],
                    ["examples/chicken_adobo.jpeg"]
                ],
                fn = infer,
                inputs = [image_in, cap_type]
            )

        submit_btn.click(
            fn = infer,
            inputs = [
                image_in,
                cap_type
            ],
            outputs = [
                caption,
                result
            ]
        )

demo.queue().launch(show_api=False, show_error=True)
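# Running notes (assumptions, not from the original file): locally this is expected
# to start with `python app.py` once gradio, gradio_client, spaces, torch and
# transformers are installed and KOSMOS2_TOKEN is exported; on Hugging Face Spaces
# the @spaces.GPU decorator requests GPU time for the Zephyr generation step.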