import os

import gradio as gr
import requests
from PIL import Image, ImageDraw, ImageFont
# BLOOM inference endpoint; the HF_TOKEN secret must be set in the environment.
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
HF_TOKEN = os.environ["HF_TOKEN"]
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
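
# Assets assumed to sit next to app.py (all referenced below): the meme template
# ./distracted0.jpg that gets captioned, the preview image ./distracted00.jpg
# shown in the UI, and the TrueType font ./font1.ttf used for the captions.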
def write_on_image(final_solution):
    """Overlay the generated 'Distracted from:' / 'by:' phrases onto the meme template."""
    print("************ Inside write_on_image ***********")
    image_path0 = "./distracted0.jpg"
    image0 = Image.open(image_path0)
    I1 = ImageDraw.Draw(image0)
    myfont = ImageFont.truetype('./font1.ttf', 30)

    # The prompt ends with an open "Distracted from:" line; BLOOM completes it and
    # the following "by:" line, which become the two captions written on the image.
    prompt_list = final_solution.split('\n')
    girlfriend = prompt_list[8].split(':')[1].strip()
    girlfriend_list = girlfriend.split()
    if len(girlfriend_list) >= 2:
        girlfriend = '\n'.join(girlfriend_list)
    print(f"girlfriend is : {girlfriend}")
    new_girl = prompt_list[9].split(':')[1].strip()
    new_girl_list = new_girl.split()
    if len(new_girl_list) > 2:
        new_girl = '\n'.join(new_girl_list)
    print(f"new_girl is : {new_girl}")

    # Roll the prompt forward: drop the oldest example pair, keep the next eight
    # lines (including the freshly generated pair), and end with an open
    # "Distracted from:" so the next click continues from here.
    prompt_list.pop(0)
    prompt_list.pop(0)
    prompt_list = prompt_list[:8]
    prompt_list.append('Distracted from:')
    print(f"prompt list is : {prompt_list}")
    new_prompt = '\n'.join(prompt_list)
    print(f"new_prompt is : {new_prompt}")

    # Caption the three figures in the template.
    I1.text((613, 89), girlfriend, font=myfont, fill=(255, 255, 255))
    I1.text((371, 223), "ME", font=myfont, fill=(255, 255, 255))
    I1.text((142, 336), new_girl, font=myfont, fill=(255, 255, 255))
    return image0, new_prompt
def meme_generate(img, prompt, temp, top_p):
    """Query BLOOM with the running prompt and render the completion onto the meme."""
    print(f"*****Inside meme_generate - Prompt is :{prompt}")
    # On the first click the (hidden) prompt textbox is empty, so seed it with a
    # few few-shot examples ending in an open "Distracted from:" line.
    if len(prompt) == 0:
        prompt = """Distracted from: homework\nby: side project\nDistracted from: goals\nby: new goals\nDistracted from: working hard\nby: hardly working\nDistracted from: twitter\nby: open in browser\nDistracted from:"""

    json_ = {
        "inputs": prompt,
        "parameters": {
            "top_p": top_p,            # 0.90 default
            "max_new_tokens": 250,
            "temperature": temp,       # 1.1 default
            "return_full_text": True,
            "do_sample": True,
        },
        "options": {
            "use_cache": True,
            "wait_for_model": True,
        },
    }
    response = requests.post(API_URL, headers=headers, json=json_)
    print(f"Response is : {response}")
    output = response.json()
    print(f"output is : {output}")
    output_tmp = output[0]['generated_text']
    print(f"output_tmp is: {output_tmp}")
    # Keep only the meme lines; discard anything BLOOM generates after a stray "\nQ:".
    solution = output_tmp.split("\nQ:")[0]
    print(f"Final response after splits is: {solution}")
    meme_image, new_prompt = write_on_image(solution)
    return meme_image, new_prompt
demo = gr.Blocks()

with demo:
    gr.Markdown("<h1><center>Distracted Boyfriend Meme 😄 - Brought to you by Bloom 🌸</center></h1>")
    gr.Markdown(
        """Bloom is a model built by research teams from [HuggingFace](https://huggingface.co/bigscience/bloom) and from around the world (more than 1000 researchers coming together and working as [BigScienceW Bloom](https://twitter.com/BigscienceW)).\n\nLarge language models can produce coherent sentences, but can they produce **humor** too? Yes, they can, given the right prompt (yes, Prompt Engineering 🤖 has definitely become a thing).\n\n**How to use this App**: Just fire away with the Generate button below!!\n\n**How this App works**: Figuring out the right prompting + writing on an image + a bit of engineering. Currently, Bloom's public API limits how many tokens can be generated per request, so you only get a few tokens in one go.\n\n<pre>Bloom generating very few tokens When Few words are Enough</pre>\n\n<pre> 🤝</pre>\n\n\nIt is a fun little App which you can play with for a while.\n\nThis Space is created by [Yuvraj Sharma](https://twitter.com/yvrjsharma)"""
    )
    with gr.Row():
        in_image = gr.Image(value="./distracted0.jpg", visible=False)
        in_image_display = gr.Image(value="./distracted00.jpg", visible=True)
        input_prompt = gr.Textbox(label="Write some prompt...", lines=5, visible=False)
        output_image = gr.Image()
    with gr.Row():
        in_slider_temp = gr.Slider(minimum=0.0, maximum=1.4, value=1.1, step=0.1, label='Temperature')
        in_slider_top_p = gr.Slider(minimum=0.50, maximum=0.99, value=0.90, step=0.01, label='Top_p')
        b1 = gr.Button("Generate")

    b1.click(meme_generate, inputs=[in_image, input_prompt, in_slider_temp, in_slider_top_p], outputs=[output_image, input_prompt])

demo.launch(enable_queue=True, debug=True)
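
# To run this Space locally (assuming the image and font assets listed above are
# present alongside app.py and a valid Hugging Face API token is available):
#   HF_TOKEN=<your token> python app.py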