Spaces:
Runtime error
Runtime error
create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import requests
|
3 |
+
import os
|
4 |
+
import PIL
|
5 |
+
from PIL import Image
|
6 |
+
from PIL import ImageDraw
|
7 |
+
from PIL import ImageFont
|
8 |
+
|
9 |
+
## Bloom — text-generation backend configuration.
# Hosted Inference API endpoint for the BigScience Bloom model.
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
# NOTE: raises KeyError at import time if HF_TOKEN is not set in the
# environment (it is configured as a Space secret).
HF_TOKEN = os.environ["HF_TOKEN"]
# Bearer-token header sent with every Inference API request.
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
|
13 |
+
|
14 |
+
|
15 |
+
def write_on_image(final_solution):
    """Render Bloom's generated labels onto the 'distracted boyfriend' meme.

    Parses *final_solution* (alternating ``Distracted from: X`` / ``by: Y``
    lines, prompt plus completion), takes the newest generated pair as the
    caption labels, draws them onto ``./distracted0.jpg``, and returns the
    image together with the trimmed prompt to feed back into the textbox.

    Parameters
    ----------
    final_solution : str
        Full text returned by the model (the few-shot prompt followed by
        the generated continuation).

    Returns
    -------
    (PIL.Image.Image, str)
        The captioned meme image and the refreshed prompt text (same
        number of example lines, ending with an open ``Distracted from:``
        stub for the next generation).
    """
    print("************ Inside write_on_image ***********")
    image_path0 = "./distracted0.jpg"
    image0 = Image.open(image_path0)
    I1 = ImageDraw.Draw(image0)
    myfont = ImageFont.truetype('./font1.ttf', 30)

    prompt_list = final_solution.split('\n')
    # Line index 8 holds the newest 'Distracted from: <label>' pair
    # produced by the model (assumes the 4-example prompt shape used by
    # meme_generate — TODO confirm if the default prompt changes).
    girlfriend = prompt_list[8].split(':')[1].strip()
    girlfriend_list = girlfriend.split()
    # Stack multi-word labels vertically so they fit on the image.
    if len(girlfriend_list) >= 2:
        girlfriend = '\n'.join(girlfriend_list)
    print(f"girlfriend is : {girlfriend }")
    # Line index 9 holds the matching 'by: <label>'.
    new_girl = prompt_list[9].split(':')[1].strip()
    new_girl_list = new_girl.split()
    # Fix: use >= 2 like the girlfriend label above (was > 2, which left
    # exactly-two-word labels unwrapped and overflowing the image).
    if len(new_girl_list) >= 2:
        new_girl = '\n'.join(new_girl_list)
    print(f"new_girl is : {new_girl}")
    # Drop the first example pair and keep the next eight lines so the
    # prompt shown back to the user stays the same length, then re-open
    # it with a trailing stub for the next round.
    prompt_list.pop(0)
    prompt_list.pop(0)
    prompt_list = prompt_list[:8]
    prompt_list.append('Distracted from:')
    print(f"prompt list is : {prompt_list}")
    new_prompt = '\n'.join(prompt_list)
    print(f"final_solution is : {new_prompt}")

    # Pixel coordinates match the three label positions on distracted0.jpg.
    I1.text((613, 89), girlfriend,font=myfont, fill =(255, 255, 255))
    I1.text((371, 223), "ME", font=myfont, fill =(255, 255, 255))
    I1.text((142, 336), new_girl,font=myfont, fill =(255, 255, 255))

    return image0, new_prompt
|
46 |
+
|
47 |
+
def meme_generate(img, prompt, temp, top_p): #prompt, generated_txt): #, input_prompt_sql ): #, input_prompt_dalle2):
    """Query Bloom via the HF Inference API and build the captioned meme.

    Parameters
    ----------
    img :
        Unused; kept so the Gradio ``b1.click`` input wiring stays valid.
    prompt : str
        Few-shot prompt. When empty, a built-in default prompt of four
        'Distracted from:/by:' examples is used.
    temp : float
        Sampling temperature forwarded to the API (UI default 1.1).
    top_p : float
        Nucleus-sampling parameter forwarded to the API (UI default 0.90).

    Returns
    -------
    (PIL.Image.Image, str)
        Captioned meme image and the refreshed prompt text.

    Raises
    ------
    ValueError
        If the Inference API responds with an error payload.
    """

    print(f"*****Inside meme_generate - Prompt is :{prompt}")
    if len(prompt) == 0:
        prompt = """Distracted from: homework\nby: side project\nDistracted from: goals\nby: new goals\nDistracted from: working hard\nby: hardly working\nDistracted from: twitter\nby: open in browser\nDistracted from:"""

    json_ = {"inputs": prompt,
            "parameters":
            {
            "top_p": top_p, #0.90 default
            "max_new_tokens": 250,
            "temperature": temp, #1.1 default
            "return_full_text": True,
            "do_sample": True,
            },
            "options":
            {"use_cache": True,
            "wait_for_model": True,
            },}
    response = requests.post(API_URL, headers=headers, json=json_)
    print(f"Response is : {response}")
    output = response.json()
    print(f"output is : {output}")
    # Robustness fix: the Inference API returns a dict like
    # {'error': ..., 'estimated_time': ...} on failure (model loading,
    # rate limit, bad token). Previously this fell through to
    # output[0]['generated_text'] and died with a cryptic KeyError;
    # surface the API's message instead.
    if isinstance(output, dict) and 'error' in output:
        raise ValueError(f"Bloom API error: {output['error']}")
    output_tmp = output[0]['generated_text']
    print(f"output_tmp is: {output_tmp}")
    # Trim any spurious "\nQ:" continuation the model sometimes appends.
    solution = output_tmp.split("\nQ:")[0]
    print(f"Final response after splits is: {solution}")

    meme_image, new_prompt = write_on_image(solution)
    return meme_image, new_prompt
|
77 |
+
|
78 |
+
|
79 |
+
# --- Gradio UI wiring (runs at import time; blocks in demo.launch) ---
demo = gr.Blocks()

with demo:
    # Page title. NOTE(review): the stray characters look like UTF-8
    # mojibake of emoji from a bad re-encode — confirm against the
    # rendered Space before "fixing" them.
    gr.Markdown("<h1><center>Distracted Boyfriend Memeπ- Brought to you by Bloom πΈ </center></h1>")
    gr.Markdown(
        """Bloom is a model made by research teams from [HuggingFace](https://huggingface.co/bigscience/bloom) and world over (more than 1000 researchers coming together and working as [BigScienceW Bloom](https://twitter.com/BigscienceW)).\n\nLarge language models can produce coherent sentences but can they produce **Humor** too? Yes, they can, given the correct prompt (Yes, Prompt Engineering π€ has definitely become a thing).\n\n**How to Use this App**: Just Fire Away the Generate Meme button below!!\n\n**How this App works**: Figuring out the right set of Prompting + Writing on an Image + Bit of engineering. Currently, Bloom's Public API has size-limits on Token-Generation, yo can get only very few tokens generated in one go.\n\n<pre>Bloom generating very few tokens When Few words are Enough</pre>\n\n<pre> π€</pre>\n\n\nIt is a fun little App which you can play with for a while.\n\nThis Space is created by [Yuvraj Sharma](https://twitter.com/yvrjsharma)"""
    )
    with gr.Row():

        # Hidden base meme image; passed (unused) into meme_generate.
        in_image = gr.Image(value="./distracted0.jpg", visible=False)
        # Visible preview copy of the blank meme template.
        in_image_display = gr.Image(value="./distracted00.jpg", visible=True)
        # Hidden prompt box: meme_generate substitutes a default prompt
        # when it is empty, and b1.click writes the refreshed prompt back
        # into it after each generation.
        input_prompt = gr.Textbox(label="Write some prompt...", lines=5, visible=False)

        # Output slot for the captioned meme.
        output_image = gr.Image()

    with gr.Row():
        # Sampling controls forwarded to the Bloom Inference API.
        in_slider_temp = gr.Slider(minimum=0.0, maximum=1.4, value=1.1, step=0.1, label='Temperature')
        in_slider_top_p = gr.Slider(minimum=0.50, maximum=0.99, value=0.90, step=0.01, label='Top_p')


    b1 = gr.Button("Generate")

    # Generate: runs the model, shows the captioned image, and refreshes
    # the (hidden) prompt textbox for the next round.
    b1.click(meme_generate, inputs=[in_image, input_prompt, in_slider_temp, in_slider_top_p] , outputs=[output_image,input_prompt])

demo.launch(enable_queue=True, debug=True)
|