Spaces:
Running
Running
File size: 10,108 Bytes
32cd2af 1b7bcbe 32cd2af 0c1ac8f 32cd2af 1b7bcbe a49071b 89068c4 1b7bcbe 32cd2af 1b7bcbe 32cd2af 59fbbdc 32cd2af 018b873 32cd2af 1b7bcbe 32cd2af 0c9e043 32cd2af 64ec212 32cd2af 1b7bcbe 32cd2af 1b7bcbe 32cd2af 1b7bcbe 32cd2af |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 |
import json
import requests
import gradio as gr
import random
import time
import os
import datetime
from datetime import datetime
from PIL import Image
from PIL import ImageOps
from PIL import Image, ImageDraw, ImageFont
from textwrap import wrap
import json
from io import BytesIO
print('for update')

# Credentials come from the Space's environment; both may be None if unset.
API_TOKEN = os.getenv("API_TOKEN")
DECODEM_TOKEN = os.getenv("DECODEM_TOKEN")

from huggingface_hub import InferenceApi
# BLOOM text-generation client used by infer() below.
inference = InferenceApi("bigscience/bloom", token=API_TOKEN)

headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url_decodemprompts = 'https://us-central1-createinsightsproject.cloudfunctions.net/getdecodemprompts'


def _fetch_prompt(prompt_type):
    """Fetch one prompt template from the decodem cloud function.

    Args:
        prompt_type: server-side template key, e.g. 'ad_text_prompt'.

    Returns:
        The response body decoded as UTF-8, or '' when the request fails.
        (The original inline code printed the exception but then crashed
        with NameError on the undefined response variable.)
    """
    payload = {"prompt_type": prompt_type, "decodem_token": DECODEM_TOKEN}
    try:
        r = requests.post(url_decodemprompts, data=json.dumps(payload), headers=headers)
    except requests.exceptions.RequestException as e:
        # Best-effort: log and fall back to an empty template.
        print(e)
        return ''
    return str(r.content, 'UTF-8')


# Templates fetched once at startup and reused on every request.
prompt_text = _fetch_prompt('ad_text_prompt')
print(prompt_text)
prompt_image = _fetch_prompt('ad_image_prompt')
print(prompt_image)

# Hosted Stable Diffusion endpoint used by generate_image().
ENDPOINT_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1"
HF_TOKEN = API_TOKEN  # same HF token is reused for the image endpoint
def generate_image(prompt_SD: str):
    """Render *prompt_SD* through the hosted Stable Diffusion endpoint.

    Posts the prompt to ENDPOINT_URL with the HF bearer token and decodes
    the returned PNG bytes into a PIL Image.
    """
    request_headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json",
        "Accept": "image/png"  # important to get an image back
    }
    response = requests.post(
        ENDPOINT_URL,
        headers=request_headers,
        json={"inputs": prompt_SD},
    )
    return Image.open(BytesIO(response.content))
def infer(prompt,
          max_length = 250,
          top_k = 0,
          num_beams = 0,
          no_repeat_ngram_size = 2,
          top_p = 0.9,
          seed=42,
          temperature=0.7,
          greedy_decoding = False,
          return_full_text = False):
    """Run BLOOM text generation for *prompt* via the InferenceApi client.

    Normalizes the sampling/beam-search knobs into the parameter dict the
    API expects (0 / greedy flags collapse to None) and returns the raw
    API response.
    """
    print(seed)
    # 0 means "no top-k filtering" for the API.
    if top_k == 0:
        top_k = None
    # Sampling is disabled whenever beam search or greedy decoding is requested.
    if num_beams > 0:
        do_sample = False
    else:
        do_sample = not greedy_decoding
    # Greedy decoding or beams == 0 disables beam search entirely.
    if greedy_decoding or num_beams == 0:
        num_beams = None
    if num_beams is None:
        no_repeat_ngram_size = None
        early_stopping = None
    else:
        early_stopping = num_beams > 0
        # Nucleus sampling is meaningless under beam search.
        top_p = None
    params = {
        "max_new_tokens": max_length,
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "do_sample": do_sample,
        "seed": seed,
        "early_stopping": early_stopping,
        "no_repeat_ngram_size": no_repeat_ngram_size,
        "num_beams": num_beams,
        "return_full_text": return_full_text,
    }
    started = time.time()
    response = inference(prompt, params=params)
    _ = time.time() - started  # processing time, kept for ad-hoc debugging
    return response
def _load_nsfw_words():
    """Download the profanity word list once and cache it for later calls.

    Returns a list of lowercased banned words plus a few manual additions.
    The original code re-fetched this list from GitHub on every request.
    """
    cached = getattr(_load_nsfw_words, "_cache", None)
    if cached is None:
        response_nsfw = requests.get(
            'https://github.com/coffee-and-fun/google-profanity-words/raw/main/data/list.txt'
        )
        cached = [w.lower() for w in response_nsfw.text.split('\n') if w != '']
        cached.extend(['bra', 'gay', 'lesbian', ])
        _load_nsfw_words._cache = cached
    return cached


def getadline(text_inp):
    """Generate a short ad line for product *text_inp* with BLOOM.

    Builds the prompt from the fetched template, strips the echo of the
    prompt from the generation, and replaces the result with a warning
    message when a banned word appears in either the output or the input.
    """
    print(text_inp)
    print(datetime.today().strftime("%d-%m-%Y"))
    text = prompt_text + "\nInput:" + text_inp + "\nOutput:"
    resp = infer(text, seed=random.randint(0, 100))
    generated_text = resp[0]['generated_text']
    result = generated_text.replace(text, '').strip()
    result = result.replace("Output:", "")
    # Keep only the first "###"-delimited section of the generation.
    topic = result.split("###")[0].strip()
    print(topic)
    nsfwlowerlist = _load_nsfw_words()
    topic_lower = topic.lower()
    # Bug fix: the original compared banned words against the raw input,
    # so mixed-case input could bypass the filter.
    input_lower = text_inp.lower()
    # NOTE: substring match — innocuous words containing a banned term
    # can also trigger the filter; kept to match the original behavior.
    if any(word in topic_lower or word in input_lower for word in nsfwlowerlist):
        topic = "Unsafe content found. Please try again with different prompts."
    print(topic)
    return topic
def getadvertisement(topic):
    """Build a side-by-side ad image for *topic* and return its file path.

    Left panel: the generated ad line rendered as wrapped text.
    Right panel: a Stable Diffusion product photograph with a random backdrop.
    Writes 'generation.png', 'textimage.png' and 'finalimage.png' to disk
    and returns the path of the combined image.
    """
    input_keyword = topic
    backdrop = ['surrounded by water droplets', 'in front of a brick wall',
                'in front of a wooden wall', 'in a white boho style studio',
                'with nature backdrop', 'with water splash',
                'laying on a wooden table', ]
    whichitem = random.randint(0, len(backdrop) - 1)
    prompt_SD = 'product photograph of ' + input_keyword + ' ' + backdrop[whichitem] + prompt_image
    # Generate the product image and save it for the final composition.
    image = generate_image(prompt_SD)
    image.save("generation.png")

    # Download the font once and cache the raw bytes on the function object;
    # the original re-fetched Roboto from GitHub on every call.
    font_bytes = getattr(getadvertisement, "_font_bytes", None)
    if font_bytes is None:
        req = requests.get(
            "https://github.com/openmaptiles/fonts/raw/master/roboto/Roboto-Regular.ttf"
        )
        font_bytes = req.content
        getadvertisement._font_bytes = font_bytes
    FONT_USER_INFO = ImageFont.truetype(BytesIO(font_bytes), 75, encoding="utf-8")
    FONT_TEXT = ImageFont.truetype(BytesIO(font_bytes), 75, encoding="utf-8")

    # Image dimensions (pixels) and text layout settings.
    WIDTH = 768
    HEIGHT = 768
    COLOR_TEXT = 'black'
    COORD_TEXT = (10, 128)  # top-left anchor of the ad text
    LINE_MARGIN = 5         # extra space between lines of text

    # Ad line to render on the left panel.
    text = getadline(input_keyword)
    print(text)
    img_name = "textimage"

    # Wrap the ad line into short lines (max 10 chars each) for the panel.
    text_string_lines = wrap(text, 10)
    x, y = COORD_TEXT

    # Throwaway drawing interface used only to measure line heights.
    temp_img_draw_interf = ImageDraw.Draw(Image.new('RGB', (1, 1)))

    def _line_height(line):
        # Pillow >= 10 removed ImageDraw.textsize; prefer textbbox and fall
        # back to textsize for older Pillow versions.
        if hasattr(temp_img_draw_interf, "textbbox"):
            bbox = temp_img_draw_interf.textbbox((0, 0), line, font=FONT_TEXT)
            return bbox[3] - bbox[1]
        return temp_img_draw_interf.textsize(line, font=FONT_TEXT)[1]

    line_height = [_line_height(line) for line in text_string_lines]

    # Draw the wrapped text onto a white canvas, advancing y per line.
    img_final = Image.new('RGB', (WIDTH, HEIGHT), color='white')
    draw_interf = ImageDraw.Draw(img_final)
    for index, line in enumerate(text_string_lines):
        draw_interf.text((x, y), line, font=FONT_USER_INFO, fill=COLOR_TEXT)
        y += line_height[index] + LINE_MARGIN
    img_final.save(f'{img_name}.png')

    # Compose text panel (left) and generated photo (right) side by side.
    im = Image.open(img_name + ".png")
    width_orig, height_orig = im.size
    print(width_orig, height_orig)
    im_bar = Image.open("generation.png")
    print(im_bar.size[0], im_bar.size[1])
    im_bar = im_bar.resize((width_orig, height_orig))
    new_im = Image.new('RGB', (2 * im.size[0], im_bar.size[1]), (250, 250, 250))
    new_im.paste(im, (0, 0))
    new_im.paste(im_bar, (im.size[0], 0))
    new_im.save('finalimage.png')
    return 'finalimage.png'
# Gradio UI: a single textbox feeding getadvertisement, output as an image.
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Ad Ideas for Your Business</center></h1>")
    gr.Markdown(
        """ChatGPT based Insights from <a href="https://www.decodem.ai">Decodem.ai</a> for businesses.\nWhile ChatGPT has multiple use cases we have evolved specific use cases/ templates for businesses \n\n This template provides ideas on how a business can generate Advertisement ideas for a product. Enter a product area and get the results. Use examples as a guide. We use a equally powerful AI model bigscience/bloom."""
    )
    product_box = gr.Textbox(placeholder="Enter product name...", lines=1, label='Your product')
    generate_btn = gr.Button("Generate")
    ad_output = gr.components.Image(label="Your Advertisement")
    generate_btn.click(getadvertisement, inputs=[product_box], outputs=[ad_output])
    gr.Examples(examples=['spectacles', 'rice cooker', 'smart watch', 'coffee mug', ],
                inputs=[product_box])

demo.launch()