|
import gradio as gr |
|
import os |
|
import sys |
|
import random |
|
import string |
|
import time |
|
from queue import Queue |
|
from threading import Thread |
|
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast |
|
|
|
|
|
# Arabic->English translation model used by get_prompts() below.
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")

# BUG FIX: the original bound `tokenizer` to the MBart50TokenizerFast *class*
# rather than an instance, so calling tokenizer(...) would fail at runtime.
# Instantiate it from the same checkpoint as the model.
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
|
|
|
|
|
|
|
|
|
# Hugging Face Inference API path for the translation model, loaded as a
# Gradio interface.
API_URL = "models/facebook/mbart-large-50-many-to-one-mmt"

# Read token comes from the environment; None if unset, in which case the
# header below carries the literal "Bearer None" -- presumably falling back
# to the public, rate-limited tier. TODO confirm.
API_TOKEN = os.environ.get("HF_READ_TOKEN")

headers = {"Authorization": f"Bearer {API_TOKEN}"}
# NOTE(review): text_gen is not referenced anywhere in this chunk; it may be
# used elsewhere in the file.
text_gen = gr.Interface.load(API_URL, headers=headers)

# Image-generation backend used as the default `proc` in send_it1().
proc1 = gr.Interface.load("models/openskyml/midjourney-v4-xl")
|
|
|
|
|
|
|
|
|
|
|
def restart_script_periodically():
    """Watchdog loop: re-exec this script every 9-10 minutes.

    Sleeps for a random interval between 540 and 600 seconds, then replaces
    the current process image with a fresh invocation of the same
    interpreter and arguments (os.execl never returns on success).
    Runs forever; intended to be started on a daemon thread.
    """
    while True:
        # Randomized interval so restarts don't align with external schedules.
        time.sleep(random.randint(540, 600))
        os.execl(sys.executable, sys.executable, *sys.argv)
|
|
|
# Start the restart watchdog in the background; daemon=True so the thread
# never blocks interpreter shutdown.
restart_thread = Thread(target=restart_script_periodically, daemon=True)
restart_thread.start()

# Crude concurrency throttle shared by send_it1(): callers block while the
# queue holds queue_threshold (or more) in-flight prompts.
# NOTE(review): nothing visible in this file ever get()s from this queue, so
# it only grows until the periodic restart clears it -- see send_it1().
queue = Queue()
queue_threshold = 100
|
|
|
def add_random_noise(prompt, noise_level=0.00):
    """Randomly replace a fraction of *prompt*'s characters with noise.

    The number of replaced positions is ``int(len(prompt) * noise_level * 5 / 100)``
    (i.e. noise_level 1.0 replaces ~5% of the characters).  Replacements are
    drawn from ASCII letters/digits/punctuation, a space, and a set of emoji.

    Args:
        prompt: input text; returned unchanged when empty or when
            noise_level is 0 (or negative).
        noise_level: UI slider value, expected in [0, 3].

    Returns:
        The prompt with the selected positions replaced.
    """
    # Guard clause replaces the original's dead `if noise_level == 0:
    # noise_level = 0.00` branch; behavior for noise_level == 0 is identical
    # (zero characters replaced).
    if not prompt or noise_level <= 0:
        return prompt

    percentage_noise = noise_level * 5
    num_noise_chars = int(len(prompt) * (percentage_noise / 100))
    noise_indices = random.sample(range(len(prompt)), num_noise_chars)

    prompt_list = list(prompt)
    noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits)
    # BUG FIX(encoding): the original emoji literals were mojibake (UTF-8
    # emoji mis-decoded during an encoding round-trip); replaced with a valid
    # set of single-codepoint emoji in the same spirit.
    noise_chars.extend([
        '😍', '💩', '😂', '🤔', '😎', '🤗', '😭', '😅', '😷', '🤯', '🤫', '🥴',
        '😴', '🤩', '🥳', '😔', '😩', '🤪', '😇', '🤢', '🌚', '👹', '👻', '🤖',
        '👽', '💀', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤',
        '💔', '💖', '💙', '💚', '🐶', '🐱', '🐭', '🐹', '🦊', '🐻', '🐨', '🐯',
        '🦁', '🐘', '🔥', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨',
        '🌅', '🌉', '☀', '❄', '🥶', '🥵', '🥺', '👍', '👎', '👏', '🙌', '🤝', '👀',
    ])

    for index in noise_indices:
        prompt_list[index] = random.choice(noise_chars)
    return "".join(prompt_list)
|
|
|
|
|
|
|
# NOTE(review): uuid appears unused in this chunk; kept in case another part
# of the file relies on it.
import uuid

# Monotonically increasing request id; combined with time.time() to build
# unique prompt suffixes (defeats upstream result caching) in send_it1()
# and get_prompts().
request_counter = 0
|
|
|
def send_it1(inputs, noise_level, proc=proc1):
    """Generate an image from *inputs* via *proc*, adding noise and a unique tag.

    A ``time.time()``/counter suffix is appended so repeated identical prompts
    are not served from an upstream cache.  The module-level ``queue`` acts as
    a crude concurrency throttle: callers wait while it is full.

    Args:
        inputs: user prompt text.
        noise_level: slider value forwarded to add_random_noise().
        proc: callable mapping a prompt string to an image; defaults to the
            midjourney-v4-xl interface loaded at module scope.

    Returns:
        Whatever *proc* returns for the noised prompt.

    Raises:
        gr.Error: when the underlying generation call fails.
    """
    global request_counter
    request_counter += 1
    # Unique suffix defeats result caching for identical prompts.
    timestamp = f"{time.time()}_{request_counter}"
    prompt_with_noise = add_random_noise(inputs, noise_level) + f" - {timestamp}"

    # Throttle: wait while the queue is at capacity.
    while queue.qsize() >= queue_threshold:
        time.sleep(2)
    queue.put(prompt_with_noise)
    try:
        return proc(prompt_with_noise)
    except Exception as e:
        # Surface a friendly message in the UI; chain the original cause so
        # it still shows up in logs/tracebacks.
        raise gr.Error("Experiencing high demand. Please retry shortly. Thank you for your patience.") from e
    finally:
        # BUG FIX: the original never removed entries from the queue, so once
        # queue_threshold requests had been made every later call blocked
        # forever (until the periodic restart).  Drain our own entry here.
        try:
            queue.get_nowait()
        except Exception:
            pass
|
|
|
# NOTE(review): this section previously re-imported `random` and `time`
# (already imported at the top of the file) and re-loaded the translation
# model a second time; those exact duplicates were removed.  The tokenizer
# binding below is the one get_prompts() actually sees at runtime (last
# assignment wins), so it must be a real instance.
# BUG FIX: the original assigned the MBart50TokenizerFast *class* here;
# instantiate it from the model's checkpoint instead.
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
|
|
|
def get_prompts(prompt_text):
    """Translate an Arabic prompt to English with the module-level mBART model.

    Args:
        prompt_text: user-entered text; expected Arabic (src_lang is ar_AR).

    Returns:
        The English translation, or a bilingual reminder string when the
        input is empty/falsy.
    """
    if not prompt_text:
        # BUG FIX(encoding): the Arabic half of this message was mojibake in
        # the original source; restored to readable Arabic ("please enter the
        # text first").
        return "Please enter text before generating prompts.رجاء ادخل النص اولا"

    # NOTE(review): removed a request_counter increment and timestamp
    # f-string whose result was never read (dead code).
    tokenizer.src_lang = "ar_AR"
    encoded_ar = tokenizer(prompt_text, return_tensors="pt")
    generated_tokens = model.generate(**encoded_ar)
    translated_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
    return translated_text
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Gradio UI (legacy 3.x `.style()` API): a "short idea" box translated into a
# full prompt via get_prompts(), then two parallel image generations via
# send_it1() with a shared noise slider.
# ---------------------------------------------------------------------------
with gr.Blocks(css=".gradio-container {background-color: #F5F5F5;} .dark .gradio-container {background-color: linear-gradient(to top, #09203f 0%, #537895 100%);} footer{display:none !important;}",) as demo:


    with gr.Column(elem_id="col-container"):
        with gr.Row(variant="compact"):
            # Free-form idea box; feeds get_prompts() (Arabic -> English).
            input_text = gr.Textbox(
                lines=8,
                label="Short Prompt",
                show_label=False,
                max_lines=10,
                placeholder="Enter a basic idea and click 'Magic Prompt'. Got no ideas? No problem, Simply just hit the magic button!",
            ).style(
                container=False,
                textarea={'height': '400px'}
            )
            # NOTE(review): button label text is encoding-garbled in the
            # original source (likely sparkle emoji); left byte-identical here.
            see_prompts = gr.Button("โจ Magic Prompt โจ").style(full_width=False)

        with gr.Row(variant="compact"):
            # Full prompt actually sent to the image model by send_it1().
            prompt = gr.Textbox(
                lines=8,
                label="Enter your prompt",
                show_label=False,
                max_lines=10,
                placeholder="Full Prompt",
            ).style(
                container=False,
                textarea={'height': '400px'}
            )
            run = gr.Button("Generate Images").style(full_width=False)

        with gr.Row():
            with gr.Row():
                # Controls add_random_noise(); 0 disables noise entirely.
                noise_level = gr.Slider(minimum=0.0, maximum=3, step=0.1, label="Noise Level")

        with gr.Row():
            with gr.Row():
                # NOTE(review): labels say "Dreamlike Diffusion 1.0" but the
                # loaded backend is midjourney-v4-xl -- presumably stale copy;
                # labels are hidden (show_label=False) so it is cosmetic only.
                output1 = gr.Image(label="Dreamlike Diffusion 1.0", show_label=False, show_share_button=False)
                output2 = gr.Image(label="Dreamlike Diffusion 1.0", show_label=False, show_share_button=False)

    # Translate the short idea into the full prompt box.
    see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], queue=False)
    # Two independent clicks of the same handler -> two images per press
    # (each call gets a unique timestamp suffix, so the results differ).
    run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1])
    run.click(send_it1, inputs=[prompt, noise_level], outputs=[output2])



demo.launch(enable_queue=True, inline=True)
|
|