import gradio as gr
import os
import sys
import random
import string
import time
from queue import Queue
from threading import Thread
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
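
# Gradio app: translates an Arabic prompt to English with mBART-50, then sends the result to a
# hosted text-to-image model and displays two generated images.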


# Arabic -> English translation model, loaded locally (mBART-50 many-to-one).
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")


API_URL = "models/facebook/mbart-large-50-many-to-one-mmt"
API_TOKEN = os.environ.get("HF_READ_TOKEN")

headers = {"Authorization": f"Bearer {API_TOKEN}"}
# Hosted copy of the same model; note that get_prompts below uses the local model/tokenizer instead.
text_gen = gr.Interface.load(API_URL, headers=headers)

# Text-to-image backend used by send_it1.
proc1 = gr.Interface.load("models/openskyml/midjourney-v4-xl")


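# Periodically re-exec the current process at a random interval of 9-10 minutes.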
def restart_script_periodically():
    while True:
        random_time = random.randint(540, 600)
        time.sleep(random_time)
        os.execl(sys.executable, sys.executable, *sys.argv)

restart_thread = Thread(target=restart_script_periodically, daemon=True)
restart_thread.start()

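# In-process queue used as a simple throttle: send_it1 waits while the backlog is at or above queue_threshold.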
queue = Queue()
queue_threshold = 100

def add_random_noise(prompt, noise_level=0.00):
    """Replace a random fraction of the prompt's characters with letters, punctuation, digits, or emoji."""
    # A noise_level of 1.0 corrupts roughly 5% of the characters.
    percentage_noise = noise_level * 5
    num_noise_chars = int(len(prompt) * (percentage_noise / 100))
    noise_indices = random.sample(range(len(prompt)), num_noise_chars)
    prompt_list = list(prompt)
    noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits)
    noise_chars.extend(['😍', '💩', '😂', '🤔', '😊', '🤗', '😭', '🙄', '😷', '🤯', '🤫', '🥴', '😴', '🤩', '🥳', '😔', '😩', '🤪', '😇', '🤢', '😈', '👹', '👻', '🤖', '👽', '💀', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', '🐭', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', '🌈'])
    for index in noise_indices:
        prompt_list[index] = random.choice(noise_chars)
    return "".join(prompt_list)


request_counter = 0  # Global counter to track requests

def send_it1(inputs, noise_level, proc=proc1):
    """Add noise and a unique suffix to the prompt, then send it to the image-generation backend."""
    global request_counter
    request_counter += 1
    timestamp = f"{time.time()}_{request_counter}"
    # The timestamp/counter suffix makes otherwise identical prompts unique between requests.
    prompt_with_noise = add_random_noise(inputs, noise_level) + f" - {timestamp}"
    try:
        # Throttle: wait while the backlog is at or above the threshold.
        while queue.qsize() >= queue_threshold:
            time.sleep(2)
        queue.put(prompt_with_noise)
        try:
            output = proc(prompt_with_noise)
        finally:
            # Release this request's slot; without this the queue only grows and the throttle blocks forever.
            queue.get()
        return output
    except Exception:
        # Show a generic message rather than the underlying error.
        raise gr.Error("Experiencing high demand. Please retry shortly. Thank you for your patience.")


def get_prompts(prompt_text):
    """Translate the Arabic input text to English with mBART-50 (used by the 'Magic Prompt' button)."""
    if not prompt_text:
        return "Please enter text before generating prompts. رجاء ادخل النص اولا"
    global request_counter
    request_counter += 1
    timestamp = f"{time.time()}_{request_counter}"

    # Treat the input as Arabic and translate it to English.
    tokenizer.src_lang = "ar_AR"
    encoded_ar = tokenizer(prompt_text, return_tensors="pt")
    generated_tokens = model.generate(**encoded_ar)
    translated_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]

    return translated_text


with gr.Blocks(css=".gradio-container {background-color: #F5F5F5;} .dark .gradio-container {background: linear-gradient(to top, #09203f 0%, #537895 100%);} footer{display:none !important;}") as demo:


    with gr.Column(elem_id="col-container"):
        with gr.Row(variant="compact"):
            input_text = gr.Textbox(
                lines=8,
                label="Short Prompt",
                show_label=False,
                max_lines=10,
                placeholder="Enter a basic idea and click 'Magic Prompt'. Got no ideas? No problem, just hit the magic button!",
            ).style(
                container=False,
                textarea={'height': '400px'}
            )
            see_prompts = gr.Button("โœจ Magic Prompt โœจ").style(full_width=False)

        with gr.Row(variant="compact"):
            prompt = gr.Textbox(
                lines=8,
                label="Enter your prompt",
                show_label=False,
                max_lines=10,
                placeholder="Full Prompt",
            ).style(
                container=False,
                textarea={'height': '400px'}
            )
            run = gr.Button("Generate Images").style(full_width=False)

        with gr.Row():
            with gr.Row():
                noise_level = gr.Slider(minimum=0.0, maximum=3, step=0.1, label="Noise Level")

        with gr.Row():
            with gr.Row():
                output1 = gr.Image(label="Dreamlike Diffusion 1.0", show_label=False, show_share_button=False)
                output2 = gr.Image(label="Dreamlike Diffusion 1.0", show_label=False, show_share_button=False)

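        # Wire the buttons: "Magic Prompt" translates the short prompt into the full prompt box,
        # and "Generate Images" sends the full prompt to the image backend twice (once per output image).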
        see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], queue=False)
        run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1])
        run.click(send_it1, inputs=[prompt, noise_level], outputs=[output2])
        
        
        
demo.launch(enable_queue=True, inline=True)