Esmail-AGumaan committed on
Commit 4ea8388
Parent: 8839614

Create app.py

Files changed (1): app.py (+405, −0)
app.py ADDED
import gradio as gr
from PIL import Image
from pathlib import Path
from transformers import CLIPTokenizer
import torch
import subprocess
import os
import random

from nanograd.models.stable_diffusion import model_loader, pipeline

# Configure devices
DEVICE = "cpu"
ALLOW_CUDA = False
ALLOW_MPS = True

if torch.cuda.is_available() and ALLOW_CUDA:
    DEVICE = "cuda"
elif torch.backends.mps.is_available() and ALLOW_MPS:
    DEVICE = "mps"
print(f"Using device: {DEVICE}")

# Load Stable Diffusion model
tokenizer_vocab_path = Path("C:\\Users\\Esmail\\Desktop\\nanograd\\nanograd\\models\\stable_diffusion\\sd_data\\tokenizer_vocab.json")
tokenizer_merges_path = Path("C:\\Users\\Esmail\\Desktop\\nanograd\\nanograd\\models\\stable_diffusion\\sd_data\\tokenizer_merges.txt")
model_file = Path("C:\\Users\\Esmail\\Desktop\\nanograd\\nanograd\\models\\stable_diffusion\\sd_data\\v1-5-pruned-emaonly.ckpt")

tokenizer = CLIPTokenizer(str(tokenizer_vocab_path), merges_file=str(tokenizer_merges_path))
models = model_loader.preload_models_from_standard_weights(str(model_file), DEVICE)
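# NOTE: the absolute Windows paths above come from the author's machine; point
# them at your own sd_data directory when running the app elsewhere.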

# Blueprints for image generation and text generation
blueprints = {
    "Visual Story": {
        "sd_prompts": [
            "A futuristic city skyline at dusk, flying cars, neon lights, cyberpunk style",
            "A bustling marketplace in a futuristic city, holograms, diverse crowd",
            "A serene park in a futuristic city with advanced technology blending with nature"
        ],
        "sd_cfg_scales": [9, 8, 7],
        "sd_num_inference_steps": [60, 50, 45],
        "sd_samplers": ["ddpm", "k_euler_ancestral", "euler"],
        "ollama_prompts": [
            "Describe a futuristic city that blends natural elements with advanced technology.",
            "Write about an advanced cityscape with unique technological elements.",
            "Imagine a futuristic metropolis where nature and technology harmoniously coexist."
        ],
        "ollama_models": ["llama3", "aya", "codellama"]
    },
    "Nature & Poetry": {
        "sd_prompts": [
            "A peaceful mountain landscape at sunrise, photorealistic, serene",
            "A tranquil lake surrounded by autumn trees, soft light, misty atmosphere",
            "A hidden waterfall in a dense jungle, lush greenery, crystal clear water"
        ],
        "sd_cfg_scales": [9, 8, 7],
        "sd_num_inference_steps": [60, 50, 45],
        "sd_samplers": ["ddpm", "k_euler_ancestral", "euler"],
        "ollama_prompts": [
            "Write a short poem about a tranquil sunrise over the mountains.",
            "Describe the beauty of a hidden waterfall in a jungle.",
            "Compose a poetic reflection on the serenity of a lake at dawn."
        ],
        "ollama_models": ["llama3", "aya", "codellama"]
    },
    "Dreamscape": {
        "sd_prompts": [
            "A surreal dreamscape with floating islands and bioluminescent creatures",
            "An endless horizon of strange landscapes, blending day and night",
            "A fantastical world with floating rocks and neon-lit skies"
        ],
        "sd_cfg_scales": [9, 8, 7],
        "sd_num_inference_steps": [60, 50, 45],
        "sd_samplers": ["ddpm", "k_euler_ancestral", "euler"],
        "ollama_prompts": [
            "Describe a dreamlike world filled with wonder and mystery.",
            "Write about a place where time doesn't exist, only dreams.",
            "Create a story where reality and fantasy blur together."
        ],
        "ollama_models": ["llama3", "aya", "codellama"]
    },
    "Abstract Art": {
        "sd_prompts": [
            "Abstract painting with vibrant colors and dynamic shapes",
            "A digital artwork with chaotic patterns and bold contrasts",
            "Geometric abstraction with a focus on form and color"
        ],
        "sd_cfg_scales": [9, 8, 7],
        "sd_num_inference_steps": [60, 50, 45],
        "sd_samplers": ["ddpm", "k_euler_ancestral", "euler"],
        "ollama_prompts": [
            "Write a short description of an abstract painting.",
            "Describe a piece of modern art that defies traditional norms.",
            "Imagine a world where art is created by emotions, not hands."
        ],
        "ollama_models": ["llama3", "aya", "codellama"]
    },
    "Fashion Design": {
        "sd_prompts": [
            "A high-fashion model wearing a futuristic outfit, neon colors, catwalk pose",
            "A chic ensemble blending classic elegance with modern flair",
            "Avant-garde fashion with bold textures and unconventional shapes"
        ],
        "sd_cfg_scales": [9, 8, 7],
        "sd_num_inference_steps": [60, 50, 45],
        "sd_samplers": ["ddpm", "k_euler_ancestral", "euler"],
        "ollama_prompts": [
            "Describe a unique and innovative fashion design.",
            "Write about a new fashion trend inspired by nature.",
            "Imagine a clothing line that combines style with sustainability."
        ],
        "ollama_models": ["llama3", "aya", "codellama"]
    },
    "Food & Recipe": {
        "sd_prompts": [
            "A gourmet dish plated with artistic precision, vibrant colors, fine dining",
            "A rustic wooden table spread with fresh ingredients and homemade bread",
            "A close-up of a decadent dessert, rich textures, soft warm lighting"
        ],
        "sd_cfg_scales": [9, 8, 7],
        "sd_num_inference_steps": [60, 50, 45],
        "sd_samplers": ["ddpm", "k_euler_ancestral", "euler"],
        "ollama_prompts": [
            "Write a short description of a signature dish for a fine-dining menu.",
            "Describe the aromas and flavors of a traditional home-cooked meal.",
            "Imagine a recipe that fuses two distinct culinary traditions."
        ],
        "ollama_models": ["llama3", "aya", "codellama"]
    },
    "Interior Design": {
        "sd_prompts": [
            "A modern living room with sleek furniture, minimalist design, and natural light",
            "A cozy study room with rich textures, warm colors, and elegant decor",
            "An open-plan kitchen with contemporary appliances and stylish finishes"
        ],
        "sd_cfg_scales": [9, 8, 7],
        "sd_num_inference_steps": [60, 50, 45],
        "sd_samplers": ["ddpm", "k_euler_ancestral", "euler"],
        "ollama_prompts": [
            "Describe an interior design that combines modern and classic elements.",
            "Write about a space that enhances productivity and relaxation through design.",
            "Imagine a luxurious interior design for a high-end apartment."
        ],
        "ollama_models": ["llama3", "aya", "codellama"]
    },
    "Historical Fiction": {
        "sd_prompts": [
            "A bustling Victorian-era street with horse-drawn carriages and period architecture",
            "A grand historical ballroom with opulent decor and elegantly dressed guests",
            "An ancient battlefield with detailed historical accuracy and dramatic scenery"
        ],
        "sd_cfg_scales": [9, 8, 7],
        "sd_num_inference_steps": [60, 50, 45],
        "sd_samplers": ["ddpm", "k_euler_ancestral", "euler"],
        "ollama_prompts": [
            "Describe a significant historical event as if it were a scene in a novel.",
            "Write about a character navigating the challenges of a historical setting.",
            "Imagine a historical figure interacting with modern technology."
        ],
        "ollama_models": ["llama3", "aya", "codellama"]
    },
    "Science Fiction": {
        "sd_prompts": [
            "A futuristic cityscape with flying cars, neon lights, and towering skyscrapers",
            "An alien planet with unique landscapes, strange flora, and advanced technology",
            "A space station with cutting-edge design and high-tech equipment"
        ],
        "sd_cfg_scales": [9, 8, 7],
        "sd_num_inference_steps": [60, 50, 45],
        "sd_samplers": ["ddpm", "k_euler_ancestral", "euler"],
        "ollama_prompts": [
            "Describe a futuristic world where technology has reshaped society.",
            "Write about an encounter with an alien civilization.",
            "Imagine a story set in a distant future with advanced technology and space exploration."
        ],
        "ollama_models": ["llama3", "aya", "codellama"]
    },
    "Character Design": {
        "sd_prompts": [
            "A detailed fantasy character with elaborate costumes and accessories",
            "A sci-fi hero with futuristic armor and high-tech gadgets",
            "A historical figure portrayed with accurate attire and realistic features"
        ],
        "sd_cfg_scales": [9, 8, 7],
        "sd_num_inference_steps": [60, 50, 45],
        "sd_samplers": ["ddpm", "k_euler_ancestral", "euler"],
        "ollama_prompts": [
            "Describe a unique character from a fantasy novel, focusing on their appearance and personality.",
            "Write about a futuristic character with advanced technology and a compelling backstory.",
            "Imagine a historical figure as a character in a modern setting."
        ],
        "ollama_models": ["llama3", "aya", "codellama"]
    }
}

# Define functions for each feature
def generate_image(prompt, cfg_scale, num_inference_steps, sampler):
    uncond_prompt = ""
    do_cfg = True
    input_image = None
    strength = 0.9
    seed = 42

    output_image = pipeline.generate(
        prompt=prompt,
        uncond_prompt=uncond_prompt,
        input_image=input_image,
        strength=strength,
        do_cfg=do_cfg,
        cfg_scale=cfg_scale,
        sampler_name=sampler,
        n_inference_steps=num_inference_steps,
        seed=seed,
        models=models,
        device=DEVICE,
        idle_device="cpu",
        tokenizer=tokenizer,
    )

    output_image = Image.fromarray(output_image)
    return output_image
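
# A minimal usage sketch (values are illustrative, not taken from the UI):
# image = generate_image(
#     prompt="A cat stretching on the floor, highly detailed, 8k resolution",
#     cfg_scale=7, num_inference_steps=20, sampler="ddpm",
# )
# image.save("output.png")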

def apply_blueprint(blueprint_name):
    if blueprint_name in blueprints:
        bp = blueprints[blueprint_name]
        sd_prompts = random.choice(bp["sd_prompts"])
        sd_cfg_scale = random.choice(bp["sd_cfg_scales"])
        sd_num_inference_steps = random.choice(bp["sd_num_inference_steps"])
        sd_sampler = random.choice(bp["sd_samplers"])
        ollama_prompts = random.choice(bp["ollama_prompts"])
        ollama_model = random.choice(bp["ollama_models"])
        return (
            sd_prompts, sd_cfg_scale, sd_num_inference_steps, sd_sampler,
            ollama_model, ollama_prompts
        )
    # Fallback defaults when the blueprint name is unknown
    return "", 7, 20, "ddpm", "aya", ""
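
# Example: apply_blueprint("Visual Story") returns a randomly chosen
# (sd_prompt, cfg_scale, steps, sampler, ollama_model, ollama_prompt) tuple
# drawn from that blueprint's options.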

def download_checkpoint(checkpoint):
    try:
        # Run the litgpt download command
        command = ["litgpt", "download", checkpoint]
        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        output, error = process.communicate()
        if process.returncode == 0:
            return f"Checkpoint '{checkpoint}' downloaded successfully.\n{output}"
        else:
            return f"Error downloading checkpoint '{checkpoint}':\n{error}"
    except Exception as e:
        return f"Unexpected error: {str(e)}"
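
# Example: download_checkpoint("EleutherAI/gpt-neo-125M") shells out to the
# `litgpt download` CLI and returns its status message as a string.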

def chat_with_ollama(model_name, prompt):
    command = ['ollama', 'run', model_name, prompt]
    result = subprocess.run(command, capture_output=True, text=True)
    return result.stdout
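
# Example: chat_with_ollama("llama3", "Write a haiku about autumn.")
# returns whatever `ollama run` prints to stdout, i.e. the model's reply.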

def install_ollama():
    try:
        # Command to install Ollama silently
        installer_path = "OllamaSetup.exe"
        if not os.path.exists(installer_path):
            # Download the installer if not already available
            subprocess.run(["curl", "-o", installer_path, "https://ollama.com/download/OllamaSetup.exe"], check=True)

        # Run the installer silently
        subprocess.run([installer_path, "/S"], check=True)
        return "Ollama installed successfully."
    except Exception as e:
        return f"Installation failed: {str(e)}"
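
# Note: this flow is Windows-specific (OllamaSetup.exe with a silent-install
# flag); Linux and macOS installs follow the instructions at
# https://ollama.com/download instead.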

def welcome(name):
    return f"Welcome to nanograd Engine, {name}!"

js = """
function createGradioAnimation() {
    var container = document.createElement('div');
    container.id = 'gradio-animation';
    container.style.fontSize = '2em';
    container.style.fontWeight = 'bold';
    container.style.textAlign = 'center';
    container.style.marginBottom = '20px';

    var text = 'Welcome to nanograd Engine!';
    for (var i = 0; i < text.length; i++) {
        (function(i){
            setTimeout(function(){
                var letter = document.createElement('span');
                letter.style.opacity = '0';
                letter.style.transition = 'opacity 0.5s';
                letter.innerText = text[i];

                container.appendChild(letter);

                setTimeout(function() {
                    letter.style.opacity = '1';
                }, 50);
            }, i * 250);
        })(i);
    }

    var gradioContainer = document.querySelector('.gradio-container');
    gradioContainer.insertBefore(container, gradioContainer.firstChild);

    return 'Animation created';
}
"""
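
# The `js` snippet above runs on page load and types the welcome banner in
# letter by letter with a fade-in effect.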

# Gradio interface
def gradio_interface():
    with gr.Blocks(theme='ParityError/Interstellar', js=js) as demo:
        with gr.Tab("nano-Engine"):
            with gr.Row():
                with gr.Column(scale=1):
                    # Text Generation with Ollama
                    gr.Markdown("### Generate Text with Ollama")
                    ollama_model_name = gr.Dropdown(label="Select Ollama Model", choices=["aya", "llama3", "codellama"], value="aya")
                    ollama_prompts = gr.Textbox(label="Prompt", placeholder="Enter your prompt here")
                    ollama_output = gr.Textbox(label="Output", placeholder="Model output will appear here", interactive=True)
                    ollama_btn = gr.Button("Generate", variant="primary")
                    ollama_btn.click(fn=chat_with_ollama, inputs=[ollama_model_name, ollama_prompts], outputs=ollama_output)

                    gr.Markdown("### GPT Checkpoints Management")
                    checkpoint_dropdown = gr.Dropdown(label="Select Checkpoint", choices=["EleutherAI/gpt-neo-125M", "EleutherAI/gpt-neo-1.3B", "microsoft/phi-2", "codellama/CodeLlama-13b-hf"], value="EleutherAI/gpt-neo-125M")
                    download_btn = gr.Button("Download Checkpoint", variant="primary")
                    checkpoint_status = gr.Textbox(label="Download Status", placeholder="Status will appear here", interactive=True)
                    download_btn.click(fn=download_checkpoint, inputs=checkpoint_dropdown, outputs=checkpoint_status)

                    gr.Markdown("### Install Ollama")
                    install_ollama_btn = gr.Button("Install Ollama", variant="primary")
                    installation_status = gr.Textbox(label="Installation Status", placeholder="Status will appear here", interactive=True)
                    install_ollama_btn.click(fn=install_ollama, outputs=installation_status)

                with gr.Column(scale=1):
                    gr.Markdown("### Stable Diffusion Image Generation")

                    prompt_input = gr.Textbox(label="Prompt", placeholder="A cat stretching on the floor, highly detailed, ultra sharp, cinematic, 100mm lens, 8k resolution")
                    cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, step=1)
                    num_inference_steps = gr.Slider(label="Sampling Steps", minimum=10, maximum=100, value=20, step=5)
                    sampler = gr.Radio(label="Sampling Method", choices=["ddpm", "k_euler_ancestral", "euler"], value="ddpm")  # aligned with the sampler names used in the blueprints
                    generate_img_btn = gr.Button("Generate", variant="primary")
                    output_image = gr.Image(label="Output", show_label=False, height=700, width=750)

                    generate_img_btn.click(fn=generate_image, inputs=[prompt_input, cfg_scale, num_inference_steps, sampler], outputs=output_image)

        with gr.Tab("Blueprints"):
            with gr.Row():
                blueprint_dropdown = gr.Dropdown(label="Select Blueprint", choices=list(blueprints.keys()), value=list(blueprints.keys())[0])
                load_blueprint_btn = gr.Button("Load Blueprint", variant="primary")

            # Blueprint Outputs
            sd_prompt_output = gr.Textbox(label="SD Prompt", interactive=True)
            sd_cfg_output = gr.Slider(label="SD CFG Scale", minimum=1, maximum=20, step=1, interactive=True)
            sd_steps_output = gr.Slider(label="SD Sampling Steps", minimum=10, maximum=100, step=5, interactive=True)
            sd_sampler_output = gr.Radio(label="SD Sampler", choices=["ddpm", "k_euler_ancestral", "euler"], value="ddpm", interactive=True)  # matches blueprint sampler names so loaded values are valid choices
            ollama_model_output = gr.Dropdown(label="Ollama Model", choices=["aya", "llama3", "codellama"], value="aya", interactive=True)
            ollama_prompt_output = gr.Textbox(label="Ollama Prompt", interactive=True)

            def apply_loaded_blueprint(prompt, cfg_scale, num_inference_steps, sampler, model, ollama_prompts):
                return (
                    gr.update(value=prompt),
                    gr.update(value=cfg_scale),
                    gr.update(value=num_inference_steps),
                    gr.update(value=sampler),
                    gr.update(value=model),
                    gr.update(value=ollama_prompts)
                )

            # Fill the blueprint fields via the module-level apply_blueprint, then
            # propagate the loaded values to the main nano-Engine controls only
            # after that step completes.
            load_blueprint_btn.click(
                fn=apply_blueprint,
                inputs=blueprint_dropdown,
                outputs=[sd_prompt_output, sd_cfg_output, sd_steps_output, sd_sampler_output, ollama_model_output, ollama_prompt_output],
            ).then(
                fn=apply_loaded_blueprint,
                inputs=[sd_prompt_output, sd_cfg_output, sd_steps_output, sd_sampler_output, ollama_model_output, ollama_prompt_output],
                outputs=[prompt_input, cfg_scale, num_inference_steps, sampler, ollama_model_name, ollama_prompts],
            )

        with gr.Tab("Chatbot-Prompts"):
            with gr.Row():
                with gr.Column(scale=1):
                    from nanograd.models.GPT.tokenizer import tokenize
                    gr.Markdown("<h1><center>BPE Tokenizer</center></h1>")
                    iface = gr.Interface(fn=tokenize, inputs="text", outputs="json")

                with gr.Column(scale=1):
                    from examples import ollama_prompted
                    gr.Markdown("<h1><center>Chatbot (Arabic)</center></h1>")
                    i = gr.Interface(
                        fn=ollama_prompted.run,
                        inputs=gr.Textbox(lines=1, placeholder="Ask a question about travel or airlines"),
                        outputs=gr.Textbox(label="Aya's response"),
                    )

    demo.launch()


# Run the Gradio interface
if __name__ == "__main__":
    gradio_interface()