prithivMLmods committed on
Commit
b214dbe
1 Parent(s): 8ac21ea

Update app.py

Files changed (1)
  1. app.py +242 -138
app.py CHANGED
@@ -1,152 +1,256 @@
  import os
- import shutil
- import pandas as pd
- import matplotlib.pyplot as plt
- import seaborn as sns
- import plotly.express as px
  import gradio as gr

  css = '''
- .gradio-container{max-width: 900px !important}
  h1{text-align:center}
  '''

- def create_visualizations(data):
-     plots = []
-
-     # Create figures directory
-     figures_dir = "./figures"
-     shutil.rmtree(figures_dir, ignore_errors=True)
-     os.makedirs(figures_dir, exist_ok=True)
-
-     # Histograms for numeric columns
-     numeric_cols = data.select_dtypes(include=['number']).columns
-     for col in numeric_cols:
-         plt.figure()
-         sns.histplot(data[col], kde=True)
-         plt.title(f'Histogram of {col}')
-         plt.xlabel(col)
-         plt.ylabel('Frequency')
-         hist_path = os.path.join(figures_dir, f'histogram_{col}.png')
-         plt.savefig(hist_path)
-         plt.close()
-         plots.append(hist_path)
-
-     # Box plots for numeric columns
-     for col in numeric_cols:
-         plt.figure()
-         sns.boxplot(x=data[col])
-         plt.title(f'Box Plot of {col}')
-         box_path = os.path.join(figures_dir, f'boxplot_{col}.png')
-         plt.savefig(box_path)
-         plt.close()
-         plots.append(box_path)
-
-     # Scatter plot matrix
-     if len(numeric_cols) > 1:
-         plt.figure()
-         sns.pairplot(data[numeric_cols])
-         plt.title('Scatter Plot Matrix')
-         scatter_matrix_path = os.path.join(figures_dir, 'scatter_matrix.png')
-         plt.savefig(scatter_matrix_path)
-         plt.close()
-         plots.append(scatter_matrix_path)
-
-     # Correlation heatmap
-     if len(numeric_cols) > 1:
-         plt.figure()
-         corr = data[numeric_cols].corr()
-         sns.heatmap(corr, annot=True, cmap='coolwarm')
-         plt.title('Correlation Heatmap')
-         heatmap_path = os.path.join(figures_dir, 'correlation_heatmap.png')
-         plt.savefig(heatmap_path)
-         plt.close()
-         plots.append(heatmap_path)
-
-     # Bar charts for categorical columns
-     categorical_cols = data.select_dtypes(include=['object']).columns
-     if not categorical_cols.empty:
-         for col in categorical_cols:
-             plt.figure()
-             data[col].value_counts().plot(kind='bar')
-             plt.title(f'Bar Chart of {col}')
-             plt.xlabel(col)
-             plt.ylabel('Count')
-             bar_path = os.path.join(figures_dir, f'bar_chart_{col}.png')
-             plt.savefig(bar_path)
-             plt.close()
-             plots.append(bar_path)

-     # Line charts (if a 'date' column is present)
-     if 'date' in data.columns:
-         plt.figure()
-         data['date'] = pd.to_datetime(data['date'])
-         data.set_index('date').plot()
-         plt.title('Line Chart of Date Series')
-         line_chart_path = os.path.join(figures_dir, 'line_chart.png')
-         plt.savefig(line_chart_path)
-         plt.close()
-         plots.append(line_chart_path)
-
-     # Scatter plot using Plotly
-     if len(numeric_cols) >= 2:
-         fig = px.scatter(data, x=numeric_cols[0], y=numeric_cols[1], title='Scatter Plot')
-         scatter_plot_path = os.path.join(figures_dir, 'scatter_plot.html')
-         fig.write_html(scatter_plot_path)
-         plots.append(scatter_plot_path)
-
-     # Pie chart for categorical columns (only the first categorical column)
-     if not categorical_cols.empty:
-         fig = px.pie(data, names=categorical_cols[0], title='Pie Chart of ' + categorical_cols[0])
-         pie_chart_path = os.path.join(figures_dir, 'pie_chart.html')
-         fig.write_html(pie_chart_path)
-         plots.append(pie_chart_path)
-
-     # Heatmaps (e.g., for a correlation matrix or cross-tabulation)
-     if len(numeric_cols) > 1:
-         heatmap_data = data[numeric_cols].corr()
-         fig = px.imshow(heatmap_data, text_auto=True, title='Heatmap of Numeric Variables')
-         heatmap_plot_path = os.path.join(figures_dir, 'heatmap_plot.html')
-         fig.write_html(heatmap_plot_path)
-         plots.append(heatmap_plot_path)
-
-     # Violin plots for numeric columns
-     for col in numeric_cols:
-         plt.figure()
-         sns.violinplot(x=data[col])
-         plt.title(f'Violin Plot of {col}')
-         violin_path = os.path.join(figures_dir, f'violin_plot_{col}.png')
-         plt.savefig(violin_path)
-         plt.close()
-         plots.append(violin_path)
-
-     return plots

- def analyze_data(file_input):
-     data = pd.read_csv(file_input.name)
-     return create_visualizations(data)

- # Example file path
- example_file_path = "./example/example.csv"

- with gr.Blocks(css=css, theme=gr.themes.Soft(primary_hue=gr.themes.colors.orange, secondary_hue=gr.themes.colors.blue)) as demo:
-     gr.Markdown("# DATA BOARD📊\nUpload a `.csv` file to generate various visualizations and interactive plots.")
-
-     file_input = gr.File(label="Upload your `.csv` file")
-     submit = gr.Button("Generate Dashboards")
-
-     # Display images and interactive plots in a gallery
-     gallery = gr.Gallery(label="Visualizations")
-
-     # Example block with cache_examples set to True
-     examples = gr.Examples(
-         examples=[[example_file_path]],
-         inputs=file_input,
-         outputs=gallery,
-         cache_examples=True # Enable caching
      )
-
-     submit.click(analyze_data, file_input, gallery)

  if __name__ == "__main__":
-     demo.launch(share=True)
 
+ #!/usr/bin/env python
+ #patch 2.0 ()
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+ #
+ # ...
  import os
+ import random
+ import uuid
+ import json
  import gradio as gr
+ import numpy as np
+ from PIL import Image
+ import spaces
+ import torch
+ from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
+
+ #Load the HTML content
+ #html_file_url = "https://prithivmlmods-hamster-static.static.hf.space/index.html"
+ #html_content = f'<iframe src="{html_file_url}" style="width:100%; height:180px; border:none;"></iframe>'
+ #html_file_url = "https://prithivmlmods-static-loading-theme.static.hf.space/index.html"
+
+ #html_file_url = "https://prithivhamster.vercel.app/"
+ #html_content = f'<iframe src="{html_file_url}" style="width:100%; height:400px; border:none"></iframe>'
+
+ DESCRIPTIONx = """## STABLE HAMSTER 🐹
+
+ """

  css = '''
+ .gradio-container{max-width: 560px !important}
  h1{text-align:center}
+ footer {
+     visibility: hidden
+ }
  '''

+ examples = [
+     "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
+     "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K",
+     "Vector illustration of a horse, vector graphic design with flat colors on an brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw",
+     "Man in brown leather jacket posing for camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5",
+     "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16 "

+ ]


+ #examples = [
+ # ["file/1.png", "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)"],
+ # ["file/2.png", "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K"],
+ #["file/3.png", "Vector illustration of a horse, vector graphic design with flat colors on a brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw"],
+ #["file/4.png", "Man in brown leather jacket posing for the camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5"],
+ #["file/5.png", "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on a white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16"]
+ #]

+
+ #Set an os.Getenv variable
+ #set VAR_NAME=”VALUE”
+ #Fetch an environment variable
+ #echo %VAR_NAME%
+
+ MODEL_ID = os.getenv("MODEL_VAL_PATH") #Use SDXL Model as "MODEL_REPO" --------->>> ”VALUE”.
+ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
+ USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
+ ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
+ BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1")) # Allow generating multiple images at once
+
+ #Load model outside of function
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+ pipe = StableDiffusionXLPipeline.from_pretrained(
+     MODEL_ID,
+     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+     use_safetensors=True,
+     add_watermarker=False,
+ ).to(device)
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+
+ # <compile speedup >
+ if USE_TORCH_COMPILE:
+     pipe.compile()
+
+ # Offloading capacity (RAM)
+ if ENABLE_CPU_OFFLOAD:
+     pipe.enable_model_cpu_offload()
+
+ MAX_SEED = np.iinfo(np.int32).max
+
+ def save_image(img):
+     unique_name = str(uuid.uuid4()) + ".png"
+     img.save(unique_name)
+     return unique_name
+
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     return seed
+
+ @spaces.GPU(duration=60, enable_queue=True)
+ def generate(
+     prompt: str,
+     negative_prompt: str = "",
+     use_negative_prompt: bool = False,
+     seed: int = 1,
+     width: int = 1024,
+     height: int = 1024,
+     guidance_scale: float = 3,
+     num_inference_steps: int = 25,
+     randomize_seed: bool = False,
+     use_resolution_binning: bool = True,
+     num_images: int = 1, # Number of images to generate
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     seed = int(randomize_seed_fn(seed, randomize_seed))
+     generator = torch.Generator(device=device).manual_seed(seed)
+
+     #Options
+     options = {
+         "prompt": [prompt] * num_images,
+         "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
+         "width": width,
+         "height": height,
+         "guidance_scale": guidance_scale,
+         "num_inference_steps": num_inference_steps,
+         "generator": generator,
+         "output_type": "pil",
+     }
+
+     #VRAM usage Lesser
+     if use_resolution_binning:
+         options["use_resolution_binning"] = True
+
+     #Images potential batches
+     images = []
+     for i in range(0, num_images, BATCH_SIZE):
+         batch_options = options.copy()
+         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
+         if "negative_prompt" in batch_options:
+             batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
+         images.extend(pipe(**batch_options).images)
+
+     image_paths = [save_image(img) for img in images]
+     return image_paths, seed
+ #Main gr.Block
+ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
+     gr.Markdown(DESCRIPTIONx)
+
+     with gr.Group():
+         with gr.Row():
+             prompt = gr.Text(
+                 label="Prompt",
+                 show_label=False,
+                 max_lines=1,
+                 placeholder="Enter your prompt",
+                 container=False,
+             )
+             run_button = gr.Button("Run", scale=0)
+         result = gr.Gallery(label="Result", columns=1, show_label=False)
+     with gr.Accordion("Advanced options", open=False, visible=False):
+         num_images = gr.Slider(
+             label="Number of Images",
+             minimum=1,
+             maximum=4,
+             step=1,
+             value=1,
+         )
+         with gr.Row():
+             use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
+             negative_prompt = gr.Text(
+                 label="Negative prompt",
+                 max_lines=5,
+                 lines=4,
+                 placeholder="Enter a negative prompt",
+                 value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
+                 visible=True,
+             )
+         seed = gr.Slider(
+             label="Seed",
+             minimum=0,
+             maximum=MAX_SEED,
+             step=1,
+             value=0,
+         )
+         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+         with gr.Row(visible=True):
+             width = gr.Slider(
+                 label="Width",
+                 minimum=512,
+                 maximum=MAX_IMAGE_SIZE,
+                 step=64,
+                 value=1024,
+             )
+             height = gr.Slider(
+                 label="Height",
+                 minimum=512,
+                 maximum=MAX_IMAGE_SIZE,
+                 step=64,
+                 value=1024,
+             )
+         with gr.Row():
+             guidance_scale = gr.Slider(
+                 label="Guidance Scale",
+                 minimum=0.1,
+                 maximum=6,
+                 step=0.1,
+                 value=3.0,
+             )
+             num_inference_steps = gr.Slider(
+                 label="Number of inference steps",
+                 minimum=1,
+                 maximum=25,
+                 step=1,
+                 value=23,
+             )
+
+     gr.Examples(
+         examples=examples,
+         inputs=prompt,
+         cache_examples=False
      )
+
+     use_negative_prompt.change(
+         fn=lambda x: gr.update(visible=x),
+         inputs=use_negative_prompt,
+         outputs=negative_prompt,
+         api_name=False,
+     )
+
+     gr.on(
+         triggers=[
+             prompt.submit,
+             negative_prompt.submit,
+             run_button.click,
+         ],
+         fn=generate,
+         inputs=[
+             prompt,
+             negative_prompt,
+             use_negative_prompt,
+             seed,
+             width,
+             height,
+             guidance_scale,
+             num_inference_steps,
+             randomize_seed,
+             num_images
+         ],
+         outputs=[result, seed],
+         api_name="run",
+     )

  if __name__ == "__main__":
+     demo.queue(max_size=40).launch()
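
The rewritten app.py takes all of its runtime configuration from environment variables (MODEL_VAL_PATH, MAX_IMAGE_SIZE, USE_TORCH_COMPILE, ENABLE_CPU_OFFLOAD, BATCH_SIZE). A minimal local-run sketch follows; the values shown are assumptions for illustration only (the Space itself sets MODEL_VAL_PATH as a secret), and it assumes the `spaces` package is installed so the `@spaces.GPU` decorator degrades to a no-op outside a Space.

# run_local.py -- illustrative sketch, not the Space's own configuration
import os

# Assumed SDXL checkpoint; substitute whatever repo id MODEL_VAL_PATH should point to.
os.environ.setdefault("MODEL_VAL_PATH", "stabilityai/stable-diffusion-xl-base-1.0")
os.environ.setdefault("MAX_IMAGE_SIZE", "2048")      # assumed cap for local VRAM
os.environ.setdefault("USE_TORCH_COMPILE", "0")
os.environ.setdefault("ENABLE_CPU_OFFLOAD", "1")     # keep sub-models in CPU RAM, move to GPU only when needed
os.environ.setdefault("BATCH_SIZE", "1")

import app  # importing app.py loads the pipeline and builds the Blocks UI at module level

# The __main__ guard in app.py does not fire on import, so launch explicitly.
app.demo.queue(max_size=40).launch()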