prithivMLmods committed
Commit 8ac21ea
1 Parent(s): df6ffb4

Update app.py

Files changed (1)
  1. app.py +139 -239
app.py CHANGED
@@ -1,252 +1,152 @@
- #!/usr/bin/env python
- #patch 2.0 ()
- # Permission is hereby granted, free of charge, to any person obtaining a copy
- # of this software and associated documentation files (the "Software"), to deal
- # in the Software without restriction, including without limitation the rights
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- # copies of the Software, and to permit persons to whom the Software is
- # furnished to do so, subject to the following conditions:
- # ...
  import os
- import random
- import uuid
- import json
  import gradio as gr
- import numpy as np
- from PIL import Image
- import spaces
- import torch
- from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
-
- #Load the HTML content
- #html_file_url = "https://prithivmlmods-hamster-static.static.hf.space/index.html"
- #html_content = f'<iframe src="{html_file_url}" style="width:100%; height:180px; border:none;"></iframe>'
- #html_file_url = "https://prithivmlmods-static-loading-theme.static.hf.space/index.html"
-
- #html_file_url = "https://prithivhamster.vercel.app/"
- #html_content = f'<iframe src="{html_file_url}" style="width:100%; height:400px; border:none"></iframe>'
-
- DESCRIPTIONx = """## STABLE HAMSTER 🐹
-
-
- """

  css = '''
- .gradio-container{max-width: 560px !important}
  h1{text-align:center}
- footer {
-     visibility: hidden
- }
  '''
- examples = [
-     "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
-     "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K",
-     "Vector illustration of a horse, vector graphic design with flat colors on an brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw",
-     "Man in brown leather jacket posing for camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5",
-     "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16 "
-
- ]
-
- #examples = [
- # ["file/1.png", "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)"],
- # ["file/2.png", "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K"],
- #["file/3.png", "Vector illustration of a horse, vector graphic design with flat colors on a brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw"],
- #["file/4.png", "Man in brown leather jacket posing for the camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5"],
- #["file/5.png", "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on a white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16"]
- #]
-
- #Set an os.Getenv variable
- #set VAR_NAME=”VALUE”
- #Fetch an environment variable
- #echo %VAR_NAME%
-
- MODEL_ID = os.getenv("MODEL_VAL_PATH") #Use SDXL Model as "MODEL_REPO" --------->>> ”VALUE”.
- MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
- USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
- ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
- BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1")) # Allow generating multiple images at once
-
- #Load model outside of function
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
- pipe = StableDiffusionXLPipeline.from_pretrained(
-     MODEL_ID,
-     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-     use_safetensors=True,
-     add_watermarker=False,
- ).to(device)
- pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-
- # <compile speedup >
- if USE_TORCH_COMPILE:
-     pipe.compile()
-
- # Offloading capacity (RAM)
- if ENABLE_CPU_OFFLOAD:
-     pipe.enable_model_cpu_offload()
-
- MAX_SEED = np.iinfo(np.int32).max
-
- def save_image(img):
-     unique_name = str(uuid.uuid4()) + ".png"
-     img.save(unique_name)
-     return unique_name
-
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-     return seed
-
- @spaces.GPU(duration=60, enable_queue=True)
- def generate(
-     prompt: str,
-     negative_prompt: str = "",
-     use_negative_prompt: bool = False,
-     seed: int = 1,
-     width: int = 1024,
-     height: int = 1024,
-     guidance_scale: float = 3,
-     num_inference_steps: int = 25,
-     randomize_seed: bool = False,
-     use_resolution_binning: bool = True,
-     num_images: int = 1, # Number of images to generate
-     progress=gr.Progress(track_tqdm=True),
- ):
-     seed = int(randomize_seed_fn(seed, randomize_seed))
-     generator = torch.Generator(device=device).manual_seed(seed)
-
-     #Options
-     options = {
-         "prompt": [prompt] * num_images,
-         "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
-         "width": width,
-         "height": height,
-         "guidance_scale": guidance_scale,
-         "num_inference_steps": num_inference_steps,
-         "generator": generator,
-         "output_type": "pil",
-     }
-
-     #VRAM usage Lesser
-     if use_resolution_binning:
-         options["use_resolution_binning"] = True
-
-     #Images potential batches
-     images = []
-     for i in range(0, num_images, BATCH_SIZE):
-         batch_options = options.copy()
-         batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
-         if "negative_prompt" in batch_options:
-             batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
-         images.extend(pipe(**batch_options).images)

-     image_paths = [save_image(img) for img in images]
-     return image_paths, seed
- #Main gr.Block
- with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
-     gr.Markdown(DESCRIPTIONx)

-     with gr.Group():
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-             run_button = gr.Button("Run", scale=0)
-         result = gr.Gallery(label="Result", columns=1, show_label=False)
-     with gr.Accordion("Advanced options", open=False, visible=False):
-         num_images = gr.Slider(
-             label="Number of Images",
-             minimum=1,
-             maximum=4,
-             step=1,
-             value=1,
-         )
-         with gr.Row():
-             use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=5,
-                 lines=4,
-                 placeholder="Enter a negative prompt",
-                 value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
-                 visible=True,
-             )
-         seed = gr.Slider(
-             label="Seed",
-             minimum=0,
-             maximum=MAX_SEED,
-             step=1,
-             value=0,
-         )
-         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-         with gr.Row(visible=True):
-             width = gr.Slider(
-                 label="Width",
-                 minimum=512,
-                 maximum=MAX_IMAGE_SIZE,
-                 step=64,
-                 value=1024,
-             )
-             height = gr.Slider(
-                 label="Height",
-                 minimum=512,
-                 maximum=MAX_IMAGE_SIZE,
-                 step=64,
-                 value=1024,
-             )
-         with gr.Row():
-             guidance_scale = gr.Slider(
-                 label="Guidance Scale",
-                 minimum=0.1,
-                 maximum=6,
-                 step=0.1,
-                 value=3.0,
-             )
-             num_inference_steps = gr.Slider(
-                 label="Number of inference steps",
-                 minimum=1,
-                 maximum=25,
-                 step=1,
-                 value=23,
-             )

-     gr.Examples(
-         examples=examples,
-         inputs=prompt,
-         cache_examples=False
-     )

-     use_negative_prompt.change(
-         fn=lambda x: gr.update(visible=x),
-         inputs=use_negative_prompt,
-         outputs=negative_prompt,
-         api_name=False,
      )

-     gr.on(
-         triggers=[
-             prompt.submit,
-             negative_prompt.submit,
-             run_button.click,
-         ],
-         fn=generate,
-         inputs=[
-             prompt,
-             negative_prompt,
-             use_negative_prompt,
-             seed,
-             width,
-             height,
-             guidance_scale,
-             num_inference_steps,
-             randomize_seed,
-             num_images
-         ],
-         outputs=[result, seed],
-         api_name="run",
-     )
  if __name__ == "__main__":
-     demo.queue(max_size=40).launch()
  import os
+ import shutil
+ import pandas as pd
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+ import plotly.express as px
  import gradio as gr

  css = '''
+ .gradio-container{max-width: 900px !important}
  h1{text-align:center}
  '''

+ def create_visualizations(data):
+     plots = []
+
+     # Create figures directory
+     figures_dir = "./figures"
+     shutil.rmtree(figures_dir, ignore_errors=True)
+     os.makedirs(figures_dir, exist_ok=True)
+
+     # Histograms for numeric columns
+     numeric_cols = data.select_dtypes(include=['number']).columns
+     for col in numeric_cols:
+         plt.figure()
+         sns.histplot(data[col], kde=True)
+         plt.title(f'Histogram of {col}')
+         plt.xlabel(col)
+         plt.ylabel('Frequency')
+         hist_path = os.path.join(figures_dir, f'histogram_{col}.png')
+         plt.savefig(hist_path)
+         plt.close()
+         plots.append(hist_path)
+
+     # Box plots for numeric columns
+     for col in numeric_cols:
+         plt.figure()
+         sns.boxplot(x=data[col])
+         plt.title(f'Box Plot of {col}')
+         box_path = os.path.join(figures_dir, f'boxplot_{col}.png')
+         plt.savefig(box_path)
+         plt.close()
+         plots.append(box_path)
+
+     # Scatter plot matrix
+     if len(numeric_cols) > 1:
+         plt.figure()
+         sns.pairplot(data[numeric_cols])
+         plt.title('Scatter Plot Matrix')
+         scatter_matrix_path = os.path.join(figures_dir, 'scatter_matrix.png')
+         plt.savefig(scatter_matrix_path)
+         plt.close()
+         plots.append(scatter_matrix_path)
+
+     # Correlation heatmap
+     if len(numeric_cols) > 1:
+         plt.figure()
+         corr = data[numeric_cols].corr()
+         sns.heatmap(corr, annot=True, cmap='coolwarm')
+         plt.title('Correlation Heatmap')
+         heatmap_path = os.path.join(figures_dir, 'correlation_heatmap.png')
+         plt.savefig(heatmap_path)
+         plt.close()
+         plots.append(heatmap_path)
+
+     # Bar charts for categorical columns
+     categorical_cols = data.select_dtypes(include=['object']).columns
+     if not categorical_cols.empty:
+         for col in categorical_cols:
+             plt.figure()
+             data[col].value_counts().plot(kind='bar')
+             plt.title(f'Bar Chart of {col}')
+             plt.xlabel(col)
+             plt.ylabel('Count')
+             bar_path = os.path.join(figures_dir, f'bar_chart_{col}.png')
+             plt.savefig(bar_path)
+             plt.close()
+             plots.append(bar_path)
+
+     # Line charts (if a 'date' column is present)
+     if 'date' in data.columns:
+         plt.figure()
+         data['date'] = pd.to_datetime(data['date'])
+         data.set_index('date').plot()
+         plt.title('Line Chart of Date Series')
+         line_chart_path = os.path.join(figures_dir, 'line_chart.png')
+         plt.savefig(line_chart_path)
+         plt.close()
+         plots.append(line_chart_path)
+
+     # Scatter plot using Plotly
+     if len(numeric_cols) >= 2:
+         fig = px.scatter(data, x=numeric_cols[0], y=numeric_cols[1], title='Scatter Plot')
+         scatter_plot_path = os.path.join(figures_dir, 'scatter_plot.html')
+         fig.write_html(scatter_plot_path)
+         plots.append(scatter_plot_path)
+
+     # Pie chart for categorical columns (only the first categorical column)
+     if not categorical_cols.empty:
+         fig = px.pie(data, names=categorical_cols[0], title='Pie Chart of ' + categorical_cols[0])
+         pie_chart_path = os.path.join(figures_dir, 'pie_chart.html')
+         fig.write_html(pie_chart_path)
+         plots.append(pie_chart_path)
+
+     # Heatmaps (e.g., for a correlation matrix or cross-tabulation)
+     if len(numeric_cols) > 1:
+         heatmap_data = data[numeric_cols].corr()
+         fig = px.imshow(heatmap_data, text_auto=True, title='Heatmap of Numeric Variables')
+         heatmap_plot_path = os.path.join(figures_dir, 'heatmap_plot.html')
+         fig.write_html(heatmap_plot_path)
+         plots.append(heatmap_plot_path)
+
+     # Violin plots for numeric columns
+     for col in numeric_cols:
+         plt.figure()
+         sns.violinplot(x=data[col])
+         plt.title(f'Violin Plot of {col}')
+         violin_path = os.path.join(figures_dir, f'violin_plot_{col}.png')
+         plt.savefig(violin_path)
+         plt.close()
+         plots.append(violin_path)
+
+     return plots

+ def analyze_data(file_input):
+     data = pd.read_csv(file_input.name)
+     return create_visualizations(data)

+ # Example file path
+ example_file_path = "./example/example.csv"

+ with gr.Blocks(css=css, theme=gr.themes.Soft(primary_hue=gr.themes.colors.orange, secondary_hue=gr.themes.colors.blue)) as demo:
+     gr.Markdown("# DATA BOARD📊\nUpload a `.csv` file to generate various visualizations and interactive plots.")
+
+     file_input = gr.File(label="Upload your `.csv` file")
+     submit = gr.Button("Generate Dashboards")
+
+     # Display images and interactive plots in a gallery
+     gallery = gr.Gallery(label="Visualizations")
+
+     # Example block with cache_examples set to True
+     examples = gr.Examples(
+         examples=[[example_file_path]],
+         inputs=file_input,
+         outputs=gallery,
+         cache_examples=True # Enable caching
      )

+     submit.click(analyze_data, file_input, gallery)
+
  if __name__ == "__main__":
+     demo.launch(share=True)
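
Note: the new Examples block points at ./example/example.csv, so the repository needs a small CSV at that path for the example to load. A minimal sketch for generating one locally is shown below; the column names and values are purely illustrative (not part of this commit) and are chosen only to exercise the numeric, categorical, and 'date' branches of create_visualizations.

import os
import pandas as pd

# Hypothetical sample data; any CSV with numeric, categorical, and date columns works.
os.makedirs("./example", exist_ok=True)
sample = pd.DataFrame({
    "date": pd.date_range("2024-01-01", periods=12, freq="D").astype(str),
    "value": [3, 5, 2, 8, 7, 6, 9, 4, 10, 11, 8, 12],
    "count": [1, 2, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144],
    "category": list("abcabcabcabc"),
})
sample.to_csv("./example/example.csv", index=False)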