Jordan Legg committed
Commit fa4f33d
1 Parent(s): ab94548

add application

README.md CHANGED
@@ -1,14 +1,14 @@
 ---
+python_version: 3.11.10
 title: CineDiffusion
-emoji: 🦀
+emoji: 🖼
 colorFrom: purple
-colorTo: gray
+colorTo: red
 sdk: gradio
 sdk_version: 5.6.0
 app_file: app.py
 pinned: false
-license: mit
-short_description: CineDiffusion is an application for creating very high resol
+license: apache-2.0
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,208 @@
+import gradio as gr
+import numpy as np
+# import random  # removed: unused
+import spaces
+import torch
+from diffusers import DiffusionPipeline
+from numpy.random import PCG64DXSM, Generator
+from typing import Tuple, Any
+
+dtype: torch.dtype = torch.bfloat16
+device: str = "cuda" if torch.cuda.is_available() else "cpu"
+MAX_SEED = np.iinfo(np.int32).max
+rng = Generator(PCG64DXSM())  # high-level Generator backed by the PCG64DXSM bit generator
+
+pipe = DiffusionPipeline.from_pretrained("shuttleai/shuttle-3-diffusion", torch_dtype=dtype).to(device)
+# Enable VAE tiling
+pipe.vae.enable_tiling()
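+# Tiled VAE decoding processes the latents in chunks, which keeps peak VRAM manageable at ultrawide resolutions.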
+
+# Define cinematic aspect ratios
+ASPECT_RATIOS = {
+    "2.39:1 (Modern Widescreen)": 2.39,
+    "2.76:1 (Ultra Panavision 70)": 2.76,
+    "3.00:1 (Experimental Ultra-wide)": 3.00,
+    "4.00:1 (Polyvision)": 4.00,
+    "2.55:1 (CinemaScope)": 2.55,
+    "2.20:1 (Todd-AO)": 2.20
+}
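+# Each value is width divided by height; the render height is derived from the user-selected width below.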
+
+MIN_WIDTH = 512
+MAX_WIDTH = 3072
+STANDARD_WIDTH = 2048
+STEP_WIDTH = 8
+STYLE_PROMPT = "hyperrealistic widescreen cinematic still shallow depth vignette high budget bokeh film grain dramatic lighting epic composition moody detailed wide shot atmospheric backlit soft light, "
+
+def calculate_height(width: int, aspect_ratio: float) -> int:
+    # Round the height down to the nearest multiple of 8
+    height = int(width / aspect_ratio)
+    return (height // 8) * 8
+
+# Pre-calculate height mappings for common widths
+HEIGHT_CACHE = {}
+for ratio_name, ratio in ASPECT_RATIOS.items():
+    HEIGHT_CACHE[ratio_name] = {
+        width: calculate_height(width, ratio)
+        for width in range(MIN_WIDTH, MAX_WIDTH + 1, STEP_WIDTH)
+    }
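+# e.g. HEIGHT_CACHE["2.39:1 (Modern Widescreen)"][2048] == 856 (2048 / 2.39 ≈ 856.9, floored to 856, already a multiple of 8)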
+
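+# validate_aspect_ratio is equivalent to ASPECT_RATIOS.get(ratio_name); the explicit match keeps the supported ratios easy to audit.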
+def validate_aspect_ratio(ratio_name: str) -> float | None:
+    match ratio_name:
+        case "2.39:1 (Modern Widescreen)":
+            return 2.39
+        case "2.76:1 (Ultra Panavision 70)":
+            return 2.76
+        case "3.00:1 (Experimental Ultra-wide)":
+            return 3.00
+        case "4.00:1 (Polyvision)":
+            return 4.00
+        case "2.55:1 (CinemaScope)":
+            return 2.55
+        case "2.20:1 (Todd-AO)":
+            return 2.20
+        case _:
+            return None
+
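+# spaces.GPU() requests a GPU for the duration of each call when the Space runs on ZeroGPU hardware.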
+@spaces.GPU()
+def infer(
+    prompt: str,
+    aspect_ratio: str,
+    width: int,
+    seed: int = 42,
+    randomize_seed: bool = False,
+    num_inference_steps: int = 4,
+    progress: Any = gr.Progress(track_tqdm=True)
+) -> Tuple[Any, int]:
+    # Prepend the style prompt to the user input
+    full_prompt = f"{STYLE_PROMPT} {prompt}"
+
+    if randomize_seed:
+        seed = int(rng.integers(0, MAX_SEED))
+
+    ratio = validate_aspect_ratio(aspect_ratio)
+    if ratio is None:
+        raise ValueError(f"Invalid aspect ratio: {aspect_ratio}")
+
+    generator = torch.Generator().manual_seed(seed)
+    height = HEIGHT_CACHE[aspect_ratio][width]
+
+    image = pipe(
+        prompt=full_prompt,
+        width=width,
+        height=height,
+        num_inference_steps=num_inference_steps,
+        generator=generator,
+        max_sequence_length=256
+    ).images[0]
+    return image, seed
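+
+# Hypothetical direct call, outside the Gradio UI (argument values are illustrative only):
+#   image, used_seed = infer("a lone rider at dusk", "2.39:1 (Modern Widescreen)", 2048, randomize_seed=True)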
+
+examples = [
+    # Taxi Driver
+    [
+        "This gripping frame captures a close-up of a man, his face illuminated by the harsh red glow of city lights, evoking a mood of unease and introspection. His expression is intense and unreadable, with a hint of brooding menace. The dark, blurred background suggests a bustling urban night, with neon lights flickering faintly, emphasizing the gritty, isolating atmosphere. The contrast between the man’s rugged features and the vibrant red lighting highlights the tension and internal conflict likely central to the scene, immersing the viewer in the character’s psychological state.",  # prompt
+        "2.39:1 (Modern Widescreen)",  # aspect_ratio
+        2048,  # width
+        0,  # seed
+        False,  # randomize_seed
+        4,  # num_inference_steps
+    ],
+    # Leon The Professional
+    [
+        "This tightly framed shot focuses on the reflective lenses of round sunglasses, worn by a figure with weathered skin. The reflections in the glasses reveal a table with cups and hands mid-gesture, suggesting an intense, unseen discussion or ritual taking place. The muted tones and soft lighting enhance the intimate and mysterious mood, drawing attention to the details of the reflections. The perspective feels voyeuristic, as if glimpsing a private moment through the character’s point of view. This evocative close-up emphasizes themes of observation, secrecy, and layered meaning within the narrative.",
+        "2.76:1 (Ultra Panavision 70)",
+        2048,
+        1744078352,
+        False,
+        4,
+    ],
+    # Lawrence of Arabia
+    [
+        "Three individuals on camels traversing a vast, sunlit desert. The golden sand stretches endlessly in the foreground, interrupted by the striking presence of dark, rugged mountains in the background, bathed in warm sunlight. The composition emphasizes the isolation and majesty of the desert landscape, with the figures casting long shadows that add depth to the scene. The muted blue sky contrasts beautifully with the earthy tones, creating a balanced and immersive visual. The moment conveys a sense of adventure, introspection, and the timeless allure of the natural world.",
+        "2.20:1 (Todd-AO)",
+        2048,
+        0,
+        False,
+        4,
+    ],
+]
+
+css = """
+#col-container {
+    margin: 0 auto;
+    max-width: 100%;
+}
+"""
+
+with gr.Blocks(css=css) as demo:
+
+    with gr.Column(elem_id="col-container"):
+        gr.Markdown("""# CineDiffusion
+CineDiffusion is an application for creating very high-resolution cinematic widescreen images based on the aspect ratios of historical widescreen cinema standards.
+        """)
+
+        with gr.Row():
+
+            prompt = gr.Text(
+                label="Prompt",
+                show_label=False,
+                max_lines=1,
+                placeholder="Enter your prompt",
+                container=False,
+            )
+
+            run_button = gr.Button("Run", scale=0)
+
+        result = gr.Image(label="Result", show_label=False, width="100%")
+
+        with gr.Row():
+            aspect_ratio = gr.Dropdown(
+                label="Aspect Ratio",
+                choices=list(ASPECT_RATIOS.keys()),
+                value="2.39:1 (Modern Widescreen)"
+            )
+
+        with gr.Accordion("Advanced Settings", open=False):
+
+            seed = gr.Slider(
+                label="Seed",
+                minimum=0,
+                maximum=MAX_SEED,
+                step=1,
+                value=0,
+            )
+
+            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+            with gr.Row():
+
+                width = gr.Slider(
+                    label="Width",
+                    minimum=MIN_WIDTH,
+                    maximum=MAX_WIDTH,
+                    step=STEP_WIDTH,
+                    value=STANDARD_WIDTH,
+                )
+
+                num_inference_steps = gr.Slider(
+                    label="Number of inference steps",
+                    minimum=1,
+                    maximum=50,
+                    step=1,
+                    value=4,
+                )
+
+        gr.Examples(
+            examples=examples,
+            fn=infer,
+            inputs=[prompt, aspect_ratio, width, seed, randomize_seed, num_inference_steps],
+            outputs=[result, seed],
+            cache_examples=True,
+            cache_mode="lazy"
+        )
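+        # With cache_examples=True and cache_mode="lazy", each example output is generated and cached the first time it is requested rather than at startup.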
+
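+    # gr.on binds both the Run button click and the prompt's submit event to the same handler.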
+    gr.on(
+        triggers=[run_button.click, prompt.submit],
+        fn=infer,
+        inputs=[prompt, aspect_ratio, width, seed, randomize_seed, num_inference_steps],
+        outputs=[result, seed]
+    )
+
+demo.launch()
requirements.txt ADDED
@@ -0,0 +1,7 @@
+accelerate
+diffusers
+invisible_watermark
+torch
+transformers
+xformers
+sentencepiece
samples/image-54.webp ADDED
samples/image-56.webp ADDED
samples/image-57.webp ADDED