blanchon committed
Commit fdd8ad8 · 1 Parent(s): 153befb

Initial Commit

Files changed (4)
  1. README.md +21 -3
  2. app.py +190 -0
  3. pyproject.toml +21 -0
  4. requirements.txt +10 -0
README.md CHANGED
@@ -1,12 +1,30 @@
 ---
-title: VirtualStagingDemo
+title: VirtualStaging
 emoji: 🌖
 colorFrom: pink
 colorTo: red
 sdk: gradio
+python_version: 3.12
 sdk_version: 5.12.0
+suggested_hardware: a100-large
 app_file: app.py
-pinned: false
+# fullWidth: true
+# header: mini
+# models: blanchon/VirtualStaging
+# datasets: blanchon/VirtualStagingDataset
+tags:
+- image-generation
+- image-to-image
+- furniture
+- virtual-staging
+- home-decor
+- home-design
+pinned: true
+# preload_from_hub:
+# - blanchon/VirtualStaging
+license: mit
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# VirtualStaging
+
+...
app.py ADDED
@@ -0,0 +1,190 @@
+import os
+import torch
+from PIL import Image
+from diffusers import DiffusionPipeline
+import gradio as gr
+import spaces
+
+DEVICE = "cuda"
+
+MAIN_MODEL_REPO_ID = os.getenv("MAIN_MODEL_REPO_ID", None)
+SUB_MODEL_REPO_ID = os.getenv("SUB_MODEL_REPO_ID", None)
+SUB_MODEL_SUBFOLDER = os.getenv("SUB_MODEL_SUBFOLDER", None)
+
+if MAIN_MODEL_REPO_ID is None:
+    raise ValueError("MAIN_MODEL_REPO_ID is not set")
+if SUB_MODEL_REPO_ID is None:
+    raise ValueError("SUB_MODEL_REPO_ID is not set")
+if SUB_MODEL_SUBFOLDER is None:
+    raise ValueError("SUB_MODEL_SUBFOLDER is not set")
+
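+# Load the base model as a diffusers custom pipeline, then pull the staging
+# weights from a subfolder of the sub-model repo. `pipeline.load(...)` is
+# assumed to be defined by the custom pipeline class, not by DiffusionPipeline.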
+pipeline = DiffusionPipeline.from_pretrained(
+    MAIN_MODEL_REPO_ID,
+    custom_pipeline=SUB_MODEL_REPO_ID,
+).to(DEVICE)
+
+pipeline.load(
+    SUB_MODEL_REPO_ID,
+    subfolder=SUB_MODEL_SUBFOLDER,
+)
+
+
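+# Helper: crop width/height down to the nearest multiple of 16, the granularity
+# the pipeline's VAE expects (see the call in `predict` below).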
+def crop_divisible_by_16(image: Image.Image) -> Image.Image:
+    w, h = image.size
+    w = w - w % 16
+    h = h - h % 16
+    return image.crop((0, 0, w, h))
+
+
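+# ZeroGPU: the decorator requests a GPU for each call, capped at 150 seconds.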
+@spaces.GPU(duration=150)
+def predict(
+    room_image_input: Image.Image,
+    room_image_category: str,
+    custom_prompt: str | None = None,
+    num_inference_steps: int = 28,
+    max_dimension: int = 1024,
+    seed: int = 0,
+    condition_scale: float = 1.0,
+    progress: gr.Progress = gr.Progress(track_tqdm=True),  # noqa: ARG001, B008
+) -> Image.Image:
+    # Resize to max dimension
+    room_image_input.thumbnail((max_dimension, max_dimension))
+    # Ensure dimensions are multiple of 16 (for VAE)
+    room_image_input = crop_divisible_by_16(room_image_input)
+
+    prompt = f"[VIRTUAL STAGING] {room_image_category}\n"
+    if custom_prompt:
+        prompt += f" {custom_prompt}"
+
+    generator = torch.Generator(device=DEVICE).manual_seed(seed)
+
+    final_images = pipeline(
+        condition_image=room_image_input,
+        condition_scale=condition_scale,
+        prompt=prompt,
+        num_inference_steps=num_inference_steps,
+        generator=generator,
+    )
+
+    return final_images
+
+
+intro_markdown = r"""
+# Virtual Staging Demo
+"""
+
+css = r"""
+#col-left {
+  margin: 0 auto;
+  max-width: 650px;
+}
+#col-right {
+  margin: 0 auto;
+  max-width: 650px;
+}
+#col-showcase {
+  margin: 0 auto;
+  max-width: 1100px;
+}
+"""
+
+
+with gr.Blocks(css=css) as demo:
+    gr.Markdown(intro_markdown)
+
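+    # The control row is created hidden (visible=False); nothing in this file
+    # toggles it, so the inputs stay concealed unless that flag is changed.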
+    with gr.Row(visible=False) as content:
+        with gr.Column(elem_id="col-left"):
+            gr.HTML(
+                """
+                <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+                    <div>
+                        Step 1. Upload a room image ⬇️
+                    </div>
+                </div>
+                """,
+                max_height=50,
+            )
+            room_image_input = gr.Image(
+                label="room",
+                type="pil",
+                sources=["upload"],
+                image_mode="RGB",
+            )
+            room_image_category = gr.Dropdown(
+                label="Room category",
+                choices=[
+                    "bedroom",
+                    "living room",
+                    "bathroom",
+                ],
+                info="Select the room category",
+                multiselect=False,
+            )
+        with gr.Column(elem_id="col-right"):
+            gr.HTML(
+                """
+                <div style="display: flex; justify-content: center; align-items: center; text-align: center; font-size: 20px;">
+                    <div>
+                        Step 2. Press Run to launch
+                    </div>
+                </div>
+                """,
+                max_height=50,
+            )
+            result = gr.Image(label="result")
+            run_button = gr.Button("Run")
+
+    with gr.Accordion("Advanced Settings", open=False):
+        custom_prompt = gr.Text(
+            label="Prompt",
+            max_lines=3,
+            placeholder="Enter a custom prompt",
+            container=False,
+        )
+        seed = gr.Slider(
+            label="Seed",
+            minimum=0,
+            maximum=100_000,
+            step=1,
+            value=0,
+        )
+        condition_scale = gr.Slider(
+            label="Condition Scale",
+            minimum=-10.0,
+            maximum=10.0,
+            step=0.10,
+            value=1.0,
+        )
+        with gr.Column():
+            max_dimension = gr.Slider(
+                label="Max Dimension",
+                minimum=512,
+                maximum=2048,
+                step=128,
+                value=1024,
+            )
+
+            num_inference_steps = gr.Slider(
+                label="Number of inference steps",
+                minimum=1,
+                maximum=50,
+                step=1,
+                value=28,
+            )
+
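+    # Gradio passes `inputs` to the callback positionally, so the list must
+    # follow predict's parameter order:
+    # (image, category, prompt, steps, max_dimension, seed, condition_scale).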
+    run_button.click(
+        fn=predict,
+        inputs=[
+            room_image_input,
+            room_image_category,
+            custom_prompt,
+            num_inference_steps,
+            max_dimension,
+            seed,
+            condition_scale,
+        ],
+        outputs=[result],
+    )
+
+
+demo.launch()
pyproject.toml ADDED
@@ -0,0 +1,21 @@
+[project]
+name = "VirtualStaging"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.12"
+dependencies = [
+    "accelerate>=1.2.1",
+    "diffusers",
+    "gradio>=5.12.0",
+    "gradio-imageslider>=0.0.20",
+    "peft>=0.14.0",
+    "pillow>=11.1.0",
+    "safetensors>=0.5.2",
+    "sentencepiece>=0.2.0",
+    "spaces>=0.32.0",
+    "transformers>=4.48.0",
+]
+
+[tool.uv.sources]
+diffusers = { git = "https://github.com/huggingface/diffusers.git" }
requirements.txt ADDED
@@ -0,0 +1,10 @@
+git+https://github.com/huggingface/diffusers.git
+transformers
+accelerate
+safetensors
+sentencepiece
+peft
+gradio
+spaces
+pillow
+gradio_imageslider