Ashoka74 committed
Commit 10d4b8f · verified · 1 Parent(s): 43ecd1c

Update merged_app2.py

Files changed (1): merged_app2.py (+95 -95)
merged_app2.py CHANGED
@@ -116,7 +116,7 @@ model_path3 = './checkpoints/sam2_hiera_large.pt'
 model_path4 = './checkpoints/config.json'
 model_path5 = './checkpoints/preprocessor_config.json'
 model_path6 = './configs/sam2_hiera_l.yaml'
-#model_path7 = './mvadapter_i2mv_sdxl.safetensors'
+model_path7 = './mvadapter_i2mv_sdxl.safetensors'
 
 # Base URL for the repository
 BASE_URL = 'https://huggingface.co/Ashoka74/Placement/resolve/main/'
@@ -129,7 +129,7 @@ model_urls = {
     model_path4: 'config.json',
     model_path5: 'preprocessor_config.json',
     model_path6: 'sam2_hiera_l.yaml',
-    # model_path7: 'mvadapter_i2mv_sdxl.safetensors'
+    model_path7: 'mvadapter_i2mv_sdxl.safetensors'
 }
 
 # Ensure directories exist
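
Note: download_models() (called in the next hunk) presumably iterates this mapping; its definition lives earlier in merged_app2.py and is unchanged by this commit. A minimal sketch, assuming it pairs each local path with BASE_URL plus the remote filename:

import os
import urllib.request

def download_models() -> None:
    # Sketch only: fetch each missing checkpoint from the repo.
    # Target folders are assumed to be pre-created by ensure_directories().
    for local_path, remote_name in model_urls.items():
        if not os.path.exists(local_path):
            urllib.request.urlretrieve(BASE_URL + remote_name, local_path)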
@@ -153,13 +153,13 @@ ensure_directories()
 download_models()
 
 
-# hf_hub_download(repo_id="black-forest-labs/FLUX.1-Redux-dev", filename="flux1-redux-dev.safetensors", local_dir="models/style_models")
-# hf_hub_download(repo_id="black-forest-labs/FLUX.1-Depth-dev", filename="flux1-depth-dev.safetensors", local_dir="models/diffusion_models")
-# hf_hub_download(repo_id="Comfy-Org/sigclip_vision_384", filename="sigclip_vision_patch14_384.safetensors", local_dir="models/clip_vision")
-# hf_hub_download(repo_id="Kijai/DepthAnythingV2-safetensors", filename="depth_anything_v2_vitl_fp32.safetensors", local_dir="models/depthanything")
-# hf_hub_download(repo_id="black-forest-labs/FLUX.1-dev", filename="ae.safetensors", local_dir="models/vae/FLUX1")
-# hf_hub_download(repo_id="comfyanonymous/flux_text_encoders", filename="clip_l.safetensors", local_dir="models/text_encoders")
-# t5_path = hf_hub_download(repo_id="comfyanonymous/flux_text_encoders", filename="t5xxl_fp16.safetensors", local_dir="models/text_encoders/t5")
+hf_hub_download(repo_id="black-forest-labs/FLUX.1-Redux-dev", filename="flux1-redux-dev.safetensors", local_dir="models/style_models")
+hf_hub_download(repo_id="black-forest-labs/FLUX.1-Depth-dev", filename="flux1-depth-dev.safetensors", local_dir="models/diffusion_models")
+hf_hub_download(repo_id="Comfy-Org/sigclip_vision_384", filename="sigclip_vision_patch14_384.safetensors", local_dir="models/clip_vision")
+hf_hub_download(repo_id="Kijai/DepthAnythingV2-safetensors", filename="depth_anything_v2_vitl_fp32.safetensors", local_dir="models/depthanything")
+hf_hub_download(repo_id="black-forest-labs/FLUX.1-dev", filename="ae.safetensors", local_dir="models/vae/FLUX1")
+hf_hub_download(repo_id="comfyanonymous/flux_text_encoders", filename="clip_l.safetensors", local_dir="models/text_encoders")
+t5_path = hf_hub_download(repo_id="comfyanonymous/flux_text_encoders", filename="t5xxl_fp16.safetensors", local_dir="models/text_encoders/t5")
 
 
 sd15_name = 'stablediffusionapi/realistic-vision-v51'
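
For reference, huggingface_hub's hf_hub_download fetches a single file from a Hub repo, skips the transfer if an up-to-date copy is already present, and returns the local file path, which is why only the last call's result (t5_path) is bound above. A small standalone usage sketch:

from huggingface_hub import hf_hub_download

# Downloads (or reuses) one file and returns its local path.
clip_l_path = hf_hub_download(
    repo_id="comfyanonymous/flux_text_encoders",
    filename="clip_l.safetensors",
    local_dir="models/text_encoders",
)
print(clip_l_path)  # e.g. models/text_encoders/clip_l.safetensors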
@@ -1375,8 +1375,8 @@ def add_extra_model_paths() -> None:
         print("Could not find the extra_model_paths config file.")
 
 # Initialize paths
-# add_comfyui_directory_to_sys_path()
-# add_extra_model_paths()
+add_comfyui_directory_to_sys_path()
+add_extra_model_paths()
 
 def import_custom_nodes() -> None:
     import asyncio
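
The two helpers enabled here are defined earlier in merged_app2.py, outside this diff. A hypothetical sketch of the first, assuming the usual pattern of making a ComfyUI checkout importable:

import os
import sys

def add_comfyui_directory_to_sys_path() -> None:
    # Sketch only: locate a ComfyUI checkout and expose its modules.
    comfyui_path = os.path.abspath("ComfyUI")  # hypothetical location
    if os.path.isdir(comfyui_path) and comfyui_path not in sys.path:
        sys.path.append(comfyui_path)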
@@ -1389,90 +1389,90 @@ def import_custom_nodes() -> None:
     execution.PromptQueue(server_instance)
     init_extra_nodes()
 
-# Import all necessary nodes
-# from nodes import (
-#     StyleModelLoader,
-#     VAEEncode,
-#     NODE_CLASS_MAPPINGS,
-#     LoadImage,
-#     CLIPVisionLoader,
-#     SaveImage,
-#     VAELoader,
-#     CLIPVisionEncode,
-#     DualCLIPLoader,
-#     EmptyLatentImage,
-#     VAEDecode,
-#     UNETLoader,
-#     CLIPTextEncode,
-# )
-
-# Initialize all constant nodes and models in global context
-# import_custom_nodes()
-
-# Global variables for preloaded models and constants
-# with torch.inference_mode():
-#     # Initialize constants
-#     intconstant = NODE_CLASS_MAPPINGS["INTConstant"]()
-#     CONST_1024 = intconstant.get_value(value=1024)
-
-#     # Load CLIP
-#     dualcliploader = DualCLIPLoader()
-#     CLIP_MODEL = dualcliploader.load_clip(
-#         clip_name1="t5/t5xxl_fp16.safetensors",
-#         clip_name2="clip_l.safetensors",
-#         type="flux",
-#     )
-
-#     # Load VAE
-#     vaeloader = VAELoader()
-#     VAE_MODEL = vaeloader.load_vae(vae_name="FLUX1/ae.safetensors")
-
-#     # Load UNET
-#     unetloader = UNETLoader()
-#     UNET_MODEL = unetloader.load_unet(
-#         unet_name="flux1-depth-dev.safetensors", weight_dtype="default"
-#     )
-
-#     # Load CLIP Vision
-#     clipvisionloader = CLIPVisionLoader()
-#     CLIP_VISION_MODEL = clipvisionloader.load_clip(
-#         clip_name="sigclip_vision_patch14_384.safetensors"
-#     )
-
-#     # Load Style Model
-#     stylemodelloader = StyleModelLoader()
-#     STYLE_MODEL = stylemodelloader.load_style_model(
-#         style_model_name="flux1-redux-dev.safetensors"
-#     )
-
-#     # Initialize samplers
-#     ksamplerselect = NODE_CLASS_MAPPINGS["KSamplerSelect"]()
-#     SAMPLER = ksamplerselect.get_sampler(sampler_name="euler")
-
-#     # Initialize depth model
-#     cr_clip_input_switch = NODE_CLASS_MAPPINGS["CR Clip Input Switch"]()
-#     downloadandloaddepthanythingv2model = NODE_CLASS_MAPPINGS["DownloadAndLoadDepthAnythingV2Model"]()
-#     DEPTH_MODEL = downloadandloaddepthanythingv2model.loadmodel(
-#         model="depth_anything_v2_vitl_fp32.safetensors"
-#     )
-#     cliptextencode = CLIPTextEncode()
-#     loadimage = LoadImage()
-#     vaeencode = VAEEncode()
-#     fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
-#     instructpixtopixconditioning = NODE_CLASS_MAPPINGS["InstructPixToPixConditioning"]()
-#     clipvisionencode = CLIPVisionEncode()
-#     stylemodelapplyadvanced = NODE_CLASS_MAPPINGS["StyleModelApplyAdvanced"]()
-#     emptylatentimage = EmptyLatentImage()
-#     basicguider = NODE_CLASS_MAPPINGS["BasicGuider"]()
-#     basicscheduler = NODE_CLASS_MAPPINGS["BasicScheduler"]()
-#     randomnoise = NODE_CLASS_MAPPINGS["RandomNoise"]()
-#     samplercustomadvanced = NODE_CLASS_MAPPINGS["SamplerCustomAdvanced"]()
-#     vaedecode = VAEDecode()
-#     cr_text = NODE_CLASS_MAPPINGS["CR Text"]()
-#     saveimage = SaveImage()
-#     getimagesizeandcount = NODE_CLASS_MAPPINGS["GetImageSizeAndCount"]()
-#     depthanything_v2 = NODE_CLASS_MAPPINGS["DepthAnything_V2"]()
-#     imageresize = NODE_CLASS_MAPPINGS["ImageResize+"]()
+# Import all necessary nodes
+from nodes import (
+    StyleModelLoader,
+    VAEEncode,
+    NODE_CLASS_MAPPINGS,
+    LoadImage,
+    CLIPVisionLoader,
+    SaveImage,
+    VAELoader,
+    CLIPVisionEncode,
+    DualCLIPLoader,
+    EmptyLatentImage,
+    VAEDecode,
+    UNETLoader,
+    CLIPTextEncode,
+)
+
+# Initialize all constant nodes and models in global context
+import_custom_nodes()
+
+# Global variables for preloaded models and constants
+with torch.inference_mode():
+    # Initialize constants
+    intconstant = NODE_CLASS_MAPPINGS["INTConstant"]()
+    CONST_1024 = intconstant.get_value(value=1024)
+
+    # Load CLIP
+    dualcliploader = DualCLIPLoader()
+    CLIP_MODEL = dualcliploader.load_clip(
+        clip_name1="t5/t5xxl_fp16.safetensors",
+        clip_name2="clip_l.safetensors",
+        type="flux",
+    )
+
+    # Load VAE
+    vaeloader = VAELoader()
+    VAE_MODEL = vaeloader.load_vae(vae_name="FLUX1/ae.safetensors")
+
+    # Load UNET
+    unetloader = UNETLoader()
+    UNET_MODEL = unetloader.load_unet(
+        unet_name="flux1-depth-dev.safetensors", weight_dtype="default"
+    )
+
+    # Load CLIP Vision
+    clipvisionloader = CLIPVisionLoader()
+    CLIP_VISION_MODEL = clipvisionloader.load_clip(
+        clip_name="sigclip_vision_patch14_384.safetensors"
+    )
+
+    # Load Style Model
+    stylemodelloader = StyleModelLoader()
+    STYLE_MODEL = stylemodelloader.load_style_model(
+        style_model_name="flux1-redux-dev.safetensors"
+    )
+
+    # Initialize samplers
+    ksamplerselect = NODE_CLASS_MAPPINGS["KSamplerSelect"]()
+    SAMPLER = ksamplerselect.get_sampler(sampler_name="euler")
+
+    # Initialize depth model
+    cr_clip_input_switch = NODE_CLASS_MAPPINGS["CR Clip Input Switch"]()
+    downloadandloaddepthanythingv2model = NODE_CLASS_MAPPINGS["DownloadAndLoadDepthAnythingV2Model"]()
+    DEPTH_MODEL = downloadandloaddepthanythingv2model.loadmodel(
+        model="depth_anything_v2_vitl_fp32.safetensors"
+    )
+    cliptextencode = CLIPTextEncode()
+    loadimage = LoadImage()
+    vaeencode = VAEEncode()
+    fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
+    instructpixtopixconditioning = NODE_CLASS_MAPPINGS["InstructPixToPixConditioning"]()
+    clipvisionencode = CLIPVisionEncode()
+    stylemodelapplyadvanced = NODE_CLASS_MAPPINGS["StyleModelApplyAdvanced"]()
+    emptylatentimage = EmptyLatentImage()
+    basicguider = NODE_CLASS_MAPPINGS["BasicGuider"]()
+    basicscheduler = NODE_CLASS_MAPPINGS["BasicScheduler"]()
+    randomnoise = NODE_CLASS_MAPPINGS["RandomNoise"]()
+    samplercustomadvanced = NODE_CLASS_MAPPINGS["SamplerCustomAdvanced"]()
+    vaedecode = VAEDecode()
+    cr_text = NODE_CLASS_MAPPINGS["CR Text"]()
+    saveimage = SaveImage()
+    getimagesizeandcount = NODE_CLASS_MAPPINGS["GetImageSizeAndCount"]()
+    depthanything_v2 = NODE_CLASS_MAPPINGS["DepthAnything_V2"]()
+    imageresize = NODE_CLASS_MAPPINGS["ImageResize+"]()
 
 
 @spaces.GPU
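
A note on the with torch.inference_mode(): wrapper now enabled around the preloading block: inference mode disables autograd tracking for everything created inside it, so these global, inference-only models carry no gradient bookkeeping. A small self-contained demonstration:

import torch

# Tensors created under inference_mode have no autograd state and
# cannot participate in a backward pass.
with torch.inference_mode():
    t = torch.ones(2, 2) * 3

print(t.requires_grad)        # False
print(torch.is_inference(t))  # True: created under inference_mode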
 