Ashoka74 committed on
Commit
251ea91
1 Parent(s): e36f52f

Update app_3.py

Browse files
Files changed (1) hide show
  1. app_3.py +14 -8
app_3.py CHANGED
@@ -75,11 +75,11 @@ transform_image = transforms.Compose(
75
  # Model paths
76
  model_path = './models/iclight_sd15_fc.safetensors'
77
  model_path2 = './checkpoints/depth_anything_v2_vits.pth'
78
- model_path3 = './checkpoints/sam2_hiera_large.pt'
79
  model_path4 = './checkpoints/config.json'
80
  model_path5 = './checkpoints/preprocessor_config.json'
81
- model_path6 = './configs/sam2_hiera_l.yaml'
82
- model_path7 = './mvadapter_i2mv_sdxl.safetensors'
83
 
84
  # Base URL for the repository
85
  BASE_URL = 'https://huggingface.co/Ashoka74/Placement/resolve/main/'
@@ -88,11 +88,11 @@ BASE_URL = 'https://huggingface.co/Ashoka74/Placement/resolve/main/'
88
  model_urls = {
89
  model_path: 'iclight_sd15_fc.safetensors',
90
  model_path2: 'depth_anything_v2_vits.pth',
91
- model_path3: 'sam2_hiera_large.pt',
92
  model_path4: 'config.json',
93
  model_path5: 'preprocessor_config.json',
94
- model_path6: 'sam2_hiera_l.yaml',
95
- model_path7: 'mvadapter_i2mv_sdxl.safetensors'
96
  }
97
 
98
  # Ensure directories exist
@@ -149,10 +149,16 @@ tokenizer = CLIPTokenizer.from_pretrained(sd15_name, subfolder="tokenizer")
149
  text_encoder = CLIPTextModel.from_pretrained(sd15_name, subfolder="text_encoder")
150
  vae = AutoencoderKL.from_pretrained(sd15_name, subfolder="vae")
151
  unet = UNet2DConditionModel.from_pretrained(sd15_name, subfolder="unet")
 
152
  # Load model directly
153
  from transformers import AutoModelForImageSegmentation
154
- rmbg = AutoModelForImageSegmentation.from_pretrained("briaai/RMBG-2.0", trust_remote_code=True)#, token=os.getenv('token'))
155
- rmbg = rmbg.to(device=device, dtype=torch.float32) # Keep this as float32
 
 
 
 
 
156
 
157
  # remove bg
158
  # rmbg = AutoModelForImageSegmentation.from_pretrained(
 
75
  # Model paths
76
  model_path = './models/iclight_sd15_fc.safetensors'
77
  model_path2 = './checkpoints/depth_anything_v2_vits.pth'
78
+ #model_path3 = './checkpoints/sam2_hiera_large.pt'
79
  model_path4 = './checkpoints/config.json'
80
  model_path5 = './checkpoints/preprocessor_config.json'
81
+ #model_path6 = './configs/sam2_hiera_l.yaml'
82
+ #model_path7 = './mvadapter_i2mv_sdxl.safetensors'
83
 
84
  # Base URL for the repository
85
  BASE_URL = 'https://huggingface.co/Ashoka74/Placement/resolve/main/'
 
88
  model_urls = {
89
  model_path: 'iclight_sd15_fc.safetensors',
90
  model_path2: 'depth_anything_v2_vits.pth',
91
+ #model_path3: 'sam2_hiera_large.pt',
92
  model_path4: 'config.json',
93
  model_path5: 'preprocessor_config.json',
94
+ #model_path6: 'sam2_hiera_l.yaml',
95
+ #model_path7: 'mvadapter_i2mv_sdxl.safetensors'
96
  }
97
 
98
  # Ensure directories exist
 
149
  text_encoder = CLIPTextModel.from_pretrained(sd15_name, subfolder="text_encoder")
150
  vae = AutoencoderKL.from_pretrained(sd15_name, subfolder="vae")
151
  unet = UNet2DConditionModel.from_pretrained(sd15_name, subfolder="unet")
152
+
153
  # Load model directly
154
  from transformers import AutoModelForImageSegmentation
155
+ # rmbg = AutoModelForImageSegmentation.from_pretrained("briaai/RMBG-2.0", trust_remote_code=True)#, token=os.getenv('token'))
156
+ # rmbg = rmbg.to(device=device, dtype=torch.float32) # Keep this as float32
157
+
158
+ rmbg = AutoModelForImageSegmentation.from_pretrained(
159
+ "ZhengPeng7/BiRefNet", trust_remote_code=True
160
+ )
161
+ rmbg = rmbg.to(device=device, dtype=torch.float32)
162
 
163
  # remove bg
164
  # rmbg = AutoModelForImageSegmentation.from_pretrained(