update CLIP load device
llava/model/multimodal_encoder/clip_encoder.py
CHANGED
@@ -27,7 +27,7 @@ class CLIPVisionTower(nn.Module):
             return
 
         self.image_processor = CLIPImageProcessor.from_pretrained(self.vision_tower_name)
-        self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name, device_map=
+        self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name, device_map='cuda')  # Error on the HuggingFace for auto
         self.vision_tower.requires_grad_(False)
 
         self.is_loaded = True
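For context, here is a minimal standalone sketch of the patched load path. It uses the transformers classes already referenced in clip_encoder.py; the helper name load_vision_tower, its arguments, and the example checkpoint are illustrative and not part of the commit. Note that passing device_map to from_pretrained requires the accelerate package to be installed.

# Minimal sketch (not the repo's exact load_model method): load the frozen
# CLIP vision tower and its image processor with an explicit device_map.
from transformers import CLIPImageProcessor, CLIPVisionModel

def load_vision_tower(vision_tower_name, device_map='cuda'):
    # 'cuda' mirrors the commit; device_map='auto' reportedly errors on HuggingFace.
    image_processor = CLIPImageProcessor.from_pretrained(vision_tower_name)
    vision_tower = CLIPVisionModel.from_pretrained(vision_tower_name, device_map=device_map)
    vision_tower.requires_grad_(False)  # keep the vision tower frozen during training
    vision_tower.eval()
    return image_processor, vision_tower

# Illustrative usage with a CLIP checkpoint commonly paired with LLaVA:
# image_processor, vision_tower = load_vision_tower('openai/clip-vit-large-patch14-336')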