Update app.py
app.py (changed)
@@ -14,11 +14,11 @@ blip_model_id = "Salesforce/blip-image-captioning-base"
 pipe = StableDiffusionPipeline.from_pretrained(model_id)
 pipe = pipe.to("cpu") #cuda
 
-blip_model = BlipForConditionalGeneration.from_pretrained(blip_model_id, torch_dtype=torch.
+blip_model = BlipForConditionalGeneration.from_pretrained(blip_model_id, torch_dtype=torch.float).to("CPU") #CUDA
 processor = BlipProcessor.from_pretrained(blip_model_id)
 
 def predict(image):
-    inputs = processor(image, return_tensors="pt").to("cpu", torch.
+    inputs = processor(image, return_tensors="pt").to("cpu", torch.float) #cuda
     output_blip = blip_model.generate(**inputs)
     prompt = processor.decode(output_blip[0], skip_special_tokens=True)
 
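For context, here is a minimal, self-contained sketch of how the changed region of app.py reads once completed for CPU inference. It is an illustration under assumptions, not the exact file: the removed lines above are cut off in the diff view, model_id is defined earlier in app.py (a placeholder value is used here), and the final return is assumed. Note that PyTorch device strings are lowercase, so the committed .to("CPU") would raise a RuntimeError at load time; the sketch uses .to("cpu").

# Sketch only: reconstructs the changed region of app.py for CPU inference.
import torch
from diffusers import StableDiffusionPipeline
from transformers import BlipForConditionalGeneration, BlipProcessor

model_id = "runwayml/stable-diffusion-v1-5"  # placeholder; the real value is set earlier in app.py
blip_model_id = "Salesforce/blip-image-captioning-base"

pipe = StableDiffusionPipeline.from_pretrained(model_id)
pipe = pipe.to("cpu")  # switch to "cuda" on a GPU machine

# float32 on CPU; the uppercase "CPU" from the commit would raise a RuntimeError
blip_model = BlipForConditionalGeneration.from_pretrained(
    blip_model_id, torch_dtype=torch.float
).to("cpu")
processor = BlipProcessor.from_pretrained(blip_model_id)

def predict(image):
    # Caption the input image with BLIP, then reuse the caption as a prompt.
    inputs = processor(image, return_tensors="pt").to("cpu", torch.float)
    output_blip = blip_model.generate(**inputs)
    prompt = processor.decode(output_blip[0], skip_special_tokens=True)
    return prompt  # assumed; the rest of predict() lies outside this hunk

Staying at torch.float (float32) fits CPU execution, where half-precision kernels are often unsupported, which is presumably why the commit pairs torch.float with the cpu device.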