from colpali_engine.models import ColQwen2, ColQwen2Processor
from peft import PeftModel

# Load the base model weights in bfloat16.
base_model = ColQwen2.from_pretrained(
    "michaelfeil/colqwen2-v0.1",
    torch_dtype="bfloat16",
)

# Apply the vidore/colqwen2-v0.1 LoRA adapter and merge it into the base weights.
merged_model = PeftModel.from_pretrained(base_model, "vidore/colqwen2-v0.1")
merged_model: ColQwen2 = merged_model.merge_and_unload()
processor = ColQwen2Processor.from_pretrained("vidore/colqwen2-v0.1")

# Push the processor and the merged weights to the Hub.
processor.push_to_hub("michaelfeil/colqwen2-v0.1")
# merged_model.save_pretrained("mf-colqwen2-v0.1", max_shard_size="2GB")
merged_model.push_to_hub("michaelfeil/colqwen2-v0.1", max_shard_size="2GB")
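
Once merged, the checkpoint can be loaded without PEFT. Below is a minimal retrieval sketch following the standard colpali_engine usage; the device map and the example image/query inputs are illustrative assumptions, not part of the merge script above.

import torch
from PIL import Image

from colpali_engine.models import ColQwen2, ColQwen2Processor

# Load the merged weights directly -- no adapter step needed.
model = ColQwen2.from_pretrained(
    "michaelfeil/colqwen2-v0.1",
    torch_dtype=torch.bfloat16,
    device_map="cuda:0",  # assumption: a single CUDA device is available
).eval()
processor = ColQwen2Processor.from_pretrained("michaelfeil/colqwen2-v0.1")

# Illustrative inputs.
images = [Image.new("RGB", (448, 448), color="white")]
queries = ["What does the chart on page 3 show?"]

# Embed pages and queries, then score them with late interaction.
batch_images = processor.process_images(images).to(model.device)
batch_queries = processor.process_queries(queries).to(model.device)

with torch.no_grad():
    image_embeddings = model(**batch_images)
    query_embeddings = model(**batch_queries)

scores = processor.score_multi_vector(query_embeddings, image_embeddings)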