---
{}
---
    from colpali_engine.models import ColQwen2, ColQwen2Processor
    from peft import PeftModel

    # Load the base model weights in bfloat16.
    model = ColQwen2.from_pretrained(
        "michaelfeil/colqwen2-v0.1",
        torch_dtype="bfloat16",
    )
    # Attach the LoRA adapter and merge it into the base weights.
    merged_model = PeftModel.from_pretrained(model, "vidore/colqwen2-v0.1")
    merged_model: ColQwen2 = merged_model.merge_and_unload()
    processor = ColQwen2Processor.from_pretrained("vidore/colqwen2-v0.1")

    # Upload the processor and the merged (adapter-free) model to the Hub.
    processor.push_to_hub("michaelfeil/colqwen2-v0.1")
    # merged_model.save_pretrained("mf-colqwen2-v0.1", max_shard_size="2GB")
    merged_model.push_to_hub("michaelfeil/colqwen2-v0.1", max_shard_size="2GB")
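
As a quick sanity check, the merged checkpoint can then be loaded directly, with no `PeftModel` wrapping needed. This is a minimal sketch, assuming the push above succeeded and `michaelfeil/colqwen2-v0.1` now holds both the merged weights and the processor config; the query string is just a placeholder.

    import torch
    from colpali_engine.models import ColQwen2, ColQwen2Processor

    # Load the merged weights directly from the Hub (no adapter to attach).
    model = ColQwen2.from_pretrained(
        "michaelfeil/colqwen2-v0.1",
        torch_dtype=torch.bfloat16,
        device_map="auto",
    ).eval()
    processor = ColQwen2Processor.from_pretrained("michaelfeil/colqwen2-v0.1")

    # Embed a placeholder query; the output is a multi-vector embedding.
    batch = processor.process_queries(["example query"]).to(model.device)
    with torch.no_grad():
        query_embeddings = model(**batch)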