Rorschach / .integrity / bafkr4ifay6uimdc6iv7a5kblryg2zf6nf6nywyuvuut52227xvlcnzzleu.node
{"node":{"id":"urn:cid:bafkr4ifay6uimdc6iv7a5kblryg2zf6nf6nywyuvuut52227xvlcnzzleu","properties":{"registeredBy":"did:key:z6MkhQD1A9eMQ8bZNGmBiCVz7kG4mfnApD7WjHKNhkZp7HEY","timestamp":"2024-01-29T15:59:12Z","nodeType":"data","dataRegistrationJcs":"urn:cid:baga6yaq6ebn6ucx7ix24yuqoksaurftohczlldwfpqw22rznwufksfr7ovg7i"}},"enrichments":{"asset_hub":{"asset_id":104,"asset_name":"BLIP: Bootstrapping Language-Image Pre-training","owning_project":"Salesforce Research","asset_description":"BLIP is a versatile model capable of performing tasks such as Visual Question Answering, Image-Text Retrieval, and Image Captioning. Developed by Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi, it utilizes Vision-Language Pre-training (VLP) to excel in both understanding-based and generation-based tasks. The model's efficacy is showcased through state-of-the-art results in various vision-language tasks.","asset_format":"PyTorch","asset_type":"Model","asset_blob_type":"iroh-collection","source_location_url":"","contact_info":"Refer to the original paper or Salesforce's official channels for contact information.","license":"bsd-3-clause","license_link":"https://opensource.org/license/bsd-3-clause/","registered_date":"2024-01-29T16:00:32.259189Z","last_modified_date":"2024-01-29T16:00:32.259189Z"}}}