---
dataset_info:
  features:
  - name: image
    dtype: image
  - name: canny_images
    dtype: image
  - name: image_caption
    dtype: string
  splits:
  - name: train
    num_bytes: 1568029838.25
    num_examples: 6462
  download_size: 1566706053
  dataset_size: 1568029838.25
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---
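A minimal sketch for loading the dataset and inspecting the schema declared in the metadata above. The repo id `Jieya/captioned_fractal_canny` is an assumption, taken from the script further down, which pushes the captioned data there:

```python
from datasets import load_dataset

# Assumed repo id; the metadata above declares the three columns checked below.
dataset = load_dataset("Jieya/captioned_fractal_canny", split="train")

print(dataset.num_rows)  # 6462 examples per the split metadata
print(dataset.features)  # image, canny_images, image_caption

example = dataset[0]
print(example["image_caption"])            # generated caption (string)
example["image"].save("fractal.png")       # source fractal image (PIL)
example["canny_images"].save("canny.png")  # corresponding Canny edge map (PIL)
```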
The `image_caption` column was generated with the BLIP image-captioning model using the script below:

```python
from datasets import load_dataset
from transformers import pipeline

# Load the source fractal/Canny dataset and keep the train split.
dataset_name = "Jieya/only_fractal_canny"
faces = load_dataset(dataset_name)
faces = faces["train"]

# BLIP image-to-text pipeline, running on GPU 0.
captioner = pipeline(
    "image-to-text",
    model="Salesforce/blip-image-captioning-large",
    device=0,
)

def caption_image_data(example):
    # Caption one image and store the text in a new column.
    image = example["image"]
    example["image_caption"] = captioner(image)[0]["generated_text"]
    return example

faces_proc = faces.map(caption_image_data)
faces_proc.push_to_hub("Jieya/captioned_fractal_canny")
```
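Captioning 6,462 images one at a time can be slow on GPU. A batched variant may run faster; this is a sketch, not part of the original script, and `batch_size=16` is an arbitrary choice:

```python
def caption_batch(examples):
    # With a list input and batch_size set, the pipeline returns one
    # [{"generated_text": ...}] list per input image.
    outputs = captioner(examples["image"], batch_size=16)
    examples["image_caption"] = [out[0]["generated_text"] for out in outputs]
    return examples

faces_proc = faces.map(caption_batch, batched=True, batch_size=16)
```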