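"""TempoFunk dataset loading script.

A `datasets.GeneratorBasedBuilder` that streams precomputed prompt embeddings,
per-frame video arrays, and per-video metadata from the repository's `data/`
directory (`data/id_list.json`, `data/videos/`, `data/prompts/`, `data/metadata/`).
"""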
import datasets
import json
import numpy

_FEATURES = datasets.Features(
    {
        "id": datasets.Value("string"),
        "prompt": datasets.Array3D(shape=(1, 77, 768), dtype="float32"),
        "video": datasets.Sequence(feature=datasets.Array3D(shape=(4, 64, 64), dtype="float64")),
        "description": datasets.Value("string"),
        "videourl": datasets.Value("string"),
        "categories": datasets.Value("string"),
        "duration": datasets.Value("float"),
        "full_metadata": datasets.Value("string"),
    }
)
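
# The stored .npy arrays match these shapes: each prompt is a (1, 77, 768)
# float32 embedding and each video is a sequence of (4, 64, 64) float64 frame
# arrays (possibly latent-space frames; the script itself does not say which
# encoders produced them, so that reading is an assumption).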

class FunkLoaderStream(datasets.GeneratorBasedBuilder):
    """TempoFunk Dataset"""

    def _info(self):
        return datasets.DatasetInfo(
            description="TempoFunk Dataset",
            features=_FEATURES,
            homepage="None",
            citation="None",
            license="None",
        )

    def _split_generators(self, dl_manager):
        # Download the list of video ids and cut it into shards of 20 ids;
        # each shard becomes its own named split (split_0, split_1, ...).
        id_list_path = dl_manager.download("data/id_list.json")
        print("id_list available at:", id_list_path)
        with open(id_list_path, "r") as f:
            _ID_LIST = json.load(f)
        _SHARD_LENGTH = 20
        _SPLITS = [_ID_LIST[i:i + _SHARD_LENGTH] for i in range(0, len(_ID_LIST), _SHARD_LENGTH)]
        print("avail splits: ", _SPLITS)
        split_generators = []
        _split_count = 0
        for split in _SPLITS:
            # Resolve the per-video files (frame arrays, prompt embedding,
            # metadata JSON) for every id in this shard.
            _list = []
            for video_id in split:
                _list.append({
                    "frames": dl_manager.download(f"data/videos/{video_id}.npy"),
                    "prompt": dl_manager.download(f"data/prompts/{video_id}.npy"),
                    "metadata": dl_manager.download(f"data/metadata/{video_id}.json"),
                })
            split_generators.append(
                datasets.SplitGenerator(
                    name=f"split_{_split_count}",
                    gen_kwargs={
                        "chunk_container": _list,
                    },
                )
            )
            _split_count += 1
        print("Total Splits: ", _split_count)
        return split_generators

    def _generate_examples(self, chunk_container):
        """Generate examples (prompt/video embeddings plus metadata) for a split."""
        for video_entry in chunk_container:
            frames_binary = video_entry["frames"]
            prompt_binary = video_entry["prompt"]
            with open(video_entry["metadata"], "r") as f:
                metadata = json.load(f)
            txt_embed = numpy.load(prompt_binary)
            vid_embed = numpy.load(frames_binary)
            # txt_embed = torch.load(open(prompt_binary, 'rb')).cpu().detach().numpy()
            # vid_embed = torch.load(open(frames_binary, 'rb'))
            print(vid_embed.shape)
            yield metadata["id"], {
                "id": metadata["id"],
                "prompt": txt_embed,
                "video": vid_embed,
                "description": metadata["description"],
                "videourl": metadata["videourl"],
                "categories": metadata["categories"],
                "duration": metadata["duration"],
                # Serialize the metadata dict so it matches the declared
                # Value("string") feature for "full_metadata".
                "full_metadata": json.dumps(metadata),
            }
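

if __name__ == "__main__":
    # Usage sketch, not part of the loader itself (it is ignored when the
    # `datasets` library imports this script). Assumptions: this file sits next
    # to the data/ directory, and the installed `datasets` version accepts
    # `trust_remote_code`; adjust the arguments to your setup.
    ds = datasets.load_dataset(
        __file__,                 # load via this local loading script
        split="split_0",          # shards are exposed as split_0, split_1, ...
        trust_remote_code=True,
    )
    example = ds[0]
    print(example["id"], example["description"])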