obrookes committed on
Commit 4ce4025 • 1 Parent(s): eb24165

add str byte decoding

Files changed (1)
  1. frames.py +220 -0
frames.py ADDED
@@ -0,0 +1,220 @@
import contextlib
import functools
import io
import os
import time
from typing import Union

import av
import numpy as np
import torch


class FrameSelectionMethod:
    """
    Enum-like class for frame selection methods 🎞
    """

    RANDOM: str = "random"  # 🎲
    UNIFORM: str = "uniform"  # 📏
    SEQUENTIAL: str = "sequential"


def seek_to_second(container, stream, second):
    # Convert the second to the stream's time base
    timestamp = int(
        second * stream.time_base.denominator / stream.time_base.numerator
    )
    # Seek to the timestamp
    container.seek(timestamp, stream=stream)
    return container
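
# Illustrative note, not from the original commit: with a time_base of
# 1/90000 (a common FFmpeg tick rate), seek_to_second(container, stream, 10)
# computes int(10 * 90000 / 1) = 900000 ticks before seeking.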


def duration_in_seconds(stream):
    return float(stream.duration * stream.time_base)


def frame_timestamp_in_seconds(frame, stream):
    return float(frame.pts * stream.time_base)


def duration_in_seconds_from_path(video_path, modality):
    with av.open(video_path) as container:
        stream = next(s for s in container.streams if s.type == modality)
        return duration_in_seconds(stream)


def suppress_stderr(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with open(os.devnull, "w") as devnull:
            with contextlib.redirect_stderr(devnull):
                return func(*args, **kwargs)

    return wrapper
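
# Note: contextlib.redirect_stderr only captures Python-level writes to
# sys.stderr; FFmpeg's C-level log output may bypass it. If decoder noise
# still leaks through, av.logging.set_level(av.logging.ERROR) is a PyAV-native
# alternative (verify against the installed PyAV version).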


@suppress_stderr
def extract_frames_pyav(
    video_data: Union[str, bytes],
    modality: str,
    starting_second: float,
    ending_second: float,
    num_frames: int,
    rng: np.random.Generator,
    frame_selection_method: str = FrameSelectionMethod.RANDOM,
    key_frames_only: bool = False,
    stereo_audio_if_available: bool = False,
    single_image_frame: bool = False,
) -> torch.Tensor:
    frame_dict = {}

    # Accept either a filesystem path (str) or raw bytes already in memory
    video_source = (
        io.BytesIO(video_data) if isinstance(video_data, bytes) else video_data
    )

    with av.open(video_source) as container:
        stream = next(s for s in container.streams if s.type == modality)
        if key_frames_only:
            # Ask the decoder to skip everything but key frames
            stream.codec_context.skip_frame = "NONKEY"

        container = seek_to_second(container, stream, starting_second)

        # Get the duration of the video
        video_duration = duration_in_seconds(stream)
        # print(f"Video duration: {video_duration} seconds")

        # Get the FPS of the video
        video_fps = stream.average_rate
        # print(f"Video FPS: {video_fps}")

        for frame in container.decode(stream):
            # logger.info(f"Frame timestamp: {frame}")
            frame_timestamp = frame_timestamp_in_seconds(frame, stream)
            # logger.info(f"Frame timestamp: {frame_timestamp}")
            array_frame = torch.from_numpy(
                frame.to_ndarray(format="rgb24" if modality == "video" else None)
            )

            if modality == "video" and len(array_frame.shape) == 2:
                array_frame = array_frame.unsqueeze(0)

            if modality == "audio" and not stereo_audio_if_available:
                # Keep only the first channel, preserving a (1, samples) shape
                array_frame = array_frame[0].unsqueeze(0)

            if frame_timestamp > ending_second:
                break
            frame_dict[frame_timestamp] = array_frame
            # logger.info(f"Frame dict: {frame_dict}")
            if single_image_frame:
                break
    frame_values = (
        torch.stack(list(frame_dict.values()))
        if modality == "video"
        else torch.cat(list(frame_dict.values()), dim=1).permute(1, 0)
    )

    if frame_selection_method == FrameSelectionMethod.RANDOM:
        # Sampling with replacement only when key_frames_only, where few
        # decoded frames may be available
        frame_indices = rng.choice(
            len(frame_values),
            min(num_frames, len(frame_values)),
            replace=key_frames_only,
        )
    elif frame_selection_method == FrameSelectionMethod.UNIFORM:
        frame_indices = np.linspace(
            0,
            len(frame_values),
            min(num_frames, len(frame_values)),
            endpoint=False,
            dtype=int,
        )
    elif frame_selection_method == FrameSelectionMethod.SEQUENTIAL:
        frame_indices = np.arange(0, min(num_frames, len(frame_values)))
    else:
        raise ValueError(
            f"Unknown frame selection method: {frame_selection_method}"
        )

    frame_indices = sorted(set(frame_indices))
    output = frame_values[frame_indices]

    if modality == "video" and len(output.shape) == 3:
        output = output.unsqueeze(0)

    return output
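
# Illustrative usage (a sketch, not part of the original commit): because
# `video_data` is typed Union[str, bytes], the same call accepts a filesystem
# path or raw bytes already read into memory, which is the "str byte decoding"
# this commit adds. The path below is hypothetical.
#
#     with open("/path/to/clip.mp4", "rb") as f:
#         video_bytes = f.read()
#     frames = extract_frames_pyav(
#         video_data=video_bytes,
#         modality="video",
#         starting_second=0.0,
#         ending_second=5.0,
#         num_frames=8,
#         rng=np.random.default_rng(42),
#         frame_selection_method=FrameSelectionMethod.UNIFORM,
#     )
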
def test_extract_frames_video_pyav():
    video_path = "/data/datasets/tali-wit-2-1-buckets/video_data.parquet/550/550321/4chLRYT8ylY/360p_90.mp4"
    video_path = "/data/datasets/tali-wit-2-1-buckets//video_data.parquet/10/10586/SA7bKo4HRTg/360p_0.mp4"
    modality = "video"
    start_time = 10
    end_time = 20
    num_frames = 30
    rng = np.random.default_rng()

    for selection_method in [
        FrameSelectionMethod.RANDOM,
        FrameSelectionMethod.UNIFORM,
        FrameSelectionMethod.SEQUENTIAL,
    ]:
        for i in range(5):
            time_list = []
            for key_frames_only in [False]:
                start_fn_time = time.time()
                frames = extract_frames_pyav(
                    video_data=video_path,
                    modality=modality,
                    starting_second=start_time,
                    ending_second=end_time,
                    num_frames=num_frames,
                    rng=rng,
                    frame_selection_method=selection_method,
                    key_frames_only=key_frames_only,
                )
                end_fn_time = time.time()
                time_list.append(end_fn_time - start_fn_time)
            print(
                f"Using {selection_method} frame selection method 🎲, "
                f"with key_frames_only: {key_frames_only}, "
                f"have extracted {frames.shape}, "
                f"mean time {np.mean(time_list)} seconds, "
                f"std time {np.std(time_list)} seconds"
            )


def test_extract_frames_audio_pyav():
    video_path = "/data/datasets/tali-wit-2-1-buckets/video_data.parquet/550/550321/4chLRYT8ylY/360p_90.mp4"
    video_path = "/data/datasets/tali-wit-2-1-buckets//video_data.parquet/10/10586/SA7bKo4HRTg/360p_0.mp4"
    modality = "audio"
    start_time = 10
    end_time = 20
    num_frames = 88200  # 2 seconds of samples at 44.1 kHz (assumed sample rate)
    rng = np.random.default_rng()

    for selection_method in [
        FrameSelectionMethod.RANDOM,
        FrameSelectionMethod.UNIFORM,
        FrameSelectionMethod.SEQUENTIAL,
    ]:
        for i in range(5):
            time_list = []
            for key_frames_only in [False]:
                start_fn_time = time.time()
                frames = extract_frames_pyav(
                    video_data=video_path,
                    modality=modality,
                    starting_second=start_time,
                    ending_second=end_time,
                    num_frames=num_frames,
                    rng=rng,
                    frame_selection_method=selection_method,
                    key_frames_only=key_frames_only,
                    stereo_audio_if_available=False,
                )
                end_fn_time = time.time()
                time_list.append(end_fn_time - start_fn_time)
            print(
                f"Using {selection_method} frame selection method 🎲, "
                f"with key_frames_only: {key_frames_only}, "
                f"have extracted {frames.shape}, "
                f"mean time {np.mean(time_list)} seconds, "
                f"std time {np.std(time_list)} seconds"
            )


if __name__ == "__main__":
    # test_extract_frames_torchvision()
    # test_extract_frames_video_pyav()
    test_extract_frames_audio_pyav()