Make sure we are using CUDA if available
app.py CHANGED
@@ -13,16 +13,14 @@ from torchvision.transforms import (
     Lambda,
     Resize, RandomCrop,
 )
-from transformers import
-
 
 MODEL_CKPT = "omermazig/videomae-finetuned-nba-5-class-4-batch-8000-vid-multiclass"
 DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 CLIPS_FROM_SINGLE_VIDEO = 5
 
-
-
-image_processor = pipe.image_processor
+from transformers import VideoMAEForVideoClassification, VideoMAEFeatureExtractor
+trained_model = VideoMAEForVideoClassification.from_pretrained(MODEL_CKPT).to(DEVICE)
+image_processor = VideoMAEFeatureExtractor.from_pretrained(MODEL_CKPT)
 
 mean = image_processor.image_mean
 std = image_processor.image_std
@@ -67,7 +65,7 @@ def parse_video_to_clips(video_file):
         video_clips_list.append(inference_transform(video_clip))
 
     videos_tensor = torch.stack([single_clip.permute(1, 0, 2, 3) for single_clip in video_clips_list])
-    return videos_tensor
+    return videos_tensor.to(DEVICE)
 
 
 def infer(video_file):
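The commit touches both the model setup and parse_video_to_clips because PyTorch requires the model weights and the input tensor to live on the same device: loading the model with .to(DEVICE) alone would fail at forward time if the clips tensor stayed on the CPU while CUDA is in use. The sketch below shows how these device-aware pieces would typically fit together at inference time. It is a minimal illustration only: the body of infer is not part of this diff, so the classify_clips helper and the clip-averaging step are assumptions, not the Space's actual code.

    import torch
    from transformers import VideoMAEForVideoClassification, VideoMAEFeatureExtractor

    MODEL_CKPT = "omermazig/videomae-finetuned-nba-5-class-4-batch-8000-vid-multiclass"
    # Pick the GPU when one is visible, otherwise fall back to the CPU.
    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Load the fine-tuned classifier and move its weights onto the chosen device;
    # the feature extractor only supplies preprocessing stats (image_mean/image_std).
    trained_model = VideoMAEForVideoClassification.from_pretrained(MODEL_CKPT).to(DEVICE)
    image_processor = VideoMAEFeatureExtractor.from_pretrained(MODEL_CKPT)

    def classify_clips(videos_tensor: torch.Tensor) -> dict:
        """Hypothetical helper: average class probabilities over the sampled clips.

        `videos_tensor` is assumed to be the output of parse_video_to_clips, i.e.
        shape (CLIPS_FROM_SINGLE_VIDEO, num_frames, channels, height, width),
        already moved to DEVICE by the `.to(DEVICE)` added in this commit.
        """
        trained_model.eval()
        with torch.no_grad():
            # Model and inputs are on the same device, so this forward pass works
            # on both CUDA and CPU-only machines.
            logits = trained_model(pixel_values=videos_tensor).logits
        probs = logits.softmax(dim=-1).mean(dim=0)
        return {trained_model.config.id2label[i]: p.item() for i, p in enumerate(probs)}

Keeping DEVICE as a single module-level constant means the same code path runs unchanged on a CPU-only Space and on GPU hardware; only the tensor placement differs.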