Spaces: animegan-v2-for-videos · Running on T4
yourusername committed · commit 938aa7b · 0 parent(s)

:tada: init
Files changed:
- .gitattributes (+27 -0)
- .github/workflows/check_size.yaml (+17 -0)
- .github/workflows/sync_to_hub.yaml (+20 -0)
- README.md (+19 -0)
- app.py (+91 -0)
- packages.txt (+0 -0)
- requirements.txt (+10 -0)
.gitattributes
ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
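These patterns route the usual binary formats of an ML repo (weights such as *.pt / *.h5 / *.onnx, archives, parquet/arrow data, TensorBoard *tfevents* logs) through Git LFS, keeping large blobs out of regular git history. Each line is what `git lfs track "<pattern>"` would append; for example, `git lfs track "*.ckpt"` (assuming git-lfs is installed locally) would add a matching `*.ckpt filter=lfs diff=lfs merge=lfs -text` line here.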
.github/workflows/check_size.yaml
ADDED
@@ -0,0 +1,17 @@
+name: Check file size
+
+on:
+  pull_request:
+    branches: [main]
+
+  # to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+jobs:
+  sync-to-hub:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check large files
+        uses: ActionsDesk/lfs-warning@v2.0
+        with:
+          filesizelimit: 10485760 # = 10MB, so we can sync to HF spaces
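The limit is in bytes: 10 × 1024 × 1024 = 10,485,760, i.e. 10 MiB, the size above which the Hugging Face Hub expects files to go through Git LFS (per the .gitattributes above). Failing the pull request here is cheaper than letting the sync workflow below choke on an oversized plain-git file.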
.github/workflows/sync_to_hub.yaml
ADDED
@@ -0,0 +1,20 @@
+name: Sync to Hugging Face hub
+
+on:
+  push:
+    branches: [main]
+
+  # to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+jobs:
+  sync-to-hub:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      - name: Push to hub
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        run: git push https://nateraw:$HF_TOKEN@huggingface.co/spaces/nateraw/animegan-v2-for-videos main --force
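The checkout uses fetch-depth: 0 so the runner has full history, and the push then force-mirrors main onto the Space's git remote. A failed sync can be reproduced locally with the same command; a minimal sketch, assuming a write-scoped token is exported as HF_TOKEN in your shell:

    export HF_TOKEN=...   # write token from your Hugging Face account settings
    git push --force https://nateraw:$HF_TOKEN@huggingface.co/spaces/nateraw/animegan-v2-for-videos main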
README.md
ADDED
@@ -0,0 +1,19 @@
+---
+title: AnimeGAN-v2 For Videos
+emoji: 🔥
+colorFrom: blue
+colorTo: red
+sdk: gradio
+app_file: app.py
+pinned: false
+---
+
+# AnimeGAN-v2 For Videos
+
+[![Generic badge](https://img.shields.io/badge/🤗-Open%20In%20Spaces-blue.svg)](https://huggingface.co/spaces/nateraw/animegan-v2-for-videos)
+
+Apply AnimeGAN-v2 across frames of a video
+
+---
+
+Autogenerated using [this template](https://github.com/nateraw/spaces-template)
app.py
ADDED
@@ -0,0 +1,91 @@
+from PIL import Image
+import torch
+import gradio as gr
+import numpy as np
+from encoded_video import EncodedVideo, write_video
+from io import BytesIO
+
+model2 = torch.hub.load(
+    "AK391/animegan2-pytorch:main",
+    "generator",
+    pretrained=True,
+    device="cuda",
+    progress=True,
+    force_reload=True,
+)
+face2paint = torch.hub.load(
+    'AK391/animegan2-pytorch:main', 'face2paint',
+    size=512, device="cuda", side_by_side=False
+)
+
+def uniform_temporal_subsample(
+    x: torch.Tensor, num_samples: int, temporal_dim: int = -3
+) -> torch.Tensor:
+    """
+    Uniformly subsamples num_samples indices from the temporal dimension of the video.
+    When num_samples is larger than the size of temporal dimension of the video, it
+    will sample frames based on nearest neighbor interpolation.
+    Args:
+        x (torch.Tensor): A video tensor with dimension larger than one with torch
+            tensor type includes int, long, float, complex, etc.
+        num_samples (int): The number of equispaced samples to be selected
+        temporal_dim (int): dimension of temporal to perform temporal subsample.
+    Returns:
+        An x-like Tensor with subsampled temporal dimension.
+    """
+    t = x.shape[temporal_dim]
+    assert num_samples > 0 and t > 0
+    # Sample by nearest neighbor interpolation if num_samples > t.
+    indices = torch.linspace(0, t - 1, num_samples)
+    indices = torch.clamp(indices, 0, t - 1).long()
+    return torch.index_select(x, temporal_dim, indices)
+
+
+def inference_video(video_file):
+    out_fps = 12
+    start_sec = 0
+    duration = 2
+    vid = EncodedVideo.from_path(video_file)
+    clip = vid.get_clip(start_sec, start_sec + duration)
+    video_arr = clip['video']
+    audio_arr = np.expand_dims(clip['audio'], 0)
+    audio_fps = None if not vid._has_audio else vid._container.streams.audio[0].sample_rate
+
+    frames = uniform_temporal_subsample(torch.from_numpy(video_arr), duration * out_fps, 0).to(torch.uint8).numpy()
+
+    out_frames = []
+    for frame in frames:
+        im = Image.fromarray(frame)
+        out = face2paint(model2, im)
+        out_frames.append(np.array(out))
+
+
+    out_frames = np.array(out_frames)
+
+    bytes_mp4 = bytes()
+    out_file = BytesIO(bytes_mp4)
+
+    # Add dummy file name to stream, as write_video will be looking for it
+    out_file.name = "out.mp4"
+
+    write_video(
+        'out.mp4',
+        out_frames,
+        fps=out_fps,
+        audio_array=audio_arr,
+        audio_fps=audio_fps,
+        audio_codec='aac'
+    )
+    return 'out.mp4'
+
+gr.Interface(
+    inference_video,
+    inputs=gr.inputs.Video(),
+    outputs=gr.outputs.Video(),
+    title='AnimeGANV2 On Videos',
+    description="Applying AnimeGAN-V2 to frames from video clips",
+    article="<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p><p style='text-align: center'>samples from repo: <img src='https://user-images.githubusercontent.com/26464535/129888683-98bb6283-7bb8-4d1a-a04a-e795f5858dcf.gif' alt='animation'/> <img src='https://user-images.githubusercontent.com/26464535/137619176-59620b59-4e20-4d98-9559-a424f86b7f24.jpg' alt='animation'/><img src='https://user-images.githubusercontent.com/26464535/127134790-93595da2-4f8b-4aca-a9d7-98699c5e6914.jpg' alt='animation'/></p>",
+    enable_queue=True,
+    # examples=examples,
+    allow_flagging=False
+).launch(debug=True)
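The one subtle piece of app.py is the frame selection: torch.linspace spreads num_samples positions evenly across [0, t-1] and .long() truncates them to integer frame indices, so when num_samples exceeds the frame count, frames are duplicated rather than skipped (truncation, i.e. floor, stands in for the docstring's "nearest neighbor"). A quick worked check of the index pattern, runnable on CPU:

    import torch

    # 4 source frames stretched to 8 samples: t = 4, num_samples = 8
    indices = torch.linspace(0, 4 - 1, 8)      # 0.00, 0.43, 0.86, ..., 2.57, 3.00
    print(torch.clamp(indices, 0, 3).long())   # tensor([0, 0, 0, 1, 1, 2, 2, 3])

Note also that both torch.hub.load calls pin device="cuda", which is why the Space runs on a T4; on a CPU-only machine those two arguments would need to change.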
packages.txt
ADDED
File without changes
requirements.txt
ADDED
@@ -0,0 +1,10 @@
+gradio
+torch
+torchvision
+Pillow
+gdown
+numpy
+scipy
+cmake
+onnxruntime-gpu
+opencv-python-headless
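To try the app outside the Space, `pip install -r requirements.txt` followed by `python app.py` should be all that's needed, with one caveat: app.py also imports encoded_video, which is not listed here, so it presumably has to be installed separately (the package appears on PyPI as encoded-video).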