{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: animeganv2\n", "### Recreate the viral AnimeGAN image transformation demo.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchvision Pillow gdown numpy scipy cmake onnxruntime-gpu opencv-python-headless"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/gongyoo.jpeg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/groot.jpeg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "\n", "model2 = torch.hub.load(\n", " \"AK391/animegan2-pytorch:main\",\n", " \"generator\",\n", " pretrained=True,\n", " progress=False\n", ")\n", "model1 = torch.hub.load(\"AK391/animegan2-pytorch:main\", \"generator\", pretrained=\"face_paint_512_v1\")\n", "face2paint = torch.hub.load(\n", " 'AK391/animegan2-pytorch:main', 'face2paint',\n", " size=512,side_by_side=False\n", ")\n", "\n", "def inference(img, ver):\n", " if ver == 'version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)':\n", " out = face2paint(model2, img)\n", " else:\n", " out = face2paint(model1, img)\n", " return out\n", "\n", "title = \"AnimeGANv2\"\n", "description = \"Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below.\"\n", "article = \"<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='visitor badge'></center></p>\"\n", "examples=[['groot.jpeg','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'],['gongyoo.jpeg','version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)']]\n", "\n", "demo = gr.Interface(\n", " fn=inference,\n", " inputs=[gr.Image(type=\"pil\"),gr.Radio(['version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'], type=\"value\", value='version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)', label='version')],\n", " outputs=gr.Image(type=\"pil\"),\n", " title=title,\n", " description=description,\n", " article=article,\n", " examples=examples)\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} |