import functools
import inspect
import json
import os
import time
from contextlib import contextmanager
from typing import Callable, Optional, Tuple, TypeVar, Union

import anyio
import httpx
import numpy as np
import torch
from anyio import Semaphore
from diffusers.utils import logging as diffusers_logging
from huggingface_hub import snapshot_download
from huggingface_hub.utils import are_progress_bars_disabled
from PIL import Image
from transformers import logging as transformers_logging
from typing_extensions import ParamSpec

from .annotators import CannyAnnotator
from .logger import Logger

T = TypeVar("T")
P = ParamSpec("P")

MAX_CONCURRENT_THREADS = 1
MAX_THREADS_GUARD = Semaphore(MAX_CONCURRENT_THREADS)


@contextmanager
def timer(message="Operation", logger=print):
    """Log `message`, run the wrapped block, then log the elapsed time."""
    start = time.perf_counter()
    logger(message)
    try:
        yield
    finally:
        end = time.perf_counter()
        logger(f"{message} took {end - start:.2f}s")


@functools.lru_cache()
def load_json(path: str) -> dict:
    with open(path, "r", encoding="utf-8") as file:
        return json.load(file)


@functools.lru_cache()
def read_file(path: str) -> str:
    with open(path, "r", encoding="utf-8") as file:
        return file.read()


def disable_progress_bars():
    transformers_logging.disable_progress_bar()
    diffusers_logging.disable_progress_bar()


def enable_progress_bars():
    # warns if the `HF_HUB_DISABLE_PROGRESS_BARS` env var is set
    transformers_logging.enable_progress_bar()
    diffusers_logging.enable_progress_bar()


def safe_progress(progress, current=0, total=0, desc=""):
    """Report progress only when a progress callback was provided."""
    if progress is not None:
        progress((current, total), desc=desc)


def clear_cuda_cache():
    """Free cached CUDA memory and reset peak memory statistics."""
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
        torch.cuda.reset_peak_memory_stats()
        torch.cuda.synchronize()


def download_repo_files(repo_id, allow_patterns, token=None):
    """Download files matching `allow_patterns` from a Hub repo; return the snapshot path."""
    was_disabled = are_progress_bars_disabled()
    enable_progress_bars()
    snapshot_path = snapshot_download(
        repo_id=repo_id,
        repo_type="model",
        revision="main",
        token=token,
        allow_patterns=allow_patterns,
        ignore_patterns=None,
    )
    if was_disabled:
        disable_progress_bars()
    return snapshot_path
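
# Usage sketch (repo ID and patterns are illustrative):
#   path = download_repo_files(
#       "stabilityai/stable-diffusion-xl-base-1.0",
#       allow_patterns=["*.json", "*.safetensors"],
#   )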


def download_civit_file(lora_id, version_id, file_path=".", token=None):
    """Download a LoRA .safetensors file from CivitAI, skipping files that already exist."""
    base_url = "https://civitai.com/api/download/models"
    file = f"{file_path}/{lora_id}.{version_id}.safetensors"
    log = Logger("download_civit_file")

    if os.path.exists(file):
        return

    try:
        params = {"token": token}
        response = httpx.get(
            f"{base_url}/{version_id}",
            timeout=None,
            params=params,
            follow_redirects=True,
        )

        response.raise_for_status()
        os.makedirs(file_path, exist_ok=True)

        with open(file, "wb") as f:
            f.write(response.content)
    except httpx.HTTPStatusError as e:
        log.error(f"{e.response.status_code} {e.response.text}")
    except httpx.RequestError as e:
        log.error(f"RequestError: {e}")


def image_to_pil(image: Union[str, np.ndarray, Image.Image]) -> Image.Image:
    """Convert a file path, NumPy array, or PIL image to an RGB PIL Image."""
    if isinstance(image, str) and os.path.isfile(image):
        image = Image.open(image)
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)
    if isinstance(image, Image.Image):
        return image.convert("RGB")
    raise ValueError("Invalid image input")


def get_valid_image_size(
    width: int,
    height: int,
    step=64,
    min_size=512,
    max_size=4096,
):
    """Get new image dimensions while preserving aspect ratio."""

    def round_down(x):
        return int((x // step) * step)

    def clamp(x):
        return max(min_size, min(x, max_size))

    aspect_ratio = width / height

    # try width first
    if width > height:
        new_width = round_down(clamp(width))
        new_height = round_down(new_width / aspect_ratio)
    else:
        new_height = round_down(clamp(height))
        new_width = round_down(new_height * aspect_ratio)

    # if new dimensions are out of bounds, try height
    if not min_size <= new_width <= max_size:
        new_width = round_down(clamp(width))
        new_height = round_down(new_width / aspect_ratio)
    if not min_size <= new_height <= max_size:
        new_height = round_down(clamp(height))
        new_width = round_down(new_height * aspect_ratio)

    return (new_width, new_height)
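
# Worked example: get_valid_image_size(1920, 1080) keeps the width at 1920
# (already a multiple of 64) and rounds the height down to 1024 -> (1920, 1024).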


def resize_image(
    image: Image.Image,
    size: Optional[Tuple[int, int]] = None,
    resampling: Optional[Image.Resampling] = None,
):
    """Resize image with proper interpolation and dimension constraints."""
    image = image_to_pil(image)
    if size is None:
        size = get_valid_image_size(*image.size)
    if resampling is None:
        resampling = Image.Resampling.LANCZOS
    return image.resize(size, resampling)


def annotate_image(image: Image.Image, annotator="canny"):
    """Get the feature map of an image using the specified annotator."""
    size = get_valid_image_size(*image.size)
    image = resize_image(image, size)
    if annotator.lower() == "canny":
        canny = CannyAnnotator()
        return canny(image, size)
    raise ValueError(f"Invalid annotator: {annotator}")


# Like the original but supports args and kwargs instead of a dict
# https://github.com/huggingface/huggingface-inference-toolkit/blob/0.2.0/src/huggingface_inference_toolkit/async_utils.py
async def async_call(fn: Callable[P, T], *args: P.args, **kwargs: P.kwargs) -> T:
    async with MAX_THREADS_GUARD:
        sig = inspect.signature(fn)
        bound_args = sig.bind(*args, **kwargs)
        bound_args.apply_defaults()
        partial_fn = functools.partial(fn, **bound_args.arguments)
        return await anyio.to_thread.run_sync(partial_fn)
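

# Usage sketch: run a blocking callable on a worker thread from async code,
# serialized by MAX_THREADS_GUARD; `predict` is a stand-in for any synchronous
# pipeline call.
#   async def handler(prompt: str):
#       return await async_call(predict, prompt, seed=42)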