diff --git a/fn_gen/ones_affine_scale/0/distortion.png b/fn_gen/ones_affine_scale/0/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..156a7bff908b8cfdca12a2022d33664d789e0948 Binary files /dev/null and b/fn_gen/ones_affine_scale/0/distortion.png differ diff --git a/fn_gen/ones_affine_scale/0/expressions.txt b/fn_gen/ones_affine_scale/0/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..ed99293c42843616c361d59b23d32ae553cc0f8d --- /dev/null +++ b/fn_gen/ones_affine_scale/0/expressions.txt @@ -0,0 +1,2 @@ +atanh(_0*x)/_s +tanh(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/0/fn.py b/fn_gen/ones_affine_scale/0/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..206a3753e76bec4f6ea0d5472cb2db5e753085bc --- /dev/null +++ b/fn_gen/ones_affine_scale/0/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.atanh(domain_guard((params['_0'] * x), min=-0.9999, max=0.9999, nan=0))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.tanh((params['_s'] * x))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.arctanh(np_domain_guard((_0 * x), min=-0.9999, max=0.9999, nan=0))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, 
num=0, to=10000)) * np.tanh((_s * x))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." 
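+ # Search strategy: draw a wide pool of random candidates for the target
+ # parameter, keep the lowest-MSE candidates after each run, and resample
+ # new candidates around their mean; the all-ones and random initializers
+ # are tried at the end and returned instead if either yields a lower loss.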
+ + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." 
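+ # Per-channel fit: scipy's curve_fit adjusts the parameters so that the
+ # composed quantize->dequantize function maps the sorted channel data back
+ # onto itself (xdata is used as both input and target); if the fit raises
+ # a ValueError, the provided p0 values are returned as a fallback.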
+ np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. 
Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. + forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. 
+ """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/0/loss.png b/fn_gen/ones_affine_scale/0/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..5d435ec806b531859c2105a419de1776cea0ea43 Binary files /dev/null and b/fn_gen/ones_affine_scale/0/loss.png differ diff --git a/fn_gen/ones_affine_scale/0/quantization.png b/fn_gen/ones_affine_scale/0/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..fd61b5597ab6b6fd181359148e7b51b18b08d577 Binary files /dev/null and b/fn_gen/ones_affine_scale/0/quantization.png differ diff --git a/fn_gen/ones_affine_scale/1/distortion.png b/fn_gen/ones_affine_scale/1/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..1925381ea1c3d164096db1a304bd5f609ef3597f Binary files /dev/null and b/fn_gen/ones_affine_scale/1/distortion.png differ diff --git a/fn_gen/ones_affine_scale/1/expressions.txt b/fn_gen/ones_affine_scale/1/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..c7b68c388fdf6e1b6e2be8076f1d4b8d7bcef4f9 --- /dev/null +++ b/fn_gen/ones_affine_scale/1/expressions.txt @@ -0,0 +1,2 @@ +(_0*x)**(1/3)/_s +_s**3*x**3/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/1/fn.py b/fn_gen/ones_affine_scale/1/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..336cdf5549289af8944de35d0712a0c3fa1b2427 --- /dev/null +++ b/fn_gen/ones_affine_scale/1/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * guarded_torch_power((params['_0'] * x), 1 / 3)) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * guarded_torch_power(params['_s'], torch.tensor(3)) * guarded_torch_power(x, torch.tensor(3))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
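+ # Symmetric (abs-max) scale: quantize x with _s fixed to ones, take the
+ # largest magnitude per channel, and divide it by half the signed integer
+ # range (e.g. 127.5 for 8 bits, whose range is [-128, 127]); the result is
+ # clamped to eps to avoid division by zero.
+ # NOTE: the noise-adding statements after the first `return scale` below
+ # are unreachable.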
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np_guarded_power((_0 * x), 1 / 3)) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np_guarded_power(_s, np.array(3)) * np_guarded_power(x, np.array(3))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
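+ # NOTE: this re-defines the init_linear_scale already declared near the top
+ # of this module; since it is identical and defined later, it is the
+ # definition that takes effect at import time.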
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
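+ # In other words, a straight-through estimator: the forward pass returns
+ # torch.round(input), while autograd differentiates through the unmodified
+ # clone of `input`, so the round operation contributes an identity gradient.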
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/1/loss.png b/fn_gen/ones_affine_scale/1/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..e67554a3f401dd71c21959548f641fd3567d6147 Binary files /dev/null and b/fn_gen/ones_affine_scale/1/loss.png differ diff --git a/fn_gen/ones_affine_scale/1/quantization.png b/fn_gen/ones_affine_scale/1/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..b171ca2ef90ed3f3df63a035b70dbe1064c23b6e Binary files /dev/null and b/fn_gen/ones_affine_scale/1/quantization.png differ diff --git a/fn_gen/ones_affine_scale/10/distortion.png b/fn_gen/ones_affine_scale/10/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..a2ee371b1e8485c42a4c71d82570d92f84b5ccd0 Binary files /dev/null and b/fn_gen/ones_affine_scale/10/distortion.png differ diff --git a/fn_gen/ones_affine_scale/10/expressions.txt b/fn_gen/ones_affine_scale/10/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..dbb6da0fc54c6f23dc12daf2e2c3a395819e1bf4 --- /dev/null +++ b/fn_gen/ones_affine_scale/10/expressions.txt @@ -0,0 +1,2 @@ +x**2/_s +sqrt(_s*x) \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/10/fn.py b/fn_gen/ones_affine_scale/10/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..d9d2f630273164837deabc06e73bd11ad28c6258 --- /dev/null +++ b/fn_gen/ones_affine_scale/10/fn.py @@ -0,0 +1,492 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * guarded_torch_power(x, torch.tensor(2))) + + +def dequantization(x, **params): + return torch.sqrt(domain_guard((params['_s'] * x), min=0.1, nan=0.1)) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np_guarded_power(x, np.array(2))) + + +def np_dequantization(x, _s): + return np.sqrt(np_domain_guard((_s * x), min=0.1, nan=0.1)) + + +def fit_func(x, _s): + x_ = np_quantization(x, _s) + x_ = np_dequantization(x_, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/10/loss.png b/fn_gen/ones_affine_scale/10/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..7d230c224f69f460ebd107a4b030ef29a3e4c0fa Binary files /dev/null and b/fn_gen/ones_affine_scale/10/loss.png differ diff --git a/fn_gen/ones_affine_scale/10/quantization.png b/fn_gen/ones_affine_scale/10/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..114c44936bf8e8e42acc1426bb6a9100f9f30d25 Binary files /dev/null and b/fn_gen/ones_affine_scale/10/quantization.png differ diff --git a/fn_gen/ones_affine_scale/11/distortion.png b/fn_gen/ones_affine_scale/11/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..05e0b5f8ff2cb948cbccb86071b0c987a3ec94fc Binary files /dev/null and b/fn_gen/ones_affine_scale/11/distortion.png differ diff --git a/fn_gen/ones_affine_scale/11/expressions.txt b/fn_gen/ones_affine_scale/11/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..c545adce8b3c320e195336b81461c79d0cc385e6 --- /dev/null +++ b/fn_gen/ones_affine_scale/11/expressions.txt @@ -0,0 +1,2 @@ +asinh(_0*x)/_s +sinh(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/11/fn.py b/fn_gen/ones_affine_scale/11/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..dd99c44f2945f92c9eaf2ae3ac716ae144eaf1eb --- /dev/null +++ b/fn_gen/ones_affine_scale/11/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.asinh((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.sinh((params['_s'] * x))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.arcsinh((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.sinh((_s * x))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
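+    # Reviewer annotation (illustrative example, assumed inputs): only `.data` is
+    # overwritten below, so autograd still sees the identity `out = input.clone()`.
+    # For t = torch.tensor([0.4, 1.6], requires_grad=True), round_func_BPDA(t)
+    # forwards tensor([0., 2.]), and after round_func_BPDA(t).sum().backward()
+    # t.grad is tensor([1., 1.]), exactly as if no rounding had happened.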
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/11/loss.png b/fn_gen/ones_affine_scale/11/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..2c90a40fb5510d0de82d40da19cebb60ac864d0e Binary files /dev/null and b/fn_gen/ones_affine_scale/11/loss.png differ diff --git a/fn_gen/ones_affine_scale/11/quantization.png b/fn_gen/ones_affine_scale/11/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..c675cf02a035e6fa481d6ba61c02765150a55c02 Binary files /dev/null and b/fn_gen/ones_affine_scale/11/quantization.png differ diff --git a/fn_gen/ones_affine_scale/12/distortion.png b/fn_gen/ones_affine_scale/12/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..80910b4a7d2a5c5dc6f049aa45f0f395f4451b73 Binary files /dev/null and b/fn_gen/ones_affine_scale/12/distortion.png differ diff --git a/fn_gen/ones_affine_scale/12/expressions.txt b/fn_gen/ones_affine_scale/12/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..576ec6a351e26f9982eb17e394804ca906d4b067 --- /dev/null +++ b/fn_gen/ones_affine_scale/12/expressions.txt @@ -0,0 +1,2 @@ +acos(_0*x)/_s +cos(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/12/fn.py b/fn_gen/ones_affine_scale/12/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..50e257068c56d82b5eae1a0678f04199e32f11e1 --- /dev/null +++ b/fn_gen/ones_affine_scale/12/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.acos(domain_guard((params['_0'] * x), min=-0.99999, max=0.99999, nan=0))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.cos((params['_s'] * x))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
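+    # Reviewer annotation: _s is temporarily fixed to ones in the call below because
+    # the quantization expression has the form f(_0 * x) / _s; evaluating it with
+    # _s = 1 exposes the raw range of f(_0 * x), which the scale computed afterwards
+    # maps onto the signed integer grid.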
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.arccos(np_domain_guard((_0 * x), min=-0.99999, max=0.99999, nan=0))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.cos((_s * x))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
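+    # Reviewer annotation: this is the second, essentially identical definition of
+    # init_linear_scale in this generated module. Python simply rebinds the name, so
+    # the copy defined earlier in the file is shadowed and only this one is called.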
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
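+    # Reviewer annotation: an equivalent and commonly used way to write this
+    # straight-through estimator is
+    #     out = input + (torch.round(input) - input).detach()
+    # which also forwards the rounded value while keeping an identity gradient.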
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/12/loss.png b/fn_gen/ones_affine_scale/12/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..9209384d8ec8f07014e0cffe24e018fcf07bcf95 Binary files /dev/null and b/fn_gen/ones_affine_scale/12/loss.png differ diff --git a/fn_gen/ones_affine_scale/12/quantization.png b/fn_gen/ones_affine_scale/12/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..d16b2d8d3d05651c80c879397bf71ca136a6e8af Binary files /dev/null and b/fn_gen/ones_affine_scale/12/quantization.png differ diff --git a/fn_gen/ones_affine_scale/13/distortion.png b/fn_gen/ones_affine_scale/13/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..dd15fe9dd11c4ac984071b7e1aa590d6c8ea5c75 Binary files /dev/null and b/fn_gen/ones_affine_scale/13/distortion.png differ diff --git a/fn_gen/ones_affine_scale/13/expressions.txt b/fn_gen/ones_affine_scale/13/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..5a7abbbdac98c7d53123fe0b9807e7644bc00acf --- /dev/null +++ b/fn_gen/ones_affine_scale/13/expressions.txt @@ -0,0 +1,2 @@ +acosh(_0*x)/_s +cosh(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/13/fn.py b/fn_gen/ones_affine_scale/13/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..e088b2c916caa14068245e170cf4e71f5cc27121 --- /dev/null +++ b/fn_gen/ones_affine_scale/13/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.acosh(domain_guard((params['_0'] * x), min=1, nan=1))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.cosh((params['_s'] * x))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
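+    # Reviewer annotation: in this acosh-based variant, quantization() clamps
+    # _0 * x to [1, inf) via domain_guard, so the transformed tensor x_ computed
+    # below is non-negative; min_vals collapses to zero and the scale ends up driven
+    # entirely by max_vals.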
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.arccosh(np_domain_guard((_0 * x), min=1, nan=1))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.cosh((_s * x))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
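+    # Reviewer annotation: the noise-injection statements that appear after the first
+    # `return scale` further down in this function are unreachable; they look like
+    # leftovers from experimenting with a randomly perturbed scale initialization.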
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
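+    # Reviewer annotation: quantize() clamps the rounded output of this function to
+    # the range returned by get_min_max_from_bits_signed before casting to
+    # target_dtype, e.g. (-8, 7) for 4 bits or (-128, 127) for 8 bits.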
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/13/loss.png b/fn_gen/ones_affine_scale/13/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..c80780603e784867a0476edb47d53e4e58d659e8 Binary files /dev/null and b/fn_gen/ones_affine_scale/13/loss.png differ diff --git a/fn_gen/ones_affine_scale/13/quantization.png b/fn_gen/ones_affine_scale/13/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..14c2e2f410a2f4b55c06ad5e1d223d7c4e054aee Binary files /dev/null and b/fn_gen/ones_affine_scale/13/quantization.png differ diff --git a/fn_gen/ones_affine_scale/14/distortion.png b/fn_gen/ones_affine_scale/14/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..cc3bbdda6a18dedda3cb9b4d4037ad5f7b414fb3 Binary files /dev/null and b/fn_gen/ones_affine_scale/14/distortion.png differ diff --git a/fn_gen/ones_affine_scale/14/expressions.txt b/fn_gen/ones_affine_scale/14/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..23606e9f370f2e4adb43ed623c49d7fcaabd7355 --- /dev/null +++ b/fn_gen/ones_affine_scale/14/expressions.txt @@ -0,0 +1,2 @@ +tan(_0*x)/_s +atan(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/14/fn.py b/fn_gen/ones_affine_scale/14/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..57d0d9be395b09bae4573b74725510e833e2ac32 --- /dev/null +++ b/fn_gen/ones_affine_scale/14/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.tan(domain_guard((params['_0'] * x), posinf=1, neginf=-1, nan=0))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.atan((params['_s'] * x))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
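+    # Reviewer annotation: in this tan-based variant the forward transform is
+    # unbounded (tan diverges near odd multiples of pi/2), so the per-channel
+    # abs-max computed below, and hence the scale, can be dominated by a few
+    # extreme values.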
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.tan(np_domain_guard((_0 * x), posinf=1, neginf=-1, nan=0))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.arctan((_s * x))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
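+    # Reviewer annotation: when called from init_params, `params` holds only the
+    # previously initialized '_0' tensor, `qtz_func` is the module-level
+    # quantization(), and `bits` is expected to arrive through the caller's kwargs.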
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
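+    # Reviewer annotation: learn_parameters() backpropagates an MSE reconstruction
+    # loss through dequantize(quantize(x, ...), ...); this pass-through rounding is
+    # what lets gradients flow through the rounding step back to the learnable
+    # quantization parameters.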
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/14/loss.png b/fn_gen/ones_affine_scale/14/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..c9d490378c6066d8c750d8f8826a08961df99575 Binary files /dev/null and b/fn_gen/ones_affine_scale/14/loss.png differ diff --git a/fn_gen/ones_affine_scale/14/quantization.png b/fn_gen/ones_affine_scale/14/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..ce59e02bedbfe0645bfb04b8d94301070f57f047 Binary files /dev/null and b/fn_gen/ones_affine_scale/14/quantization.png differ diff --git a/fn_gen/ones_affine_scale/15/distortion.png b/fn_gen/ones_affine_scale/15/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..a0d8f23ff96aededdeea59feaf8d5d4627aa5438 Binary files /dev/null and b/fn_gen/ones_affine_scale/15/distortion.png differ diff --git a/fn_gen/ones_affine_scale/15/expressions.txt b/fn_gen/ones_affine_scale/15/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..b835531ccc3a3813012a9a9487415f4f73afabc7 --- /dev/null +++ b/fn_gen/ones_affine_scale/15/expressions.txt @@ -0,0 +1,2 @@ +sinh(_0*x)/_s +log(_s*x - sqrt(_s**2*x**2 + 1))/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/15/fn.py b/fn_gen/ones_affine_scale/15/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..a059b671b5c5337b9b08918e8f58aa57f0f41cb9 --- /dev/null +++ b/fn_gen/ones_affine_scale/15/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.sinh((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.log(domain_guard(((torch.tensor(-1) * torch.sqrt(domain_guard((torch.tensor(1) + (guarded_torch_power(params['_s'], torch.tensor(2)) * guarded_torch_power(x, torch.tensor(2)))), min=0.1, nan=0.1))) + (params['_s'] * x)), min=1e-5, nan=1e-5))) + + +def init_linear_scale( # Symmetric scale. 
From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.sinh((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.log(np_domain_guard(((np.array(-1) * np.sqrt(np_domain_guard((np.array(1) + (np_guarded_power(_s, np.array(2)) * np_guarded_power(x, np.array(2)))), min=0.1, nan=0.1))) + (_s * x)), min=1e-5, nan=1e-5))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. 
+ """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." 
+ + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." 
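# --- Annotation (illustrative sketch, not part of the generated diff) ----------------------
# init_non_linear_regression_fit below wraps scipy.optimize.curve_fit: each channel is fitted
# independently, with the round trip fit_func(x, _0, _s) = np_dequantization(np_quantization(x))
# as the model and the channel's own sorted values as both xdata and ydata. A minimal
# standalone version of one such fit, assuming a single channel of 128 values:
import numpy as np
from scipy.optimize import curve_fit
xdata = np.sort(np.random.randn(128).astype(np.float64))
popt, _ = curve_fit(fit_func, xdata, xdata, p0=[1.0, 1.0], maxfev=1000, method='lm')
_0_fit, _s_fit = popt   # per-channel parameters minimising the reconstruction error
# --------------------------------------------------------------------------------------------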
+ np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. 
Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. + forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. 
+ """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/15/loss.png b/fn_gen/ones_affine_scale/15/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..49b899c9c948f2c91f8f8acc0100c4da8e46658f Binary files /dev/null and b/fn_gen/ones_affine_scale/15/loss.png differ diff --git a/fn_gen/ones_affine_scale/15/quantization.png b/fn_gen/ones_affine_scale/15/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..6d90040f1386a47ab167690a48cfaa53b1cd6501 Binary files /dev/null and b/fn_gen/ones_affine_scale/15/quantization.png differ diff --git a/fn_gen/ones_affine_scale/16/distortion.png b/fn_gen/ones_affine_scale/16/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..cca7dff69974e3a4f1f562a3b77c0cbbbc2b1f6a Binary files /dev/null and b/fn_gen/ones_affine_scale/16/distortion.png differ diff --git a/fn_gen/ones_affine_scale/16/expressions.txt b/fn_gen/ones_affine_scale/16/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..aa32b575e8c654dbc457c94f36222e70d86dc940 --- /dev/null +++ b/fn_gen/ones_affine_scale/16/expressions.txt @@ -0,0 +1,2 @@ +atan(_0*x)/_s +tan(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/16/fn.py b/fn_gen/ones_affine_scale/16/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..bb958028967e8e02d63adffe33d13e50543b83e3 --- /dev/null +++ b/fn_gen/ones_affine_scale/16/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.atan((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.tan(domain_guard((params['_s'] * x), posinf=1, neginf=-1, nan=0))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.arctan((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.tan(np_domain_guard((_s * x), posinf=1, neginf=-1, nan=0))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
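# --- Annotation (worked example, not part of the generated diff) ---------------------------
# init_linear_scale maps the per-channel absolute maximum of the quantized output onto the
# signed integer grid: for bits = 8, get_min_max_from_bits_signed gives (-128, 127), so
# scale = abs_max / ((127 - (-128)) / 2) = abs_max / 127.5. The same arithmetic, standalone:
import torch
bits = 8
quant_min, quant_max = -2 ** (bits - 1), 2 ** (bits - 1) - 1        # (-128, 127)
abs_max_val_per_ch = torch.tensor([3.2, 0.7])                       # hypothetical per-channel maxima
scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2)     # ~tensor([0.0251, 0.0055])
scale = torch.clamp(scale, min=torch.finfo(torch.float32).eps)
# --------------------------------------------------------------------------------------------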
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/16/loss.png b/fn_gen/ones_affine_scale/16/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..9a60d987b6e6f1bcb73281b03fc7fd03ed99e93d Binary files /dev/null and b/fn_gen/ones_affine_scale/16/loss.png differ diff --git a/fn_gen/ones_affine_scale/16/quantization.png b/fn_gen/ones_affine_scale/16/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..3dbbbb24a7d2bf31caaf62ea3a7759fd7fef962e Binary files /dev/null and b/fn_gen/ones_affine_scale/16/quantization.png differ diff --git a/fn_gen/ones_affine_scale/17/distortion.png b/fn_gen/ones_affine_scale/17/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..86e16ff94cb77db76575ee92a03c2cc157c311d1 Binary files /dev/null and b/fn_gen/ones_affine_scale/17/distortion.png differ diff --git a/fn_gen/ones_affine_scale/17/expressions.txt b/fn_gen/ones_affine_scale/17/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..ec55493201f7b2b8effaefed75e0a9258fc25c56 --- /dev/null +++ b/fn_gen/ones_affine_scale/17/expressions.txt @@ -0,0 +1,2 @@ +tanh(_0*x)/_s +log((-_s*x - 1)/(_s*x - 1))/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/17/fn.py b/fn_gen/ones_affine_scale/17/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..28d248734690413305d39a727b492af919d2a934 --- /dev/null +++ b/fn_gen/ones_affine_scale/17/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.tanh((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.log(domain_guard((torch.div(1, replace_num((torch.tensor(-1) + (params['_s'] * x)), num=0, to=10000)) * (torch.tensor(-1) + (torch.tensor(-1) * params['_s'] * x))), min=1e-5, nan=1e-5))) + + +def init_linear_scale( # Symmetric scale. 
From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.tanh((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.log(np_domain_guard((np.divide(1, np_replace_num((np.array(-1) + (_s * x)), num=0, to=10000)) * (np.array(-1) + (np.array(-1) * _s * x))), min=1e-5, nan=1e-5))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. 
+ """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." 
+ + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." 
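# --- Annotation (illustrative aside, not part of the generated diff) -----------------------
# quantize() further down rounds through round_func_BPDA, which keeps the rounded value in the
# forward pass but lets gradients pass through unchanged (a straight-through estimator), so
# learn_parameters can still backpropagate into _0 and _s. A standalone check:
import torch
v = torch.tensor([0.3, 1.7], requires_grad=True)
y = round_func_BPDA(v)         # forward values: tensor([0., 2.])
y.sum().backward()
print(v.grad)                  # tensor([1., 1.]) -- identity gradient, not zero
# --------------------------------------------------------------------------------------------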
+ np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. 
Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. + forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. 
+ """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/17/loss.png b/fn_gen/ones_affine_scale/17/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..1ad89af7d00c3e028a6df9b97700fdb38f6f2630 Binary files /dev/null and b/fn_gen/ones_affine_scale/17/loss.png differ diff --git a/fn_gen/ones_affine_scale/17/quantization.png b/fn_gen/ones_affine_scale/17/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..e8b6c8567bf141823095a769ddb1d3c03e763605 Binary files /dev/null and b/fn_gen/ones_affine_scale/17/quantization.png differ diff --git a/fn_gen/ones_affine_scale/18/distortion.png b/fn_gen/ones_affine_scale/18/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..e6dd019f8dddcfe8c2a501ae1769eb66c4ca64c8 Binary files /dev/null and b/fn_gen/ones_affine_scale/18/distortion.png differ diff --git a/fn_gen/ones_affine_scale/18/expressions.txt b/fn_gen/ones_affine_scale/18/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..03413827fa8f4c8ad49a40b543460cf31d1ce803 --- /dev/null +++ b/fn_gen/ones_affine_scale/18/expressions.txt @@ -0,0 +1,2 @@ +asin(_0*x)/_s +sin(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/18/fn.py b/fn_gen/ones_affine_scale/18/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..f035da5a1304b82c8818da9fd3dca20a60bddcfe --- /dev/null +++ b/fn_gen/ones_affine_scale/18/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.asin(domain_guard((params['_0'] * x), min=-0.99999, max=0.99999, nan=0))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.sin((params['_s'] * x))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.arcsin(np_domain_guard((_0 * x), min=-0.99999, max=0.99999, nan=0))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.sin((_s * x))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
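# --- Annotation (illustrative aside, not part of the generated diff) -----------------------
# For the asin-based pair in this file, domain_guard clamps _0*x into [-0.99999, 0.99999]
# before asin, so inputs outside that range saturate instead of producing NaN:
import torch
x = torch.tensor([[0.5, 2.0]])                     # one channel; 2.0 is outside asin's domain
p = {'_0': torch.ones(1), '_s': torch.ones(1)}
q = quantization(x.transpose(0, 1), **p)
print(q.squeeze())                                 # ~tensor([0.5236, 1.5663]): asin(0.5), asin(0.99999)
# --------------------------------------------------------------------------------------------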
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
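+    # (This is the straight-through estimator for rounding: forward uses round,
+    #  backward sees the identity. An equivalent one-liner sketch would be
+    #  input + (torch.round(input) - input).detach().)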
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/18/loss.png b/fn_gen/ones_affine_scale/18/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..6fbd903b26cd2a21df00aab6a46d839437386839 Binary files /dev/null and b/fn_gen/ones_affine_scale/18/loss.png differ diff --git a/fn_gen/ones_affine_scale/18/quantization.png b/fn_gen/ones_affine_scale/18/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..05fbb2ceb741d3b7b0137785dbd01aa1e867e7cd Binary files /dev/null and b/fn_gen/ones_affine_scale/18/quantization.png differ diff --git a/fn_gen/ones_affine_scale/2/distortion.png b/fn_gen/ones_affine_scale/2/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..55f649d865800d8c6b638878ea726ab6db8259d4 Binary files /dev/null and b/fn_gen/ones_affine_scale/2/distortion.png differ diff --git a/fn_gen/ones_affine_scale/2/expressions.txt b/fn_gen/ones_affine_scale/2/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..74791fc40576643d62f6366a8b4eda20eb1ad252 --- /dev/null +++ b/fn_gen/ones_affine_scale/2/expressions.txt @@ -0,0 +1,2 @@ +x**3/_s +(_s*x)**(1/3) \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/2/fn.py b/fn_gen/ones_affine_scale/2/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..a01e9d86bcb7ff432bd7364ad3b6b592000ad62e --- /dev/null +++ b/fn_gen/ones_affine_scale/2/fn.py @@ -0,0 +1,492 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * guarded_torch_power(x, torch.tensor(3))) + + +def dequantization(x, **params): + return guarded_torch_power((params['_s'] * x), 1 / 3) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
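+
+    # Note on this variant's pair (see expressions.txt): quantization is
+    # x**3 / _s and dequantization is (_s*x)**(1/3). Since guarded_torch_power
+    # applies relu before fractional exponents, negative products _s*x
+    # dequantize to 0 rather than to a negative cube root.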
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np_guarded_power(x, np.array(3))) + + +def np_dequantization(x, _s): + return np_guarded_power((_s * x), 1 / 3) + + +def fit_func(x, _s): + x_ = np_quantization(x, _s) + x_ = np_dequantization(x_, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
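+
+    # Overall strategy: sample random candidate values for `param`, keep the
+    # n_best_to_pick candidates with the lowest round-trip MSE, resample new
+    # candidates around their mean for n_runs iterations, and finally fall back
+    # to the all-ones or random baseline if either beats the best candidate.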
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/2/loss.png b/fn_gen/ones_affine_scale/2/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..748f3772a7dc6e1ca3d9ddd90576072c340c43c9 Binary files /dev/null and b/fn_gen/ones_affine_scale/2/loss.png differ diff --git a/fn_gen/ones_affine_scale/2/quantization.png b/fn_gen/ones_affine_scale/2/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..c462294edeec80629d618b908c3ccb397aa9b238 Binary files /dev/null and b/fn_gen/ones_affine_scale/2/quantization.png differ diff --git a/fn_gen/ones_affine_scale/3/distortion.png b/fn_gen/ones_affine_scale/3/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..d56c26e277e24d69cd96738b95d7afa34f80e1d5 Binary files /dev/null and b/fn_gen/ones_affine_scale/3/distortion.png differ diff --git a/fn_gen/ones_affine_scale/3/expressions.txt b/fn_gen/ones_affine_scale/3/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..3758ee2a62aa8d95c3b7da1dd3fafa11b027ad9b --- /dev/null +++ b/fn_gen/ones_affine_scale/3/expressions.txt @@ -0,0 +1,2 @@ +cosh(_0*x)/_s +log(_s*x - sqrt(_s**2*x**2 - 1))/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/3/fn.py b/fn_gen/ones_affine_scale/3/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..a9789f9f651892f1410be03cb738ad0a11e5a67b --- /dev/null +++ b/fn_gen/ones_affine_scale/3/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.cosh((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.log(domain_guard(((torch.tensor(-1) * torch.sqrt(domain_guard((torch.tensor(-1) + (guarded_torch_power(params['_s'], torch.tensor(2)) * guarded_torch_power(x, torch.tensor(2)))), min=0.1, nan=0.1))) + (params['_s'] * x)), min=1e-5, nan=1e-5))) + + +def init_linear_scale( # Symmetric scale. 
From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.cosh((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.log(np_domain_guard(((np.array(-1) * np.sqrt(np_domain_guard((np.array(-1) + (np_guarded_power(_s, np.array(2)) * np_guarded_power(x, np.array(2)))), min=0.1, nan=0.1))) + (_s * x)), min=1e-5, nan=1e-5))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. 
+ """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." 
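+
+    # Note on this variant's pair: since arccosh(y) = log(y + sqrt(y**2 - 1)),
+    # the dequantization log(_s*x - sqrt(_s**2*x**2 - 1)) equals -arccosh(_s*x)
+    # for _s*x >= 1; the domain_guard calls above keep the sqrt argument
+    # (min=0.1) and the log argument (min=1e-5) inside valid ranges.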
+ + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." 
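+
+    # The fit below targets an identity round trip: for each channel, curve_fit
+    # (method='lm', i.e. Levenberg-Marquardt) adjusts the parameters so that
+    # the fitted function (fit_func above: quantize then dequantize) reproduces
+    # the input, using the sorted channel values as both xdata and ydata.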
+ np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. 
Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. + forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. 
+ """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/3/loss.png b/fn_gen/ones_affine_scale/3/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..f2084c54fd3cdffa85b1191dfa30aba2603a5383 Binary files /dev/null and b/fn_gen/ones_affine_scale/3/loss.png differ diff --git a/fn_gen/ones_affine_scale/3/quantization.png b/fn_gen/ones_affine_scale/3/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..12677f1b3c8c01904ebb753dbcf817a290d3d0a0 Binary files /dev/null and b/fn_gen/ones_affine_scale/3/quantization.png differ diff --git a/fn_gen/ones_affine_scale/4/distortion.png b/fn_gen/ones_affine_scale/4/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..6a5a2909a18d47a2377cbdb8b7d56644a8a14e2a Binary files /dev/null and b/fn_gen/ones_affine_scale/4/distortion.png differ diff --git a/fn_gen/ones_affine_scale/4/expressions.txt b/fn_gen/ones_affine_scale/4/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..0a7e5be4566beeb4727d82f95d24241966d158dc --- /dev/null +++ b/fn_gen/ones_affine_scale/4/expressions.txt @@ -0,0 +1,2 @@ +log(_0*x)/_s +exp(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/4/fn.py b/fn_gen/ones_affine_scale/4/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..95083ec871b92d9ee0f05ea5e0535e1f53e550f4 --- /dev/null +++ b/fn_gen/ones_affine_scale/4/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.log(domain_guard((params['_0'] * x), min=1e-5, nan=1e-5))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.exp((params['_s'] * x))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
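+
+    # In this variant the forward pass is log(domain_guard(_0*x, min=1e-5))/_s,
+    # so values of _0*x below 1e-5 (and NaNs) are clamped to 1e-5 before the
+    # log; the per-channel range measured below (with _s = 1) is therefore a
+    # log-domain range.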
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.log(np_domain_guard((_0 * x), min=1e-5, nan=1e-5))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.exp((_s * x))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/4/loss.png b/fn_gen/ones_affine_scale/4/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..fb9b06dd5314e2c915290be944d4a50dcd401a14 Binary files /dev/null and b/fn_gen/ones_affine_scale/4/loss.png differ diff --git a/fn_gen/ones_affine_scale/4/quantization.png b/fn_gen/ones_affine_scale/4/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..3b122702b879b1b251eb56c02412e5a5a3286ff7 Binary files /dev/null and b/fn_gen/ones_affine_scale/4/quantization.png differ diff --git a/fn_gen/ones_affine_scale/5/distortion.png b/fn_gen/ones_affine_scale/5/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..85ee6b518bcf3d13e4f464c9c857e966d4709744 Binary files /dev/null and b/fn_gen/ones_affine_scale/5/distortion.png differ diff --git a/fn_gen/ones_affine_scale/5/expressions.txt b/fn_gen/ones_affine_scale/5/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..6d6553d091cd1d343d7aa9b52b85ef6ec88ea854 --- /dev/null +++ b/fn_gen/ones_affine_scale/5/expressions.txt @@ -0,0 +1,2 @@ +x/_s +_s*x \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/5/fn.py b/fn_gen/ones_affine_scale/5/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..1c5649aca25a9156eec414be64b82a997dd28fc2 --- /dev/null +++ b/fn_gen/ones_affine_scale/5/fn.py @@ -0,0 +1,492 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (x * torch.div(1, replace_num(params['_s'], num=0, to=10000))) + + +def dequantization(x, **params): + return (params['_s'] * x) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
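+
+    # For this file's identity pair (quantize: x/_s, dequantize: _s*x) this
+    # reduces to plain symmetric absmax quantization. For example, with bits=8
+    # and a per-channel absmax of 1.0, the scale is 1.0 / ((127 - (-128)) / 2)
+    # = 1.0 / 127.5, about 0.00784 (values here are illustrative only).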
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _s): + return (x * np.divide(1, np_replace_num(_s, num=0, to=10000))) + + +def np_dequantization(x, _s): + return (_s * x) + + +def fit_func(x, _s): + x_ = np_quantization(x, _s) + x_ = np_dequantization(x_, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
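+    # Search outline: candidate tensors for `param` are scored by the MSE between x
+    # and dequant(quant(x)); the best few are kept, new candidates are resampled
+    # around their mean, and the final winner is also compared against the plain
+    # ones/random initializations before being returned.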
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
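+    # Implementation detail: `out` is a clone of `input` (so it stays attached to
+    # the autograd graph) and only out.data is overwritten with the rounded values,
+    # which keeps the rounding invisible to the backward pass.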
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/5/loss.png b/fn_gen/ones_affine_scale/5/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..0fdf64738fe3f6e179e76624266632056543e927 Binary files /dev/null and b/fn_gen/ones_affine_scale/5/loss.png differ diff --git a/fn_gen/ones_affine_scale/5/quantization.png b/fn_gen/ones_affine_scale/5/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..0cb7f54f4e5d37769ed1d172e8764493be5b43c2 Binary files /dev/null and b/fn_gen/ones_affine_scale/5/quantization.png differ diff --git a/fn_gen/ones_affine_scale/6/distortion.png b/fn_gen/ones_affine_scale/6/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..9e1d3cfa811777f9657d7dcf115dfcadb56144a7 Binary files /dev/null and b/fn_gen/ones_affine_scale/6/distortion.png differ diff --git a/fn_gen/ones_affine_scale/6/expressions.txt b/fn_gen/ones_affine_scale/6/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..e8458af52eb4cfce21cf8459f3c454003cd78158 --- /dev/null +++ b/fn_gen/ones_affine_scale/6/expressions.txt @@ -0,0 +1,2 @@ +sqrt(_0*x)/_s +_s**2*x**2/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/6/fn.py b/fn_gen/ones_affine_scale/6/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..f0c1eaa1f0c03678c1265e97148ab4b9801b93d4 --- /dev/null +++ b/fn_gen/ones_affine_scale/6/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.sqrt(domain_guard((params['_0'] * x), min=0.1, nan=0.1))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * guarded_torch_power(params['_s'], torch.tensor(2)) * guarded_torch_power(x, torch.tensor(2))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." 
+ assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.sqrt(np_domain_guard((_0 * x), min=0.1, nan=0.1))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np_guarded_power(_s, np.array(2)) * np_guarded_power(x, np.array(2))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. 
+ """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." 
+ + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." 
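+    # p0 is a dict with one per-channel initial-guess tensor per parameter symbol;
+    # curve_fit below refines these guesses channel by channel against the identity
+    # target (ydata == xdata), i.e. it fits dequant(quant(x)) ~ x.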
+ np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. 
Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. + forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. 
+ """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/6/loss.png b/fn_gen/ones_affine_scale/6/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..0d4479e9a04d85f4994557e2d82661ed0c21f53a Binary files /dev/null and b/fn_gen/ones_affine_scale/6/loss.png differ diff --git a/fn_gen/ones_affine_scale/6/quantization.png b/fn_gen/ones_affine_scale/6/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..e2ff36182f45ca4dec1e693e2b6aa826782c0ebf Binary files /dev/null and b/fn_gen/ones_affine_scale/6/quantization.png differ diff --git a/fn_gen/ones_affine_scale/7/distortion.png b/fn_gen/ones_affine_scale/7/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..1c561123304d3e1ac7ba582cd123c5fdc59c7a86 Binary files /dev/null and b/fn_gen/ones_affine_scale/7/distortion.png differ diff --git a/fn_gen/ones_affine_scale/7/expressions.txt b/fn_gen/ones_affine_scale/7/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..ecd6e238827dcdb95f4bcb390c1c300696f34254 --- /dev/null +++ b/fn_gen/ones_affine_scale/7/expressions.txt @@ -0,0 +1,2 @@ +sin(_0*x)/_s +asin(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/7/fn.py b/fn_gen/ones_affine_scale/7/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..1622b1ffb8bbc4c021ce5f32245f6e6bd8822759 --- /dev/null +++ b/fn_gen/ones_affine_scale/7/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.sin((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.asin(domain_guard((params['_s'] * x), min=-0.99999, max=0.99999, nan=0))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.sin((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.arcsin(np_domain_guard((_s * x), min=-0.99999, max=0.99999, nan=0))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/7/loss.png b/fn_gen/ones_affine_scale/7/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..c16a1dac62c9ed229be2cbf64c9befa04d5f10dd Binary files /dev/null and b/fn_gen/ones_affine_scale/7/loss.png differ diff --git a/fn_gen/ones_affine_scale/7/quantization.png b/fn_gen/ones_affine_scale/7/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..d3161c2dad009969989220a19b1f79bab7e01a3c Binary files /dev/null and b/fn_gen/ones_affine_scale/7/quantization.png differ diff --git a/fn_gen/ones_affine_scale/8/distortion.png b/fn_gen/ones_affine_scale/8/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..d25ba7aa65071a0b7c255f0c20a3a2a988bbc7a7 Binary files /dev/null and b/fn_gen/ones_affine_scale/8/distortion.png differ diff --git a/fn_gen/ones_affine_scale/8/expressions.txt b/fn_gen/ones_affine_scale/8/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..9aa25379a9d1d5a93d60659c6609b2e24e79234d --- /dev/null +++ b/fn_gen/ones_affine_scale/8/expressions.txt @@ -0,0 +1,2 @@ +exp(_0*x)/_s +log(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/8/fn.py b/fn_gen/ones_affine_scale/8/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..bf198efe34fdd2fa76aade81d8751126f80ec4a0 --- /dev/null +++ b/fn_gen/ones_affine_scale/8/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.exp((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.log(domain_guard((params['_s'] * x), min=1e-5, nan=1e-5))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.exp((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.log(np_domain_guard((_s * x), min=1e-5, nan=1e-5))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it
+        return torch.ones_like(x_min)
+
+    # Calculate the scale factor
+    scale = (_max - _min) / (x_max - x_min)
+    return scale
+
+
+
+############## Quant ###############
+
+@torch.enable_grad()
+def learn_parameters(
+    x: torch.Tensor,
+    params: Dict[str, nn.Parameter],
+    qtz_func: nn.Module,
+    deqtz_func: nn.Module,
+    bits: int,
+    target_dtype: torch.dtype,
+    epochs: int = 1000,
+    early_stop: bool = True,
+    do_report: bool = False
+) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]:
+
+    # Requires gradients in the parameters
+    for p in params.values():
+        p.requires_grad = True
+        p.grad = None
+
+    param_keys = list(params.keys())
+    param_values = list(params.values())
+
+    # Defines optimizer and loss function
+    optimizer = torch.optim.Adam(param_values, lr=0.001)
+    scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10)
+    loss_fn = nn.MSELoss()
+
+    # Contains the best loss and the best parameters
+    best_loss = float("inf")
+    best_params = None
+
+    # Used to stop the search early
+    min_delta = 1e-7
+    acc_loss = []
+    percent_epochs_before_stop = 0.1
+
+    for i in range(epochs):
+        optimizer.zero_grad()
+
+        quant = quantize(x, params, qtz_func, bits, target_dtype)
+        dequant = dequantize(quant, params, deqtz_func, bits, x.dtype)
+        loss = loss_fn(x, dequant)
+
+        if loss.isnan() or loss.isinf():
+            raise Exception("Loss is NaN or Inf. Stopping the search.")
+
+        loss.backward()
+        optimizer.step()
+        scheduler.step()
+
+        acc_loss.append(loss.item())
+
+        # Reports loss every 10 steps
+        if i % 10 == 0 and do_report:
+            print(f"Epoch {i}: Loss {loss.item()}")
+
+        # Optimizes the parameter search by storing the best loss and the parameters
+        if loss.item() < best_loss:
+            best_loss = loss.item()
+            best_params = copy.deepcopy({
+                k: v for k, v in params.items() if k in param_keys
+            })
+
+        # We also stop the search if the loss has not decreased considerably during the last 10% of epochs
+        if early_stop:
+            epochs_before_stop = int(epochs * percent_epochs_before_stop)
+            if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta:
+                break
+
+    # No longer requires gradients in the parameters
+    for p in best_params.values():
+        p.requires_grad = False
+        p.grad = None
+
+    if do_report:
+        return best_params, acc_loss
+    else:
+        return best_params
+
+
+def quantize(
+    x: torch.Tensor,
+    params: Dict[str, nn.Parameter],
+    func: nn.Module,
+    bits: int,
+    target_dtype: torch.dtype = torch.int8
+) -> torch.Tensor:
+    quant_min, quant_max = get_min_max_from_bits_signed(bits)
+    x = x.transpose(0, 1)  # Aligns shapes
+    x = func(x=x, **params)
+    x = x.transpose(0, 1)
+    x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype)
+    return x
+
+
+def dequantize(
+    x: torch.Tensor,
+    params: Dict[str, nn.Parameter],
+    func: nn.Module,
+    bits: int,
+    out_dtype: torch.dtype
+) -> torch.Tensor:
+    x = x.to(dtype=out_dtype)
+    x = x.transpose(0, 1)
+    x = func(x=x, **params)
+    x = x.transpose(0, 1)
+    return x
+
+
+def round_func_BPDA(input):
+    # This is equivalent to replacing the round function (non-differentiable) with
+    # an identity function (differentiable) only when backward.
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/8/loss.png b/fn_gen/ones_affine_scale/8/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..59dc5ad7d2c24219932f12a801d93893f204c41e Binary files /dev/null and b/fn_gen/ones_affine_scale/8/loss.png differ diff --git a/fn_gen/ones_affine_scale/8/quantization.png b/fn_gen/ones_affine_scale/8/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..d42469728537d6f02213e555571c8bbcfb715dad Binary files /dev/null and b/fn_gen/ones_affine_scale/8/quantization.png differ diff --git a/fn_gen/ones_affine_scale/9/distortion.png b/fn_gen/ones_affine_scale/9/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..53aff53fbe1def14c3515ade14bf092786b36746 Binary files /dev/null and b/fn_gen/ones_affine_scale/9/distortion.png differ diff --git a/fn_gen/ones_affine_scale/9/expressions.txt b/fn_gen/ones_affine_scale/9/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..8c0b1579c06c048d5603aa39c80e392c5906a879 --- /dev/null +++ b/fn_gen/ones_affine_scale/9/expressions.txt @@ -0,0 +1,2 @@ +cos(_0*x)/_s +acos(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/ones_affine_scale/9/fn.py b/fn_gen/ones_affine_scale/9/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..31ec03a55de208b22e6135d3278d0ad88a635b0a --- /dev/null +++ b/fn_gen/ones_affine_scale/9/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.cos((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.acos(domain_guard((params['_s'] * x), min=-0.99999, max=0.99999, nan=0))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_ones(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.cos((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.arccos(np_domain_guard((_s * x), min=-0.99999, max=0.99999, nan=0))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/ones_affine_scale/9/loss.png b/fn_gen/ones_affine_scale/9/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..cc5d5c847745eac146eb11c1e66c0ca0c49d8518 Binary files /dev/null and b/fn_gen/ones_affine_scale/9/loss.png differ diff --git a/fn_gen/ones_affine_scale/9/quantization.png b/fn_gen/ones_affine_scale/9/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..27d2ff1d24fd942078941b41713ae61c93d69f04 Binary files /dev/null and b/fn_gen/ones_affine_scale/9/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/0/distortion.png b/fn_gen/rnd_affine_scale/0/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..8f7e9c811c964347f5b2d21f725b32edd21d6f65 Binary files /dev/null and b/fn_gen/rnd_affine_scale/0/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/0/expressions.txt b/fn_gen/rnd_affine_scale/0/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..3758ee2a62aa8d95c3b7da1dd3fafa11b027ad9b --- /dev/null +++ b/fn_gen/rnd_affine_scale/0/expressions.txt @@ -0,0 +1,2 @@ +cosh(_0*x)/_s +log(_s*x - sqrt(_s**2*x**2 - 1))/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/0/fn.py b/fn_gen/rnd_affine_scale/0/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..2d66af0ba5699506d8e11055103cd62473452395 --- /dev/null +++ b/fn_gen/rnd_affine_scale/0/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.cosh((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.log(domain_guard(((torch.tensor(-1) * torch.sqrt(domain_guard((torch.tensor(-1) + (guarded_torch_power(params['_s'], torch.tensor(2)) * guarded_torch_power(x, torch.tensor(2)))), min=0.1, nan=0.1))) + (params['_s'] * x)), min=1e-5, nan=1e-5))) + + +def init_linear_scale( # Symmetric scale. 
From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.cosh((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.log(np_domain_guard(((np.array(-1) * np.sqrt(np_domain_guard((np.array(-1) + (np_guarded_power(_s, np.array(2)) * np_guarded_power(x, np.array(2)))), min=0.1, nan=0.1))) + (_s * x)), min=1e-5, nan=1e-5))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. 
+ """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." 
+ + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." 
+ np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. 
Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. + forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. 
+ """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/0/loss.png b/fn_gen/rnd_affine_scale/0/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..deffb83ed3267af9411b05dce0e7eaee2a68e3fa Binary files /dev/null and b/fn_gen/rnd_affine_scale/0/loss.png differ diff --git a/fn_gen/rnd_affine_scale/0/quantization.png b/fn_gen/rnd_affine_scale/0/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..6cf3ddbaa3467b2e7a037ee9d53bb6aa340afbe8 Binary files /dev/null and b/fn_gen/rnd_affine_scale/0/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/1/distortion.png b/fn_gen/rnd_affine_scale/1/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..3ee347ec5f21031384c36a1055c9a422ad2aa728 Binary files /dev/null and b/fn_gen/rnd_affine_scale/1/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/1/expressions.txt b/fn_gen/rnd_affine_scale/1/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..03413827fa8f4c8ad49a40b543460cf31d1ce803 --- /dev/null +++ b/fn_gen/rnd_affine_scale/1/expressions.txt @@ -0,0 +1,2 @@ +asin(_0*x)/_s +sin(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/1/fn.py b/fn_gen/rnd_affine_scale/1/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..93f9ebdc3b51bb4fe1feaa2be70d0961f2e8fca1 --- /dev/null +++ b/fn_gen/rnd_affine_scale/1/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.asin(domain_guard((params['_0'] * x), min=-0.99999, max=0.99999, nan=0))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.sin((params['_s'] * x))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.arcsin(np_domain_guard((_0 * x), min=-0.99999, max=0.99999, nan=0))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.sin((_s * x))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/1/loss.png b/fn_gen/rnd_affine_scale/1/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..cdb82e6913f192a92f124698d925f359cdc3d719 Binary files /dev/null and b/fn_gen/rnd_affine_scale/1/loss.png differ diff --git a/fn_gen/rnd_affine_scale/1/quantization.png b/fn_gen/rnd_affine_scale/1/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..42e9a17c648c030b078d8dddf48a7ffc2a4bd6ce Binary files /dev/null and b/fn_gen/rnd_affine_scale/1/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/10/distortion.png b/fn_gen/rnd_affine_scale/10/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..a2ee371b1e8485c42a4c71d82570d92f84b5ccd0 Binary files /dev/null and b/fn_gen/rnd_affine_scale/10/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/10/expressions.txt b/fn_gen/rnd_affine_scale/10/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..dbb6da0fc54c6f23dc12daf2e2c3a395819e1bf4 --- /dev/null +++ b/fn_gen/rnd_affine_scale/10/expressions.txt @@ -0,0 +1,2 @@ +x**2/_s +sqrt(_s*x) \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/10/fn.py b/fn_gen/rnd_affine_scale/10/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..d9d2f630273164837deabc06e73bd11ad28c6258 --- /dev/null +++ b/fn_gen/rnd_affine_scale/10/fn.py @@ -0,0 +1,492 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * guarded_torch_power(x, torch.tensor(2))) + + +def dequantization(x, **params): + return torch.sqrt(domain_guard((params['_s'] * x), min=0.1, nan=0.1)) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np_guarded_power(x, np.array(2))) + + +def np_dequantization(x, _s): + return np.sqrt(np_domain_guard((_s * x), min=0.1, nan=0.1)) + + +def fit_func(x, _s): + x_ = np_quantization(x, _s) + x_ = np_dequantization(x_, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
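+    # NOTE: this is a second definition of init_linear_scale within the same
+    # generated module; in Python the later binding is the one in effect when the
+    # function is eventually called. Also note that the noise-injection lines
+    # placed after the unconditional `return scale` below are unreachable as written.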
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/10/loss.png b/fn_gen/rnd_affine_scale/10/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..01c390ed79f56ee41ce39eb049d5119875ab8e40 Binary files /dev/null and b/fn_gen/rnd_affine_scale/10/loss.png differ diff --git a/fn_gen/rnd_affine_scale/10/quantization.png b/fn_gen/rnd_affine_scale/10/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..114c44936bf8e8e42acc1426bb6a9100f9f30d25 Binary files /dev/null and b/fn_gen/rnd_affine_scale/10/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/11/distortion.png b/fn_gen/rnd_affine_scale/11/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..e11ad4d1cef9d9cacd0d00151482778be0612b0f Binary files /dev/null and b/fn_gen/rnd_affine_scale/11/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/11/expressions.txt b/fn_gen/rnd_affine_scale/11/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..9aa25379a9d1d5a93d60659c6609b2e24e79234d --- /dev/null +++ b/fn_gen/rnd_affine_scale/11/expressions.txt @@ -0,0 +1,2 @@ +exp(_0*x)/_s +log(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/11/fn.py b/fn_gen/rnd_affine_scale/11/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..ebfc20faab64daa375c23a9ad6da255d6f7af91d --- /dev/null +++ b/fn_gen/rnd_affine_scale/11/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.exp((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.log(domain_guard((params['_s'] * x), min=1e-5, nan=1e-5))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
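+    # NOTE (illustrative): this file's quantization/dequantization pair,
+    #     q(x) = exp(_0 * x) / _s   and   d(y) = log(_s * y) / _0,
+    # is an exact inverse pair wherever the domain guards do not bind:
+    #     d(q(x)) = log(_s * exp(_0 * x) / _s) / _0 = x.
+    # The rounding and clamping applied in quantize() are what introduce the
+    # reconstruction error that learn_parameters() later minimizes.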
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.exp((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.log(np_domain_guard((_s * x), min=1e-5, nan=1e-5))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
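+    # NOTE: the statistics below are taken with torch.aminmax(x_, dim=1), i.e.
+    # one scale per row of x (per channel, assuming channels lie along dim 0),
+    # not a single tensor-wide scale. The transposes around qtz_func only align
+    # shapes so the per-channel parameters broadcast correctly, and they are
+    # undone before the min/max values are computed.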
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/11/loss.png b/fn_gen/rnd_affine_scale/11/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..2bb4f6d028d19815e4d96a94f618c3940ac80167 Binary files /dev/null and b/fn_gen/rnd_affine_scale/11/loss.png differ diff --git a/fn_gen/rnd_affine_scale/11/quantization.png b/fn_gen/rnd_affine_scale/11/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..bb438afcfde4b14b310fed90c82c2986080a6d54 Binary files /dev/null and b/fn_gen/rnd_affine_scale/11/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/12/distortion.png b/fn_gen/rnd_affine_scale/12/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..c0eea75f079d06123476abfe3f037b9214ed360c Binary files /dev/null and b/fn_gen/rnd_affine_scale/12/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/12/expressions.txt b/fn_gen/rnd_affine_scale/12/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..b835531ccc3a3813012a9a9487415f4f73afabc7 --- /dev/null +++ b/fn_gen/rnd_affine_scale/12/expressions.txt @@ -0,0 +1,2 @@ +sinh(_0*x)/_s +log(_s*x - sqrt(_s**2*x**2 + 1))/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/12/fn.py b/fn_gen/rnd_affine_scale/12/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..5c025df77a8869a9ad0540e6a7e57a3dfda9f70c --- /dev/null +++ b/fn_gen/rnd_affine_scale/12/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.sinh((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.log(domain_guard(((torch.tensor(-1) * torch.sqrt(domain_guard((torch.tensor(1) + (guarded_torch_power(params['_s'], torch.tensor(2)) * guarded_torch_power(x, torch.tensor(2)))), min=0.1, nan=0.1))) + (params['_s'] * x)), min=1e-5, nan=1e-5))) + + +def init_linear_scale( # Symmetric scale. 
From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.sinh((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.log(np_domain_guard(((np.array(-1) * np.sqrt(np_domain_guard((np.array(1) + (np_guarded_power(_s, np.array(2)) * np_guarded_power(x, np.array(2)))), min=0.1, nan=0.1))) + (_s * x)), min=1e-5, nan=1e-5))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. 
+ """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." 
+ + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." 
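+    # NOTE (summary): each channel is fitted independently with
+    # scipy.optimize.curve_fit (Levenberg-Marquardt, maxfev=1000), using the
+    # quantize -> dequantize round trip (typically the fit_func defined above) as
+    # the model and the sorted channel values as both xdata and ydata, i.e. the fit
+    # searches for parameters that make the fake-quantized output reproduce the
+    # input. If the fit raises a ValueError, the provided p0 is returned (as
+    # tensors) as a fallback.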
+ np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. 
Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. + forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. 
+ """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/12/loss.png b/fn_gen/rnd_affine_scale/12/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..eac2130fc2fb7f7d8a4c3d457f0e8728fcb09b58 Binary files /dev/null and b/fn_gen/rnd_affine_scale/12/loss.png differ diff --git a/fn_gen/rnd_affine_scale/12/quantization.png b/fn_gen/rnd_affine_scale/12/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..5cc4b487a5d7512a8225d155385cf1d5b2300bdc Binary files /dev/null and b/fn_gen/rnd_affine_scale/12/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/13/distortion.png b/fn_gen/rnd_affine_scale/13/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..f69e5b66fc9e47b8bc0969fca6c1cb9fef2c44d0 Binary files /dev/null and b/fn_gen/rnd_affine_scale/13/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/13/expressions.txt b/fn_gen/rnd_affine_scale/13/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..ec55493201f7b2b8effaefed75e0a9258fc25c56 --- /dev/null +++ b/fn_gen/rnd_affine_scale/13/expressions.txt @@ -0,0 +1,2 @@ +tanh(_0*x)/_s +log((-_s*x - 1)/(_s*x - 1))/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/13/fn.py b/fn_gen/rnd_affine_scale/13/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..7ac0e4def25a5552200d3172cf009ab2db0bb240 --- /dev/null +++ b/fn_gen/rnd_affine_scale/13/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.tanh((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.log(domain_guard((torch.div(1, replace_num((torch.tensor(-1) + (params['_s'] * x)), num=0, to=10000)) * (torch.tensor(-1) + (torch.tensor(-1) * params['_s'] * x))), min=1e-5, nan=1e-5))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.tanh((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.log(np_domain_guard((np.divide(1, np_replace_num((np.array(-1) + (_s * x)), num=0, to=10000)) * (np.array(-1) + (np.array(-1) * _s * x))), min=1e-5, nan=1e-5))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. 
+ """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." 
+ + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." 
+ np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. 
Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. + forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. 
+ """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/13/loss.png b/fn_gen/rnd_affine_scale/13/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..0760cb4e3cd3bccb44400b86afeee64c58a1a788 Binary files /dev/null and b/fn_gen/rnd_affine_scale/13/loss.png differ diff --git a/fn_gen/rnd_affine_scale/13/quantization.png b/fn_gen/rnd_affine_scale/13/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..72509e381790413432f1d5dc1d8dd70ae86ccd47 Binary files /dev/null and b/fn_gen/rnd_affine_scale/13/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/14/distortion.png b/fn_gen/rnd_affine_scale/14/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..23b81503ab3f93411cc48d1c514f820b2d51580f Binary files /dev/null and b/fn_gen/rnd_affine_scale/14/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/14/expressions.txt b/fn_gen/rnd_affine_scale/14/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..c545adce8b3c320e195336b81461c79d0cc385e6 --- /dev/null +++ b/fn_gen/rnd_affine_scale/14/expressions.txt @@ -0,0 +1,2 @@ +asinh(_0*x)/_s +sinh(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/14/fn.py b/fn_gen/rnd_affine_scale/14/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..ea320e4122220f98a51e1ee8646c831a1f9c1a4a --- /dev/null +++ b/fn_gen/rnd_affine_scale/14/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.asinh((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.sinh((params['_s'] * x))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.arcsinh((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.sinh((_s * x))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
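+    # Worked example (illustrative numbers, added commentary): with bits=8,
+    # get_min_max_from_bits_signed gives (quant_min, quant_max) = (-128, 127), so
+    # (quant_max - quant_min) / 2 = 127.5; a channel whose trial quantization spans [-3.2, 1.1]
+    # has abs_max_val_per_ch = 3.2 and scale = 3.2 / 127.5 ≈ 0.0251.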
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
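+    # Illustrative gradient check (added commentary, shown as comments only):
+    #   x = torch.tensor([0.4, 1.6], requires_grad=True)
+    #   y = round_func_BPDA(x)   # forward values become [0., 2.]
+    #   y.sum().backward()       # x.grad == [1., 1.]
+    # The forward pass uses the rounded values while backward behaves like the identity, because
+    # `out` keeps the autograd graph of `input.clone()` and only its .data is overwritten.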
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/14/loss.png b/fn_gen/rnd_affine_scale/14/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..d457d48e7d499decffa0e341e34c0a94038279a3 Binary files /dev/null and b/fn_gen/rnd_affine_scale/14/loss.png differ diff --git a/fn_gen/rnd_affine_scale/14/quantization.png b/fn_gen/rnd_affine_scale/14/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..91eb37b4257b4a4937b38a6d0464a351423531cf Binary files /dev/null and b/fn_gen/rnd_affine_scale/14/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/15/distortion.png b/fn_gen/rnd_affine_scale/15/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..80062d81ca535bbfdae23e9d3f2f8e6950ff54d1 Binary files /dev/null and b/fn_gen/rnd_affine_scale/15/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/15/expressions.txt b/fn_gen/rnd_affine_scale/15/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..0a7e5be4566beeb4727d82f95d24241966d158dc --- /dev/null +++ b/fn_gen/rnd_affine_scale/15/expressions.txt @@ -0,0 +1,2 @@ +log(_0*x)/_s +exp(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/15/fn.py b/fn_gen/rnd_affine_scale/15/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..caf8214d285731aad8f9ecfa204fc2c0cdcd0fb9 --- /dev/null +++ b/fn_gen/rnd_affine_scale/15/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.log(domain_guard((params['_0'] * x), min=1e-5, nan=1e-5))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.exp((params['_s'] * x))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
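+    # Explanatory note (added commentary) on this file's log/exp pair: for _0*x > 0 the round
+    # trip is exact up to rounding/clamping, since exp(_s * (log(_0*x) / _s)) / _0 = x. The
+    # quantizer's domain_guard(..., min=1e-5, nan=1e-5) clamps non-positive products to 1e-5 so
+    # the log stays finite, which is where reconstruction error concentrates for such inputs.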
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.log(np_domain_guard((_0 * x), min=1e-5, nan=1e-5))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.exp((_s * x))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
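+    # Note (added commentary): get_min_max_from_bits_signed(bits), used below, returns the signed
+    # two's-complement range (-2**(bits-1), 2**(bits-1) - 1), e.g. (-8, 7) for 4 bits and
+    # (-128, 127) for 8 bits.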
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
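+    # Note on usage (added commentary): quantize() applies this rounding after the learned
+    # transform and before the clamp to [quant_min, quant_max] and the cast to target_dtype.
+    # Hedged observation: if target_dtype is an integer dtype, that final cast does not carry
+    # gradients, so during learn_parameters() the parameters are effectively optimized through
+    # the dequantization expression rather than through this straight-through path.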
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/15/loss.png b/fn_gen/rnd_affine_scale/15/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..3e5a3c424d5ac813af18c85be25b90b581df2e8b Binary files /dev/null and b/fn_gen/rnd_affine_scale/15/loss.png differ diff --git a/fn_gen/rnd_affine_scale/15/quantization.png b/fn_gen/rnd_affine_scale/15/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..b46838d6620843aafb7df900133a2050de17bc11 Binary files /dev/null and b/fn_gen/rnd_affine_scale/15/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/16/distortion.png b/fn_gen/rnd_affine_scale/16/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..35c09c0264d6fac7b064f7bfcf39e8bcf6a25f54 Binary files /dev/null and b/fn_gen/rnd_affine_scale/16/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/16/expressions.txt b/fn_gen/rnd_affine_scale/16/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..576ec6a351e26f9982eb17e394804ca906d4b067 --- /dev/null +++ b/fn_gen/rnd_affine_scale/16/expressions.txt @@ -0,0 +1,2 @@ +acos(_0*x)/_s +cos(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/16/fn.py b/fn_gen/rnd_affine_scale/16/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..7a473cf5ec8f767c567289ad4d103e798214efc2 --- /dev/null +++ b/fn_gen/rnd_affine_scale/16/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.acos(domain_guard((params['_0'] * x), min=-0.99999, max=0.99999, nan=0))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.cos((params['_s'] * x))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
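+    # Explanatory note (added commentary) on this file's acos/cos pair:
+    # cos(_s * (acos(_0*x) / _s)) / _0 = x whenever _0*x lies in [-1, 1], which is why the
+    # quantizer wraps its argument in domain_guard(..., min=-0.99999, max=0.99999, nan=0);
+    # values clamped by that guard are not reconstructed exactly.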
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.arccos(np_domain_guard((_0 * x), min=-0.99999, max=0.99999, nan=0))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.cos((_s * x))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
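+    # Note (added commentary): the per-channel reduction below (torch.aminmax over dim=1 after
+    # transposing back) yields one scale value per row of x, computed from a trial quantization
+    # with the supplied params and _s temporarily set to ones.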
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/16/loss.png b/fn_gen/rnd_affine_scale/16/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..7aa6b266ea7f53a972d40e74b41f2337ee7d3cf3 Binary files /dev/null and b/fn_gen/rnd_affine_scale/16/loss.png differ diff --git a/fn_gen/rnd_affine_scale/16/quantization.png b/fn_gen/rnd_affine_scale/16/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..6acf7b6a3a58b6e4e1b1dbaa147b88e2a3256e00 Binary files /dev/null and b/fn_gen/rnd_affine_scale/16/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/17/distortion.png b/fn_gen/rnd_affine_scale/17/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..2e641b141773a233349873850aaf3afe07ae267b Binary files /dev/null and b/fn_gen/rnd_affine_scale/17/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/17/expressions.txt b/fn_gen/rnd_affine_scale/17/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..23606e9f370f2e4adb43ed623c49d7fcaabd7355 --- /dev/null +++ b/fn_gen/rnd_affine_scale/17/expressions.txt @@ -0,0 +1,2 @@ +tan(_0*x)/_s +atan(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/17/fn.py b/fn_gen/rnd_affine_scale/17/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..6c200b911566d95a3ca32d60b71e1f4f0564287b --- /dev/null +++ b/fn_gen/rnd_affine_scale/17/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.tan(domain_guard((params['_0'] * x), posinf=1, neginf=-1, nan=0))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.atan((params['_s'] * x))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
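+    # Explanatory note (added commentary) on this file's tan/atan pair:
+    # atan(_s * (tan(_0*x) / _s)) / _0 = x only while _0*x stays inside (-pi/2, pi/2); outside
+    # that band tan wraps and the round trip lands on a different branch. The domain_guard here
+    # only replaces NaN/inf values (posinf=1, neginf=-1, nan=0); it does not clamp the angle.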
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.tan(np_domain_guard((_0 * x), posinf=1, neginf=-1, nan=0))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.arctan((_s * x))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
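+    # Shape convention: the statistics are taken along dim=1 of the quantized
+    # tensor (after it is transposed back), so each entry of the returned scale
+    # corresponds to one row (channel) of the input tensor.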
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
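+    # Example: for an input value of 2.3 the forward result is 2.0, while the
+    # gradient of the output with respect to the input stays 1, because the
+    # value swap below happens outside of autograd.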
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/17/loss.png b/fn_gen/rnd_affine_scale/17/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..d7710300745f01b8d39566131f90ba662bbec682 Binary files /dev/null and b/fn_gen/rnd_affine_scale/17/loss.png differ diff --git a/fn_gen/rnd_affine_scale/17/quantization.png b/fn_gen/rnd_affine_scale/17/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..71d2e3940ebb82717d12941720040e11329a834d Binary files /dev/null and b/fn_gen/rnd_affine_scale/17/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/18/distortion.png b/fn_gen/rnd_affine_scale/18/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..3b602d31bc06096f7e1653b88e26bfffa711f1b5 Binary files /dev/null and b/fn_gen/rnd_affine_scale/18/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/18/expressions.txt b/fn_gen/rnd_affine_scale/18/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..ecd6e238827dcdb95f4bcb390c1c300696f34254 --- /dev/null +++ b/fn_gen/rnd_affine_scale/18/expressions.txt @@ -0,0 +1,2 @@ +sin(_0*x)/_s +asin(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/18/fn.py b/fn_gen/rnd_affine_scale/18/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..691b881a9d064411c12ddd66ec1eabde86b77cbe --- /dev/null +++ b/fn_gen/rnd_affine_scale/18/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.sin((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.asin(domain_guard((params['_s'] * x), min=-0.99999, max=0.99999, nan=0))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
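+    # Minimal usage sketch (assuming a 2D weight tensor `w` with one row per
+    # channel, and bits=8 as an example; `init_ones` and `quantization` are the
+    # helpers defined in this file):
+    #   params = {'_0': init_ones(w)}
+    #   params['_s'] = init_linear_scale(w, bits=8, params=params, qtz_func=quantization)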
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.sin((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.arcsin(np_domain_guard((_s * x), min=-0.99999, max=0.99999, nan=0))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/18/loss.png b/fn_gen/rnd_affine_scale/18/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..bd7a20fb55fc5f321ba94a0c86a7d67a5a6e55e5 Binary files /dev/null and b/fn_gen/rnd_affine_scale/18/loss.png differ diff --git a/fn_gen/rnd_affine_scale/18/quantization.png b/fn_gen/rnd_affine_scale/18/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..4c471f1263feb2611fb0345b496907ee796e3e1f Binary files /dev/null and b/fn_gen/rnd_affine_scale/18/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/2/distortion.png b/fn_gen/rnd_affine_scale/2/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..4792a6637a50504ae431a52cd7e67da94b147bcc Binary files /dev/null and b/fn_gen/rnd_affine_scale/2/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/2/expressions.txt b/fn_gen/rnd_affine_scale/2/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..c7b68c388fdf6e1b6e2be8076f1d4b8d7bcef4f9 --- /dev/null +++ b/fn_gen/rnd_affine_scale/2/expressions.txt @@ -0,0 +1,2 @@ +(_0*x)**(1/3)/_s +_s**3*x**3/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/2/fn.py b/fn_gen/rnd_affine_scale/2/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..382d339f34d61e748fa8c1fe1888aa5e6a8578ac --- /dev/null +++ b/fn_gen/rnd_affine_scale/2/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * guarded_torch_power((params['_0'] * x), 1 / 3)) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * guarded_torch_power(params['_s'], torch.tensor(3)) * guarded_torch_power(x, torch.tensor(3))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
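+    # For example, with bits=8, get_min_max_from_bits_signed returns (-128, 127),
+    # so the divisor (quant_max - quant_min) / 2 used below is 127.5.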
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np_guarded_power((_0 * x), 1 / 3)) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np_guarded_power(_s, np.array(3)) * np_guarded_power(x, np.array(3))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
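+    # The scale is clamped below by float32 eps so that channels whose
+    # quantized values are all zero still get a small positive scale.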
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/2/loss.png b/fn_gen/rnd_affine_scale/2/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..ce432b0a1c08d25e31d0a5c68a3e51949ae1634b Binary files /dev/null and b/fn_gen/rnd_affine_scale/2/loss.png differ diff --git a/fn_gen/rnd_affine_scale/2/quantization.png b/fn_gen/rnd_affine_scale/2/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..186b893179747a382e191680a54e8a64affd603c Binary files /dev/null and b/fn_gen/rnd_affine_scale/2/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/3/distortion.png b/fn_gen/rnd_affine_scale/3/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..799fb34eae443514b7f2bd74b8f86043904afa1c Binary files /dev/null and b/fn_gen/rnd_affine_scale/3/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/3/expressions.txt b/fn_gen/rnd_affine_scale/3/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..8c0b1579c06c048d5603aa39c80e392c5906a879 --- /dev/null +++ b/fn_gen/rnd_affine_scale/3/expressions.txt @@ -0,0 +1,2 @@ +cos(_0*x)/_s +acos(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/3/fn.py b/fn_gen/rnd_affine_scale/3/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..c1a009b00c5281ac7ed330627fe5423aec169bcc --- /dev/null +++ b/fn_gen/rnd_affine_scale/3/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.cos((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.acos(domain_guard((params['_s'] * x), min=-0.99999, max=0.99999, nan=0))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
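+    # min_vals and max_vals are clamped so the observed range always contains
+    # zero, which keeps the derived scale symmetric around zero.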
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.cos((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.arccos(np_domain_guard((_s * x), min=-0.99999, max=0.99999, nan=0))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
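+ # In other words, a straight-through estimator: the forward pass uses the
+ # rounded values, while gradients flow back through the identity.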
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/3/loss.png b/fn_gen/rnd_affine_scale/3/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..1bfc4298e752721cb56adc08410a8a369846cfb7 Binary files /dev/null and b/fn_gen/rnd_affine_scale/3/loss.png differ diff --git a/fn_gen/rnd_affine_scale/3/quantization.png b/fn_gen/rnd_affine_scale/3/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..91a737e4c42720399af820a3d399927be7d67244 Binary files /dev/null and b/fn_gen/rnd_affine_scale/3/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/4/distortion.png b/fn_gen/rnd_affine_scale/4/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..55f649d865800d8c6b638878ea726ab6db8259d4 Binary files /dev/null and b/fn_gen/rnd_affine_scale/4/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/4/expressions.txt b/fn_gen/rnd_affine_scale/4/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..74791fc40576643d62f6366a8b4eda20eb1ad252 --- /dev/null +++ b/fn_gen/rnd_affine_scale/4/expressions.txt @@ -0,0 +1,2 @@ +x**3/_s +(_s*x)**(1/3) \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/4/fn.py b/fn_gen/rnd_affine_scale/4/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..a01e9d86bcb7ff432bd7364ad3b6b592000ad62e --- /dev/null +++ b/fn_gen/rnd_affine_scale/4/fn.py @@ -0,0 +1,492 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * guarded_torch_power(x, torch.tensor(3))) + + +def dequantization(x, **params): + return guarded_torch_power((params['_s'] * x), 1 / 3) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
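+ # Runs the quantizer once with _s fixed to ones, then derives a per-channel
+ # symmetric (absmax) scale so that dividing by it maps the output onto the
+ # signed integer range implied by `bits`; clamped to eps so it is never zero.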
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np_guarded_power(x, np.array(3))) + + +def np_dequantization(x, _s): + return np_guarded_power((_s * x), 1 / 3) + + +def fit_func(x, _s): + x_ = np_quantization(x, _s) + x_ = np_dequantization(x_, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
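+ # In other words, a straight-through estimator: the forward pass uses the
+ # rounded values, while gradients flow back through the identity.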
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/4/loss.png b/fn_gen/rnd_affine_scale/4/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..17c741e9d2518dc0e7a41adc64320c783e914249 Binary files /dev/null and b/fn_gen/rnd_affine_scale/4/loss.png differ diff --git a/fn_gen/rnd_affine_scale/4/quantization.png b/fn_gen/rnd_affine_scale/4/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..c462294edeec80629d618b908c3ccb397aa9b238 Binary files /dev/null and b/fn_gen/rnd_affine_scale/4/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/5/distortion.png b/fn_gen/rnd_affine_scale/5/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..985ddb2ece3ea4a04cc0149c504556b24dfe80e6 Binary files /dev/null and b/fn_gen/rnd_affine_scale/5/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/5/expressions.txt b/fn_gen/rnd_affine_scale/5/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..5a7abbbdac98c7d53123fe0b9807e7644bc00acf --- /dev/null +++ b/fn_gen/rnd_affine_scale/5/expressions.txt @@ -0,0 +1,2 @@ +acosh(_0*x)/_s +cosh(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/5/fn.py b/fn_gen/rnd_affine_scale/5/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..3d11983f3fd3039fca180dfe37be076ca1af3762 --- /dev/null +++ b/fn_gen/rnd_affine_scale/5/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.acosh(domain_guard((params['_0'] * x), min=1, nan=1))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.cosh((params['_s'] * x))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
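+ # Runs the quantizer once with _s fixed to ones, then derives a per-channel
+ # symmetric (absmax) scale so that dividing by it maps the output onto the
+ # signed integer range implied by `bits`; clamped to eps so it is never zero.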
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.arccosh(np_domain_guard((_0 * x), min=1, nan=1))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.cosh((_s * x))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
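+ # In other words, a straight-through estimator: the forward pass uses the
+ # rounded values, while gradients flow back through the identity.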
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/5/loss.png b/fn_gen/rnd_affine_scale/5/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..abc19ff29cd7964f14eb6741934510149a58cad9 Binary files /dev/null and b/fn_gen/rnd_affine_scale/5/loss.png differ diff --git a/fn_gen/rnd_affine_scale/5/quantization.png b/fn_gen/rnd_affine_scale/5/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..14c2e2f410a2f4b55c06ad5e1d223d7c4e054aee Binary files /dev/null and b/fn_gen/rnd_affine_scale/5/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/6/distortion.png b/fn_gen/rnd_affine_scale/6/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..2a790213362cb64bae4291f825153d29faf6981c Binary files /dev/null and b/fn_gen/rnd_affine_scale/6/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/6/expressions.txt b/fn_gen/rnd_affine_scale/6/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..aa32b575e8c654dbc457c94f36222e70d86dc940 --- /dev/null +++ b/fn_gen/rnd_affine_scale/6/expressions.txt @@ -0,0 +1,2 @@ +atan(_0*x)/_s +tan(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/6/fn.py b/fn_gen/rnd_affine_scale/6/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..a5c6a15d659281120d2a535e741a7d4d684dfb13 --- /dev/null +++ b/fn_gen/rnd_affine_scale/6/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.atan((params['_0'] * x))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.tan(domain_guard((params['_s'] * x), posinf=1, neginf=-1, nan=0))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
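+ # Runs the quantizer once with _s fixed to ones, then derives a per-channel
+ # symmetric (absmax) scale so that dividing by it maps the output onto the
+ # signed integer range implied by `bits`; clamped to eps so it is never zero.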
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.arctan((_0 * x))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.tan(np_domain_guard((_s * x), posinf=1, neginf=-1, nan=0))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
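+ # In other words, a straight-through estimator: the forward pass uses the
+ # rounded values, while gradients flow back through the identity.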
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/6/loss.png b/fn_gen/rnd_affine_scale/6/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..720b8e19c87132a4d130cc9628fdbe446c8498ef Binary files /dev/null and b/fn_gen/rnd_affine_scale/6/loss.png differ diff --git a/fn_gen/rnd_affine_scale/6/quantization.png b/fn_gen/rnd_affine_scale/6/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..1b9f761250134d3e4e98b4a2ed726d7314d2fc7f Binary files /dev/null and b/fn_gen/rnd_affine_scale/6/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/7/distortion.png b/fn_gen/rnd_affine_scale/7/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..85ee6b518bcf3d13e4f464c9c857e966d4709744 Binary files /dev/null and b/fn_gen/rnd_affine_scale/7/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/7/expressions.txt b/fn_gen/rnd_affine_scale/7/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..6d6553d091cd1d343d7aa9b52b85ef6ec88ea854 --- /dev/null +++ b/fn_gen/rnd_affine_scale/7/expressions.txt @@ -0,0 +1,2 @@ +x/_s +_s*x \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/7/fn.py b/fn_gen/rnd_affine_scale/7/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..1c5649aca25a9156eec414be64b82a997dd28fc2 --- /dev/null +++ b/fn_gen/rnd_affine_scale/7/fn.py @@ -0,0 +1,492 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (x * torch.div(1, replace_num(params['_s'], num=0, to=10000))) + + +def dequantization(x, **params): + return (params['_s'] * x) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
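+ # Runs the quantizer once with _s fixed to ones, then derives a per-channel
+ # symmetric (absmax) scale so that dividing by it maps the output onto the
+ # signed integer range implied by `bits`; clamped to eps so it is never zero.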
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _s): + return (x * np.divide(1, np_replace_num(_s, num=0, to=10000))) + + +def np_dequantization(x, _s): + return (_s * x) + + +def fit_func(x, _s): + x_ = np_quantization(x, _s) + x_ = np_dequantization(x_, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
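    # Note: tensors appear to be laid out as (channels, features) throughout this
    # file; the transpose(0, 1) calls below and in quantize/dequantize move the
    # channel dimension last so that per-channel parameters such as _s broadcast
    # correctly when the quantization function is applied.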
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/7/loss.png b/fn_gen/rnd_affine_scale/7/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..f32ec16e2d60081c65167be3e4b7f5283e8c224d Binary files /dev/null and b/fn_gen/rnd_affine_scale/7/loss.png differ diff --git a/fn_gen/rnd_affine_scale/7/quantization.png b/fn_gen/rnd_affine_scale/7/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..0cb7f54f4e5d37769ed1d172e8764493be5b43c2 Binary files /dev/null and b/fn_gen/rnd_affine_scale/7/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/8/distortion.png b/fn_gen/rnd_affine_scale/8/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..f1dd4d4634768f3b1cfa521e234a048195bb301e Binary files /dev/null and b/fn_gen/rnd_affine_scale/8/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/8/expressions.txt b/fn_gen/rnd_affine_scale/8/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..e8458af52eb4cfce21cf8459f3c454003cd78158 --- /dev/null +++ b/fn_gen/rnd_affine_scale/8/expressions.txt @@ -0,0 +1,2 @@ +sqrt(_0*x)/_s +_s**2*x**2/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/8/fn.py b/fn_gen/rnd_affine_scale/8/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..0d6c05fb6c11c1ca88d28fb28b12090fed6ad83c --- /dev/null +++ b/fn_gen/rnd_affine_scale/8/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.sqrt(domain_guard((params['_0'] * x), min=0.1, nan=0.1))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * guarded_torch_power(params['_s'], torch.tensor(2)) * guarded_torch_power(x, torch.tensor(2))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." 
+ assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.sqrt(np_domain_guard((_0 * x), min=0.1, nan=0.1))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np_guarded_power(_s, np.array(2)) * np_guarded_power(x, np.array(2))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. 
+ """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." 
+ + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." + + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." 
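    # Note: this initializer fits the numpy round-trip function (np_fit_func,
    # typically the fit_func defined above) to each channel independently with
    # scipy.optimize.curve_fit (Levenberg-Marquardt, maxfev=1000), using the
    # provided p0 as the starting point and the sorted channel data as both
    # xdata and ydata, i.e. it searches for parameters under which
    # dequantization(quantization(x)) reproduces x. If the fit raises a
    # ValueError, p0 is returned unchanged as a fallback.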
+ np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. 
Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. + forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. 
+ """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/8/loss.png b/fn_gen/rnd_affine_scale/8/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..7f5084ce345cfc007429552b37bb3acc046e48ea Binary files /dev/null and b/fn_gen/rnd_affine_scale/8/loss.png differ diff --git a/fn_gen/rnd_affine_scale/8/quantization.png b/fn_gen/rnd_affine_scale/8/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..e2ff36182f45ca4dec1e693e2b6aa826782c0ebf Binary files /dev/null and b/fn_gen/rnd_affine_scale/8/quantization.png differ diff --git a/fn_gen/rnd_affine_scale/9/distortion.png b/fn_gen/rnd_affine_scale/9/distortion.png new file mode 100644 index 0000000000000000000000000000000000000000..3dfbc794707217eca349f8ff87f25a705727380d Binary files /dev/null and b/fn_gen/rnd_affine_scale/9/distortion.png differ diff --git a/fn_gen/rnd_affine_scale/9/expressions.txt b/fn_gen/rnd_affine_scale/9/expressions.txt new file mode 100644 index 0000000000000000000000000000000000000000..ed99293c42843616c361d59b23d32ae553cc0f8d --- /dev/null +++ b/fn_gen/rnd_affine_scale/9/expressions.txt @@ -0,0 +1,2 @@ +atanh(_0*x)/_s +tanh(_s*x)/_0 \ No newline at end of file diff --git a/fn_gen/rnd_affine_scale/9/fn.py b/fn_gen/rnd_affine_scale/9/fn.py new file mode 100644 index 0000000000000000000000000000000000000000..d66c2e8bb13f1068b688a20b8e83a2f98e4f68e6 --- /dev/null +++ b/fn_gen/rnd_affine_scale/9/fn.py @@ -0,0 +1,493 @@ +from __future__ import annotations + +import torch +from torch import amin # Necessary for arcsin +import copy +import torch.nn as nn +import numpy as np + +from scipy.optimize import curve_fit +from typing import Dict, Any, Tuple, List, Callable + + +def quantization(x, **params): + return (torch.div(1, replace_num(params['_s'], num=0, to=10000)) * torch.atanh(domain_guard((params['_0'] * x), min=-0.9999, max=0.9999, nan=0))) + + +def dequantization(x, **params): + return (torch.div(1, replace_num(params['_0'], num=0, to=10000)) * torch.tanh((params['_s'] * x))) + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_params(x: torch.Tensor, **kwargs: Dict[str, Any]) -> Dict[str, nn.Parameter]: + params = { + '_0': init_rand(x, qtz_func=quantization, deqtz_func=dequantization, param='_0', params_list=['_0', '_s'], **kwargs), + } + params['_s'] = init_linear_scale(x, params=params, qtz_func=quantization, **kwargs) + params = {k: nn.Parameter(v, requires_grad=False) for k, v in params.items()} + + if 'post_init_hook' in kwargs: + kwargs['post_init_hook'](parameters=params) + + + if 'post_train_hook' in kwargs: + kwargs['post_train_hook'](parameters=params) + + return params + + +############### Numpy Qtz ############### + + +def np_quantization(x, _0, _s): + return (np.divide(1, np_replace_num(_s, num=0, to=10000)) * np.arctanh(np_domain_guard((_0 * x), min=-0.9999, max=0.9999, nan=0))) + + +def np_dequantization(x, _0, _s): + return (np.divide(1, np_replace_num(_0, num=0, to=10000)) * np.tanh((_s * x))) + + +def fit_func(x, _0, _s): + x_ = np_quantization(x, _0, _s) + x_ = np_dequantization(x_, _0, _s) + return x_ + + + +############### HELPERS ############### + +def domain_guard( + x: torch.Tensor, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> torch.Tensor: + """Guard a tensor to a valid domain.""" + x = torch.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = torch.clamp(x, min=min, max=max) + return x + + +def replace_num(x: torch.Tensor, num: float, to: float) -> torch.Tensor: + """Replace a number in a tensor with another number. + + Args: + x (torch.Tensor): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + torch.Tensor: The tensor with the number replaced. + """ + return torch.where(x == num, to, x) + + +def guarded_torch_power(x: torch.Tensor, exp: float) -> torch.Tensor: + """Guard the power operation to a valid domain.""" + return torch.pow(x, exp) if exp >= 1 else torch.pow(torch.relu(x), exp) + + +def init_ones(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.ones_like(val, dtype=torch.float32, device=x.device) + + +def init_rand(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.randn_like(val, dtype=torch.float32, device=x.device) + + +def init_space_search( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + def _build_initial_param(tensor: torch.Tensor, max_initial: int, n_params: int): + """Generates the initial set of parameters. 
The first iteration generates 10 times more parameters.""" + for _ in range(n_params * 10): # The first iteration generates 10 times more parameters + yield init_rand(tensor) * max_initial # Generates n_params in range [-max_initial, max_initial] + + def _search_param(tensors: List[torch.tensor], n_params): + """Takes the best parameters and generates new parameters around the mean of the best parameters.""" + torch_tensors = torch.stack(tensors) + min_vals, max_vals = torch.aminmax(torch_tensors, dim=0) + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + mean = torch.mean(torch_tensors, dim=0) + for _ in range(n_params): # Generates n_params around the mean of the tensors + yield torch.randn_like(min_vals) * abs_max_val_per_ch + mean + + def _calc(x, qtz_func, deqtz_func, **params): + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params) + x_ = deqtz_func(x=x_, **params) + x_ = x_.transpose(0, 1) + return x_ + + assert "qtz_func" in kwargs, "qtz_func must be provided." + assert "deqtz_func" in kwargs, "deqtz_func must be provided." + assert "params_list" in kwargs, "params list must be provided." + assert "param" in kwargs, "param must be provided." + + qtz_func = kwargs.get('qtz_func') + deqtz_func = kwargs.get('deqtz_func') + params_list = kwargs.get('params_list') + param = kwargs.get('param') + + n_runs = 50 # Number of runs to try to find the best parameters + n_random_params = 50 # Number of random parameters to generate + n_best_to_pick = 5 # Number of best parameters to pick after each run + max_initial = 10000 # Maximum value to initialize the parameters + + # Initializes the parameters + base_params = { p: init_ones(x, **kwargs) for p in params_list if p != param } + params = _build_initial_param(x, max_initial, n_random_params) + + # Performs the search + for _ in range(n_runs): + + best_params = [] + for param_ in params: + try: + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: param_}) + loss_ones = nn.MSELoss()(x, x_) + + if len(best_params) < n_best_to_pick: + best_params.append((param_, loss_ones.item())) + best_params = sorted(best_params, key=lambda x: x[1]) + elif loss_ones < best_params[-1][1]: + best_params[-1] = (param_, loss_ones.item()) + best_params = sorted(best_params, key=lambda x: x[1]) + + except Exception: # The parameters might not be valid for the function's domain + continue + + # Generates new parameters around the mean + params = _search_param([p for p, _ in best_params], n_random_params) + + # Checks if the best parameter is better than the init_ones + p_ones = init_ones(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_ones}) + loss_ones = nn.MSELoss()(x, x_) + + # Checks if the best parameter is better than the init_rand + p_rand = init_rand(x, **kwargs) + x_ = _calc(x, qtz_func, deqtz_func, **base_params, **{param: p_rand}) + loss_rand = nn.MSELoss()(x, x_) + + if loss_rand < best_params[0][1] and loss_rand < loss_ones: + return p_rand + elif loss_ones < best_params[0][1] and loss_ones < loss_rand: + return p_ones + else: + return best_params[0][0] + + +def init_linear_scale( # Symmetric scale. From the study folder + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + assert "bits" in kwargs, "bits must be provided." + assert "params" in kwargs, "params must be provided." + assert "qtz_func" in kwargs, "qtz_func must be provided." 
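    # Note: init_params (defined earlier in this file) initializes _0 first and
    # only then calls this function, so the scale _s is calibrated against the
    # values the quantization function actually produces with that _0; the
    # provisional _s used for that calibration pass is simply init_ones.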
+ + bits = kwargs.get('bits') + params = kwargs.get('params') + qtz_func = kwargs.get('qtz_func') + + x_ = x.transpose(0, 1) + x_ = qtz_func(x=x_, **params, _s=init_ones(x, **kwargs)) + x_ = x_.transpose(0, 1) + + quant_min, quant_max = get_min_max_from_bits_signed(bits) + min_vals, max_vals = torch.aminmax(x_, dim=1) + min_vals = torch.min(min_vals, torch.zeros_like(min_vals)) + max_vals = torch.max(max_vals, torch.zeros_like(max_vals)) + + eps = torch.finfo(torch.float32).eps + + abs_max_val_per_ch = torch.max(-min_vals, max_vals) + scale = abs_max_val_per_ch / (float(quant_max - quant_min) / 2) + + scale = torch.clamp(scale, min=eps).to(dtype=torch.float32, device=min_vals.device) + + return scale + + # Introduces some noise in scale + # If I don't introduce noise, the accuracy is going to be 0.0 and not learn anything + scale = scale + 0.01 * torch.randn_like(scale) + return scale + + +def init_non_linear_regression_fit( + x: torch.Tensor, + **kwargs: Dict[str, Any], + ) -> torch.Tensor: + + assert "params_list" in kwargs, "params list must be provided." + assert "np_fit_func" in kwargs, "np_fit_func must be provided." + assert "p0" in kwargs, "p0 must be provided." + np_fit_func = kwargs.get('np_fit_func') + params_list = kwargs.get('params_list') + p0 = kwargs.get('p0') + + def _fit(xdata: np.ndarray, ydata: np.ndarray, func: Callable, p0: List[float]): + popt, _ = curve_fit( + func, + xdata, + ydata, + maxfev=1000, + p0=p0, + method='lm' + ) + return popt + + # 1. Needs to convert the torch tensor to numpy tensor + xdata = x.cpu().numpy() + + # 2. Sorts the data so that it makes it easier to fit to it + sorted_xdata = np.sort(xdata, axis=-1) + + p0 = {k: v.cpu().numpy() for k, v in p0.items()} + params_list = sorted(params_list) # We need to make sure that it matches the numpy fit func arg order + + # 3. Finds the best parameters for each channel + try: + params = [] + for i in range(sorted_xdata.shape[0]): + xdata_ = sorted_xdata[i] + p0_ = [p0[p][i] for p in params_list] + ch_params = _fit(xdata_, xdata_, np_fit_func, p0_) + params.append(ch_params) + + # 4. Builds the parameters + result = {} + for i, p in enumerate(params_list): + result[p] = torch.tensor([p_[i] for p_ in params], dtype=torch.float32).to(x.device) + + return result + + except ValueError as e: + print(f"Could not fit the function with error: {e}") + print(f"Using fallback result...") + return { + k: torch.tensor(v, dtype=torch.float32).to(x.device) for k, v in p0.items() + } + + +def init_zeros(x: torch.Tensor, **kwargs: Dict[str, Any]) -> torch.Tensor: + val = torch.amin(x, dim=1) + return torch.zeros_like(val, dtype=torch.float32, device=x.device) + + +def init_inner_scale(tensor: torch.Tensor, _min: float = torch.inf, _max: float = torch.inf) -> torch.Tensor: + # Calculate the original minimum and maximum values + min_vals, max_vals = torch.aminmax(tensor, dim=-1) + x_min = torch.min(min_vals, torch.zeros_like(min_vals)) + x_max = torch.max(max_vals, torch.zeros_like(max_vals)) + + if _max is torch.inf: # We do not need to scale the tensor. 
Just need to move it + return torch.ones_like(x_min) + + # Calculate the scale factor + scale = (_max - _min) / (x_max - x_min) + return scale + + + +############## Quant ############### + +@torch.enable_grad() +def learn_parameters( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + qtz_func: nn.Module, + deqtz_func: nn.Module, + bits: int, + target_dtype: torch.dtype, + epochs: int = 1000, + early_stop: bool = True, + do_report: bool = False +) -> Tuple[Dict[str, nn.Parameter], torch.Tensor]: + + # Requires gradients in the parameters + for p in params.values(): + p.requires_grad = True + p.grad = None + + param_keys = list(params.keys()) + param_values = list(params.values()) + + # Defines optimizer and loss function + optimizer = torch.optim.Adam(param_values, lr=0.001) + scheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.001, total_iters=epochs // 10) + loss_fn = nn.MSELoss() + + # Contains the best loss and the best parameters + best_loss = float("inf") + best_params = None + + # Used to stop the search early + min_delta = 1e-7 + acc_loss = [] + percent_epochs_before_stop = 0.1 + + for i in range(epochs): + optimizer.zero_grad() + + quant = quantize(x, params, qtz_func, bits, target_dtype) + dequant = dequantize(quant, params, deqtz_func, bits, x.dtype) + loss = loss_fn(x, dequant) + + if loss.isnan() or loss.isinf(): + raise Exception("Loss is NaN or Inf. Stopping the search.") + + loss.backward() + optimizer.step() + scheduler.step() + + acc_loss.append(loss.item()) + + # Reports loss every 10 steps + if i % 10 == 0 and do_report: + print(f"Epoch {i}: Loss {loss.item()}") + + # Optimizes the parameter search by storing the best loss and the parameters + if loss.item() < best_loss: + best_loss = loss.item() + best_params = copy.deepcopy({ + k: v for k, v in params.items() if k in param_keys + }) + + # We also stop the search if the loss has not considerably during the last 10% epochs + if early_stop: + epochs_before_stop = int(epochs * percent_epochs_before_stop) + if i > epochs_before_stop and abs(acc_loss[i - epochs_before_stop] - acc_loss[i]) < min_delta: + break + + # No longer requires gradients in the parameters + for p in best_params.values(): + p.requires_grad = False + p.grad = None + + if do_report: + return best_params, acc_loss + else: + return best_params + + +def quantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + target_dtype: torch.dtype = torch.int8 +) -> torch.Tensor: + quant_min, quant_max = get_min_max_from_bits_signed(bits) + x = x.transpose(0, 1) # Aligns shapes + x = func(x=x, **params) + x = x.transpose(0, 1) + x = torch.clamp(round_func_BPDA(x), quant_min, quant_max).to(target_dtype) + return x + + +def dequantize( + x: torch.Tensor, + params: Dict[str, nn.Parameter], + func: nn.Module, + bits: int, + out_dtype: torch.dtype +) -> torch.Tensor: + x = x.to(dtype=out_dtype) + x = x.transpose(0, 1) + x = func(x=x, **params) + x = x.transpose(0, 1) + return x + + +def round_func_BPDA(input): + # This is equivalent to replacing round function (non-differentiable) with + # an identity function (differentiable) only when backward. 
+ forward_value = torch.round(input) + out = input.clone() + out.data = forward_value.data + return out + + +def get_min_max_from_bits_signed(bit_width: int) -> Tuple[int, int]: + return -2 ** (bit_width - 1), 2 ** (bit_width - 1) - 1 + + + +############## Numpy ############### + +def np_domain_guard( + x: np.ndarray, + min: float = None, + max: float = None, + posinf: float = None, + neginf: float = None, + nan: float = None + ) -> np.ndarray: + """Guard a tensor to a valid domain.""" + x = np.nan_to_num(x, posinf=posinf, neginf=neginf, nan=nan) + if min is not None or max is not None: + x = np.clip(x, min, max) + return x + + +def np_replace_num(x: np.ndarray, num: float, to: float) -> np.ndarray: + """Replace a number in a tensor with another number. + + Args: + x (np.ndarray): The input tensor. + num (float): The number to replace. + to (float): The number to replace with. + + Returns: + np.ndarray: The tensor with the number replaced. + """ + return np.where(x == num, to, x) + + +def np_guarded_power(x: np.ndarray, exp: float) -> np.ndarray: + """Guard the power operation to a valid domain.""" + return np.power(x, exp) if exp >= 1 else np.power(np.maximum(x, 0), exp) + diff --git a/fn_gen/rnd_affine_scale/9/loss.png b/fn_gen/rnd_affine_scale/9/loss.png new file mode 100644 index 0000000000000000000000000000000000000000..8c7a1b3e131b54e55e91b11386d142e4d59cfe15 Binary files /dev/null and b/fn_gen/rnd_affine_scale/9/loss.png differ diff --git a/fn_gen/rnd_affine_scale/9/quantization.png b/fn_gen/rnd_affine_scale/9/quantization.png new file mode 100644 index 0000000000000000000000000000000000000000..b0061db8dbdac7693b0567946cb567ecae9805b5 Binary files /dev/null and b/fn_gen/rnd_affine_scale/9/quantization.png differ
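Each generated fn.py in this diff exposes the same small surface: init_params, quantization/dequantization, learn_parameters, quantize and dequantize. The sketch below shows how one of these modules might be exercised end to end; it is an illustration rather than part of the diff, and the module path, the (channels, features) weight shape, bits=8 and the dtypes are all assumptions. A floating-point target_dtype is used during learning because casting to an integer dtype would detach the quantized values from the autograd graph.

# Minimal usage sketch (assumed module path, tensor shape, bits and dtypes).
import importlib.util

import torch

# Load one of the generated modules; the path is an assumption.
spec = importlib.util.spec_from_file_location("qfn", "fn_gen/rnd_affine_scale/9/fn.py")
qfn = importlib.util.module_from_spec(spec)
spec.loader.exec_module(qfn)

w = torch.randn(64, 512)  # (channels, features), the layout the helpers expect
bits = 8

# Per-channel parameter initialization; bits is required by init_linear_scale.
params = qfn.init_params(w, bits=bits)

# Optionally refine the parameters by minimizing round-trip MSE.
# A float target_dtype keeps the graph differentiable while learning.
params = qfn.learn_parameters(
    w, params,
    qtz_func=qfn.quantization,
    deqtz_func=qfn.dequantization,
    bits=bits,
    target_dtype=torch.float32,
    epochs=200,
)

# Final quantize/dequantize pass and reconstruction error.
q = qfn.quantize(w, params, qfn.quantization, bits, target_dtype=torch.int8)
w_hat = qfn.dequantize(q, params, qfn.dequantization, bits, out_dtype=w.dtype)
print("round-trip MSE:", torch.nn.functional.mse_loss(w, w_hat).item())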