# imagenet-sdxl-quantized / quantization.py
from abc import ABC, abstractmethod
from dataclasses import dataclass
from functools import wraps
from importlib import import_module
from typing import Any, Callable, TypeVar, TypedDict, get_args, get_type_hints

T = TypeVar("T")  # Array-like value: numpy ndarray, jax Array, or torch Tensor.
F = TypeVar("F", bound=Callable)  # Type for decorated functions.


class Backend(ABC):
    """Abstract backend interface for quantization operations."""

    @abstractmethod
    def clip(self, x: T, min_val: float, max_val: float) -> T: ...
    @abstractmethod
    def abs(self, x: T) -> T: ...
    @abstractmethod
    def sign(self, x: T) -> T: ...
    @abstractmethod
    def log1p(self, x: T) -> T: ...
    @abstractmethod
    def tanh(self, x: T) -> T: ...
    @abstractmethod
    def atanh(self, x: T) -> T: ...
    @abstractmethod
    def sigmoid(self, x: T) -> T: ...
    @abstractmethod
    def logit(self, x: T) -> T: ...
    @abstractmethod
    def to_uint8(self, x: T) -> T: ...
    @abstractmethod
    def to_float32(self, x: T) -> T: ...
    @abstractmethod
    def normal_cdf(self, x: T) -> T: ...
    @abstractmethod
    def normal_ppf(self, x: T) -> T: ...


class NumpyBackend(Backend):
    def __init__(self, numpy: str = "numpy", scipy: str = "scipy"):
        self.np = import_module(numpy)
        self.sp = import_module(scipy)

    def clip(self, x: T, min_val: float, max_val: float) -> T:
        return self.np.clip(x, min_val, max_val)

    def abs(self, x: T) -> T:
        return self.np.abs(x)

    def sign(self, x: T) -> T:
        return self.np.sign(x)

    def log1p(self, x: T) -> T:
        return self.np.log1p(x)

    def tanh(self, x: T) -> T:
        return self.np.tanh(x)

    def atanh(self, x: T) -> T:
        return self.np.arctanh(x)

    def sigmoid(self, x: T) -> T:
        return self.sp.special.expit(x)

    def logit(self, x: T) -> T:
        return self.sp.special.logit(x)

    def to_uint8(self, x: T) -> T:
        return self.np.uint8(x)

    def to_float32(self, x: T) -> T:
        return self.np.float32(x)

    def normal_cdf(self, x: T) -> T:
        return self.sp.stats.norm.cdf(x).astype(x.dtype)  # scipy upcasts this op.

    def normal_ppf(self, x: T) -> T:
        return self.sp.stats.norm.ppf(x).astype(x.dtype)  # scipy upcasts this op.


class JaxBackend(NumpyBackend):
    def __init__(self):
        super().__init__(numpy="jax.numpy", scipy="jax.scipy")


class TorchBackend(Backend):
    def __init__(self):
        self.torch = import_module("torch")
        self.normal = import_module("torch.distributions").Normal(0, 1)

    def clip(self, x: T, min_val: float, max_val: float) -> T:
        return self.torch.clamp(x, min_val, max_val)

    def abs(self, x: T) -> T:
        return self.torch.abs(x)

    def sign(self, x: T) -> T:
        return self.torch.sign(x)

    def log1p(self, x: T) -> T:
        if isinstance(x, (int, float)):
            # torch.log1p doesn't accept non-tensors.
            x = self.torch.full((), x, dtype=self.torch.float32)
        return self.torch.log1p(x)

    def tanh(self, x: T) -> T:
        return self.torch.tanh(x)

    def atanh(self, x: T) -> T:
        return self.torch.atanh(x)

    def sigmoid(self, x: T) -> T:
        return self.torch.sigmoid(x)

    def logit(self, x: T) -> T:
        return self.torch.logit(x)

    def to_uint8(self, x: T) -> T:
        return x.to(self.torch.uint8)

    def to_float32(self, x: T) -> T:
        return x.to(self.torch.float32)

    def normal_cdf(self, x: T) -> T:
        return self.normal.cdf(x)

    def normal_ppf(self, x: T) -> T:
        return self.normal.icdf(x)


# Maps array class names to lazily constructed backend singletons. The value
# type annotations double as the lookup table used by make_backend below.
class ClassNameToBackendSingletons(TypedDict):
    ArrayImpl: JaxBackend | None
    DynamicJaxprTracer: JaxBackend | None
    ndarray: NumpyBackend | None
    Tensor: TorchBackend | None


_backend_singletons: ClassNameToBackendSingletons = {
    "ArrayImpl": None,
    "DynamicJaxprTracer": None,
    "ndarray": None,
    "Tensor": None,
}


def make_backend(classname: str) -> Backend:
    # Recover the backend class from the TypedDict annotations: the hint for
    # each key is `SomeBackend | None`, so the first get_args entry is the class.
    backend_hints = get_type_hints(ClassNameToBackendSingletons)
    backend_class = get_args(backend_hints[classname])[0]
    return backend_class()


def get_backend(x: Any) -> Backend:
    """
    Get the appropriate backend based on the type of x.

    Lazily imports the backend modules.
    """
    classname = x.__class__.__name__
    try:
        backend = _backend_singletons[classname]
    except KeyError as exc:
        raise NotImplementedError(f"backend for {type(x)} not implemented") from exc
    if backend is None:
        backend = make_backend(classname)
        _backend_singletons[classname] = backend
    return backend
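
# A quick sketch of the dispatch (hypothetical usage; assumes numpy is installed,
# and jax arrays / torch tensors resolve to their backends the same way):
#
#     import numpy as np
#     backend = get_backend(np.zeros(3))          # NumpyBackend, built on first use
#     assert backend is get_backend(np.ones(2))   # the singleton is reused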


def with_backend(func: F) -> F:
    """
    Decorator that extracts the backend from the wrapped method's array
    argument and passes it on as the trailing `backend` argument.
    """
    @wraps(func)
    def wrapper(self: Any, x: T) -> Any:
        backend = get_backend(x)
        return func(self, x, backend)
    return wrapper
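
# Usage sketch for the decorator (hypothetical class): the wrapped method is
# written with an explicit backend parameter, but callers pass only the array
# and the backend is injected from its type.
#
#     class Doubler:
#         @with_backend
#         def run(self, x, backend):
#             return backend.abs(x) * 2
#
#     Doubler().run(np.array([-1.0, 2.0]))  # -> array([2., 4.])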


@dataclass(frozen=True, kw_only=True)
class QuantizationType(ABC):
    """Companding quantizer: scale the input, squash it into [-1, 1), store as uint8."""

    scale: float = 1.0

    @abstractmethod
    def nonlinearity(self, x: T, backend: Backend) -> T: ...

    @abstractmethod
    def inv_nonlinearity(self, x: T, backend: Backend) -> T: ...

    @with_backend
    def quantize(self, x: T, backend: Backend) -> T:
        x = x * self.scale
        x = self.nonlinearity(x)  # [-1, 1); subclasses take bare x via @with_backend
        x = x * 128 + 128  # [0, 256)
        # Clip before casting: float32 rounding can push the squashed value to
        # exactly 1.0, and the resulting 256 would wrap to 0 in the uint8 cast.
        x = backend.clip(x, 0.0, 255.0)
        x = backend.to_uint8(x)  # [0, 255]
        return x

    @with_backend
    def dequantize(self, x: T, backend: Backend) -> T:
        x = backend.to_float32(x)  # [0, 255]
        x = x + 0.5  # [0.5, 255.5]: reconstruct each bin at its midpoint
        x = x / 128 - 1  # (-1, 1)
        x = self.inv_nonlinearity(x)
        x = x / self.scale
        return x
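
# Worked round trip for Normal(scale=1.0), as a sanity check on the ranges above:
# quantize(0.0) gives normal_cdf(0.0) * 2 - 1 = 0.0, then 0.0 * 128 + 128 = 128.
# dequantize(128) gives (128.5 / 128) - 1 ~= 0.0039 in squashed space, and
# normal_ppf((1 + 0.0039) / 2) ~= 0.0049: an error of about half a bin width
# at the origin.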


def descendants(cls: type) -> set[type]:
    """Gets all subclasses of the given class, recursively."""
    return set(cls.__subclasses__()).union(*map(descendants, cls.__subclasses__()))


class Normal(QuantizationType):
    @with_backend
    def nonlinearity(self, x: T, backend: Backend) -> T:
        return backend.normal_cdf(x) * 2 - 1

    @with_backend
    def inv_nonlinearity(self, x: T, backend: Backend) -> T:
        return backend.normal_ppf((x + 1) / 2)


@dataclass(frozen=True, kw_only=True)
class Linear(QuantizationType):
    threshold: float = 3.5
    eps: float = 1e-6

    @with_backend
    def nonlinearity(self, x: T, backend: Backend) -> T:
        x = backend.clip(x, -self.threshold, self.threshold - self.eps)
        return x / self.threshold

    def inv_nonlinearity(self, x: T) -> T:
        # No backend needed here, so @with_backend is omitted; dequantize and
        # MuLaw both call this with a single argument.
        return x * self.threshold
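
# MuLaw composes the linear clip with mu-law companding, the curve used in
# telephony codecs: F(x) = sign(x) * log1p(mu * |x|) / log1p(mu). It spends
# more of the uint8 range on small magnitudes than Linear does.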


@dataclass(frozen=True, kw_only=True)
class MuLaw(Linear):
    mu: float = 255.0

    @with_backend
    def nonlinearity(self, x: T, backend: Backend) -> T:
        x = super().nonlinearity(x)  # clip and rescale into [-1, 1)
        x_abs = backend.abs(x)
        sign_x = backend.sign(x)
        log_mu = backend.log1p(self.mu)
        log_term = backend.log1p(self.mu * x_abs) / log_mu
        return sign_x * log_term

    @with_backend
    def inv_nonlinearity(self, x: T, backend: Backend) -> T:
        x_abs = backend.abs(x)
        sign_x = backend.sign(x)
        numerator = (1 + self.mu) ** x_abs - 1
        x = sign_x * numerator / self.mu
        return super().inv_nonlinearity(x)


class Tanh(QuantizationType):
    @with_backend
    def nonlinearity(self, x: T, backend: Backend) -> T:
        return backend.tanh(x)

    @with_backend
    def inv_nonlinearity(self, x: T, backend: Backend) -> T:
        return backend.atanh(x)


class Sigmoid(QuantizationType):
    @with_backend
    def nonlinearity(self, x: T, backend: Backend) -> T:
        return backend.sigmoid(x) * 2 - 1

    @with_backend
    def inv_nonlinearity(self, x: T, backend: Backend) -> T:
        return backend.logit((x + 1) / 2)


optimized_for_sdxl = Normal(scale=0.7)
quantization_types = tuple(sorted(descendants(QuantizationType), key=lambda q: q.__name__))

__all__ = [
    "QuantizationType",
    "Normal",
    "Linear",
    "MuLaw",
    "Tanh",
    "Sigmoid",
    "optimized_for_sdxl",
    "quantization_types",
]
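

# Minimal smoke-test sketch (assumes numpy and scipy are available). Round-trips
# a grid of values through every quantizer and reports the worst-case
# reconstruction error:
if __name__ == "__main__":
    import numpy as np

    values = np.linspace(-3.0, 3.0, 1001, dtype=np.float32)
    for qt_class in quantization_types:
        qt = qt_class()
        recovered = qt.dequantize(qt.quantize(values))
        max_err = float(np.max(np.abs(recovered - values)))
        print(f"{qt_class.__name__}: max round-trip error {max_err:.4f}")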