# 1st edit by https://github.com/comfyanonymous/ComfyUI
# 2nd edit by Forge Official


import torch
import numpy as np
from ldm_patched.ldm.modules.diffusionmodules.util import make_beta_schedule
import math

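
# EPS: the epsilon ("noise prediction") parameterization. calculate_input applies
# the Karras-style input scaling c_in = 1 / sqrt(sigma^2 + sigma_data^2) to the
# noised latent before it enters the model, and calculate_denoised recovers the
# denoised estimate from the predicted noise.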
class EPS:
    def calculate_input(self, sigma, noise):
        # Reshape sigma to (batch, 1, 1, ...) so it broadcasts over the latent.
        sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
        return noise / (sigma ** 2 + self.sigma_data ** 2) ** 0.5

    def calculate_denoised(self, sigma, model_output, model_input):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        # x0 = x_t - sigma * eps
        return model_input - model_output * sigma
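

# V_PREDICTION: the v-parameterization from Salimans & Ho, "Progressive
# Distillation for Fast Sampling of Diffusion Models" (2022). Input scaling is
# inherited from EPS; only the denoised reconstruction differs.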
class V_PREDICTION(EPS):
    def calculate_denoised(self, sigma, model_output, model_input):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        # x0 = x_t * sigma_data^2 / (sigma^2 + sigma_data^2)
        #      - v * sigma * sigma_data / sqrt(sigma^2 + sigma_data^2)
        return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
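

# ModelSamplingDiscrete: the classic DDPM-style discrete schedule (1000 steps by
# default), exposed as a table of continuous sigma values so that k-diffusion
# style samplers can treat the model as sigma-conditioned.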
class ModelSamplingDiscrete(torch.nn.Module):
    def __init__(self, model_config=None):
        super().__init__()

        if model_config is not None:
            sampling_settings = model_config.sampling_settings
        else:
            sampling_settings = {}

        # Defaults match the SD1.x training schedule.
        beta_schedule = sampling_settings.get("beta_schedule", "linear")
        linear_start = sampling_settings.get("linear_start", 0.00085)
        linear_end = sampling_settings.get("linear_end", 0.012)

        self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=1000, linear_start=linear_start, linear_end=linear_end, cosine_s=8e-3)
        self.sigma_data = 1.0
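
    # Converts a beta schedule into the per-step noise levels used by the
    # samplers: sigma_t = sqrt((1 - alphas_cumprod_t) / alphas_cumprod_t).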
    def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
                           linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        if given_betas is not None:
            betas = given_betas
        else:
            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
        alphas = 1. - betas
        alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32)
        # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end

        # self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32))
        # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32))
        # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32))

        sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
        self.set_sigmas(sigmas)
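
    # register_buffer keeps the sigma tables on the right device/dtype when the
    # module is moved and includes them in the state dict.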
    def set_sigmas(self, sigmas):
        self.register_buffer('sigmas', sigmas)
        self.register_buffer('log_sigmas', sigmas.log())

    @property
    def sigma_min(self):
        return self.sigmas[0]

    @property
    def sigma_max(self):
        return self.sigmas[-1]
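
    # timestep() and sigma() convert between the two conditioning conventions:
    # timestep() snaps a sigma to the index of the nearest entry in the
    # log-sigma table, while sigma() linearly interpolates the table at a
    # possibly fractional timestep.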
    def timestep(self, sigma):
        log_sigma = sigma.log()
        dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None]
        return dists.abs().argmin(dim=0).view(sigma.shape).to(sigma.device)

    def sigma(self, timestep):
        t = torch.clamp(timestep.float().to(self.log_sigmas.device), min=0, max=(len(self.sigmas) - 1))
        low_idx = t.floor().long()
        high_idx = t.ceil().long()
        w = t.frac()
        log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx]
        return log_sigma.exp().to(timestep.device)
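
    # Maps a fraction of the sampling process (0.0 = start at maximum noise,
    # 1.0 = fully denoised) to a sigma threshold; the large sentinel returned
    # for percent <= 0.0 sits above any real sigma.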
    def percent_to_sigma(self, percent):
        if percent <= 0.0:
            return 999999999.9
        if percent >= 1.0:
            return 0.0
        percent = 1.0 - percent
        return self.sigma(torch.tensor(percent * 999.0)).item()
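

# ModelSamplingContinuousEDM: continuous noise schedule in the style of Karras
# et al., "Elucidating the Design Space of Diffusion-Based Generative Models"
# (EDM), with 1000 sigmas spaced log-uniformly between sigma_min and sigma_max.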
class ModelSamplingContinuousEDM(torch.nn.Module):
    def __init__(self, model_config=None):
        super().__init__()
        self.sigma_data = 1.0

        if model_config is not None:
            sampling_settings = model_config.sampling_settings
        else:
            sampling_settings = {}

        sigma_min = sampling_settings.get("sigma_min", 0.002)
        sigma_max = sampling_settings.get("sigma_max", 120.0)
        self.set_sigma_range(sigma_min, sigma_max)

    def set_sigma_range(self, sigma_min, sigma_max):
        sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), 1000).exp()

        self.register_buffer('sigmas', sigmas)  # for compatibility with some schedulers
        self.register_buffer('log_sigmas', sigmas.log())

    @property
    def sigma_min(self):
        return self.sigmas[0]

    @property
    def sigma_max(self):
        return self.sigmas[-1]
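
    # EDM conditions the network on c_noise = ln(sigma) / 4, so timestep() and
    # sigma() are exact inverses here.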
    def timestep(self, sigma):
        return 0.25 * sigma.log()

    def sigma(self, timestep):
        return (timestep / 0.25).exp()
    def percent_to_sigma(self, percent):
        if percent <= 0.0:
            return 999999999.9
        if percent >= 1.0:
            return 0.0
        percent = 1.0 - percent

        # Interpolate log-uniformly between sigma_min and sigma_max instead of
        # going through the discrete sigma table.
        log_sigma_min = math.log(self.sigma_min)
        return math.exp((math.log(self.sigma_max) - log_sigma_min) * percent + log_sigma_min)
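

# A minimal usage sketch (the printed values assume the SD1.x defaults above):
if __name__ == "__main__":
    ms = ModelSamplingDiscrete()
    # The default linear schedule gives sigma_min ~ 0.0292 and sigma_max ~ 14.61.
    print(ms.sigma_min.item(), ms.sigma_max.item())
    s = ms.percent_to_sigma(0.5)  # sigma reached halfway through sampling
    print(s, ms.timestep(torch.tensor([s])).item())  # nearest discrete timestep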