revise code

lvmin 2023-10-14 06:23:29 -07:00
parent f2537e5751
commit cdf22b8ccc
3 changed files with 15 additions and 13 deletions

View File

@@ -1 +1 @@
-version = '2.1.65'
+version = '2.1.66'

View File

@@ -11,6 +11,7 @@ import modules.sample_hijack as sample_hijack
 from fcbh.model_base import SDXL, SDXLRefiner
 from modules.expansion import FooocusExpansion
 from modules.sample_hijack import clip_separate
+from fcbh.k_diffusion.sampling import BrownianTreeNoiseSampler
 
 xl_base: core.StableDiffusionModel = None
@@ -332,20 +333,24 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
     print(f'[Sampler] refiner_swap_method = {refiner_swap_method}')
 
+    if latent is None:
+        empty_latent = core.generate_empty_latent(width=width, height=height, batch_size=1)
+    else:
+        empty_latent = latent
+
     minmax_sigmas = calculate_sigmas(sampler=sampler_name, scheduler=scheduler_name, model=final_unet.model, steps=steps, denoise=denoise)
     sigma_min, sigma_max = minmax_sigmas[minmax_sigmas > 0].min(), minmax_sigmas.max()
     sigma_min = float(sigma_min.cpu().numpy())
     sigma_max = float(sigma_max.cpu().numpy())
     print(f'[Sampler] sigma_min = {sigma_min}, sigma_max = {sigma_max}')
 
+    modules.patch.globalBrownianTreeNoiseSampler = BrownianTreeNoiseSampler(
+        empty_latent['samples'].to(fcbh.model_management.get_torch_device()),
+        sigma_min, sigma_max, seed=image_seed, cpu=False)
+
     modules.patch.sigma_min = sigma_min
     modules.patch.sigma_max = sigma_max
 
-    if latent is None:
-        empty_latent = core.generate_empty_latent(width=width, height=height, batch_size=1)
-    else:
-        empty_latent = latent
-
     decoded_latent = None
 
     if refiner_swap_method == 'joint':
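
For reference, a minimal sketch of the noise-sampler call pattern that the hunk above sets up. The latent shape, sigma values, and seed below are illustrative placeholders; only the constructor and call signature are taken from the diff:

import torch
from fcbh.k_diffusion.sampling import BrownianTreeNoiseSampler

# Placeholder inputs; the real pipeline uses the generation latent and the
# sigma range returned by calculate_sigmas() for the chosen sampler/scheduler.
latent = torch.zeros([1, 4, 128, 128])
sigma_min, sigma_max = 0.0292, 14.6146

# One sampler per image, keyed to the image seed, so the Brownian tree noise
# is reproducible for that seed.
noise_sampler = BrownianTreeNoiseSampler(latent, sigma_min, sigma_max, seed=12345, cpu=False)

# Each sampling step queries it with the current and next sigma and gets back
# a noise tensor with the same shape as the latent.
noise = noise_sampler(torch.tensor(3.0), torch.tensor(1.5))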

View File

@@ -274,12 +274,12 @@ def encode_token_weights_patched_with_a1111_method(self, token_weight_pairs):
     return torch.cat(output, dim=-2).cpu(), first_pooled.cpu()
 
 sigma_min = 0.029167539
 sigma_max = 14.614643
+globalBrownianTreeNoiseSampler = None
 
 @torch.no_grad()
-def sample_dpmpp_fooocus_2m_sde_inpaint_seamless(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, **kwargs):
+def sample_dpmpp_fooocus_2m_sde_inpaint_seamless(model, x, sigmas, extra_args=None, callback=None,
+                                                 disable=None, eta=1., s_noise=1., **kwargs):
     global sigma_min, sigma_max
 
     print('[Sampler] Fooocus sampler is activated.')
@@ -293,9 +293,6 @@ def sample_dpmpp_fooocus_2m_sde_inpaint_seamless(model, x, sigmas, extra_args=No
     def get_energy():
         return torch.randn(x.size(), dtype=x.dtype, generator=energy_generator, device="cpu").to(x)
 
-    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None),
-                                             cpu=False) if noise_sampler is None else noise_sampler
-
     extra_args = {} if extra_args is None else extra_args
     s_in = x.new_ones([x.shape[0]])
@@ -334,7 +331,7 @@ def sample_dpmpp_fooocus_2m_sde_inpaint_seamless(model, x, sigmas, extra_args=No
             r = h_last / h
             x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised)
 
-        x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (
+        x = x + globalBrownianTreeNoiseSampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (
                 -2 * eta_h).expm1().neg().sqrt() * s_noise
 
         old_denoised = denoised
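
Taken together, the three files wire up a shared, module-level noise sampler: process_diffusion builds it once per image and stores it on modules.patch, and the patched DPM++ 2M SDE sampler reads that global instead of constructing one from extra_args. A stripped-down sketch of that pattern, with hypothetical helper names (prepare_noise_sampler, inject_ancestral_noise) standing in for the real functions:

from fcbh.k_diffusion.sampling import BrownianTreeNoiseSampler

globalBrownianTreeNoiseSampler = None  # set once per image by the pipeline side

def prepare_noise_sampler(latent, sigma_min, sigma_max, image_seed):
    # Pipeline side (hypothetical helper): build the per-image sampler up front,
    # seeded with the image seed, as process_diffusion does in the second file.
    global globalBrownianTreeNoiseSampler
    globalBrownianTreeNoiseSampler = BrownianTreeNoiseSampler(
        latent, sigma_min, sigma_max, seed=image_seed, cpu=False)

def inject_ancestral_noise(x, sigma, sigma_next, eta_h, s_noise=1.0):
    # Sampler side (hypothetical helper): the ancestral noise term now comes
    # from the shared global rather than a locally constructed noise_sampler.
    noise = globalBrownianTreeNoiseSampler(sigma, sigma_next)
    return x + noise * sigma_next * (-2 * eta_h).expm1().neg().sqrt() * s_noise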