From cdf22b8ccc3580d001c0622afc2a362d35667f2d Mon Sep 17 00:00:00 2001
From: lvmin
Date: Sat, 14 Oct 2023 06:23:29 -0700
Subject: [PATCH] revise code

---
 fooocus_version.py          |  2 +-
 modules/default_pipeline.py | 15 ++++++++++-----
 modules/patch.py            | 11 ++++-------
 3 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/fooocus_version.py b/fooocus_version.py
index 006c3000..e87fe1bc 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.1.65'
+version = '2.1.66'
diff --git a/modules/default_pipeline.py b/modules/default_pipeline.py
index 37d28211..942cdfbf 100644
--- a/modules/default_pipeline.py
+++ b/modules/default_pipeline.py
@@ -11,6 +11,7 @@ import modules.sample_hijack as sample_hijack
 from fcbh.model_base import SDXL, SDXLRefiner
 from modules.expansion import FooocusExpansion
 from modules.sample_hijack import clip_separate
+from fcbh.k_diffusion.sampling import BrownianTreeNoiseSampler
 
 
 xl_base: core.StableDiffusionModel = None
@@ -332,20 +333,24 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
 
     print(f'[Sampler] refiner_swap_method = {refiner_swap_method}')
 
+    if latent is None:
+        empty_latent = core.generate_empty_latent(width=width, height=height, batch_size=1)
+    else:
+        empty_latent = latent
+
     minmax_sigmas = calculate_sigmas(sampler=sampler_name, scheduler=scheduler_name, model=final_unet.model, steps=steps, denoise=denoise)
     sigma_min, sigma_max = minmax_sigmas[minmax_sigmas > 0].min(), minmax_sigmas.max()
     sigma_min = float(sigma_min.cpu().numpy())
     sigma_max = float(sigma_max.cpu().numpy())
     print(f'[Sampler] sigma_min = {sigma_min}, sigma_max = {sigma_max}')
 
+    modules.patch.globalBrownianTreeNoiseSampler = BrownianTreeNoiseSampler(
+        empty_latent['samples'].to(fcbh.model_management.get_torch_device()),
+        sigma_min, sigma_max, seed=image_seed, cpu=False)
+
     modules.patch.sigma_min = sigma_min
     modules.patch.sigma_max = sigma_max
 
-    if latent is None:
-        empty_latent = core.generate_empty_latent(width=width, height=height, batch_size=1)
-    else:
-        empty_latent = latent
-
     decoded_latent = None
 
     if refiner_swap_method == 'joint':
diff --git a/modules/patch.py b/modules/patch.py
index cf9584be..ab18f8ec 100644
--- a/modules/patch.py
+++ b/modules/patch.py
@@ -274,12 +274,12 @@ def encode_token_weights_patched_with_a1111_method(self, token_weight_pairs):
     return torch.cat(output, dim=-2).cpu(), first_pooled.cpu()
 
 
-sigma_min = 0.029167539
-sigma_max = 14.614643
+globalBrownianTreeNoiseSampler = None
 
 
 @torch.no_grad()
-def sample_dpmpp_fooocus_2m_sde_inpaint_seamless(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, **kwargs):
+def sample_dpmpp_fooocus_2m_sde_inpaint_seamless(model, x, sigmas, extra_args=None, callback=None,
+                                                 disable=None, eta=1., s_noise=1., **kwargs):
     global sigma_min, sigma_max
 
     print('[Sampler] Fooocus sampler is activated.')
@@ -293,9 +293,6 @@ def sample_dpmpp_fooocus_2m_sde_inpaint_seamless(model, x, sigmas, extra_args=No
     def get_energy():
         return torch.randn(x.size(), dtype=x.dtype, generator=energy_generator, device="cpu").to(x)
 
-    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None),
-                                             cpu=False) if noise_sampler is None else noise_sampler
-
     extra_args = {} if extra_args is None else extra_args
     s_in = x.new_ones([x.shape[0]])
 
@@ -334,7 +331,7 @@ def sample_dpmpp_fooocus_2m_sde_inpaint_seamless(model, x, sigmas, extra_args=No
                 r = h_last / h
                 x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised -
                                                                       old_denoised)
-            x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (
+            x = x + globalBrownianTreeNoiseSampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (
                     -2 * eta_h).expm1().neg().sqrt() * s_noise
 
         old_denoised = denoised
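-- 

In short: the Brownian-tree noise sampler used by
sample_dpmpp_fooocus_2m_sde_inpaint_seamless is no longer rebuilt inside the
sampling function from extra_args; process_diffusion now constructs it once,
seeded with the user-facing image_seed and shaped by the full latent, and
shares it through the module-level modules.patch.globalBrownianTreeNoiseSampler.
A plausible motivation is that refiner-swap methods can enter the sampling
function more than once per generation, and a single pre-seeded sampler keeps
the injected SDE noise deterministic per seed across those passes.

Below is a minimal sketch of the same pattern, assuming upstream k-diffusion's
BrownianTreeNoiseSampler signature (the fcbh fork additionally accepts a cpu=
flag, as the diff shows; it is omitted here). The helper names
prepare_noise_sampler, sde_noise_term, and global_noise_sampler are
illustrative, not Fooocus APIs:

    import math

    import torch
    from k_diffusion.sampling import BrownianTreeNoiseSampler

    # Plays the role of modules.patch.globalBrownianTreeNoiseSampler.
    global_noise_sampler = None


    def prepare_noise_sampler(latent: torch.Tensor, sigma_min: float,
                              sigma_max: float, seed: int) -> None:
        """Build the shared, seed-deterministic sampler once, before sampling."""
        global global_noise_sampler
        global_noise_sampler = BrownianTreeNoiseSampler(
            latent, sigma_min, sigma_max, seed=seed)


    def sde_noise_term(sigma: float, sigma_next: float, eta_h: float,
                       s_noise: float = 1.0) -> torch.Tensor:
        """One SDE noise contribution inside the sampling loop, mirroring the
        noise * sigma_next * sqrt(1 - exp(-2 * eta_h)) * s_noise step of the
        patched sampler."""
        noise = global_noise_sampler(sigma, sigma_next)
        return noise * sigma_next * math.sqrt(-math.expm1(-2.0 * eta_h)) * s_noise

Because the sampler is created from the full latent tensor and a fixed seed,
every query of global_noise_sampler(sigma, sigma_next) is a deterministic
function of the seed and the sigma interval, regardless of how many times or
from where the sampling loop is entered.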