using global sigmas for better results
parent e27383c07f
commit b3d662054f
@@ -1 +1 @@
-version = '2.1.63'
+version = '2.1.64'

@@ -177,7 +177,7 @@ def worker():
             loras += [(inpaint_patch_model_path, 1.0)]
             print(f'[Inpaint] Current inpaint model is {inpaint_patch_model_path}')
             goals.append('inpaint')
-            sampler_name = 'dpmpp_fooocus_2m_sde_inpaint_seamless'
+            sampler_name = 'dpmpp_2m_sde_gpu'  # only support the patched dpmpp_2m_sde_gpu
         if current_tab == 'ip' or \
                 advanced_parameters.mixing_image_prompt_and_inpaint or \
                 advanced_parameters.mixing_image_prompt_and_vary_upscale:
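Note: requesting the stock sampler name here still runs the Fooocus inpaint sampler, because patch_all() rebinds fcbh.k_diffusion.sampling.sample_dpmpp_2m_sde_gpu to it (see the final hunk and the sketch after it); hence the comment that only the patched dpmpp_2m_sde_gpu is supported.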
@@ -1,6 +1,7 @@
 import modules.core as core
 import os
 import torch
+import modules.patch
 import modules.path
 import fcbh.model_management
 import fcbh.latent_formats

@@ -331,6 +332,15 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
     print(f'[Sampler] refiner_swap_method = {refiner_swap_method}')
 
+    minmax_sigmas = calculate_sigmas(sampler=sampler_name, scheduler=scheduler_name, model=final_unet.model, steps=steps, denoise=denoise)
+    sigma_min, sigma_max = minmax_sigmas[minmax_sigmas > 0].min(), minmax_sigmas.max()
+    sigma_min = float(sigma_min.cpu().numpy())
+    sigma_max = float(sigma_max.cpu().numpy())
+    print(f'[Sampler] sigma_min = {sigma_min}, sigma_max = {sigma_max}')
+
+    modules.patch.sigma_min = sigma_min
+    modules.patch.sigma_max = sigma_max
+
     if latent is None:
         empty_latent = core.generate_empty_latent(width=width, height=height, batch_size=1)
     else:
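These added lines are the core of the commit: process_diffusion() computes the full sigma schedule once, takes its positive minimum and maximum, and publishes them as module-level globals on modules.patch so the sampler reads a stable range. A minimal self-contained sketch of that extraction, with an illustrative rho=7 Karras-style table standing in for calculate_sigmas() (the ramp, rho, and step count below are assumptions for demonstration):

import torch

def global_sigma_range(sigmas: torch.Tensor):
    # Drop the trailing 0.0 that schedules append before taking the minimum,
    # mirroring the diff's minmax_sigmas[minmax_sigmas > 0].min().
    positive = sigmas[sigmas > 0]
    return float(positive.min().cpu().numpy()), float(sigmas.max().cpu().numpy())

# Illustrative Karras-style schedule spanning the SDXL-like range seen above.
hi, lo, rho = 14.614643, 0.029167539, 7.0
ramp = torch.linspace(0, 1, 30)
sigmas = (hi ** (1 / rho) + ramp * (lo ** (1 / rho) - hi ** (1 / rho))) ** rho
sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])  # schedules end with 0.0

print(global_sigma_range(sigmas))  # ~ (0.029167539, 14.614643)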
@@ -274,12 +274,15 @@ def encode_token_weights_patched_with_a1111_method(self, token_weight_pairs):
     return torch.cat(output, dim=-2).cpu(), first_pooled.cpu()
 
 
+sigma_min = 0.029167539
+sigma_max = 14.614643
+
+
 @torch.no_grad()
 def sample_dpmpp_fooocus_2m_sde_inpaint_seamless(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, **kwargs):
-    print('[Sampler] Inpaint sampler is activated.')
+    global sigma_min, sigma_max
 
-    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
-    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
+    print('[Sampler] Fooocus sampler is activated.')
 
     seed = extra_args.get("seed", None)
     assert isinstance(seed, int)
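The module now seeds sigma_min and sigma_max with SDXL's default range, and the sampler declares them global so it reads whatever the pipeline published instead of re-deriving the range from its own sigmas argument, which no longer spans the full schedule when denoise < 1. A stripped-down sketch of that handoff (sampler_stub is a stand-in, not the real sampler):

# Module-level defaults (the SDXL range from the diff); process_diffusion()
# overwrites them before sampling via modules.patch.sigma_min = ...
sigma_min = 0.029167539
sigma_max = 14.614643

def sampler_stub(sigmas):
    # Reads the pipeline-published range rather than recomputing
    # sigmas[sigmas > 0].min() / sigmas.max() from a truncated schedule.
    global sigma_min, sigma_max
    return sigma_min, sigma_max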
@@ -290,8 +293,9 @@ def sample_dpmpp_fooocus_2m_sde_inpaint_seamless(model, x, sigmas, extra_args=No
     def get_energy():
         return torch.randn(x.size(), dtype=x.dtype, generator=energy_generator, device="cpu").to(x)
 
-    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
-    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
+    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None),
+                                             cpu=False) if noise_sampler is None else noise_sampler
+
 
     extra_args = {} if extra_args is None else extra_args
     s_in = x.new_ones([x.shape[0]])
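Both construction sites of the Brownian tree noise sampler now use the global range and keep the tree on GPU (cpu=False). Because the tree is keyed by its sigma endpoints and seed, pinning the endpoints keeps noise trajectories consistent across runs whose per-call schedules differ. An illustrative use of the vendored class (shapes and values are arbitrary; cpu=True here only so the sketch runs without CUDA):

import torch
from fcbh.k_diffusion.sampling import BrownianTreeNoiseSampler

x = torch.randn(1, 4, 128, 128)  # latent-shaped tensor the noise must match
ns = BrownianTreeNoiseSampler(x, sigma_min=0.029167539, sigma_max=14.614643,
                              seed=12345, cpu=True)
noise = ns(14.614643, 10.0)  # noise increment between two consecutive sigmas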
@@ -502,7 +506,7 @@ def patch_all():
     fcbh.model_patcher.ModelPatcher.calculate_weight = calculate_weight_patched
     fcbh.cldm.cldm.ControlNet.forward = patched_cldm_forward
     fcbh.ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = patched_unet_forward
-    fcbh.k_diffusion.sampling.sample_dpmpp_fooocus_2m_sde_inpaint_seamless = sample_dpmpp_fooocus_2m_sde_inpaint_seamless
+    fcbh.k_diffusion.sampling.sample_dpmpp_2m_sde_gpu = sample_dpmpp_fooocus_2m_sde_inpaint_seamless
     fcbh.k_diffusion.external.DiscreteEpsDDPMDenoiser.forward = patched_discrete_eps_ddpm_denoiser_forward
     fcbh.model_base.SDXL.encode_adm = sdxl_encode_adm_patched
     fcbh.sd1_clip.ClipTokenWeightEncoder.encode_token_weights = encode_token_weights_patched_with_a1111_method
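patch_all() now overwrites the stock sample_dpmpp_2m_sde_gpu entry point instead of registering a separate name, so any caller that resolves 'dpmpp_2m_sde_gpu' by string transparently gets the Fooocus function. A generic sketch of this monkey-patching pattern (the sampling module and both functions below are stand-ins):

import types

sampling = types.ModuleType('sampling')  # stand-in for fcbh.k_diffusion.sampling
sampling.sample_dpmpp_2m_sde_gpu = lambda *a, **kw: 'stock sampler'

def fooocus_sampler(*args, **kwargs):
    return 'fooocus sampler'

# The same move as patch_all(): rebind the existing attribute in place.
sampling.sample_dpmpp_2m_sde_gpu = fooocus_sampler

# Name-based lookup, as a KSampler string resolver would do, finds the patch:
print(getattr(sampling, 'sample_dpmpp_2m_sde_gpu')())  # -> fooocus sampler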