diff --git a/backend/headless/fcbh/sample.py b/backend/headless/fcbh/sample.py
index c995e9a3..b6e0fddc 100644
--- a/backend/headless/fcbh/sample.py
+++ b/backend/headless/fcbh/sample.py
@@ -98,6 +98,7 @@ def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative
     samples = samples.cpu()
 
     cleanup_additional_models(models)
+    cleanup_additional_models(set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control")))
     return samples
 
 def sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=None, callback=None, disable_pbar=False, seed=None):
@@ -109,5 +110,6 @@ def sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent
 
     samples = fcbh.samplers.sample(real_model, noise, positive_copy, negative_copy, cfg, model.load_device, sampler, sigmas, model_options=model.model_options, latent_image=latent_image, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
     samples = samples.cpu()
     cleanup_additional_models(models)
+    cleanup_additional_models(set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control")))
     return samples
diff --git a/backend/headless/fcbh_extras/nodes_freelunch.py b/backend/headless/fcbh_extras/nodes_freelunch.py
index 07a88bd9..7512b841 100644
--- a/backend/headless/fcbh_extras/nodes_freelunch.py
+++ b/backend/headless/fcbh_extras/nodes_freelunch.py
@@ -61,7 +61,53 @@ class FreeU:
         m.set_model_output_block_patch(output_block_patch)
         return (m, )
 
+class FreeU_V2:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "model": ("MODEL",),
+                              "b1": ("FLOAT", {"default": 1.3, "min": 0.0, "max": 10.0, "step": 0.01}),
+                              "b2": ("FLOAT", {"default": 1.4, "min": 0.0, "max": 10.0, "step": 0.01}),
+                              "s1": ("FLOAT", {"default": 0.9, "min": 0.0, "max": 10.0, "step": 0.01}),
+                              "s2": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 10.0, "step": 0.01}),
+                              }}
+    RETURN_TYPES = ("MODEL",)
+    FUNCTION = "patch"
+
+    CATEGORY = "_for_testing"
+
+    def patch(self, model, b1, b2, s1, s2):
+        model_channels = model.model.model_config.unet_config["model_channels"]
+        scale_dict = {model_channels * 4: (b1, s1), model_channels * 2: (b2, s2)}
+        on_cpu_devices = {}
+
+        def output_block_patch(h, hsp, transformer_options):
+            scale = scale_dict.get(h.shape[1], None)
+            if scale is not None:
+                hidden_mean = h.mean(1).unsqueeze(1)
+                B = hidden_mean.shape[0]
+                hidden_max, _ = torch.max(hidden_mean.view(B, -1), dim=-1, keepdim=True)
+                hidden_min, _ = torch.min(hidden_mean.view(B, -1), dim=-1, keepdim=True)
+                hidden_mean = (hidden_mean - hidden_min.unsqueeze(2).unsqueeze(3)) / (hidden_max - hidden_min).unsqueeze(2).unsqueeze(3)
+
+                h[:,:h.shape[1] // 2] = h[:,:h.shape[1] // 2] * ((scale[0] - 1 ) * hidden_mean + 1)
+
+                if hsp.device not in on_cpu_devices:
+                    try:
+                        hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
+                    except:
+                        print("Device", hsp.device, "does not support the torch.fft functions used in the FreeU node, switching to CPU.")
+                        on_cpu_devices[hsp.device] = True
+                        hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
+                else:
+                    hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
+
+            return h, hsp
+
+        m = model.clone()
+        m.set_model_output_block_patch(output_block_patch)
+        return (m, )
+
 NODE_CLASS_MAPPINGS = {
     "FreeU": FreeU,
+    "FreeU_V2": FreeU_V2,
 }
diff --git a/fooocus_version.py b/fooocus_version.py
index 18558de9..48765c4b 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.1.699'
+version = '2.1.700'
diff --git a/modules/core.py b/modules/core.py
index d7c2d650..e83be00f 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -256,7 +256,6 @@ def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sa
     finally:
         modules.sample_hijack.current_refiner = None
 
-    modules.sample_hijack.force_unload_all_control(positive, negative)
     return out
 
 
diff --git a/modules/sample_hijack.py b/modules/sample_hijack.py
index f3853eb2..d4359e99 100644
--- a/modules/sample_hijack.py
+++ b/modules/sample_hijack.py
@@ -3,7 +3,7 @@ import fcbh.samplers
 import fcbh.model_management
 
 from fcbh.model_base import SDXLRefiner, SDXL
-from fcbh.sample import get_additional_models, get_models_from_cond
+from fcbh.sample import get_additional_models, get_models_from_cond, cleanup_additional_models
 from fcbh.samplers import resolve_areas_and_cond_masks, wrap_model, calculate_start_end_timesteps, \
     create_cond_with_same_area_if_none, pre_run_control, apply_empty_x_to_equal_area, encode_adm, \
     blank_inpaint_image_like
@@ -49,22 +49,6 @@ def clip_separate(cond, target_model=None, target_clip=None):
     return [[c, p]]
 
 
-@torch.no_grad()
-@torch.inference_mode()
-def force_unload_all_control(positive, negative):
-    control_nets = set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control"))
-
-    cleaned_any_model = False
-    for m in control_nets:
-        if hasattr(m, 'cleanup'):
-            m.cleanup()
-            cleaned_any_model = True
-
-    if cleaned_any_model:
-        fcbh.model_management.soft_empty_cache()
-    return
-
-
 @torch.no_grad()
 @torch.inference_mode()
 def sample_hacked(model, noise, positive, negative, cfg, device, sampler, sigmas, model_options={}, latent_image=None, denoise_mask=None, callback=None, disable_pbar=False, seed=None):
@@ -129,7 +113,7 @@ def sample_hacked(model, noise, positive, negative, cfg, device, sampler, sigmas
         extra_args["cond_concat"] = cond_concat
 
     def refiner_switch():
-        force_unload_all_control(positive, negative)
+        cleanup_additional_models(set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control")))
         extra_args["cond"] = positive_refiner
         extra_args["uncond"] = negative_refiner
 
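
Note on the controlnet cleanup change: the bespoke force_unload_all_control helper is removed in favor of calling fcbh.sample.cleanup_additional_models on the deduplicated set of controlnets gathered from both conds. The body of cleanup_additional_models is not shown in this diff; the sketch below is an assumption about its shape based on the removed helper, not the actual fcbh source:

```python
# Sketch only -- assumes cleanup_additional_models just invokes each model's
# cleanup() hook, mirroring the loop in the removed force_unload_all_control.
def cleanup_additional_models(models):
    # The set() wrapped around the two get_models_from_cond calls at the
    # call sites matters: a controlnet attached to both the positive and
    # negative conds appears twice and must be cleaned up only once.
    for m in models:
        if hasattr(m, 'cleanup'):
            m.cleanup()
```

One behavioral difference worth noting: the removed helper also called fcbh.model_management.soft_empty_cache() after cleanup, which this path no longer does explicitly.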
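For reference, the backbone half of the new FreeU_V2 patch differs from FreeU by modulating the scale spatially: the channel-wise mean map of h is min-max normalized to [0, 1] per sample, and the first half of the channels is multiplied by a factor interpolating between 1 and b. A self-contained restatement of that step (the function name scale_backbone is illustrative, not part of the diff):

```python
import torch

def scale_backbone(h: torch.Tensor, b: float) -> torch.Tensor:
    # Per-sample, min-max normalized mean over channels: shape (B, 1, H, W).
    hidden_mean = h.mean(1, keepdim=True)
    B = hidden_mean.shape[0]
    flat = hidden_mean.view(B, -1)
    hidden_max = flat.max(dim=-1, keepdim=True).values.view(B, 1, 1, 1)
    hidden_min = flat.min(dim=-1, keepdim=True).values.view(B, 1, 1, 1)
    hidden_mean = (hidden_mean - hidden_min) / (hidden_max - hidden_min)

    # Amplify the first half of the channels by a spatially varying factor
    # in [1, b]: strongest where the normalized mean activation is highest.
    h[:, :h.shape[1] // 2] *= (b - 1) * hidden_mean + 1
    return h

# Example: a batch of 2 feature maps with 1280 channels (model_channels * 4
# for SDXL-sized UNets), scaled with the node's default b1 = 1.3.
h = scale_backbone(torch.randn(2, 1280, 16, 16), b=1.3)
```

The skip-connection half (hsp) is unchanged from FreeU apart from the CPU fallback: Fourier_filter needs torch.fft, and devices that lack it are remembered in on_cpu_devices so subsequent calls route through the CPU directly instead of retrying and failing.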