speed up again (#527)
This commit is contained in:
parent
6c56b580d7
commit
ba58dbc8bc
|
|
@ -1 +1 @@
|
|||
version = '2.0.84'
|
||||
version = '2.0.85'
|
||||
|
|
|
|||
|
|
@ -271,6 +271,7 @@ def worker():
|
|||
refiner_model_name=refiner_model_name,
|
||||
base_model_name=base_model_name,
|
||||
loras=loras)
|
||||
pipeline.prepare_text_encoder(async_call=False)
|
||||
|
||||
progressbar(3, 'Processing prompts ...')
|
||||
|
||||
|
|
@ -401,6 +402,8 @@ def worker():
|
|||
print(f'Generating and saving time: {execution_time:.2f} seconds')
|
||||
|
||||
outputs.append(['results', results])
|
||||
|
||||
pipeline.prepare_text_encoder(async_call=True)
|
||||
return
|
||||
|
||||
while True:
|
||||
|
|
|
|||
|
|
@ -196,6 +196,20 @@ refresh_everything(
|
|||
expansion = FooocusExpansion()
|
||||
|
||||
|
||||
@torch.no_grad()
@torch.inference_mode()
def prepare_text_encoder(async_call=True):
    """Load the text-encoder models onto the torch device.

    Verifies model integrity, then loads the base model's CLIP patcher and
    the Fooocus prompt-expansion patcher via comfy's model management so
    that subsequent prompt encoding does not pay a load cost.

    async_call: currently has no effect — the load always runs
        synchronously (see TODO below).
    """
    if async_call:
        # TODO: make sure that this is always called in an async way so
        # that users cannot feel it.
        pass
    assert_model_integrity()
    encoders = [xl_base_patched.clip.patcher, expansion.patcher]
    comfy.model_management.load_models_gpu(encoders)
|
||||
|
||||
|
||||
prepare_text_encoder(async_call=True)
|
||||
|
||||
|
||||
@torch.no_grad()
|
||||
@torch.inference_mode()
|
||||
def process_diffusion(positive_cond, negative_cond, steps, switch, width, height, image_seed, callback, sampler_name, scheduler_name, latent=None, denoise=1.0, tiled=False, cfg_scale=7.0):
|
||||
|
|
|
|||
|
|
@ -47,7 +47,6 @@ class FooocusExpansion:
|
|||
print(f'Fooocus Expansion engine loaded for {load_device}.')
|
||||
|
||||
def __call__(self, prompt, seed):
|
||||
model_management.load_model_gpu(self.patcher)
|
||||
seed = int(seed)
|
||||
set_seed(seed)
|
||||
origin = safe_str(prompt)
|
||||
|
|
|
|||
|
|
@ -402,7 +402,13 @@ def patched_unet_forward(self, x, timesteps=None, context=None, y=None, control=
|
|||
return self.out(h)
|
||||
|
||||
|
||||
def text_encoder_device_patched():
    """Return the main torch device for the text encoder.

    Fooocus's style system uses the text encoder many more times than
    stock comfy does, so pinning it to the main torch device (instead of
    comfy's default placement) makes things much faster.
    """
    device = comfy.model_management.get_torch_device()
    return device
|
||||
|
||||
|
||||
def patch_all():
|
||||
comfy.model_management.text_encoder_device = text_encoder_device_patched
|
||||
comfy.model_patcher.ModelPatcher.calculate_weight = calculate_weight_patched
|
||||
comfy.ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = patched_unet_forward
|
||||
comfy.k_diffusion.sampling.sample_dpmpp_fooocus_2m_sde_inpaint_seamless = sample_dpmpp_fooocus_2m_sde_inpaint_seamless
|
||||
|
|
|
|||
|
|
@ -1,3 +1,7 @@
|
|||
# 2.0.85
|
||||
|
||||
* Speed Up Again
|
||||
|
||||
# 2.0.80
|
||||
|
||||
* Improved the scheduling of ADM guidance and CFG mimicking for better visual quality in high frequency domain and small objects.
|
||||
|
|
|
|||
Loading…
Reference in New Issue