diff --git a/fooocus_version.py b/fooocus_version.py
index 7f2135fb..7ecf5579 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '1.0.20'
+version = '1.0.21'
diff --git a/modules/async_worker.py b/modules/async_worker.py
index ded27cb6..342ba8f6 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -27,6 +27,7 @@ def worker():
         pipeline.refresh_base_model(base_model_name)
         pipeline.refresh_refiner_model(refiner_model_name)
         pipeline.refresh_loras(loras)
+        pipeline.clean_prompt_cond_caches()
 
         p_txt, n_txt = apply_style(style_selction, prompt, negative_prompt)
 
diff --git a/modules/default_pipeline.py b/modules/default_pipeline.py
index 9fb98535..7db75ca2 100644
--- a/modules/default_pipeline.py
+++ b/modules/default_pipeline.py
@@ -102,18 +102,41 @@ refresh_base_model(modules.path.default_base_model_name)
 refresh_refiner_model(modules.path.default_refiner_model_name)
 refresh_loras([(modules.path.default_lora_name, 0.5), ('None', 0.5), ('None', 0.5), ('None', 0.5), ('None', 0.5)])
 
+positive_conditions_cache = None
+negative_conditions_cache = None
+positive_conditions_refiner_cache = None
+negative_conditions_refiner_cache = None
+
+
+def clean_prompt_cond_caches():
+    global positive_conditions_cache, negative_conditions_cache, \
+        positive_conditions_refiner_cache, negative_conditions_refiner_cache
+    positive_conditions_cache = None
+    negative_conditions_cache = None
+    positive_conditions_refiner_cache = None
+    negative_conditions_refiner_cache = None
+    return
+
 
 @torch.no_grad()
 def process(positive_prompt, negative_prompt, steps, switch, width, height, image_seed, callback):
-    positive_conditions = core.encode_prompt_condition(clip=xl_base_patched.clip, prompt=positive_prompt)
-    negative_conditions = core.encode_prompt_condition(clip=xl_base_patched.clip, prompt=negative_prompt)
+    global positive_conditions_cache, negative_conditions_cache, \
+        positive_conditions_refiner_cache, negative_conditions_refiner_cache
+
+    positive_conditions = core.encode_prompt_condition(clip=xl_base_patched.clip, prompt=positive_prompt) if positive_conditions_cache is None else positive_conditions_cache
+    negative_conditions = core.encode_prompt_condition(clip=xl_base_patched.clip, prompt=negative_prompt) if negative_conditions_cache is None else negative_conditions_cache
+
+    positive_conditions_cache = positive_conditions
+    negative_conditions_cache = negative_conditions
 
     empty_latent = core.generate_empty_latent(width=width, height=height, batch_size=1)
 
     if xl_refiner is not None:
+        positive_conditions_refiner = core.encode_prompt_condition(clip=xl_refiner.clip, prompt=positive_prompt) if positive_conditions_refiner_cache is None else positive_conditions_refiner_cache
+        negative_conditions_refiner = core.encode_prompt_condition(clip=xl_refiner.clip, prompt=negative_prompt) if negative_conditions_refiner_cache is None else negative_conditions_refiner_cache
 
-        positive_conditions_refiner = core.encode_prompt_condition(clip=xl_refiner.clip, prompt=positive_prompt)
-        negative_conditions_refiner = core.encode_prompt_condition(clip=xl_refiner.clip, prompt=negative_prompt)
+        positive_conditions_refiner_cache = positive_conditions_refiner
+        negative_conditions_refiner_cache = negative_conditions_refiner
 
         sampled_latent = core.ksampler_with_refiner(
             model=xl_base_patched.unet,
diff --git a/update_log.md b/update_log.md
index b8da9040..76f3fd02 100644
--- a/update_log.md
+++ b/update_log.md
@@ -1,5 +1,9 @@
-### 1.0.20
+### 1.0.21
+
+* Speed-up text encoder
+
+### 1.0.20
 
 * Re-write UI to use async codes: (1) for faster start, and (2) for better live preview.
 * Removed opencv dependency
 * Plan to support Linux soon
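Why this speeds things up: `process()` is called for every image in a task, and the prompts do not change within a task, so after this patch only the first image pays for the CLIP text-encoder forward passes; `async_worker` calls `clean_prompt_cond_caches()` before each task so a condition computed for an earlier prompt can never leak into the next one. Below is a minimal sketch of the same pattern, with a hypothetical `encode()` standing in for `core.encode_prompt_condition` and a dict keyed by prompt text instead of the four module-level variables the patch actually uses:

```python
# Sketch of a per-task prompt-condition cache. encode() is a hypothetical
# stand-in for the expensive text-encoder call; the real patch uses
# core.encode_prompt_condition and explicit module-level cache variables.

_cond_cache = {}


def encode(prompt):
    # Placeholder for the real CLIP text-encoder forward pass.
    return ('cond', prompt)


def clean_prompt_cond_caches():
    # Reset once at the start of every task, as async_worker now does,
    # so conditions from a previous prompt are never reused.
    _cond_cache.clear()


def get_cond(prompt):
    # Encode on first use; later calls in the same task are dict lookups.
    if prompt not in _cond_cache:
        _cond_cache[prompt] = encode(prompt)
    return _cond_cache[prompt]
```

Either representation works; the design point in the patch is that the cache is invalidated at the task boundary by the worker rather than by comparing prompt text, which leaves `process()` unchanged apart from the cache reads and writes.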