diff --git a/fooocus_version.py b/fooocus_version.py
index 36cc32c8..e765433d 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.1.797'
+version = '2.1.798'
diff --git a/modules/async_worker.py b/modules/async_worker.py
index ef5f6559..4396d94e 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -575,6 +575,23 @@ def worker():
         preparation_time = time.perf_counter() - execution_start_time
         print(f'Preparation time: {preparation_time:.2f} seconds')
 
+        final_sampler_name = sampler_name
+        final_scheduler_name = scheduler_name
+
+        if scheduler_name == 'lcm':
+            final_scheduler_name = 'sgm_uniform'
+            if pipeline.final_unet is not None:
+                pipeline.final_unet = core.opModelSamplingDiscrete.patch(
+                    pipeline.final_unet,
+                    sampling='lcm',
+                    zsnr=False)[0]
+            if pipeline.final_refiner_unet is not None:
+                pipeline.final_refiner_unet = core.opModelSamplingDiscrete.patch(
+                    pipeline.final_refiner_unet,
+                    sampling='lcm',
+                    zsnr=False)[0]
+            print('Using lcm scheduler.')
+
         outputs.append(['preview', (13, 'Moving model to GPU ...', None)])
 
         def callback(step, x0, x, total_steps, y):
@@ -609,8 +626,8 @@ def worker():
                 height=height,
                 image_seed=task['task_seed'],
                 callback=callback,
-                sampler_name=sampler_name,
-                scheduler_name=scheduler_name,
+                sampler_name=final_sampler_name,
+                scheduler_name=final_scheduler_name,
                 latent=initial_latent,
                 denoise=denoising_strength,
                 tiled=tiled,
diff --git a/modules/core.py b/modules/core.py
index 4213a9bf..865f5d3d 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -26,6 +26,7 @@ from modules.patch import patched_sampler_cfg_function
 from fcbh.lora import model_lora_keys_unet, model_lora_keys_clip, load_lora
 from modules.config import path_embeddings
 from modules.lora import load_dangerous_lora
+from fcbh_extras.nodes_model_advanced import ModelSamplingDiscrete
 
 
 opEmptyLatentImage = EmptyLatentImage()
@@ -35,6 +36,7 @@ opVAEDecodeTiled = VAEDecodeTiled()
 opVAEEncodeTiled = VAEEncodeTiled()
 opControlNetApplyAdvanced = ControlNetApplyAdvanced()
 opFreeU = FreeU_V2()
+opModelSamplingDiscrete = ModelSamplingDiscrete()
 
 
 class StableDiffusionModel:
diff --git a/modules/flags.py b/modules/flags.py
index 1d46f0a1..3f22e469 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -14,7 +14,7 @@ KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral"
                   "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
                   "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"]
 
-SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform"]
+SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm"]
 SAMPLER_NAMES = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"]
 
 sampler_list = SAMPLER_NAMES
diff --git a/update_log.md b/update_log.md
index b8b9b900..9764d51c 100644
--- a/update_log.md
+++ b/update_log.md
@@ -1,3 +1,7 @@
+# 2.1.798
+
+* added lcm scheduler - for LCM, you may need to set both the sampler and the scheduler to "lcm". Other than that, see the description in the 2.1.782 logs.
+
 # 2.1.797
 
 * fixed some dependency problems with facexlib and filterpy.
@@ -35,7 +39,7 @@ Now when you load a lora, the following things will happen:
 
 In this way, Fooocus 2.1.782 can benefit from all models and loras from CivitAI with both SDXL and SD1.5 ecosystem, using the unique Fooocus swap algorithm, to achieve extremely high quality results (although the default setting is already very high quality), especially in some anime use cases, if users really want to play with all these things.
 
-Recently the community also developed LCM loras. Users can use it by setting the sampler as 'LCM', sceduler as 'sgm_uniform', the forced overwrite of sampling step as 4 to 8, and CFG guidance as 1.0, in dev tools. Do not forget to change the LCM lora weight to 1.0 (many people forget this and report failure cases). Also, set refiner to None. If LCM's feedback in the artists community is good (not the feedback in the programmer community of Stable Diffusion), Fooocus may add some other shortcuts in the future.
+Recently the community also developed LCM loras. Users can use them by setting the sampler to 'lcm', the scheduler to 'sgm_uniform' (Update in 2.1.798: the scheduler should also be "lcm"), the forced overwrite of sampling steps to 4 to 8, and the CFG guidance to 1.0, all in dev tools. Do not forget to change the LCM lora weight to 1.0 (many people forget this and report failure cases). Also, set the refiner to None. If LCM's feedback in the artists' community is good (not the feedback in the programmer community of Stable Diffusion), Fooocus may add some other shortcuts in the future.
 
 # 2.1.781
 
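
For readers skimming the diff, the core of the change is the new scheduler mapping in `async_worker.py`. The sketch below restates what the added lines do in isolation; `resolve_lcm` is a hypothetical helper name (the real code runs inline inside `worker()`), and `pipeline` and `core` stand for the Fooocus modules of the same names.

```python
# Minimal sketch (not part of the diff) of the 'lcm' scheduler mapping added
# in 2.1.798: the user-facing 'lcm' scheduler falls back to the 'sgm_uniform'
# schedule for sampling, while the UNet(s) are additionally patched for LCM
# via ModelSamplingDiscrete. `resolve_lcm` is a hypothetical helper name.

def resolve_lcm(sampler_name, scheduler_name, pipeline, core):
    final_sampler_name = sampler_name
    final_scheduler_name = scheduler_name
    if scheduler_name == 'lcm':
        # The sigmas still come from the 'sgm_uniform' schedule ...
        final_scheduler_name = 'sgm_uniform'
        # ... while the model itself is re-parameterized for LCM sampling.
        for attr in ('final_unet', 'final_refiner_unet'):
            unet = getattr(pipeline, attr)
            if unet is not None:
                # ModelSamplingDiscrete.patch() returns a tuple; the patched
                # model is its first element.
                patched = core.opModelSamplingDiscrete.patch(
                    unet, sampling='lcm', zsnr=False)[0]
                setattr(pipeline, attr, patched)
        print('Using lcm scheduler.')
    return final_sampler_name, final_scheduler_name
```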
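For convenience, here is the LCM recipe from the 2.1.782 and 2.1.798 log entries collected in one place. The dict is purely illustrative: the key names are made up for readability and do not correspond to actual Fooocus config fields; apply the values manually in dev tools.

```python
# Illustrative summary of the LCM settings recommended in the update log.
# Key names are hypothetical, not real Fooocus configuration fields.
LCM_RECOMMENDED_SETTINGS = {
    'sampler': 'lcm',
    'scheduler': 'lcm',           # since 2.1.798; previously 'sgm_uniform'
    'forced_overwrite_steps': 8,  # the log suggests 4 to 8 steps
    'cfg_guidance': 1.0,
    'lcm_lora_weight': 1.0,       # commonly forgotten, causes failure reports
    'refiner': None,
}
```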