diff --git a/modules/core.py b/modules/core.py
index 27c4bc4d..11130dda 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -10,7 +10,7 @@
 import numpy as np
 import comfy.model_management
 import comfy.utils
-from comfy.sd import load_checkpoint_guess_config
+from comfy.sd import load_checkpoint_guess_config, load_lora_for_models
 from nodes import VAEDecode, EmptyLatentImage, CLIPTextEncode
 from comfy.sample import prepare_mask, broadcast_cond, load_additional_models, cleanup_additional_models
 from modules.samplers_advanced import KSampler, KSamplerWithRefiner
@@ -39,6 +39,16 @@ def load_model(ckpt_filename):
     return StableDiffusionModel(unet=unet, clip=clip, vae=vae, clip_vision=clip_vision)
 
 
+@torch.no_grad()
+def load_lora(model, lora_filename, strength_model=1.0, strength_clip=1.0):
+    # Both strengths zero -> applying the LoRA would be a no-op; skip the file read.
+    if strength_model == 0 and strength_clip == 0:
+        return model
+
+    lora = comfy.utils.load_torch_file(lora_filename, safe_load=True)
+    model.unet, model.clip = load_lora_for_models(model.unet, model.clip, lora, strength_model, strength_clip)
+    return model
+
+
 @torch.no_grad()
 def encode_prompt_condition(clip, prompt):
     return opCLIPTextEncode.encode(clip=clip, text=prompt)[0]
diff --git a/modules/default_pipeline.py b/modules/default_pipeline.py
index 84e18b24..184c1b47 100644
--- a/modules/default_pipeline.py
+++ b/modules/default_pipeline.py
@@ -2,13 +2,16 @@ import modules.core as core
 import os
 import torch
 
-from modules.path import modelfile_path
+from modules.path import modelfile_path, lorafile_path
 
 
 xl_base_filename = os.path.join(modelfile_path, 'sd_xl_base_1.0.safetensors')
 xl_refiner_filename = os.path.join(modelfile_path, 'sd_xl_refiner_1.0.safetensors')
+xl_base_offset_lora_filename = os.path.join(lorafile_path, 'sd_xl_offset_example-lora_1.0.safetensors')
 
 xl_base = core.load_model(xl_base_filename)
+xl_base = core.load_lora(xl_base, xl_base_offset_lora_filename, strength_model=0.618, strength_clip=0.0)
+
 xl_refiner = core.load_model(xl_refiner_filename)
 
 del xl_base.vae