commit 350fdd9021

diff --git a/args_manager.py b/args_manager.py
@@ -1,7 +1,4 @@
 import ldm_patched.modules.args_parser as args_parser
-import os
-
-from tempfile import gettempdir
 
 args_parser.parser.add_argument("--share", action='store_true', help="Set whether to share on Gradio.")
 
diff --git a/fooocus_version.py b/fooocus_version.py
@@ -1 +1 @@
-version = '2.4.1'
+version = '2.4.2'
diff --git a/ldm_patched/contrib/external_model_advanced.py b/ldm_patched/contrib/external_model_advanced.py
@@ -108,7 +108,7 @@ class ModelSamplingContinuousEDM:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "model": ("MODEL",),
-                              "sampling": (["v_prediction", "eps"],),
+                              "sampling": (["v_prediction", "edm_playground_v2.5", "eps"],),
                               "sigma_max": ("FLOAT", {"default": 120.0, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
                               "sigma_min": ("FLOAT", {"default": 0.002, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
                               }}
@@ -121,17 +121,25 @@ class ModelSamplingContinuousEDM:
     def patch(self, model, sampling, sigma_max, sigma_min):
         m = model.clone()
 
+        latent_format = None
+        sigma_data = 1.0
         if sampling == "eps":
             sampling_type = ldm_patched.modules.model_sampling.EPS
         elif sampling == "v_prediction":
             sampling_type = ldm_patched.modules.model_sampling.V_PREDICTION
+        elif sampling == "edm_playground_v2.5":
+            sampling_type = ldm_patched.modules.model_sampling.EDM
+            sigma_data = 0.5
+            latent_format = ldm_patched.modules.latent_formats.SDXL_Playground_2_5()
 
         class ModelSamplingAdvanced(ldm_patched.modules.model_sampling.ModelSamplingContinuousEDM, sampling_type):
             pass
 
         model_sampling = ModelSamplingAdvanced(model.model.model_config)
-        model_sampling.set_sigma_range(sigma_min, sigma_max)
+        model_sampling.set_parameters(sigma_min, sigma_max, sigma_data)
         m.add_object_patch("model_sampling", model_sampling)
+        if latent_format is not None:
+            m.add_object_patch("latent_format", latent_format)
         return (m, )
 
 class RescaleCFG:
diff --git a/ldm_patched/k_diffusion/sampling.py b/ldm_patched/k_diffusion/sampling.py
@@ -832,5 +832,7 @@ def sample_tcd(model, x, sigmas, extra_args=None, callback=None, disable=None, n
         if eta > 0 and sigmas[i + 1] > 0:
             noise = noise_sampler(sigmas[i], sigmas[i + 1])
             x = x / alpha_prod_s[i+1].sqrt() + noise * (sigmas[i+1]**2 + 1 - 1/alpha_prod_s[i+1]).sqrt()
+        else:
+            x *= torch.sqrt(1.0 + sigmas[i + 1] ** 2)
 
     return x
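
Note on the sample_tcd hunk above: this is the "tcd scheduler when gamma is 0" fix from the changelog. When eta is 0, or at the final step where sigmas[i + 1] is 0, the old code left x unscaled; the new else branch applies the deterministic rescale. A minimal sketch of that branch (tensor shapes are illustrative only):

    import torch

    x = torch.randn(1, 4, 8, 8)                  # current latent, hypothetical shape
    sigma_next = torch.tensor(0.0)               # final step: sigmas[i + 1] == 0
    x = x * torch.sqrt(1.0 + sigma_next ** 2)    # sqrt(1 + 0) == 1: x passes through unchanged

For any sigma_next > 0 the same line scales x up to the variance expected at that noise level.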
diff --git a/ldm_patched/modules/latent_formats.py b/ldm_patched/modules/latent_formats.py
@@ -1,3 +1,4 @@
+import torch
 
 class LatentFormat:
     scale_factor = 1.0
@@ -34,6 +35,70 @@ class SDXL(LatentFormat):
         ]
         self.taesd_decoder_name = "taesdxl_decoder"
 
+class SDXL_Playground_2_5(LatentFormat):
+    def __init__(self):
+        self.scale_factor = 0.5
+        self.latents_mean = torch.tensor([-1.6574, 1.886, -1.383, 2.5155]).view(1, 4, 1, 1)
+        self.latents_std = torch.tensor([8.4927, 5.9022, 6.5498, 5.2299]).view(1, 4, 1, 1)
+
+        self.latent_rgb_factors = [
+            #   R        G        B
+            [ 0.3920,  0.4054,  0.4549],
+            [-0.2634, -0.0196,  0.0653],
+            [ 0.0568,  0.1687, -0.0755],
+            [-0.3112, -0.2359, -0.2076]
+        ]
+        self.taesd_decoder_name = "taesdxl_decoder"
+
+    def process_in(self, latent):
+        latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+        latents_std = self.latents_std.to(latent.device, latent.dtype)
+        return (latent - latents_mean) * self.scale_factor / latents_std
+
+    def process_out(self, latent):
+        latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+        latents_std = self.latents_std.to(latent.device, latent.dtype)
+        return latent * latents_std / self.scale_factor + latents_mean
+
+
+class SD_X4(LatentFormat):
+    def __init__(self):
+        self.scale_factor = 0.08333
+        self.latent_rgb_factors = [
+            [-0.2340, -0.3863, -0.3257],
+            [ 0.0994,  0.0885, -0.0908],
+            [-0.2833, -0.2349, -0.3741],
+            [ 0.2523, -0.0055, -0.1651]
+        ]
+
+class SC_Prior(LatentFormat):
+    def __init__(self):
+        self.scale_factor = 1.0
+        self.latent_rgb_factors = [
+            [-0.0326, -0.0204, -0.0127],
+            [-0.1592, -0.0427,  0.0216],
+            [ 0.0873,  0.0638, -0.0020],
+            [-0.0602,  0.0442,  0.1304],
+            [ 0.0800, -0.0313, -0.1796],
+            [-0.0810, -0.0638, -0.1581],
+            [ 0.1791,  0.1180,  0.0967],
+            [ 0.0740,  0.1416,  0.0432],
+            [-0.1745, -0.1888, -0.1373],
+            [ 0.2412,  0.1577,  0.0928],
+            [ 0.1908,  0.0998,  0.0682],
+            [ 0.0209,  0.0365, -0.0092],
+            [ 0.0448, -0.0650, -0.1728],
+            [-0.1658, -0.1045, -0.1308],
+            [ 0.0542,  0.1545,  0.1325],
+            [-0.0352, -0.1672, -0.2541]
+        ]
+
+class SC_B(LatentFormat):
+    def __init__(self):
+        self.scale_factor = 1.0 / 0.43
+        self.latent_rgb_factors = [
+            [ 0.1121,  0.2006,  0.1023],
+            [-0.2093, -0.0222, -0.0195],
+            [-0.3087, -0.1535,  0.0366],
+            [ 0.0290, -0.1574, -0.4078]
+        ]
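
Note on the new SDXL_Playground_2_5 format above: unlike the purely scale-factor based formats, it standardizes the 4 latent channels with per-channel mean and std around the 0.5 scale factor, and process_out exactly inverts process_in. A minimal round-trip sketch (latent shape illustrative):

    import torch
    from ldm_patched.modules.latent_formats import SDXL_Playground_2_5

    fmt = SDXL_Playground_2_5()
    latent = torch.randn(1, 4, 128, 128)    # e.g. a 1024x1024 image latent
    inner = fmt.process_in(latent)          # (x - mean) * 0.5 / std
    outer = fmt.process_out(inner)          # x * std / 0.5 + mean
    assert torch.allclose(outer, latent, atol=1e-5)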
diff --git a/ldm_patched/modules/model_sampling.py b/ldm_patched/modules/model_sampling.py
@@ -1,5 +1,4 @@
 import torch
-import numpy as np
 from ldm_patched.ldm.modules.diffusionmodules.util import make_beta_schedule
 import math
 
@@ -12,12 +11,28 @@ class EPS:
         sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
         return model_input - model_output * sigma
 
+    def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
+        if max_denoise:
+            noise = noise * torch.sqrt(1.0 + sigma ** 2.0)
+        else:
+            noise = noise * sigma
+
+        noise += latent_image
+        return noise
+
+    def inverse_noise_scaling(self, sigma, latent):
+        return latent
+
 class V_PREDICTION(EPS):
     def calculate_denoised(self, sigma, model_output, model_input):
         sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
         return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
 
+class EDM(V_PREDICTION):
+    def calculate_denoised(self, sigma, model_output, model_input):
+        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
+        return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) + model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
+
 
 class ModelSamplingDiscrete(torch.nn.Module):
     def __init__(self, model_config=None):
@@ -42,24 +57,23 @@ class ModelSamplingDiscrete(torch.nn.Module):
         else:
             betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
         alphas = 1. - betas
-        alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32)
-        # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
+        alphas_cumprod = torch.cumprod(alphas, dim=0)
 
         timesteps, = betas.shape
         self.num_timesteps = int(timesteps)
         self.linear_start = linear_start
         self.linear_end = linear_end
 
-        # self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32))
-        # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32))
-        # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32))
-
         sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
         self.set_sigmas(sigmas)
+        self.set_alphas_cumprod(alphas_cumprod.float())
 
     def set_sigmas(self, sigmas):
-        self.register_buffer('sigmas', sigmas)
-        self.register_buffer('log_sigmas', sigmas.log())
+        self.register_buffer('sigmas', sigmas.float())
+        self.register_buffer('log_sigmas', sigmas.log().float())
+
+    def set_alphas_cumprod(self, alphas_cumprod):
+        self.register_buffer("alphas_cumprod", alphas_cumprod.float())
 
     @property
     def sigma_min(self):
@@ -94,8 +108,6 @@ class ModelSamplingDiscrete(torch.nn.Module):
 class ModelSamplingContinuousEDM(torch.nn.Module):
     def __init__(self, model_config=None):
         super().__init__()
-        self.sigma_data = 1.0
-
         if model_config is not None:
             sampling_settings = model_config.sampling_settings
         else:
@@ -103,9 +115,11 @@ class ModelSamplingContinuousEDM(torch.nn.Module):
 
         sigma_min = sampling_settings.get("sigma_min", 0.002)
         sigma_max = sampling_settings.get("sigma_max", 120.0)
-        self.set_sigma_range(sigma_min, sigma_max)
+        sigma_data = sampling_settings.get("sigma_data", 1.0)
+        self.set_parameters(sigma_min, sigma_max, sigma_data)
 
-    def set_sigma_range(self, sigma_min, sigma_max):
+    def set_parameters(self, sigma_min, sigma_max, sigma_data):
+        self.sigma_data = sigma_data
         sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), 1000).exp()
 
         self.register_buffer('sigmas', sigmas) #for compatibility with some schedulers
@@ -134,3 +148,56 @@ class ModelSamplingContinuousEDM(torch.nn.Module):
 
         log_sigma_min = math.log(self.sigma_min)
         return math.exp((math.log(self.sigma_max) - log_sigma_min) * percent + log_sigma_min)
+
+class StableCascadeSampling(ModelSamplingDiscrete):
+    def __init__(self, model_config=None):
+        super().__init__()
+
+        if model_config is not None:
+            sampling_settings = model_config.sampling_settings
+        else:
+            sampling_settings = {}
+
+        self.set_parameters(sampling_settings.get("shift", 1.0))
+
+    def set_parameters(self, shift=1.0, cosine_s=8e-3):
+        self.shift = shift
+        self.cosine_s = torch.tensor(cosine_s)
+        self._init_alpha_cumprod = torch.cos(self.cosine_s / (1 + self.cosine_s) * torch.pi * 0.5) ** 2
+
+        #This part is just for compatibility with some schedulers in the codebase
+        self.num_timesteps = 10000
+        sigmas = torch.empty((self.num_timesteps), dtype=torch.float32)
+        for x in range(self.num_timesteps):
+            t = (x + 1) / self.num_timesteps
+            sigmas[x] = self.sigma(t)
+
+        self.set_sigmas(sigmas)
+
+    def sigma(self, timestep):
+        alpha_cumprod = (torch.cos((timestep + self.cosine_s) / (1 + self.cosine_s) * torch.pi * 0.5) ** 2 / self._init_alpha_cumprod)
+
+        if self.shift != 1.0:
+            var = alpha_cumprod
+            logSNR = (var/(1-var)).log()
+            logSNR += 2 * torch.log(1.0 / torch.tensor(self.shift))
+            alpha_cumprod = logSNR.sigmoid()
+
+        alpha_cumprod = alpha_cumprod.clamp(0.0001, 0.9999)
+        return ((1 - alpha_cumprod) / alpha_cumprod) ** 0.5
+
+    def timestep(self, sigma):
+        var = 1 / ((sigma * sigma) + 1)
+        var = var.clamp(0, 1.0)
+        s, min_var = self.cosine_s.to(var.device), self._init_alpha_cumprod.to(var.device)
+        t = (((var * min_var) ** 0.5).acos() / (torch.pi * 0.5)) * (1 + s) - s
+        return t
+
+    def percent_to_sigma(self, percent):
+        if percent <= 0.0:
+            return 999999999.9
+        if percent >= 1.0:
+            return 0.0
+
+        percent = 1.0 - percent
+        return self.sigma(torch.tensor(percent))
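
Note on the model_sampling hunks above: the new EDM class reuses the v-prediction weighting but adds the model-output term where v-prediction subtracts it, and sigma_data is now a parameter (the external_model_advanced hunk sets it to 0.5 for edm_playground_v2.5). A minimal comparison sketch (standalone instances with sigma_data assigned by hand, random tensors purely for illustration):

    import torch
    from ldm_patched.modules.model_sampling import V_PREDICTION, EDM

    v, e = V_PREDICTION(), EDM()
    v.sigma_data = e.sigma_data = 0.5

    sigma = torch.tensor([1.0])
    model_output = torch.randn(1, 4, 8, 8)
    model_input = torch.randn(1, 4, 8, 8)

    d_v = v.calculate_denoised(sigma, model_output, model_input)
    d_e = e.calculate_denoised(sigma, model_output, model_input)

    # same sigma_data weighting, opposite sign on the model-output term
    weight = sigma * 0.5 / (sigma ** 2 + 0.25) ** 0.5
    assert torch.allclose(d_e - d_v, 2 * model_output * weight)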
diff --git a/ldm_patched/modules/samplers.py b/ldm_patched/modules/samplers.py
@@ -523,7 +523,7 @@ class UNIPCBH2(Sampler):
 
 KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "heunpp2","dpm_2", "dpm_2_ancestral",
                   "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
-                  "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "tcd"]
+                  "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "tcd", "edm_playground_v2.5"]
 
 class KSAMPLER(Sampler):
     def __init__(self, sampler_function, extra_options={}, inpaint_options={}):
diff --git a/modules/async_worker.py b/modules/async_worker.py
@@ -462,8 +462,10 @@ def worker():
 
         progressbar(async_task, 2, 'Loading models ...')
 
-        loras, prompt = parse_lora_references_from_prompt(prompt, loras, modules.config.default_max_lora_number)
+        lora_filenames = modules.util.remove_performance_lora(modules.config.lora_filenames, performance_selection)
+        loras, prompt = parse_lora_references_from_prompt(prompt, loras, modules.config.default_max_lora_number, lora_filenames=lora_filenames)
+        loras += performance_loras
 
         pipeline.refresh_everything(refiner_model_name=refiner_model_name, base_model_name=base_model_name,
                                     loras=loras, base_model_additional_loras=base_model_additional_loras,
                                     use_synthetic_refiner=use_synthetic_refiner, vae_name=vae_name)
@@ -826,16 +828,33 @@ def worker():
 
             if scheduler_name in ['lcm', 'tcd']:
                 final_scheduler_name = 'sgm_uniform'
-                if pipeline.final_unet is not None:
-                    pipeline.final_unet = core.opModelSamplingDiscrete.patch(
-                        pipeline.final_unet,
-                        sampling=scheduler_name,
-                        zsnr=False)[0]
-                if pipeline.final_refiner_unet is not None:
-                    pipeline.final_refiner_unet = core.opModelSamplingDiscrete.patch(
-                        pipeline.final_refiner_unet,
-                        sampling=scheduler_name,
-                        zsnr=False)[0]
-                print(f'Using {scheduler_name} scheduler.')
+
+                def patch_discrete(unet):
+                    return core.opModelSamplingDiscrete.patch(
+                        unet,
+                        sampling=scheduler_name,
+                        zsnr=False)[0]
+
+                if pipeline.final_unet is not None:
+                    pipeline.final_unet = patch_discrete(pipeline.final_unet)
+                if pipeline.final_refiner_unet is not None:
+                    pipeline.final_refiner_unet = patch_discrete(pipeline.final_refiner_unet)
+                print(f'Using {scheduler_name} scheduler.')
+            elif scheduler_name == 'edm_playground_v2.5':
+                final_scheduler_name = 'karras'
+
+                def patch_edm(unet):
+                    return core.opModelSamplingContinuousEDM.patch(
+                        unet,
+                        sampling=scheduler_name,
+                        sigma_max=120.0,
+                        sigma_min=0.002)[0]
+
+                if pipeline.final_unet is not None:
+                    pipeline.final_unet = patch_edm(pipeline.final_unet)
+                if pipeline.final_refiner_unet is not None:
+                    pipeline.final_refiner_unet = patch_edm(pipeline.final_refiner_unet)
+
+                print(f'Using {scheduler_name} scheduler.')
 
             async_task.yields.append(['preview', (flags.preparation_step_count, 'Moving model to GPU ...', None)])
diff --git a/modules/config.py b/modules/config.py
@@ -548,25 +548,9 @@ with open(config_example_path, "w", encoding="utf-8") as json_file:
 
 model_filenames = []
 lora_filenames = []
-lora_filenames_no_special = []
 vae_filenames = []
 wildcard_filenames = []
 
-sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors'
-sdxl_lightning_lora = 'sdxl_lightning_4step_lora.safetensors'
-sdxl_hyper_sd_lora = 'sdxl_hyper_sd_4step_lora.safetensors'
-loras_metadata_remove = [sdxl_lcm_lora, sdxl_lightning_lora, sdxl_hyper_sd_lora]
-
-
-def remove_special_loras(lora_filenames):
-    global loras_metadata_remove
-
-    loras_no_special = lora_filenames.copy()
-    for lora_to_remove in loras_metadata_remove:
-        if lora_to_remove in loras_no_special:
-            loras_no_special.remove(lora_to_remove)
-    return loras_no_special
-
-
 def get_model_filenames(folder_paths, extensions=None, name_filter=None):
     if extensions is None:
@@ -582,10 +566,9 @@ def get_model_filenames(folder_paths, extensions=None, name_filter=None):
 
 
 def update_files():
-    global model_filenames, lora_filenames, lora_filenames_no_special, vae_filenames, wildcard_filenames, available_presets
+    global model_filenames, lora_filenames, vae_filenames, wildcard_filenames, available_presets
     model_filenames = get_model_filenames(paths_checkpoints)
     lora_filenames = get_model_filenames(paths_loras)
-    lora_filenames_no_special = remove_special_loras(lora_filenames)
     vae_filenames = get_model_filenames(path_vae)
     wildcard_filenames = get_files_from_folder(path_wildcards, ['.txt'])
     available_presets = get_presets()
@@ -634,26 +617,27 @@ def downloading_sdxl_lcm_lora():
     load_file_from_url(
         url='https://huggingface.co/lllyasviel/misc/resolve/main/sdxl_lcm_lora.safetensors',
         model_dir=paths_loras[0],
-        file_name=sdxl_lcm_lora
+        file_name=modules.flags.PerformanceLoRA.EXTREME_SPEED.value
     )
-    return sdxl_lcm_lora
+    return modules.flags.PerformanceLoRA.EXTREME_SPEED.value
 
 
 def downloading_sdxl_lightning_lora():
     load_file_from_url(
         url='https://huggingface.co/mashb1t/misc/resolve/main/sdxl_lightning_4step_lora.safetensors',
         model_dir=paths_loras[0],
-        file_name=sdxl_lightning_lora
+        file_name=modules.flags.PerformanceLoRA.LIGHTNING.value
    )
-    return sdxl_lightning_lora
+    return modules.flags.PerformanceLoRA.LIGHTNING.value
 
 
 def downloading_sdxl_hyper_sd_lora():
     load_file_from_url(
         url='https://huggingface.co/mashb1t/misc/resolve/main/sdxl_hyper_sd_4step_lora.safetensors',
         model_dir=paths_loras[0],
-        file_name=sdxl_hyper_sd_lora
+        file_name=modules.flags.PerformanceLoRA.HYPER_SD.value
     )
-    return sdxl_hyper_sd_lora
+    return modules.flags.PerformanceLoRA.HYPER_SD.value
 
 
 def downloading_controlnet_canny():
diff --git a/modules/core.py b/modules/core.py
@@ -21,8 +21,7 @@ from modules.lora import match_lora
 from modules.util import get_file_from_folder_list
 from ldm_patched.modules.lora import model_lora_keys_unet, model_lora_keys_clip
 from modules.config import path_embeddings
-from ldm_patched.contrib.external_model_advanced import ModelSamplingDiscrete
-
+from ldm_patched.contrib.external_model_advanced import ModelSamplingDiscrete, ModelSamplingContinuousEDM
 
 opEmptyLatentImage = EmptyLatentImage()
 opVAEDecode = VAEDecode()
@@ -32,6 +31,7 @@ opVAEEncodeTiled = VAEEncodeTiled()
 opControlNetApplyAdvanced = ControlNetApplyAdvanced()
 opFreeU = FreeU_V2()
 opModelSamplingDiscrete = ModelSamplingDiscrete()
+opModelSamplingContinuousEDM = ModelSamplingContinuousEDM()
 
 
 class StableDiffusionModel:
diff --git a/modules/flags.py b/modules/flags.py
@@ -48,7 +48,7 @@ SAMPLERS = KSAMPLER | SAMPLER_EXTRA
 
 KSAMPLER_NAMES = list(KSAMPLER.keys())
 
-SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo", "align_your_steps", "tcd"]
+SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo", "align_your_steps", "tcd", "edm_playground_v2.5"]
 SAMPLER_NAMES = KSAMPLER_NAMES + list(SAMPLER_EXTRA.keys())
 
 sampler_list = SAMPLER_NAMES
@@ -91,6 +91,7 @@ sdxl_aspect_ratios = [
     '1664*576', '1728*576'
 ]
 
+
 class MetadataScheme(Enum):
     FOOOCUS = 'fooocus'
     A1111 = 'a1111'
@@ -115,6 +116,14 @@ class OutputFormat(Enum):
         return list(map(lambda c: c.value, cls))
 
 
+class PerformanceLoRA(Enum):
+    QUALITY = None
+    SPEED = None
+    EXTREME_SPEED = 'sdxl_lcm_lora.safetensors'
+    LIGHTNING = 'sdxl_lightning_4step_lora.safetensors'
+    HYPER_SD = 'sdxl_hyper_sd_4step_lora.safetensors'
+
+
 class Steps(IntEnum):
     QUALITY = 60
     SPEED = 30
@@ -142,6 +151,10 @@ class Performance(Enum):
     def list(cls) -> list:
         return list(map(lambda c: c.value, cls))
 
+    @classmethod
+    def by_steps(cls, steps: int | str):
+        return cls[Steps(int(steps)).name]
+
     @classmethod
     def has_restricted_features(cls, x) -> bool:
         if isinstance(x, Performance):
@@ -149,7 +162,10 @@ class Performance(Enum):
         return x in [cls.EXTREME_SPEED.value, cls.LIGHTNING.value, cls.HYPER_SD.value]
 
     def steps(self) -> int | None:
-        return Steps[self.name].value if Steps[self.name] else None
+        return Steps[self.name].value if self.name in Steps.__members__ else None
 
     def steps_uov(self) -> int | None:
-        return StepsUOV[self.name].value if Steps[self.name] else None
+        return StepsUOV[self.name].value if self.name in StepsUOV.__members__ else None
+
+    def lora_filename(self) -> str | None:
+        return PerformanceLoRA[self.name].value if self.name in PerformanceLoRA.__members__ else None
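
Note on the flags.py hunks above: Performance lookups now guard through __members__, so a mode without a matching entry returns None instead of raising KeyError, and the new PerformanceLoRA enum becomes the single source for the performance LoRA filenames that config.py and util.py reference. A small usage sketch:

    from modules.flags import Performance, PerformanceLoRA

    performance = Performance.by_steps(30)         # Steps.SPEED == 30
    assert performance == Performance.SPEED
    assert performance.lora_filename() is None     # PerformanceLoRA.SPEED is None
    assert Performance.EXTREME_SPEED.lora_filename() == PerformanceLoRA.EXTREME_SPEED.value  # 'sdxl_lcm_lora.safetensors'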
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
@@ -32,7 +32,7 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
     get_str('prompt', 'Prompt', loaded_parameter_dict, results)
     get_str('negative_prompt', 'Negative Prompt', loaded_parameter_dict, results)
     get_list('styles', 'Styles', loaded_parameter_dict, results)
-    get_str('performance', 'Performance', loaded_parameter_dict, results)
+    performance = get_str('performance', 'Performance', loaded_parameter_dict, results)
     get_steps('steps', 'Steps', loaded_parameter_dict, results)
     get_number('overwrite_switch', 'Overwrite Switch', loaded_parameter_dict, results)
     get_resolution('resolution', 'Resolution', loaded_parameter_dict, results)
@@ -59,19 +59,27 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
 
     get_freeu('freeu', 'FreeU', loaded_parameter_dict, results)
 
+    # prevent performance LoRAs to be added twice, by performance and by lora
+    performance_filename = None
+    if performance is not None and performance in Performance.list():
+        performance = Performance(performance)
+        performance_filename = performance.lora_filename()
+
     for i in range(modules.config.default_max_lora_number):
-        get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results)
+        get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results, performance_filename)
 
     return results
 
 
-def get_str(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
+def get_str(key: str, fallback: str | None, source_dict: dict, results: list, default=None) -> str | None:
     try:
         h = source_dict.get(key, source_dict.get(fallback, default))
         assert isinstance(h, str)
         results.append(h)
+        return h
     except:
         results.append(gr.update())
+        return None
 
 
 def get_list(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
@@ -181,7 +189,7 @@ def get_freeu(key: str, fallback: str | None, source_dict: dict, results: list,
         results.append(gr.update())
 
 
-def get_lora(key: str, fallback: str | None, source_dict: dict, results: list):
+def get_lora(key: str, fallback: str | None, source_dict: dict, results: list, performance_filename: str | None):
     try:
         split_data = source_dict.get(key, source_dict.get(fallback)).split(' : ')
         enabled = True
@@ -193,6 +201,9 @@ def get_lora(key: str, fallback: str | None, source_dict: dict, results: list):
             name = split_data[1]
             weight = split_data[2]
 
+        if name == performance_filename:
+            raise Exception
+
         weight = float(weight)
         results.append(enabled)
         results.append(name)
@@ -248,7 +259,7 @@ class MetadataParser(ABC):
         self.full_prompt: str = ''
         self.raw_negative_prompt: str = ''
         self.full_negative_prompt: str = ''
-        self.steps: int = 30
+        self.steps: int = Steps.SPEED.value
         self.base_model_name: str = ''
         self.base_model_hash: str = ''
         self.refiner_model_name: str = ''
@@ -261,11 +272,11 @@ class MetadataParser(ABC):
         raise NotImplementedError
 
     @abstractmethod
-    def parse_json(self, metadata: dict | str) -> dict:
+    def to_json(self, metadata: dict | str) -> dict:
         raise NotImplementedError
 
     @abstractmethod
-    def parse_string(self, metadata: dict) -> str:
+    def to_string(self, metadata: dict) -> str:
         raise NotImplementedError
 
     def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name,
@@ -328,7 +339,7 @@ class A1111MetadataParser(MetadataParser):
         'version': 'Version'
     }
 
-    def parse_json(self, metadata: str) -> dict:
+    def to_json(self, metadata: str) -> dict:
         metadata_prompt = ''
         metadata_negative_prompt = ''
 
@@ -382,9 +393,9 @@ class A1111MetadataParser(MetadataParser):
             data['styles'] = str(found_styles)
 
         # try to load performance based on steps, fallback for direct A1111 imports
-        if 'steps' in data and 'performance' not in data:
+        if 'steps' in data and 'performance' in data is None:
             try:
-                data['performance'] = Performance[Steps(int(data['steps'])).name].value
+                data['performance'] = Performance.by_steps(data['steps']).value
             except ValueError | KeyError:
                 pass
 
@@ -414,7 +425,7 @@ class A1111MetadataParser(MetadataParser):
                 lora_split = lora.split(': ')
                 lora_name = lora_split[0]
                 lora_weight = lora_split[2] if len(lora_split) == 3 else lora_split[1]
-                for filename in modules.config.lora_filenames_no_special:
+                for filename in modules.config.lora_filenames:
                     path = Path(filename)
                     if lora_name == path.stem:
                         data[f'lora_combined_{li + 1}'] = f'{filename} : {lora_weight}'
@@ -422,7 +433,7 @@ class A1111MetadataParser(MetadataParser):
 
         return data
 
-    def parse_string(self, metadata: dict) -> str:
+    def to_string(self, metadata: dict) -> str:
         data = {k: v for _, k, v in metadata}
 
         width, height = eval(data['resolution'])
@@ -502,14 +513,14 @@ class FooocusMetadataParser(MetadataParser):
     def get_scheme(self) -> MetadataScheme:
         return MetadataScheme.FOOOCUS
 
-    def parse_json(self, metadata: dict) -> dict:
+    def to_json(self, metadata: dict) -> dict:
         for key, value in metadata.items():
             if value in ['', 'None']:
                 continue
             if key in ['base_model', 'refiner_model']:
                 metadata[key] = self.replace_value_with_filename(key, value, modules.config.model_filenames)
             elif key.startswith('lora_combined_'):
-                metadata[key] = self.replace_value_with_filename(key, value, modules.config.lora_filenames_no_special)
+                metadata[key] = self.replace_value_with_filename(key, value, modules.config.lora_filenames)
             elif key == 'vae':
                 metadata[key] = self.replace_value_with_filename(key, value, modules.config.vae_filenames)
             else:
@@ -517,7 +528,7 @@ class FooocusMetadataParser(MetadataParser):
 
         return metadata
 
-    def parse_string(self, metadata: list) -> str:
+    def to_string(self, metadata: list) -> str:
         for li, (label, key, value) in enumerate(metadata):
             # remove model folder paths from metadata
             if key.startswith('lora_combined_'):
@@ -557,6 +568,8 @@ class FooocusMetadataParser(MetadataParser):
             elif value == path.stem:
                 return filename
 
+        return None
+
 
 def get_metadata_parser(metadata_scheme: MetadataScheme) -> MetadataParser:
     match metadata_scheme:
diff --git a/modules/patch_precision.py b/modules/patch_precision.py
@@ -51,8 +51,6 @@ def patched_register_schedule(self, given_betas=None, beta_schedule="linear", ti
     self.linear_end = linear_end
     sigmas = torch.tensor(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, dtype=torch.float32)
     self.set_sigmas(sigmas)
-    alphas_cumprod = torch.tensor(alphas_cumprod, dtype=torch.float32)
-    self.set_alphas_cumprod(alphas_cumprod)
     return
 
 
diff --git a/modules/private_logger.py b/modules/private_logger.py
@@ -27,7 +27,7 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for
     date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format)
     os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True)
 
-    parsed_parameters = metadata_parser.parse_string(metadata.copy()) if metadata_parser is not None else ''
+    parsed_parameters = metadata_parser.to_string(metadata.copy()) if metadata_parser is not None else ''
     image = Image.fromarray(img)
 
     if output_format == OutputFormat.PNG.value:
diff --git a/modules/util.py b/modules/util.py
@@ -16,6 +16,7 @@ from PIL import Image
 
 import modules.config
 import modules.sdxl_styles
+from modules.flags import Performance
 
 LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
 
@@ -381,9 +382,6 @@ def get_file_from_folder_list(name, folders):
 
     return os.path.abspath(os.path.realpath(os.path.join(folders[0], name)))
 
-def ordinal_suffix(number: int) -> str:
-    return 'th' if 10 <= number % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(number % 10, 'th')
-
 
 def makedirs_with_log(path):
     try:
@@ -397,10 +395,15 @@ def get_enabled_loras(loras: list, remove_none=True) -> list:
 
 
 def parse_lora_references_from_prompt(prompt: str, loras: List[Tuple[AnyStr, float]], loras_limit: int = 5,
-                                      skip_file_check=False, prompt_cleanup=True, deduplicate_loras=True) -> tuple[List[Tuple[AnyStr, float]], str]:
+                                      skip_file_check=False, prompt_cleanup=True, deduplicate_loras=True,
+                                      lora_filenames=None) -> tuple[List[Tuple[AnyStr, float]], str]:
+    if lora_filenames is None:
+        lora_filenames = []
+
     found_loras = []
     prompt_without_loras = ''
     cleaned_prompt = ''
 
     for token in prompt.split(','):
         matches = LORAS_PROMPT_PATTERN.findall(token)
 
@@ -410,7 +413,7 @@ def parse_lora_references_from_prompt(prompt: str, loras: List[Tuple[AnyStr, flo
         for match in matches:
             lora_name = match[1] + '.safetensors'
             if not skip_file_check:
-                lora_name = get_filname_by_stem(match[1], modules.config.lora_filenames_no_special)
+                lora_name = get_filname_by_stem(match[1], lora_filenames)
             if lora_name is not None:
                 found_loras.append((lora_name, float(match[2])))
             token = token.replace(match[0], '')
@@ -440,6 +443,22 @@ def parse_lora_references_from_prompt(prompt: str, loras: List[Tuple[AnyStr, flo
     return updated_loras[:loras_limit], cleaned_prompt
 
 
+def remove_performance_lora(filenames: list, performance: Performance | None):
+    loras_without_performance = filenames.copy()
+
+    if performance is None:
+        return loras_without_performance
+
+    performance_lora = performance.lora_filename()
+
+    for filename in filenames:
+        path = Path(filename)
+        if performance_lora == path.name:
+            loras_without_performance.remove(filename)
+
+    return loras_without_performance
+
+
 def cleanup_prompt(prompt):
     prompt = re.sub(' +', ' ', prompt)
     prompt = re.sub(',+', ',', prompt)
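
Note on the util.py hunks above: parse_lora_references_from_prompt now receives an explicit lora_filenames list instead of reading modules.config.lora_filenames_no_special, and the new remove_performance_lora produces that list by dropping the LoRA file belonging to the selected performance mode (the async_worker hunk re-adds it separately via performance_loras). A small usage sketch ('hey-lora.safetensors' is a hypothetical filename):

    from modules.flags import Performance
    from modules.util import remove_performance_lora, parse_lora_references_from_prompt

    filenames = ['hey-lora.safetensors', 'sdxl_lcm_lora.safetensors']
    filenames = remove_performance_lora(filenames, Performance.EXTREME_SPEED)
    # -> ['hey-lora.safetensors']

    loras, prompt = parse_lora_references_from_prompt(
        'some prompt, <lora:sdxl_lcm_lora:1>, <lora:hey-lora:0.4>', [],
        loras_limit=5, lora_filenames=filenames)
    # -> ([('hey-lora.safetensors', 0.4)], 'some prompt')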
diff --git a/presets/.gitignore b/presets/.gitignore
@@ -2,5 +2,6 @@
 !anime.json
 !default.json
 !lcm.json
+!playground_v2.5.json
 !realistic.json
 !sai.json
diff --git a/presets/playground_v2.5.json b/presets/playground_v2.5.json
@@ -0,0 +1,51 @@
+{
+    "default_model": "playground-v2.5-1024px-aesthetic.fp16.safetensors",
+    "default_refiner": "None",
+    "default_refiner_switch": 0.5,
+    "default_loras": [
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ]
+    ],
+    "default_cfg_scale": 3.0,
+    "default_sample_sharpness": 2.0,
+    "default_sampler": "dpmpp_2m",
+    "default_scheduler": "edm_playground_v2.5",
+    "default_performance": "Speed",
+    "default_prompt": "",
+    "default_prompt_negative": "",
+    "default_styles": [
+        "Fooocus V2",
+        "Fooocus Enhance",
+        "Fooocus Sharp"
+    ],
+    "default_aspect_ratio": "1024*1024",
+    "checkpoint_downloads": {
+        "playground-v2.5-1024px-aesthetic.fp16.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/playground-v2.5-1024px-aesthetic.fp16.safetensors"
+    },
+    "embeddings_downloads": {},
+    "lora_downloads": {},
+    "previous_default_models": []
+}
diff --git a/tests/test_utils.py b/tests/test_utils.py
@@ -1,5 +1,7 @@
+import os
 import unittest
 
+import modules.flags
 from modules import util
 
 
@@ -77,5 +79,59 @@ class TestUtils(unittest.TestCase):
         for test in test_cases:
             prompt, loras, loras_limit, skip_file_check = test["input"]
             expected = test["output"]
-            actual = util.parse_lora_references_from_prompt(prompt, loras, loras_limit=loras_limit, skip_file_check=skip_file_check)
+            actual = util.parse_lora_references_from_prompt(prompt, loras, loras_limit=loras_limit,
+                                                            skip_file_check=skip_file_check)
             self.assertEqual(expected, actual)
+
+    def test_can_parse_tokens_and_strip_performance_lora(self):
+        lora_filenames = [
+            'hey-lora.safetensors',
+            modules.flags.PerformanceLoRA.EXTREME_SPEED.value,
+            modules.flags.PerformanceLoRA.LIGHTNING.value,
+            os.path.join('subfolder', modules.flags.PerformanceLoRA.HYPER_SD.value)
+        ]
+
+        test_cases = [
+            {
+                "input": ("some prompt, <lora:hey-lora:0.4>", [], 5, True, modules.flags.Performance.QUALITY),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            },
+            {
+                "input": ("some prompt, <lora:hey-lora:0.4>", [], 5, True, modules.flags.Performance.SPEED),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            },
+            {
+                "input": ("some prompt, <lora:sdxl_lcm_lora:1>, <lora:hey-lora:0.4>", [], 5, True, modules.flags.Performance.EXTREME_SPEED),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            },
+            {
+                "input": ("some prompt, <lora:sdxl_lightning_4step_lora:1>, <lora:hey-lora:0.4>", [], 5, True, modules.flags.Performance.LIGHTNING),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            },
+            {
+                "input": ("some prompt, <lora:sdxl_hyper_sd_4step_lora:1>, <lora:hey-lora:0.4>", [], 5, True, modules.flags.Performance.HYPER_SD),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            }
+        ]
+
+        for test in test_cases:
+            prompt, loras, loras_limit, skip_file_check, performance = test["input"]
+            lora_filenames = modules.util.remove_performance_lora(lora_filenames, performance)
+            expected = test["output"]
+            actual = util.parse_lora_references_from_prompt(prompt, loras, loras_limit=loras_limit, lora_filenames=lora_filenames)
+            self.assertEqual(expected, actual)
diff --git a/update_log.md b/update_log.md
@@ -1,3 +1,12 @@
+# [2.4.2](https://github.com/lllyasviel/Fooocus/releases/tag/v2.4.2)
+
+* Fix some small bugs (tcd scheduler when gamma is 0, chown in Dockerfile, update cmd args in readme, translation for aspect ratios, vae default after file reload)
+* Fix performance LoRA replacement when data is loaded from history log and inline prompt
+* Add support and preset for playground v2.5 (only works with performance Quality or Speed, use with scheduler edm_playground_v2.5)
+* Make textboxes (incl. positive prompt) resizable
+* Hide intermediate images when performance of Gradio would bottleneck the generation process (Extreme Speed, Lightning, Hyper-SD)
+
+
 # [2.4.1](https://github.com/lllyasviel/Fooocus/releases/tag/v2.4.1)
 
 * Fix some small bugs (e.g. adjust clip skip default value from 1 to 2, add type check to aspect ratios js update function)
diff --git a/webui.py b/webui.py
@@ -460,9 +460,8 @@ with shared.gradio_root:
                     disable_preview = gr.Checkbox(label='Disable Preview', value=modules.config.default_black_out_nsfw,
                                                   interactive=not modules.config.default_black_out_nsfw,
                                                   info='Disable preview during generation.')
-                    disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
-                                                               value=modules.config.default_performance == flags.Performance.EXTREME_SPEED.value,
-                                                               interactive=modules.config.default_performance != flags.Performance.EXTREME_SPEED.value,
+                    disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
+                                                               value=flags.Performance.has_restricted_features(modules.config.default_performance),
                                                                info='Disable intermediate results during generation, only show final gallery.')
                     disable_seed_increment = gr.Checkbox(label='Disable seed increment',
                                                          info='Disable automatic seed increment when image number is > 1.',
@@ -616,7 +615,7 @@ with shared.gradio_root:
 
         performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 +
                                                [gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 +
-                                               [gr.update(interactive=not flags.Performance.has_restricted_features(x), value=flags.Performance.has_restricted_features(x))] * 1,
+                                               [gr.update(value=flags.Performance.has_restricted_features(x))] * 1,
                                      inputs=performance_selection,
                                      outputs=[
                                          guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive,
@@ -713,7 +712,7 @@ with shared.gradio_root:
                 parsed_parameters = {}
             else:
                 metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
-                parsed_parameters = metadata_parser.parse_json(parameters)
+                parsed_parameters = metadata_parser.to_json(parameters)
 
             return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating)
 