From f6d67d7655be4d396194ae053c2f8bc9ec3a7680 Mon Sep 17 00:00:00 2001 From: rsl8 <138326583+rsl8@users.noreply.github.com> Date: Sun, 21 Jan 2024 07:04:44 +0100 Subject: [PATCH 001/103] Make the private log url path relative (#1948) --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 581e3101..fadd852a 100644 --- a/webui.py +++ b/webui.py @@ -256,7 +256,7 @@ with shared.gradio_root: queue=False, show_progress=False) if not args_manager.args.disable_image_log: - gr.HTML(f'\U0001F4DA History Log') + gr.HTML(f'\U0001F4DA History Log') with gr.Tab(label='Style'): style_sorter.try_load_sorted_styles( From 0b00aaf897357ecd75eda7270fa3f409056b2ef9 Mon Sep 17 00:00:00 2001 From: Pradhyo Bijja Date: Sat, 27 Jan 2024 07:59:04 -0500 Subject: [PATCH 002/103] add venv mentioned in the readme to gitignore (#2038) --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 05ce1df8..de2f5778 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ user_path_config.txt user_path_config-deprecated.txt /modules/*.png /repositories +/fooocus_env /venv /tmp /ui-config.json From 1cc40d24d7e1302e963026dd178a6bb4604769f4 Mon Sep 17 00:00:00 2001 From: lllyasviel Date: Sat, 27 Jan 2024 05:07:20 -0800 Subject: [PATCH 003/103] backend --- ldm_patched/contrib/external.py | 68 +- .../contrib/external_custom_sampler.py | 12 +- ldm_patched/contrib/external_freelunch.py | 4 +- ldm_patched/contrib/external_hypertile.py | 18 +- ldm_patched/contrib/external_latent.py | 24 + ldm_patched/contrib/external_model_merging.py | 83 +-- ldm_patched/contrib/external_photomaker.py | 189 +++++ .../contrib/external_post_processing.py | 1 + ldm_patched/contrib/external_sag.py | 4 +- ldm_patched/contrib/external_sdupscale.py | 49 ++ ldm_patched/contrib/external_stable3d.py | 46 +- ldm_patched/contrib/external_video_model.py | 17 + ldm_patched/ldm/modules/attention.py | 35 +- .../modules/diffusionmodules/openaimodel.py | 9 +- .../ldm/modules/diffusionmodules/upscaling.py | 12 +- .../ldm/modules/encoders/noise_aug_modules.py | 4 +- .../ldm/modules/sub_quadratic_attention.py | 30 +- ldm_patched/licenses-3rd/chainer | 20 + ldm_patched/licenses-3rd/comfyui | 674 ++++++++++++++++++ ldm_patched/licenses-3rd/diffusers | 201 ++++++ ldm_patched/licenses-3rd/kdiffusion | 19 + ldm_patched/licenses-3rd/ldm | 21 + ldm_patched/licenses-3rd/taesd | 21 + ldm_patched/licenses-3rd/transformers | 203 ++++++ ldm_patched/modules/args_parser.py | 2 + ldm_patched/modules/clip_model.py | 2 +- ldm_patched/modules/clip_vision.py | 10 +- ldm_patched/modules/conds.py | 1 - ldm_patched/modules/controlnet.py | 6 +- ldm_patched/modules/diffusers_load.py | 1 - ldm_patched/modules/gligen.py | 2 +- ldm_patched/modules/latent_formats.py | 4 + ldm_patched/modules/model_base.py | 97 ++- ldm_patched/modules/model_detection.py | 8 +- ldm_patched/modules/model_management.py | 2 +- ldm_patched/modules/model_patcher.py | 47 +- ldm_patched/modules/ops.py | 1 - ldm_patched/modules/sample.py | 1 - ldm_patched/modules/samplers.py | 10 +- ldm_patched/modules/sd.py | 39 +- ldm_patched/modules/sd1_clip.py | 1 - ldm_patched/modules/supported_models.py | 29 +- ldm_patched/modules/supported_models_base.py | 6 + ldm_patched/utils/path_utils.py | 19 +- 44 files changed, 1885 insertions(+), 167 deletions(-) create mode 100644 ldm_patched/contrib/external_photomaker.py create mode 100644 ldm_patched/contrib/external_sdupscale.py create mode 100644 ldm_patched/licenses-3rd/chainer create mode 100644 
ldm_patched/licenses-3rd/comfyui create mode 100644 ldm_patched/licenses-3rd/diffusers create mode 100644 ldm_patched/licenses-3rd/kdiffusion create mode 100644 ldm_patched/licenses-3rd/ldm create mode 100644 ldm_patched/licenses-3rd/taesd create mode 100644 ldm_patched/licenses-3rd/transformers diff --git a/ldm_patched/contrib/external.py b/ldm_patched/contrib/external.py index 9d2238df..927cd3f3 100644 --- a/ldm_patched/contrib/external.py +++ b/ldm_patched/contrib/external.py @@ -361,6 +361,62 @@ class VAEEncodeForInpaint: return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, ) + +class InpaintModelConditioning: + @classmethod + def INPUT_TYPES(s): + return {"required": {"positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "vae": ("VAE", ), + "pixels": ("IMAGE", ), + "mask": ("MASK", ), + }} + + RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + FUNCTION = "encode" + + CATEGORY = "conditioning/inpaint" + + def encode(self, positive, negative, pixels, vae, mask): + x = (pixels.shape[1] // 8) * 8 + y = (pixels.shape[2] // 8) * 8 + mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear") + + orig_pixels = pixels + pixels = orig_pixels.clone() + if pixels.shape[1] != x or pixels.shape[2] != y: + x_offset = (pixels.shape[1] % 8) // 2 + y_offset = (pixels.shape[2] % 8) // 2 + pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:] + mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset] + + m = (1.0 - mask.round()).squeeze(1) + for i in range(3): + pixels[:,:,:,i] -= 0.5 + pixels[:,:,:,i] *= m + pixels[:,:,:,i] += 0.5 + concat_latent = vae.encode(pixels) + orig_latent = vae.encode(orig_pixels) + + out_latent = {} + + out_latent["samples"] = orig_latent + out_latent["noise_mask"] = mask + + out = [] + for conditioning in [positive, negative]: + c = [] + for t in conditioning: + d = t[1].copy() + d["concat_latent_image"] = concat_latent + d["concat_mask"] = mask + n = [t[0], d] + c.append(n) + out.append(c) + return (out[0], out[1], out_latent) + + class SaveLatent: def __init__(self): self.output_dir = ldm_patched.utils.path_utils.get_output_directory() @@ -1417,6 +1473,8 @@ class LoadImage: output_masks = [] for i in ImageSequence.Iterator(img): i = ImageOps.exif_transpose(i) + if i.mode == 'I': + i = i.point(lambda i: i * (1 / 255)) image = i.convert("RGB") image = np.array(image).astype(np.float32) / 255.0 image = torch.from_numpy(image)[None,] @@ -1472,6 +1530,8 @@ class LoadImageMask: i = Image.open(image_path) i = ImageOps.exif_transpose(i) if i.getbands() != ("R", "G", "B", "A"): + if i.mode == 'I': + i = i.point(lambda i: i * (1 / 255)) i = i.convert("RGBA") mask = None c = channel[0].upper() @@ -1626,10 +1686,11 @@ class ImagePadForOutpaint: def expand_image(self, image, left, top, right, bottom, feathering): d1, d2, d3, d4 = image.size() - new_image = torch.zeros( + new_image = torch.ones( (d1, d2 + top + bottom, d3 + left + right, d4), dtype=torch.float32, - ) + ) * 0.5 + new_image[:, top:top + d2, left:left + d3, :] = image mask = torch.ones( @@ -1721,6 +1782,7 @@ NODE_CLASS_MAPPINGS = { "unCLIPCheckpointLoader": unCLIPCheckpointLoader, "GLIGENLoader": GLIGENLoader, "GLIGENTextBoxApply": GLIGENTextBoxApply, + "InpaintModelConditioning": InpaintModelConditioning, "CheckpointLoader": CheckpointLoader, "DiffusersLoader": DiffusersLoader, @@ -1882,6 +1944,8 @@ def init_custom_nodes(): 
"nodes_sag.py", "nodes_perpneg.py", "nodes_stable3d.py", + "nodes_sdupscale.py", + "nodes_photomaker.py", ] for node_file in extras_files: diff --git a/ldm_patched/contrib/external_custom_sampler.py b/ldm_patched/contrib/external_custom_sampler.py index 6e5a769b..8f92e841 100644 --- a/ldm_patched/contrib/external_custom_sampler.py +++ b/ldm_patched/contrib/external_custom_sampler.py @@ -15,6 +15,7 @@ class BasicScheduler: {"model": ("MODEL",), "scheduler": (ldm_patched.modules.samplers.SCHEDULER_NAMES, ), "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), } } RETURN_TYPES = ("SIGMAS",) @@ -22,8 +23,14 @@ class BasicScheduler: FUNCTION = "get_sigmas" - def get_sigmas(self, model, scheduler, steps): - sigmas = ldm_patched.modules.samplers.calculate_sigmas_scheduler(model.model, scheduler, steps).cpu() + def get_sigmas(self, model, scheduler, steps, denoise): + total_steps = steps + if denoise < 1.0: + total_steps = int(steps/denoise) + + ldm_patched.modules.model_management.load_models_gpu([model]) + sigmas = ldm_patched.modules.samplers.calculate_sigmas_scheduler(model.model, scheduler, total_steps).cpu() + sigmas = sigmas[-(steps + 1):] return (sigmas, ) @@ -100,6 +107,7 @@ class SDTurboScheduler: def get_sigmas(self, model, steps, denoise): start_step = 10 - int(10 * denoise) timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[start_step:start_step + steps] + ldm_patched.modules.model_management.load_models_gpu([model]) sigmas = model.model.model_sampling.sigma(timesteps) sigmas = torch.cat([sigmas, sigmas.new_zeros([1])]) return (sigmas, ) diff --git a/ldm_patched/contrib/external_freelunch.py b/ldm_patched/contrib/external_freelunch.py index f8dd5a44..59ec5bab 100644 --- a/ldm_patched/contrib/external_freelunch.py +++ b/ldm_patched/contrib/external_freelunch.py @@ -36,7 +36,7 @@ class FreeU: RETURN_TYPES = ("MODEL",) FUNCTION = "patch" - CATEGORY = "_for_testing" + CATEGORY = "model_patches" def patch(self, model, b1, b2, s1, s2): model_channels = model.model.model_config.unet_config["model_channels"] @@ -75,7 +75,7 @@ class FreeU_V2: RETURN_TYPES = ("MODEL",) FUNCTION = "patch" - CATEGORY = "_for_testing" + CATEGORY = "model_patches" def patch(self, model, b1, b2, s1, s2): model_channels = model.model.model_config.unet_config["model_channels"] diff --git a/ldm_patched/contrib/external_hypertile.py b/ldm_patched/contrib/external_hypertile.py index 45f7c3ea..5cf7d9d6 100644 --- a/ldm_patched/contrib/external_hypertile.py +++ b/ldm_patched/contrib/external_hypertile.py @@ -34,29 +34,29 @@ class HyperTile: RETURN_TYPES = ("MODEL",) FUNCTION = "patch" - CATEGORY = "_for_testing" + CATEGORY = "model_patches" def patch(self, model, tile_size, swap_size, max_depth, scale_depth): model_channels = model.model.model_config.unet_config["model_channels"] - apply_to = set() - temp = model_channels - for x in range(max_depth + 1): - apply_to.add(temp) - temp *= 2 - latent_tile_size = max(32, tile_size) // 8 self.temp = None def hypertile_in(q, k, v, extra_options): - if q.shape[-1] in apply_to: + model_chans = q.shape[-2] + orig_shape = extra_options['original_shape'] + apply_to = [] + for i in range(max_depth + 1): + apply_to.append((orig_shape[-2] / (2 ** i)) * (orig_shape[-1] / (2 ** i))) + + if model_chans in apply_to: shape = extra_options["original_shape"] aspect_ratio = shape[-1] / shape[-2] hw = q.size(1) h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio)) - factor = 
2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1 + factor = (2 ** apply_to.index(model_chans)) if scale_depth else 1 nh = random_divisor(h, latent_tile_size * factor, swap_size) nw = random_divisor(w, latent_tile_size * factor, swap_size) diff --git a/ldm_patched/contrib/external_latent.py b/ldm_patched/contrib/external_latent.py index c6f874e1..6d753d0f 100644 --- a/ldm_patched/contrib/external_latent.py +++ b/ldm_patched/contrib/external_latent.py @@ -124,10 +124,34 @@ class LatentBatch: samples_out["batch_index"] = samples1.get("batch_index", [x for x in range(0, s1.shape[0])]) + samples2.get("batch_index", [x for x in range(0, s2.shape[0])]) return (samples_out,) +class LatentBatchSeedBehavior: + @classmethod + def INPUT_TYPES(s): + return {"required": { "samples": ("LATENT",), + "seed_behavior": (["random", "fixed"],),}} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "op" + + CATEGORY = "latent/advanced" + + def op(self, samples, seed_behavior): + samples_out = samples.copy() + latent = samples["samples"] + if seed_behavior == "random": + if 'batch_index' in samples_out: + samples_out.pop('batch_index') + elif seed_behavior == "fixed": + batch_number = samples_out.get("batch_index", [0])[0] + samples_out["batch_index"] = [batch_number] * latent.shape[0] + + return (samples_out,) + NODE_CLASS_MAPPINGS = { "LatentAdd": LatentAdd, "LatentSubtract": LatentSubtract, "LatentMultiply": LatentMultiply, "LatentInterpolate": LatentInterpolate, "LatentBatch": LatentBatch, + "LatentBatchSeedBehavior": LatentBatchSeedBehavior, } diff --git a/ldm_patched/contrib/external_model_merging.py b/ldm_patched/contrib/external_model_merging.py index c0cf9afd..ae8145d4 100644 --- a/ldm_patched/contrib/external_model_merging.py +++ b/ldm_patched/contrib/external_model_merging.py @@ -121,6 +121,48 @@ class ModelMergeBlocks: m.add_patches({k: kp[k]}, 1.0 - ratio, ratio) return (m, ) +def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefix=None, output_dir=None, prompt=None, extra_pnginfo=None): + full_output_folder, filename, counter, subfolder, filename_prefix = ldm_patched.utils.path_utils.get_save_image_path(filename_prefix, output_dir) + prompt_info = "" + if prompt is not None: + prompt_info = json.dumps(prompt) + + metadata = {} + + enable_modelspec = True + if isinstance(model.model, ldm_patched.modules.model_base.SDXL): + metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-base" + elif isinstance(model.model, ldm_patched.modules.model_base.SDXLRefiner): + metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-refiner" + else: + enable_modelspec = False + + if enable_modelspec: + metadata["modelspec.sai_model_spec"] = "1.0.0" + metadata["modelspec.implementation"] = "sgm" + metadata["modelspec.title"] = "{} {}".format(filename, counter) + + #TODO: + # "stable-diffusion-v1", "stable-diffusion-v1-inpainting", "stable-diffusion-v2-512", + # "stable-diffusion-v2-768-v", "stable-diffusion-v2-unclip-l", "stable-diffusion-v2-unclip-h", + # "v2-inpainting" + + if model.model.model_type == ldm_patched.modules.model_base.ModelType.EPS: + metadata["modelspec.predict_key"] = "epsilon" + elif model.model.model_type == ldm_patched.modules.model_base.ModelType.V_PREDICTION: + metadata["modelspec.predict_key"] = "v" + + if not args.disable_server_info: + metadata["prompt"] = prompt_info + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata[x] = json.dumps(extra_pnginfo[x]) + + output_checkpoint = f"{filename}_{counter:05}_.safetensors" + 
output_checkpoint = os.path.join(full_output_folder, output_checkpoint) + + ldm_patched.modules.sd.save_checkpoint(output_checkpoint, model, clip, vae, clip_vision, metadata=metadata) + class CheckpointSave: def __init__(self): self.output_dir = ldm_patched.utils.path_utils.get_output_directory() @@ -139,46 +181,7 @@ class CheckpointSave: CATEGORY = "advanced/model_merging" def save(self, model, clip, vae, filename_prefix, prompt=None, extra_pnginfo=None): - full_output_folder, filename, counter, subfolder, filename_prefix = ldm_patched.utils.path_utils.get_save_image_path(filename_prefix, self.output_dir) - prompt_info = "" - if prompt is not None: - prompt_info = json.dumps(prompt) - - metadata = {} - - enable_modelspec = True - if isinstance(model.model, ldm_patched.modules.model_base.SDXL): - metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-base" - elif isinstance(model.model, ldm_patched.modules.model_base.SDXLRefiner): - metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-refiner" - else: - enable_modelspec = False - - if enable_modelspec: - metadata["modelspec.sai_model_spec"] = "1.0.0" - metadata["modelspec.implementation"] = "sgm" - metadata["modelspec.title"] = "{} {}".format(filename, counter) - - #TODO: - # "stable-diffusion-v1", "stable-diffusion-v1-inpainting", "stable-diffusion-v2-512", - # "stable-diffusion-v2-768-v", "stable-diffusion-v2-unclip-l", "stable-diffusion-v2-unclip-h", - # "v2-inpainting" - - if model.model.model_type == ldm_patched.modules.model_base.ModelType.EPS: - metadata["modelspec.predict_key"] = "epsilon" - elif model.model.model_type == ldm_patched.modules.model_base.ModelType.V_PREDICTION: - metadata["modelspec.predict_key"] = "v" - - if not args.disable_server_info: - metadata["prompt"] = prompt_info - if extra_pnginfo is not None: - for x in extra_pnginfo: - metadata[x] = json.dumps(extra_pnginfo[x]) - - output_checkpoint = f"{filename}_{counter:05}_.safetensors" - output_checkpoint = os.path.join(full_output_folder, output_checkpoint) - - ldm_patched.modules.sd.save_checkpoint(output_checkpoint, model, clip, vae, metadata=metadata) + save_checkpoint(model, clip=clip, vae=vae, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo) return {} class CLIPSave: diff --git a/ldm_patched/contrib/external_photomaker.py b/ldm_patched/contrib/external_photomaker.py new file mode 100644 index 00000000..cc7f6710 --- /dev/null +++ b/ldm_patched/contrib/external_photomaker.py @@ -0,0 +1,189 @@ +# https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py + +import torch +import torch.nn as nn +import ldm_patched.utils.path_utils +import ldm_patched.modules.clip_model +import ldm_patched.modules.clip_vision +import ldm_patched.modules.ops + +# code for model from: https://github.com/TencentARC/PhotoMaker/blob/main/photomaker/model.py under Apache License Version 2.0 +VISION_CONFIG_DICT = { + "hidden_size": 1024, + "image_size": 224, + "intermediate_size": 4096, + "num_attention_heads": 16, + "num_channels": 3, + "num_hidden_layers": 24, + "patch_size": 14, + "projection_dim": 768, + "hidden_act": "quick_gelu", +} + +class MLP(nn.Module): + def __init__(self, in_dim, out_dim, hidden_dim, use_residual=True, operations=ldm_patched.modules.ops): + super().__init__() + if use_residual: + assert in_dim == out_dim + self.layernorm = operations.LayerNorm(in_dim) + self.fc1 = operations.Linear(in_dim, hidden_dim) + self.fc2 = operations.Linear(hidden_dim, out_dim) + self.use_residual = use_residual + 
self.act_fn = nn.GELU() + + def forward(self, x): + residual = x + x = self.layernorm(x) + x = self.fc1(x) + x = self.act_fn(x) + x = self.fc2(x) + if self.use_residual: + x = x + residual + return x + + +class FuseModule(nn.Module): + def __init__(self, embed_dim, operations): + super().__init__() + self.mlp1 = MLP(embed_dim * 2, embed_dim, embed_dim, use_residual=False, operations=operations) + self.mlp2 = MLP(embed_dim, embed_dim, embed_dim, use_residual=True, operations=operations) + self.layer_norm = operations.LayerNorm(embed_dim) + + def fuse_fn(self, prompt_embeds, id_embeds): + stacked_id_embeds = torch.cat([prompt_embeds, id_embeds], dim=-1) + stacked_id_embeds = self.mlp1(stacked_id_embeds) + prompt_embeds + stacked_id_embeds = self.mlp2(stacked_id_embeds) + stacked_id_embeds = self.layer_norm(stacked_id_embeds) + return stacked_id_embeds + + def forward( + self, + prompt_embeds, + id_embeds, + class_tokens_mask, + ) -> torch.Tensor: + # id_embeds shape: [b, max_num_inputs, 1, 2048] + id_embeds = id_embeds.to(prompt_embeds.dtype) + num_inputs = class_tokens_mask.sum().unsqueeze(0) # TODO: check for training case + batch_size, max_num_inputs = id_embeds.shape[:2] + # seq_length: 77 + seq_length = prompt_embeds.shape[1] + # flat_id_embeds shape: [b*max_num_inputs, 1, 2048] + flat_id_embeds = id_embeds.view( + -1, id_embeds.shape[-2], id_embeds.shape[-1] + ) + # valid_id_mask [b*max_num_inputs] + valid_id_mask = ( + torch.arange(max_num_inputs, device=flat_id_embeds.device)[None, :] + < num_inputs[:, None] + ) + valid_id_embeds = flat_id_embeds[valid_id_mask.flatten()] + + prompt_embeds = prompt_embeds.view(-1, prompt_embeds.shape[-1]) + class_tokens_mask = class_tokens_mask.view(-1) + valid_id_embeds = valid_id_embeds.view(-1, valid_id_embeds.shape[-1]) + # slice out the image token embeddings + image_token_embeds = prompt_embeds[class_tokens_mask] + stacked_id_embeds = self.fuse_fn(image_token_embeds, valid_id_embeds) + assert class_tokens_mask.sum() == stacked_id_embeds.shape[0], f"{class_tokens_mask.sum()} != {stacked_id_embeds.shape[0]}" + prompt_embeds.masked_scatter_(class_tokens_mask[:, None], stacked_id_embeds.to(prompt_embeds.dtype)) + updated_prompt_embeds = prompt_embeds.view(batch_size, seq_length, -1) + return updated_prompt_embeds + +class PhotoMakerIDEncoder(ldm_patched.modules.clip_model.CLIPVisionModelProjection): + def __init__(self): + self.load_device = ldm_patched.modules.model_management.text_encoder_device() + offload_device = ldm_patched.modules.model_management.text_encoder_offload_device() + dtype = ldm_patched.modules.model_management.text_encoder_dtype(self.load_device) + + super().__init__(VISION_CONFIG_DICT, dtype, offload_device, ldm_patched.modules.ops.manual_cast) + self.visual_projection_2 = ldm_patched.modules.ops.manual_cast.Linear(1024, 1280, bias=False) + self.fuse_module = FuseModule(2048, ldm_patched.modules.ops.manual_cast) + + def forward(self, id_pixel_values, prompt_embeds, class_tokens_mask): + b, num_inputs, c, h, w = id_pixel_values.shape + id_pixel_values = id_pixel_values.view(b * num_inputs, c, h, w) + + shared_id_embeds = self.vision_model(id_pixel_values)[2] + id_embeds = self.visual_projection(shared_id_embeds) + id_embeds_2 = self.visual_projection_2(shared_id_embeds) + + id_embeds = id_embeds.view(b, num_inputs, 1, -1) + id_embeds_2 = id_embeds_2.view(b, num_inputs, 1, -1) + + id_embeds = torch.cat((id_embeds, id_embeds_2), dim=-1) + updated_prompt_embeds = self.fuse_module(prompt_embeds, id_embeds, class_tokens_mask) + + 
return updated_prompt_embeds + + +class PhotoMakerLoader: + @classmethod + def INPUT_TYPES(s): + return {"required": { "photomaker_model_name": (ldm_patched.utils.path_utils.get_filename_list("photomaker"), )}} + + RETURN_TYPES = ("PHOTOMAKER",) + FUNCTION = "load_photomaker_model" + + CATEGORY = "_for_testing/photomaker" + + def load_photomaker_model(self, photomaker_model_name): + photomaker_model_path = ldm_patched.utils.path_utils.get_full_path("photomaker", photomaker_model_name) + photomaker_model = PhotoMakerIDEncoder() + data = ldm_patched.modules.utils.load_torch_file(photomaker_model_path, safe_load=True) + if "id_encoder" in data: + data = data["id_encoder"] + photomaker_model.load_state_dict(data) + return (photomaker_model,) + + +class PhotoMakerEncode: + @classmethod + def INPUT_TYPES(s): + return {"required": { "photomaker": ("PHOTOMAKER",), + "image": ("IMAGE",), + "clip": ("CLIP", ), + "text": ("STRING", {"multiline": True, "default": "photograph of photomaker"}), + }} + + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "apply_photomaker" + + CATEGORY = "_for_testing/photomaker" + + def apply_photomaker(self, photomaker, image, clip, text): + special_token = "photomaker" + pixel_values = ldm_patched.modules.clip_vision.clip_preprocess(image.to(photomaker.load_device)).float() + try: + index = text.split(" ").index(special_token) + 1 + except ValueError: + index = -1 + tokens = clip.tokenize(text, return_word_ids=True) + out_tokens = {} + for k in tokens: + out_tokens[k] = [] + for t in tokens[k]: + f = list(filter(lambda x: x[2] != index, t)) + while len(f) < len(t): + f.append(t[-1]) + out_tokens[k].append(f) + + cond, pooled = clip.encode_from_tokens(out_tokens, return_pooled=True) + + if index > 0: + token_index = index - 1 + num_id_images = 1 + class_tokens_mask = [True if token_index <= i < token_index+num_id_images else False for i in range(77)] + out = photomaker(id_pixel_values=pixel_values.unsqueeze(0), prompt_embeds=cond.to(photomaker.load_device), + class_tokens_mask=torch.tensor(class_tokens_mask, dtype=torch.bool, device=photomaker.load_device).unsqueeze(0)) + else: + out = cond + + return ([[out, {"pooled_output": pooled}]], ) + + +NODE_CLASS_MAPPINGS = { + "PhotoMakerLoader": PhotoMakerLoader, + "PhotoMakerEncode": PhotoMakerEncode, +} + diff --git a/ldm_patched/contrib/external_post_processing.py b/ldm_patched/contrib/external_post_processing.py index 432c53fb..93cb1212 100644 --- a/ldm_patched/contrib/external_post_processing.py +++ b/ldm_patched/contrib/external_post_processing.py @@ -35,6 +35,7 @@ class Blend: CATEGORY = "image/postprocessing" def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str): + image2 = image2.to(image1.device) if image1.shape != image2.shape: image2 = image2.permute(0, 3, 1, 2) image2 = ldm_patched.modules.utils.common_upscale(image2, image1.shape[2], image1.shape[1], upscale_method='bicubic', crop='center') diff --git a/ldm_patched/contrib/external_sag.py b/ldm_patched/contrib/external_sag.py index 9cffe879..804d5611 100644 --- a/ldm_patched/contrib/external_sag.py +++ b/ldm_patched/contrib/external_sag.py @@ -60,7 +60,7 @@ def create_blur_map(x0, attn, sigma=3.0, threshold=1.0): attn = attn.reshape(b, -1, hw1, hw2) # Global Average Pool mask = attn.mean(1, keepdim=False).sum(1, keepdim=False) > threshold - ratio = math.ceil(math.sqrt(lh * lw / hw1)) + ratio = 2**(math.ceil(math.sqrt(lh * lw / hw1)) - 1).bit_length() mid_shape = [math.ceil(lh / ratio), math.ceil(lw / ratio)] # 
Reshape @@ -145,6 +145,8 @@ class SelfAttentionGuidance: sigma = args["sigma"] model_options = args["model_options"] x = args["input"] + if min(cfg_result.shape[2:]) <= 4: #skip when too small to add padding + return cfg_result # create the adversarially blurred image degraded = create_blur_map(uncond_pred, uncond_attn, sag_sigma, sag_threshold) diff --git a/ldm_patched/contrib/external_sdupscale.py b/ldm_patched/contrib/external_sdupscale.py new file mode 100644 index 00000000..68153c47 --- /dev/null +++ b/ldm_patched/contrib/external_sdupscale.py @@ -0,0 +1,49 @@ +# https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py + +import torch +import ldm_patched.contrib.external +import ldm_patched.modules.utils + +class SD_4XUpscale_Conditioning: + @classmethod + def INPUT_TYPES(s): + return {"required": { "images": ("IMAGE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "scale_ratio": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + }} + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + + FUNCTION = "encode" + + CATEGORY = "conditioning/upscale_diffusion" + + def encode(self, images, positive, negative, scale_ratio, noise_augmentation): + width = max(1, round(images.shape[-2] * scale_ratio)) + height = max(1, round(images.shape[-3] * scale_ratio)) + + pixels = ldm_patched.modules.utils.common_upscale((images.movedim(-1,1) * 2.0) - 1.0, width // 4, height // 4, "bilinear", "center") + + out_cp = [] + out_cn = [] + + for t in positive: + n = [t[0], t[1].copy()] + n[1]['concat_image'] = pixels + n[1]['noise_augmentation'] = noise_augmentation + out_cp.append(n) + + for t in negative: + n = [t[0], t[1].copy()] + n[1]['concat_image'] = pixels + n[1]['noise_augmentation'] = noise_augmentation + out_cn.append(n) + + latent = torch.zeros([images.shape[0], 4, height // 4, width // 4]) + return (out_cp, out_cn, {"samples":latent}) + +NODE_CLASS_MAPPINGS = { + "SD_4XUpscale_Conditioning": SD_4XUpscale_Conditioning, +} diff --git a/ldm_patched/contrib/external_stable3d.py b/ldm_patched/contrib/external_stable3d.py index 2913a3d0..bae2623f 100644 --- a/ldm_patched/contrib/external_stable3d.py +++ b/ldm_patched/contrib/external_stable3d.py @@ -48,13 +48,57 @@ class StableZero123_Conditioning: encode_pixels = pixels[:,:,:,:3] t = vae.encode(encode_pixels) cam_embeds = camera_embeddings(elevation, azimuth) - cond = torch.cat([pooled, cam_embeds.repeat((pooled.shape[0], 1, 1))], dim=-1) + cond = torch.cat([pooled, cam_embeds.to(pooled.device).repeat((pooled.shape[0], 1, 1))], dim=-1) positive = [[cond, {"concat_latent_image": t}]] negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]] latent = torch.zeros([batch_size, 4, height // 8, width // 8]) return (positive, negative, {"samples":latent}) +class StableZero123_Conditioning_Batched: + @classmethod + def INPUT_TYPES(s): + return {"required": { "clip_vision": ("CLIP_VISION",), + "init_image": ("IMAGE",), + "vae": ("VAE",), + "width": ("INT", {"default": 256, "min": 16, "max": ldm_patched.contrib.external.MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 256, "min": 16, "max": ldm_patched.contrib.external.MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + "elevation": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}), + "azimuth": ("FLOAT", {"default": 0.0, "min": 
-180.0, "max": 180.0}), + "elevation_batch_increment": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}), + "azimuth_batch_increment": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}), + }} + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + + FUNCTION = "encode" + + CATEGORY = "conditioning/3d_models" + + def encode(self, clip_vision, init_image, vae, width, height, batch_size, elevation, azimuth, elevation_batch_increment, azimuth_batch_increment): + output = clip_vision.encode_image(init_image) + pooled = output.image_embeds.unsqueeze(0) + pixels = ldm_patched.modules.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) + encode_pixels = pixels[:,:,:,:3] + t = vae.encode(encode_pixels) + + cam_embeds = [] + for i in range(batch_size): + cam_embeds.append(camera_embeddings(elevation, azimuth)) + elevation += elevation_batch_increment + azimuth += azimuth_batch_increment + + cam_embeds = torch.cat(cam_embeds, dim=0) + cond = torch.cat([ldm_patched.modules.utils.repeat_to_batch_size(pooled, batch_size), cam_embeds], dim=-1) + + positive = [[cond, {"concat_latent_image": t}]] + negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]] + latent = torch.zeros([batch_size, 4, height // 8, width // 8]) + return (positive, negative, {"samples":latent, "batch_index": [0] * batch_size}) + + NODE_CLASS_MAPPINGS = { "StableZero123_Conditioning": StableZero123_Conditioning, + "StableZero123_Conditioning_Batched": StableZero123_Conditioning_Batched, } diff --git a/ldm_patched/contrib/external_video_model.py b/ldm_patched/contrib/external_video_model.py index 4504528a..503df0e1 100644 --- a/ldm_patched/contrib/external_video_model.py +++ b/ldm_patched/contrib/external_video_model.py @@ -5,6 +5,7 @@ import torch import ldm_patched.modules.utils import ldm_patched.modules.sd import ldm_patched.utils.path_utils +import ldm_patched.contrib.external_model_merging class ImageOnlyCheckpointLoader: @@ -80,10 +81,26 @@ class VideoLinearCFGGuidance: m.set_model_sampler_cfg_function(linear_cfg) return (m, ) +class ImageOnlyCheckpointSave(ldm_patched.contrib.external_model_merging.CheckpointSave): + CATEGORY = "_for_testing" + + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "clip_vision": ("CLIP_VISION",), + "vae": ("VAE",), + "filename_prefix": ("STRING", {"default": "checkpoints/ldm_patched"}),}, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},} + + def save(self, model, clip_vision, vae, filename_prefix, prompt=None, extra_pnginfo=None): + ldm_patched.contrib.external_model_merging.save_checkpoint(model, clip_vision=clip_vision, vae=vae, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo) + return {} + NODE_CLASS_MAPPINGS = { "ImageOnlyCheckpointLoader": ImageOnlyCheckpointLoader, "SVD_img2vid_Conditioning": SVD_img2vid_Conditioning, "VideoLinearCFGGuidance": VideoLinearCFGGuidance, + "ImageOnlyCheckpointSave": ImageOnlyCheckpointSave, } NODE_DISPLAY_NAME_MAPPINGS = { diff --git a/ldm_patched/ldm/modules/attention.py b/ldm_patched/ldm/modules/attention.py index 49e502ed..e10a868d 100644 --- a/ldm_patched/ldm/modules/attention.py +++ b/ldm_patched/ldm/modules/attention.py @@ -1,12 +1,9 @@ -from inspect import isfunction import math import torch import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, repeat from typing import 
Optional, Any -from functools import partial - from .diffusionmodules.util import checkpoint, AlphaBlender, timestep_embedding from .sub_quadratic_attention import efficient_dot_product_attention @@ -177,6 +174,7 @@ def attention_sub_quad(query, key, value, heads, mask=None): kv_chunk_size_min=kv_chunk_size_min, use_checkpoint=False, upcast_attention=upcast_attention, + mask=mask, ) hidden_states = hidden_states.to(dtype) @@ -239,6 +237,12 @@ def attention_split(q, k, v, heads, mask=None): else: s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale + if mask is not None: + if len(mask.shape) == 2: + s1 += mask[i:end] + else: + s1 += mask[:, i:end] + s2 = s1.softmax(dim=-1).to(v.dtype) del s1 first_op_done = True @@ -294,11 +298,14 @@ def attention_xformers(q, k, v, heads, mask=None): (q, k, v), ) - # actually compute the attention, what we cannot get enough of - out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) + if mask is not None: + pad = 8 - q.shape[1] % 8 + mask_out = torch.empty([q.shape[0], q.shape[1], q.shape[1] + pad], dtype=q.dtype, device=q.device) + mask_out[:, :, :mask.shape[-1]] = mask + mask = mask_out[:, :, :mask.shape[-1]] + + out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=mask) - if exists(mask): - raise NotImplementedError out = ( out.unsqueeze(0) .reshape(b, heads, -1, dim_head) @@ -323,7 +330,6 @@ def attention_pytorch(q, k, v, heads, mask=None): optimized_attention = attention_basic -optimized_attention_masked = attention_basic if model_management.xformers_enabled(): print("Using xformers cross attention") @@ -339,15 +345,18 @@ else: print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --attention-split") optimized_attention = attention_sub_quad -if model_management.pytorch_attention_enabled(): - optimized_attention_masked = attention_pytorch +optimized_attention_masked = optimized_attention -def optimized_attention_for_device(device, mask=False): - if device == torch.device("cpu"): #TODO +def optimized_attention_for_device(device, mask=False, small_input=False): + if small_input: if model_management.pytorch_attention_enabled(): - return attention_pytorch + return attention_pytorch #TODO: need to confirm but this is probably slightly faster for small inputs in all cases else: return attention_basic + + if device == torch.device("cpu"): + return attention_sub_quad + if mask: return optimized_attention_masked diff --git a/ldm_patched/ldm/modules/diffusionmodules/openaimodel.py b/ldm_patched/ldm/modules/diffusionmodules/openaimodel.py index e5784f28..4b695f76 100644 --- a/ldm_patched/ldm/modules/diffusionmodules/openaimodel.py +++ b/ldm_patched/ldm/modules/diffusionmodules/openaimodel.py @@ -1,12 +1,9 @@ from abc import abstractmethod -import math -import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F from einops import rearrange -from functools import partial from .util import ( checkpoint, @@ -437,9 +434,6 @@ class UNetModel(nn.Module): operations=ops, ): super().__init__() - assert use_spatial_transformer == True, "use_spatial_transformer has to be true" - if use_spatial_transformer: - assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
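Note on the attention_xformers change in ldm_patched/ldm/modules/attention.py above: xformers' memory_efficient_attention expects the attn_bias storage to be aligned, and padding the last dimension up to a multiple of 8 is the usual workaround, which is why the hunk copies the mask into a padded buffer and then slices the view back down to the logical size while keeping the aligned storage underneath. A minimal standalone sketch of the same trick, assuming a [batch, q_len, kv_len] additive bias (pad_attn_bias is an illustrative name, not part of the patch):

import torch

def pad_attn_bias(mask: torch.Tensor, q_len: int) -> torch.Tensor:
    # Allocate storage padded to a multiple of 8 along the last dim,
    # copy the real bias in, then slice back to the logical size; the
    # returned view keeps the padded (aligned) strides underneath.
    pad = 8 - q_len % 8
    out = torch.empty(mask.shape[0], q_len, q_len + pad,
                      dtype=mask.dtype, device=mask.device)
    out[:, :, :mask.shape[-1]] = mask
    return out[:, :, :mask.shape[-1]]

bias = torch.zeros(2, 77, 77)
padded = pad_attn_bias(bias, 77)
assert padded.shape == (2, 77, 77) and padded.stride(1) == 80

The shape is unchanged, but the stride along the query dimension is now the padded 80 rather than 77, which is the layout property the slice preserves.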
@@ -456,7 +450,6 @@ class UNetModel(nn.Module): if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' - self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels @@ -502,7 +495,7 @@ class UNetModel(nn.Module): if self.num_classes is not None: if isinstance(self.num_classes, int): - self.label_emb = nn.Embedding(num_classes, time_embed_dim) + self.label_emb = nn.Embedding(num_classes, time_embed_dim, dtype=self.dtype, device=device) elif self.num_classes == "continuous": print("setting up linear c_adm embedding layer") self.label_emb = nn.Linear(1, time_embed_dim) diff --git a/ldm_patched/ldm/modules/diffusionmodules/upscaling.py b/ldm_patched/ldm/modules/diffusionmodules/upscaling.py index 2cde80c5..a38bff57 100644 --- a/ldm_patched/ldm/modules/diffusionmodules/upscaling.py +++ b/ldm_patched/ldm/modules/diffusionmodules/upscaling.py @@ -41,8 +41,12 @@ class AbstractLowScaleModel(nn.Module): self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) + def q_sample(self, x_start, t, noise=None, seed=None): + if noise is None: + if seed is None: + noise = torch.randn_like(x_start) + else: + noise = torch.randn(x_start.size(), dtype=x_start.dtype, layout=x_start.layout, generator=torch.manual_seed(seed)).to(x_start.device) return (extract_into_tensor(self.sqrt_alphas_cumprod.to(x_start.device), t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod.to(x_start.device), t, x_start.shape) * noise) @@ -69,12 +73,12 @@ class ImageConcatWithNoiseAugmentation(AbstractLowScaleModel): super().__init__(noise_schedule_config=noise_schedule_config) self.max_noise_level = max_noise_level - def forward(self, x, noise_level=None): + def forward(self, x, noise_level=None, seed=None): if noise_level is None: noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long() else: assert isinstance(noise_level, torch.Tensor) - z = self.q_sample(x, noise_level) + z = self.q_sample(x, noise_level, seed=seed) return z, noise_level diff --git a/ldm_patched/ldm/modules/encoders/noise_aug_modules.py b/ldm_patched/ldm/modules/encoders/noise_aug_modules.py index 66767b58..a5d86603 100644 --- a/ldm_patched/ldm/modules/encoders/noise_aug_modules.py +++ b/ldm_patched/ldm/modules/encoders/noise_aug_modules.py @@ -23,13 +23,13 @@ class CLIPEmbeddingNoiseAugmentation(ImageConcatWithNoiseAugmentation): x = (x * self.data_std.to(x.device)) + self.data_mean.to(x.device) return x - def forward(self, x, noise_level=None): + def forward(self, x, noise_level=None, seed=None): if noise_level is None: noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long() else: assert isinstance(noise_level, torch.Tensor) x = self.scale(x) - z = self.q_sample(x, noise_level) + z = self.q_sample(x, noise_level, seed=seed) z = self.unscale(z) noise_level = self.time_embed(noise_level) return z, noise_level diff --git a/ldm_patched/ldm/modules/sub_quadratic_attention.py b/ldm_patched/ldm/modules/sub_quadratic_attention.py index cabf1f67..9f4c23c7 100644 --- a/ldm_patched/ldm/modules/sub_quadratic_attention.py +++ b/ldm_patched/ldm/modules/sub_quadratic_attention.py @@ -61,6 +61,7 @@ def _summarize_chunk( value: 
Tensor, scale: float, upcast_attention: bool, + mask, ) -> AttnChunk: if upcast_attention: with torch.autocast(enabled=False, device_type = 'cuda'): @@ -84,6 +85,8 @@ def _summarize_chunk( max_score, _ = torch.max(attn_weights, -1, keepdim=True) max_score = max_score.detach() attn_weights -= max_score + if mask is not None: + attn_weights += mask torch.exp(attn_weights, out=attn_weights) exp_weights = attn_weights.to(value.dtype) exp_values = torch.bmm(exp_weights, value) @@ -96,11 +99,12 @@ def _query_chunk_attention( value: Tensor, summarize_chunk: SummarizeChunk, kv_chunk_size: int, + mask, ) -> Tensor: batch_x_heads, k_channels_per_head, k_tokens = key_t.shape _, _, v_channels_per_head = value.shape - def chunk_scanner(chunk_idx: int) -> AttnChunk: + def chunk_scanner(chunk_idx: int, mask) -> AttnChunk: key_chunk = dynamic_slice( key_t, (0, 0, chunk_idx), @@ -111,10 +115,13 @@ def _query_chunk_attention( (0, chunk_idx, 0), (batch_x_heads, kv_chunk_size, v_channels_per_head) ) - return summarize_chunk(query, key_chunk, value_chunk) + if mask is not None: + mask = mask[:,:,chunk_idx:chunk_idx + kv_chunk_size] + + return summarize_chunk(query, key_chunk, value_chunk, mask=mask) chunks: List[AttnChunk] = [ - chunk_scanner(chunk) for chunk in torch.arange(0, k_tokens, kv_chunk_size) + chunk_scanner(chunk, mask) for chunk in torch.arange(0, k_tokens, kv_chunk_size) ] acc_chunk = AttnChunk(*map(torch.stack, zip(*chunks))) chunk_values, chunk_weights, chunk_max = acc_chunk @@ -135,6 +142,7 @@ def _get_attention_scores_no_kv_chunking( value: Tensor, scale: float, upcast_attention: bool, + mask, ) -> Tensor: if upcast_attention: with torch.autocast(enabled=False, device_type = 'cuda'): @@ -156,6 +164,8 @@ def _get_attention_scores_no_kv_chunking( beta=0, ) + if mask is not None: + attn_scores += mask try: attn_probs = attn_scores.softmax(dim=-1) del attn_scores @@ -183,6 +193,7 @@ def efficient_dot_product_attention( kv_chunk_size_min: Optional[int] = None, use_checkpoint=True, upcast_attention=False, + mask = None, ): """Computes efficient dot-product attention given query, transposed key, and value. 
This is efficient version of attention presented in @@ -209,13 +220,22 @@ def efficient_dot_product_attention( if kv_chunk_size_min is not None: kv_chunk_size = max(kv_chunk_size, kv_chunk_size_min) + if mask is not None and len(mask.shape) == 2: + mask = mask.unsqueeze(0) + def get_query_chunk(chunk_idx: int) -> Tensor: return dynamic_slice( query, (0, chunk_idx, 0), (batch_x_heads, min(query_chunk_size, q_tokens), q_channels_per_head) ) - + + def get_mask_chunk(chunk_idx: int) -> Tensor: + if mask is None: + return None + chunk = min(query_chunk_size, q_tokens) + return mask[:,chunk_idx:chunk_idx + chunk] + summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale, upcast_attention=upcast_attention) summarize_chunk: SummarizeChunk = partial(checkpoint, summarize_chunk) if use_checkpoint else summarize_chunk compute_query_chunk_attn: ComputeQueryChunkAttn = partial( @@ -237,6 +257,7 @@ def efficient_dot_product_attention( query=query, key_t=key_t, value=value, + mask=mask, ) # TODO: maybe we should use torch.empty_like(query) to allocate storage in-advance, @@ -246,6 +267,7 @@ def efficient_dot_product_attention( query=get_query_chunk(i * query_chunk_size), key_t=key_t, value=value, + mask=get_mask_chunk(i * query_chunk_size) ) for i in range(math.ceil(q_tokens / query_chunk_size)) ], dim=1) return res diff --git a/ldm_patched/licenses-3rd/chainer b/ldm_patched/licenses-3rd/chainer new file mode 100644 index 00000000..db8ef9d9 --- /dev/null +++ b/ldm_patched/licenses-3rd/chainer @@ -0,0 +1,20 @@ +Copyright (c) 2015 Preferred Infrastructure, Inc. +Copyright (c) 2015 Preferred Networks, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/ldm_patched/licenses-3rd/comfyui b/ldm_patched/licenses-3rd/comfyui new file mode 100644 index 00000000..e72bfdda --- /dev/null +++ b/ldm_patched/licenses-3rd/comfyui @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. 
+ + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. 
Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. 
+Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
\ No newline at end of file diff --git a/ldm_patched/licenses-3rd/diffusers b/ldm_patched/licenses-3rd/diffusers new file mode 100644 index 00000000..f49a4e16 --- /dev/null +++ b/ldm_patched/licenses-3rd/diffusers @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/ldm_patched/licenses-3rd/kdiffusion b/ldm_patched/licenses-3rd/kdiffusion new file mode 100644 index 00000000..e20684e5 --- /dev/null +++ b/ldm_patched/licenses-3rd/kdiffusion @@ -0,0 +1,19 @@ +Copyright (c) 2022 Katherine Crowson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/ldm_patched/licenses-3rd/ldm b/ldm_patched/licenses-3rd/ldm new file mode 100644 index 00000000..1a1c5058 --- /dev/null +++ b/ldm_patched/licenses-3rd/ldm @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/ldm_patched/licenses-3rd/taesd b/ldm_patched/licenses-3rd/taesd new file mode 100644 index 00000000..62e6312e --- /dev/null +++ b/ldm_patched/licenses-3rd/taesd @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Ollin Boer Bohan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/ldm_patched/licenses-3rd/transformers b/ldm_patched/licenses-3rd/transformers new file mode 100644 index 00000000..e44d8f5b --- /dev/null +++ b/ldm_patched/licenses-3rd/transformers @@ -0,0 +1,203 @@ +Copyright 2018- The Hugging Face team. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/ldm_patched/modules/args_parser.py b/ldm_patched/modules/args_parser.py index 7ffc4a81..e5b84dc1 100644 --- a/ldm_patched/modules/args_parser.py +++ b/ldm_patched/modules/args_parser.py @@ -112,6 +112,8 @@ parser.add_argument("--is-windows-embedded-python", action="store_true") parser.add_argument("--disable-server-info", action="store_true") +parser.add_argument("--multi-user", action="store_true") + if ldm_patched.modules.options.args_parsing: args = parser.parse_args([]) else: diff --git a/ldm_patched/modules/clip_model.py b/ldm_patched/modules/clip_model.py index 4c4588c3..aceca86d 100644 --- a/ldm_patched/modules/clip_model.py +++ b/ldm_patched/modules/clip_model.py @@ -57,7 +57,7 @@ class CLIPEncoder(torch.nn.Module): self.layers = torch.nn.ModuleList([CLIPLayer(embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations) for i in range(num_layers)]) def forward(self, x, mask=None, intermediate_output=None): - optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None) + optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None, small_input=True) if intermediate_output is not None: if intermediate_output < 0: diff --git a/ldm_patched/modules/clip_vision.py b/ldm_patched/modules/clip_vision.py index 9699210d..affdb8b2 100644 --- a/ldm_patched/modules/clip_vision.py +++ b/ldm_patched/modules/clip_vision.py @@ -1,7 +1,6 @@ -from .utils import load_torch_file, transformers_convert, common_upscale +from .utils import load_torch_file, transformers_convert, state_dict_prefix_replace import os import torch -import contextlib import json import ldm_patched.modules.ops @@ -41,9 +40,13 @@ class ClipVisionModel(): self.model.eval() self.patcher = ldm_patched.modules.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device) + def load_sd(self, sd): return self.model.load_state_dict(sd, strict=False) + def get_sd(self): + return self.model.state_dict() + def encode_image(self, image): ldm_patched.modules.model_management.load_model_gpu(self.patcher) pixel_values = clip_preprocess(image.to(self.load_device)).float() @@ -76,6 +79,9 @@ def convert_to_transformers(sd, prefix): sd['visual_projection.weight'] = sd.pop("{}proj".format(prefix)).transpose(0, 1) sd = transformers_convert(sd, prefix, "vision_model.", 48) + else: + replace_prefix = {prefix: ""} + sd = state_dict_prefix_replace(sd, replace_prefix) return sd def load_clipvision_from_sd(sd, prefix="", convert_keys=False): diff --git a/ldm_patched/modules/conds.py b/ldm_patched/modules/conds.py index a7325680..ed03bd64 100644 --- a/ldm_patched/modules/conds.py +++ b/ldm_patched/modules/conds.py @@ -1,4 +1,3 @@ -import enum import torch import math import ldm_patched.modules.utils diff --git a/ldm_patched/modules/controlnet.py b/ldm_patched/modules/controlnet.py index a7224660..7e11497f 100644 --- a/ldm_patched/modules/controlnet.py +++ b/ldm_patched/modules/controlnet.py @@ -1,7 +1,6 @@ import torch import math import os -import contextlib import ldm_patched.modules.utils import ldm_patched.modules.model_management import ldm_patched.modules.model_detection @@ -126,7 +125,10 @@ class ControlBase: if o[i] is None: o[i] = prev_val else: - o[i] += prev_val + if o[i].shape[0] < prev_val.shape[0]: + o[i] = prev_val + o[i] + else: + o[i] += prev_val return out class ControlNet(ControlBase): diff --git a/ldm_patched/modules/diffusers_load.py b/ldm_patched/modules/diffusers_load.py index 
79fbbd55..62edc72b 100644 --- a/ldm_patched/modules/diffusers_load.py +++ b/ldm_patched/modules/diffusers_load.py @@ -1,4 +1,3 @@ -import json import os import ldm_patched.modules.sd diff --git a/ldm_patched/modules/gligen.py b/ldm_patched/modules/gligen.py index 8dbd5fa4..11f1ee93 100644 --- a/ldm_patched/modules/gligen.py +++ b/ldm_patched/modules/gligen.py @@ -1,5 +1,5 @@ import torch -from torch import nn, einsum +from torch import nn from ldm_patched.ldm.modules.attention import CrossAttention from inspect import isfunction diff --git a/ldm_patched/modules/latent_formats.py b/ldm_patched/modules/latent_formats.py index c209087e..2252a075 100644 --- a/ldm_patched/modules/latent_formats.py +++ b/ldm_patched/modules/latent_formats.py @@ -33,3 +33,7 @@ class SDXL(LatentFormat): [-0.3112, -0.2359, -0.2076] ] self.taesd_decoder_name = "taesdxl_decoder" + +class SD_X4(LatentFormat): + def __init__(self): + self.scale_factor = 0.08333 diff --git a/ldm_patched/modules/model_base.py b/ldm_patched/modules/model_base.py index c04ccb3e..9c69e98b 100644 --- a/ldm_patched/modules/model_base.py +++ b/ldm_patched/modules/model_base.py @@ -1,12 +1,11 @@ import torch -from ldm_patched.ldm.modules.diffusionmodules.openaimodel import UNetModel +from ldm_patched.ldm.modules.diffusionmodules.openaimodel import UNetModel, Timestep from ldm_patched.ldm.modules.encoders.noise_aug_modules import CLIPEmbeddingNoiseAugmentation -from ldm_patched.ldm.modules.diffusionmodules.openaimodel import Timestep +from ldm_patched.ldm.modules.diffusionmodules.upscaling import ImageConcatWithNoiseAugmentation import ldm_patched.modules.model_management import ldm_patched.modules.conds import ldm_patched.modules.ops from enum import Enum -import contextlib from . import utils class ModelType(Enum): @@ -78,8 +77,9 @@ class BaseModel(torch.nn.Module): extra_conds = {} for o in kwargs: extra = kwargs[o] - if hasattr(extra, "to"): - extra = extra.to(dtype) + if hasattr(extra, "dtype"): + if extra.dtype != torch.int and extra.dtype != torch.long: + extra = extra.to(dtype) extra_conds[o] = extra model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() @@ -99,11 +99,29 @@ class BaseModel(torch.nn.Module): if self.inpaint_model: concat_keys = ("mask", "masked_image") cond_concat = [] - denoise_mask = kwargs.get("denoise_mask", None) - latent_image = kwargs.get("latent_image", None) + denoise_mask = kwargs.get("concat_mask", kwargs.get("denoise_mask", None)) + concat_latent_image = kwargs.get("concat_latent_image", None) + if concat_latent_image is None: + concat_latent_image = kwargs.get("latent_image", None) + else: + concat_latent_image = self.process_latent_in(concat_latent_image) + noise = kwargs.get("noise", None) device = kwargs["device"] + if concat_latent_image.shape[1:] != noise.shape[1:]: + concat_latent_image = utils.common_upscale(concat_latent_image, noise.shape[-1], noise.shape[-2], "bilinear", "center") + + concat_latent_image = utils.resize_to_batch_size(concat_latent_image, noise.shape[0]) + + if len(denoise_mask.shape) == len(noise.shape): + denoise_mask = denoise_mask[:,:1] + + denoise_mask = denoise_mask.reshape((-1, 1, denoise_mask.shape[-2], denoise_mask.shape[-1])) + if denoise_mask.shape[-2:] != noise.shape[-2:]: + denoise_mask = utils.common_upscale(denoise_mask, noise.shape[-1], noise.shape[-2], "bilinear", "center") + denoise_mask = utils.resize_to_batch_size(denoise_mask.round(), noise.shape[0]) + def 
blank_inpaint_image_like(latent_image): blank_image = torch.ones_like(latent_image) # these are the values for "zero" in pixel space translated to latent space @@ -116,9 +134,9 @@ class BaseModel(torch.nn.Module): for ck in concat_keys: if denoise_mask is not None: if ck == "mask": - cond_concat.append(denoise_mask[:,:1].to(device)) + cond_concat.append(denoise_mask.to(device)) elif ck == "masked_image": - cond_concat.append(latent_image.to(device)) #NOTE: the latent_image should be masked by the mask in pixel space + cond_concat.append(concat_latent_image.to(device)) #NOTE: the latent_image should be masked by the mask in pixel space else: if ck == "mask": cond_concat.append(torch.ones_like(noise)[:,:1]) @@ -160,19 +178,28 @@ class BaseModel(torch.nn.Module): def process_latent_out(self, latent): return self.latent_format.process_out(latent) - def state_dict_for_saving(self, clip_state_dict, vae_state_dict): - clip_state_dict = self.model_config.process_clip_state_dict_for_saving(clip_state_dict) + def state_dict_for_saving(self, clip_state_dict=None, vae_state_dict=None, clip_vision_state_dict=None): + extra_sds = [] + if clip_state_dict is not None: + extra_sds.append(self.model_config.process_clip_state_dict_for_saving(clip_state_dict)) + if vae_state_dict is not None: + extra_sds.append(self.model_config.process_vae_state_dict_for_saving(vae_state_dict)) + if clip_vision_state_dict is not None: + extra_sds.append(self.model_config.process_clip_vision_state_dict_for_saving(clip_vision_state_dict)) + unet_state_dict = self.diffusion_model.state_dict() unet_state_dict = self.model_config.process_unet_state_dict_for_saving(unet_state_dict) - vae_state_dict = self.model_config.process_vae_state_dict_for_saving(vae_state_dict) + if self.get_dtype() == torch.float16: - clip_state_dict = utils.convert_sd_to(clip_state_dict, torch.float16) - vae_state_dict = utils.convert_sd_to(vae_state_dict, torch.float16) + extra_sds = map(lambda sd: utils.convert_sd_to(sd, torch.float16), extra_sds) if self.model_type == ModelType.V_PREDICTION: unet_state_dict["v_pred"] = torch.tensor([]) - return {**unet_state_dict, **vae_state_dict, **clip_state_dict} + for sd in extra_sds: + unet_state_dict.update(sd) + + return unet_state_dict def set_inpaint(self): self.inpaint_model = True @@ -191,7 +218,7 @@ class BaseModel(torch.nn.Module): return (((area * 0.6) / 0.9) + 1024) * (1024 * 1024) -def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0): +def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0, seed=None): adm_inputs = [] weights = [] noise_aug = [] @@ -200,7 +227,7 @@ def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge weight = unclip_cond["strength"] noise_augment = unclip_cond["noise_augmentation"] noise_level = round((noise_augmentor.max_noise_level - 1) * noise_augment) - c_adm, noise_level_emb = noise_augmentor(adm_cond.to(device), noise_level=torch.tensor([noise_level], device=device)) + c_adm, noise_level_emb = noise_augmentor(adm_cond.to(device), noise_level=torch.tensor([noise_level], device=device), seed=seed) adm_out = torch.cat((c_adm, noise_level_emb), 1) * weight weights.append(weight) noise_aug.append(noise_augment) @@ -226,11 +253,11 @@ class SD21UNCLIP(BaseModel): if unclip_conditioning is None: return torch.zeros((1, self.adm_channels)) else: - return unclip_adm(unclip_conditioning, device, self.noise_augmentor, kwargs.get("unclip_noise_augment_merge", 0.05)) + return 
unclip_adm(unclip_conditioning, device, self.noise_augmentor, kwargs.get("unclip_noise_augment_merge", 0.05), kwargs.get("seed", 0) - 10) def sdxl_pooled(args, noise_augmentor): if "unclip_conditioning" in args: - return unclip_adm(args.get("unclip_conditioning", None), args["device"], noise_augmentor)[:,:1280] + return unclip_adm(args.get("unclip_conditioning", None), args["device"], noise_augmentor, seed=args.get("seed", 0) - 10)[:,:1280] else: return args["pooled_output"] @@ -364,3 +391,35 @@ class Stable_Zero123(BaseModel): cross_attn = self.cc_projection(cross_attn) out['c_crossattn'] = ldm_patched.modules.conds.CONDCrossAttn(cross_attn) return out + +class SD_X4Upscaler(BaseModel): + def __init__(self, model_config, model_type=ModelType.V_PREDICTION, device=None): + super().__init__(model_config, model_type, device=device) + self.noise_augmentor = ImageConcatWithNoiseAugmentation(noise_schedule_config={"linear_start": 0.0001, "linear_end": 0.02}, max_noise_level=350) + + def extra_conds(self, **kwargs): + out = {} + + image = kwargs.get("concat_image", None) + noise = kwargs.get("noise", None) + noise_augment = kwargs.get("noise_augmentation", 0.0) + device = kwargs["device"] + seed = kwargs["seed"] - 10 + + noise_level = round((self.noise_augmentor.max_noise_level) * noise_augment) + + if image is None: + image = torch.zeros_like(noise)[:,:3] + + if image.shape[1:] != noise.shape[1:]: + image = utils.common_upscale(image.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center") + + noise_level = torch.tensor([noise_level], device=device) + if noise_augment > 0: + image, noise_level = self.noise_augmentor(image.to(device), noise_level=noise_level, seed=seed) + + image = utils.resize_to_batch_size(image, noise.shape[0]) + + out['c_concat'] = ldm_patched.modules.conds.CONDNoiseShape(image) + out['y'] = ldm_patched.modules.conds.CONDRegular(noise_level) + return out diff --git a/ldm_patched/modules/model_detection.py b/ldm_patched/modules/model_detection.py index e8fc87ac..126386ca 100644 --- a/ldm_patched/modules/model_detection.py +++ b/ldm_patched/modules/model_detection.py @@ -34,7 +34,6 @@ def detect_unet_config(state_dict, key_prefix, dtype): unet_config = { "use_checkpoint": False, "image_size": 32, - "out_channels": 4, "use_spatial_transformer": True, "legacy": False } @@ -50,6 +49,12 @@ def detect_unet_config(state_dict, key_prefix, dtype): model_channels = state_dict['{}input_blocks.0.0.weight'.format(key_prefix)].shape[0] in_channels = state_dict['{}input_blocks.0.0.weight'.format(key_prefix)].shape[1] + out_key = '{}out.2.weight'.format(key_prefix) + if out_key in state_dict: + out_channels = state_dict[out_key].shape[0] + else: + out_channels = 4 + num_res_blocks = [] channel_mult = [] attention_resolutions = [] @@ -122,6 +127,7 @@ def detect_unet_config(state_dict, key_prefix, dtype): transformer_depth_middle = -1 unet_config["in_channels"] = in_channels + unet_config["out_channels"] = out_channels unet_config["model_channels"] = model_channels unet_config["num_res_blocks"] = num_res_blocks unet_config["transformer_depth"] = transformer_depth diff --git a/ldm_patched/modules/model_management.py b/ldm_patched/modules/model_management.py index 59f0f3d0..6f88579d 100644 --- a/ldm_patched/modules/model_management.py +++ b/ldm_patched/modules/model_management.py @@ -175,7 +175,7 @@ try: if int(torch_version[0]) >= 2: if ENABLE_PYTORCH_ATTENTION == False and args.attention_split == False and args.attention_quad == False: ENABLE_PYTORCH_ATTENTION = True - if 
torch.cuda.is_bf16_supported(): + if torch.cuda.is_bf16_supported() and torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8: VAE_DTYPE = torch.bfloat16 if is_intel_xpu(): if args.attention_split == False and args.attention_quad == False: diff --git a/ldm_patched/modules/model_patcher.py b/ldm_patched/modules/model_patcher.py index 0945a13c..dd816e52 100644 --- a/ldm_patched/modules/model_patcher.py +++ b/ldm_patched/modules/model_patcher.py @@ -174,40 +174,41 @@ class ModelPatcher: sd.pop(k) return sd - def patch_model(self, device_to=None): + def patch_model(self, device_to=None, patch_weights=True): for k in self.object_patches: old = getattr(self.model, k) if k not in self.object_patches_backup: self.object_patches_backup[k] = old setattr(self.model, k, self.object_patches[k]) - model_sd = self.model_state_dict() - for key in self.patches: - if key not in model_sd: - print("could not patch. key doesn't exist in model:", key) - continue + if patch_weights: + model_sd = self.model_state_dict() + for key in self.patches: + if key not in model_sd: + print("could not patch. key doesn't exist in model:", key) + continue - weight = model_sd[key] + weight = model_sd[key] - inplace_update = self.weight_inplace_update + inplace_update = self.weight_inplace_update - if key not in self.backup: - self.backup[key] = weight.to(device=self.offload_device, copy=inplace_update) + if key not in self.backup: + self.backup[key] = weight.to(device=self.offload_device, copy=inplace_update) + + if device_to is not None: + temp_weight = ldm_patched.modules.model_management.cast_to_device(weight, device_to, torch.float32, copy=True) + else: + temp_weight = weight.to(torch.float32, copy=True) + out_weight = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype) + if inplace_update: + ldm_patched.modules.utils.copy_to_param(self.model, key, out_weight) + else: + ldm_patched.modules.utils.set_attr(self.model, key, out_weight) + del temp_weight if device_to is not None: - temp_weight = ldm_patched.modules.model_management.cast_to_device(weight, device_to, torch.float32, copy=True) - else: - temp_weight = weight.to(torch.float32, copy=True) - out_weight = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype) - if inplace_update: - ldm_patched.modules.utils.copy_to_param(self.model, key, out_weight) - else: - ldm_patched.modules.utils.set_attr(self.model, key, out_weight) - del temp_weight - - if device_to is not None: - self.model.to(device_to) - self.current_device = device_to + self.model.to(device_to) + self.current_device = device_to return self.model diff --git a/ldm_patched/modules/ops.py b/ldm_patched/modules/ops.py index 435aba57..2d7fa377 100644 --- a/ldm_patched/modules/ops.py +++ b/ldm_patched/modules/ops.py @@ -1,5 +1,4 @@ import torch -from contextlib import contextmanager import ldm_patched.modules.model_management def cast_bias_weight(s, input): diff --git a/ldm_patched/modules/sample.py b/ldm_patched/modules/sample.py index b5576cee..0f483950 100644 --- a/ldm_patched/modules/sample.py +++ b/ldm_patched/modules/sample.py @@ -28,7 +28,6 @@ def prepare_noise(latent_image, seed, noise_inds=None): def prepare_mask(noise_mask, shape, device): """ensures noise mask is of proper dimensions""" noise_mask = torch.nn.functional.interpolate(noise_mask.reshape((-1, 1, noise_mask.shape[-2], noise_mask.shape[-1])), size=(shape[2], shape[3]), mode="bilinear") - noise_mask = noise_mask.round() noise_mask = torch.cat([noise_mask] * shape[1], dim=1) 
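     # the `.round()` removed above used to binarize this mask; keeping the
     # continuous values preserves soft (feathered) inpaint masks, and callers
     # that need a hard mask now round explicitly (see denoise_mask.round()
     # in the model_base.py hunk earlier in this patch)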
noise_mask = ldm_patched.modules.utils.repeat_to_batch_size(noise_mask, shape[0]) noise_mask = noise_mask.to(device) diff --git a/ldm_patched/modules/samplers.py b/ldm_patched/modules/samplers.py index fc17ef4d..1f69d2b1 100644 --- a/ldm_patched/modules/samplers.py +++ b/ldm_patched/modules/samplers.py @@ -1,13 +1,9 @@ from ldm_patched.k_diffusion import sampling as k_diffusion_sampling from ldm_patched.unipc import uni_pc import torch -import enum import collections from ldm_patched.modules import model_management import math -from ldm_patched.modules import model_base -import ldm_patched.modules.utils -import ldm_patched.modules.conds def get_area_and_mult(conds, x_in, timestep_in): area = (x_in.shape[2], x_in.shape[3], 0, 0) @@ -603,8 +599,8 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model latent_image = model.process_latent_in(latent_image) if hasattr(model, 'extra_conds'): - positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask) - negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask) + positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask, seed=seed) + negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask, seed=seed) #make sure each cond area has an opposite one with the same area for c in positive: @@ -639,7 +635,7 @@ def calculate_sigmas_scheduler(model, scheduler_name, steps): elif scheduler_name == "sgm_uniform": sigmas = normal_scheduler(model, steps, sgm=True) else: - print("error invalid scheduler", self.scheduler) + print("error invalid scheduler", scheduler_name) return sigmas def sampler_object(name): diff --git a/ldm_patched/modules/sd.py b/ldm_patched/modules/sd.py index 3caa92da..e197c39c 100644 --- a/ldm_patched/modules/sd.py +++ b/ldm_patched/modules/sd.py @@ -1,9 +1,6 @@ import torch -import contextlib -import math from ldm_patched.modules import model_management -from ldm_patched.ldm.util import instantiate_from_config from ldm_patched.ldm.models.autoencoder import AutoencoderKL, AutoencodingEngine import yaml @@ -157,6 +154,8 @@ class VAE: self.memory_used_encode = lambda shape, dtype: (1767 * shape[2] * shape[3]) * model_management.dtype_size(dtype) #These are for AutoencoderKL and need tweaking (should be lower) self.memory_used_decode = lambda shape, dtype: (2178 * shape[2] * shape[3] * 64) * model_management.dtype_size(dtype) + self.downscale_ratio = 8 + self.latent_channels = 4 if config is None: if "decoder.mid.block_1.mix_factor" in sd: @@ -172,6 +171,11 @@ class VAE: else: #default SD1.x/SD2.x VAE parameters ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0} + + if 'encoder.down.2.downsample.conv.weight' not in sd: #Stable diffusion x4 upscaler VAE + ddconfig['ch_mult'] = [1, 2, 4] + self.downscale_ratio = 4 + self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=4) else: self.first_stage_model = AutoencoderKL(**(config['params'])) @@ -204,9 +208,9 @@ class VAE: decode_fn = lambda a: (self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)) + 1.0).float() output = torch.clamp(( - 
(ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar) + - ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar) + - ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar)) + (ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar) + + ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar) + + ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar)) / 3.0) / 2.0, min=0.0, max=1.0) return output @@ -217,9 +221,9 @@ class VAE: pbar = ldm_patched.modules.utils.ProgressBar(steps) encode_fn = lambda a: self.first_stage_model.encode((2. * a - 1.).to(self.vae_dtype).to(self.device)).float() - samples = ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar) - samples += ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar) - samples += ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar) + samples = ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar) + samples += ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar) + samples += ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar) samples /= 3.0 return samples @@ -231,7 +235,7 @@ class VAE: batch_number = int(free_memory / memory_used) batch_number = max(1, batch_number) - pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device=self.output_device) + pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * self.downscale_ratio), round(samples_in.shape[3] * self.downscale_ratio)), device=self.output_device) for x in range(0, samples_in.shape[0], batch_number): samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device) pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples).to(self.output_device).float() + 1.0) / 2.0, min=0.0, max=1.0) @@ -255,7 +259,7 @@ class VAE: free_memory = model_management.get_free_memory(self.device) batch_number = int(free_memory / memory_used) batch_number = max(1, batch_number) - samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), 
round(pixel_samples.shape[3] // 8)), device=self.output_device) + samples = torch.empty((pixel_samples.shape[0], self.latent_channels, round(pixel_samples.shape[2] // self.downscale_ratio), round(pixel_samples.shape[3] // self.downscale_ratio)), device=self.output_device) for x in range(0, pixel_samples.shape[0], batch_number): pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.vae_dtype).to(self.device) samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).to(self.output_device).float() @@ -527,7 +531,14 @@ def load_unet(unet_path): raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path)) return model -def save_checkpoint(output_path, model, clip, vae, metadata=None): - model_management.load_models_gpu([model, clip.load_model()]) - sd = model.model.state_dict_for_saving(clip.get_sd(), vae.get_sd()) +def save_checkpoint(output_path, model, clip=None, vae=None, clip_vision=None, metadata=None): + clip_sd = None + load_models = [model] + if clip is not None: + load_models.append(clip.load_model()) + clip_sd = clip.get_sd() + + model_management.load_models_gpu(load_models) + clip_vision_sd = clip_vision.get_sd() if clip_vision is not None else None + sd = model.model.state_dict_for_saving(clip_sd, vae.get_sd(), clip_vision_sd) ldm_patched.modules.utils.save_torch_file(sd, output_path, metadata=metadata) diff --git a/ldm_patched/modules/sd1_clip.py b/ldm_patched/modules/sd1_clip.py index 736d6167..3727fb48 100644 --- a/ldm_patched/modules/sd1_clip.py +++ b/ldm_patched/modules/sd1_clip.py @@ -6,7 +6,6 @@ import torch import traceback import zipfile from . import model_management -import contextlib import ldm_patched.modules.clip_model import json diff --git a/ldm_patched/modules/supported_models.py b/ldm_patched/modules/supported_models.py index 251bf6ac..1d442d4d 100644 --- a/ldm_patched/modules/supported_models.py +++ b/ldm_patched/modules/supported_models.py @@ -278,6 +278,33 @@ class Stable_Zero123(supported_models_base.BASE): def clip_target(self): return None +class SD_X4Upscaler(SD20): + unet_config = { + "context_dim": 1024, + "model_channels": 256, + 'in_channels': 7, + "use_linear_in_transformer": True, + "adm_in_channels": None, + "use_temporal_attention": False, + } -models = [Stable_Zero123, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B, Segmind_Vega] + unet_extra_config = { + "disable_self_attentions": [True, True, True, False], + "num_classes": 1000, + "num_heads": 8, + "num_head_channels": -1, + } + + latent_format = latent_formats.SD_X4 + + sampling_settings = { + "linear_start": 0.0001, + "linear_end": 0.02, + } + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.SD_X4Upscaler(self, device=device) + return out + +models = [Stable_Zero123, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B, Segmind_Vega, SD_X4Upscaler] models += [SVD_img2vid] diff --git a/ldm_patched/modules/supported_models_base.py b/ldm_patched/modules/supported_models_base.py index 49087d23..5baf4bca 100644 --- a/ldm_patched/modules/supported_models_base.py +++ b/ldm_patched/modules/supported_models_base.py @@ -65,6 +65,12 @@ class BASE: replace_prefix = {"": "cond_stage_model."} return utils.state_dict_prefix_replace(state_dict, replace_prefix) + def process_clip_vision_state_dict_for_saving(self, state_dict): + replace_prefix = {} + if self.clip_vision_prefix is not None: + replace_prefix[""] = self.clip_vision_prefix + return utils.state_dict_prefix_replace(state_dict, replace_prefix) + def 
process_unet_state_dict_for_saving(self, state_dict): replace_prefix = {"": "model.diffusion_model."} return utils.state_dict_prefix_replace(state_dict, replace_prefix) diff --git a/ldm_patched/utils/path_utils.py b/ldm_patched/utils/path_utils.py index d21b6485..6cae149b 100644 --- a/ldm_patched/utils/path_utils.py +++ b/ldm_patched/utils/path_utils.py @@ -29,11 +29,14 @@ folder_names_and_paths["custom_nodes"] = ([os.path.join(base_path, "custom_nodes folder_names_and_paths["hypernetworks"] = ([os.path.join(models_dir, "hypernetworks")], supported_pt_extensions) +folder_names_and_paths["photomaker"] = ([os.path.join(models_dir, "photomaker")], supported_pt_extensions) + folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers")], {""}) output_directory = os.path.join(os.getcwd(), "output") temp_directory = os.path.join(os.getcwd(), "temp") input_directory = os.path.join(os.getcwd(), "input") +user_directory = os.path.join(os.getcwd(), "user") filename_list_cache = {} @@ -137,15 +140,27 @@ def recursive_search(directory, excluded_dir_names=None): excluded_dir_names = [] result = [] - dirs = {directory: os.path.getmtime(directory)} + dirs = {} + + # Attempt to add the initial directory to dirs with error handling + try: + dirs[directory] = os.path.getmtime(directory) + except FileNotFoundError: + print(f"Warning: Unable to access {directory}. Skipping this path.") + for dirpath, subdirs, filenames in os.walk(directory, followlinks=True, topdown=True): subdirs[:] = [d for d in subdirs if d not in excluded_dir_names] for file_name in filenames: relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory) result.append(relative_path) + for d in subdirs: path = os.path.join(dirpath, d) - dirs[path] = os.path.getmtime(path) + try: + dirs[path] = os.path.getmtime(path) + except FileNotFoundError: + print(f"Warning: Unable to access {path}. 
Skipping this path.") + continue return result, dirs def filter_files_extensions(files, extensions): From 80068a0cd729d6148ac533bf844e1247a0baa9b4 Mon Sep 17 00:00:00 2001 From: rayleichenxi <81690791+rayleichenxi@users.noreply.github.com> Date: Sat, 27 Jan 2024 20:59:58 +0800 Subject: [PATCH 004/103] [enhance]: use opencv to rewrite morphological_open(), achieve a 40x speed increase in morphological_open opporation, for inpanting images size smaller than 3k (#2016) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: 雷晨曦 --- modules/inpaint_worker.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/modules/inpaint_worker.py b/modules/inpaint_worker.py index 88ec39d6..43a7ae23 100644 --- a/modules/inpaint_worker.py +++ b/modules/inpaint_worker.py @@ -4,6 +4,7 @@ import numpy as np from PIL import Image, ImageFilter from modules.util import resample_image, set_image_shape_ceil, get_image_shape_ceil from modules.upscaler import perform_upscale +import cv2 inpaint_head_model = None @@ -28,19 +29,25 @@ def box_blur(x, k): return np.array(x) -def max33(x): - x = Image.fromarray(x) - x = x.filter(ImageFilter.MaxFilter(3)) - return np.array(x) +def max_filter_opencv(x, ksize=3): + # Use OpenCV maximum filter + # Make sure the input type is int16 + return cv2.dilate(x, np.ones((ksize, ksize), dtype=np.int16)) def morphological_open(x): - x_int32 = np.zeros_like(x).astype(np.int32) - x_int32[x > 127] = 256 - for _ in range(32): - maxed = max33(x_int32) - 8 - x_int32 = np.maximum(maxed, x_int32) - return x_int32.clip(0, 255).astype(np.uint8) + # Convert array to int16 type via threshold operation + x_int16 = np.zeros_like(x, dtype=np.int16) + x_int16[x > 127] = 256 + + for i in range(32): + # Use int16 type to avoid overflow + maxed = max_filter_opencv(x_int16, ksize=3) - 8 + x_int16 = np.maximum(maxed, x_int16) + + # Clip negative values to 0 and convert back to uint8 type + x_uint8 = np.clip(x_int16, 0, 255).astype(np.uint8) + return x_uint8 def up255(x, t=0): From ff2187efed346a9f1d7b115fe1dc404848193829 Mon Sep 17 00:00:00 2001 From: Hunter-Houts <41592164+Hunter-Houts@users.noreply.github.com> Date: Sat, 27 Jan 2024 07:01:58 -0600 Subject: [PATCH 005/103] Upgrade requirements_met function (#1955) * Upgrade requirements_met function * Removed extra import * Using Requirements instead of Regex --- modules/launch_util.py | 38 +++++++++++++++++--------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/modules/launch_util.py b/modules/launch_util.py index 00fff8ae..8d92fad0 100644 --- a/modules/launch_util.py +++ b/modules/launch_util.py @@ -5,6 +5,11 @@ import subprocess import sys import re import logging +import importlib.metadata +import packaging.version +from packaging.requirements import Requirement + + logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh... @@ -73,35 +78,26 @@ def run_pip(command, desc=None, live=default_command_live): def requirements_met(requirements_file): - """ - Does a simple parse of a requirements.txt file to determine if all rerqirements in it - are already installed. Returns True if so, False if not installed or parsing fails. 
- """ - - import importlib.metadata - import packaging.version - with open(requirements_file, "r", encoding="utf8") as file: for line in file: - if line.strip() == "": + line = line.strip() + if line == "" or line.startswith('#'): continue - m = re.match(re_requirement, line) - if m is None: - return False - - package = m.group(1).strip() - version_required = (m.group(2) or "").strip() - - if version_required == "": - continue + requirement = Requirement(line) + package = requirement.name try: version_installed = importlib.metadata.version(package) - except Exception: - return False + installed_version = packaging.version.parse(version_installed) - if packaging.version.parse(version_required) != packaging.version.parse(version_installed): + # Check if the installed version satisfies the requirement + if installed_version not in requirement.specifier: + print(f"Version mismatch for {package}: Installed version {version_installed} does not meet requirement {requirement}") + return False + except Exception as e: + print(f"Error checking version for {package}: {e}") return False return True + From 69ad1b3c249b54cd72ba30b92d19f07d06eb1ed9 Mon Sep 17 00:00:00 2001 From: lllyasviel Date: Sat, 27 Jan 2024 08:05:19 -0800 Subject: [PATCH 006/103] advanced preset load new model list i new candidates add model fallback allow disable preset download Update anime.json Update anime.json Update config.py Update anime.json Update readme.md Update default.json --- args_manager.py | 6 +++++ launch.py | 59 ++++++++++++++++++++++++++++++----------------- modules/config.py | 30 +++++++++++++++--------- 3 files changed, 63 insertions(+), 32 deletions(-) diff --git a/args_manager.py b/args_manager.py index e5e76753..eeb38e1f 100644 --- a/args_manager.py +++ b/args_manager.py @@ -20,6 +20,12 @@ args_parser.parser.add_argument("--disable-image-log", action='store_true', args_parser.parser.add_argument("--disable-analytics", action='store_true', help="Disables analytics for Gradio", default=False) +args_parser.parser.add_argument("--disable-preset-download", action='store_true', + help="Disables downloading models for presets", default=False) + +args_parser.parser.add_argument("--always-download-new-model", action='store_true', + help="Always download newer models ", default=False) + args_parser.parser.set_defaults( disable_cuda_malloc=True, in_browser=True, diff --git a/launch.py b/launch.py index e98045f6..9dbd3b6a 100644 --- a/launch.py +++ b/launch.py @@ -21,8 +21,7 @@ import fooocus_version from build_launcher import build_launcher from modules.launch_util import is_installed, run, python, run_pip, requirements_met from modules.model_loader import load_file_from_url -from modules.config import path_checkpoints, path_loras, path_vae_approx, path_fooocus_expansion, \ - checkpoint_downloads, path_embeddings, embeddings_downloads, lora_downloads +from modules import config REINSTALL_ALL = False @@ -70,25 +69,6 @@ vae_approx_filenames = [ ] -def download_models(): - for file_name, url in checkpoint_downloads.items(): - load_file_from_url(url=url, model_dir=path_checkpoints, file_name=file_name) - for file_name, url in embeddings_downloads.items(): - load_file_from_url(url=url, model_dir=path_embeddings, file_name=file_name) - for file_name, url in lora_downloads.items(): - load_file_from_url(url=url, model_dir=path_loras, file_name=file_name) - for file_name, url in vae_approx_filenames: - load_file_from_url(url=url, model_dir=path_vae_approx, file_name=file_name) - - load_file_from_url( - 
url='https://huggingface.co/lllyasviel/misc/resolve/main/fooocus_expansion.bin', - model_dir=path_fooocus_expansion, - file_name='pytorch_model.bin' - ) - - return - - def ini_args(): from args_manager import args return args @@ -104,6 +84,43 @@ if args.gpu_device_id is not None: print("Set device to:", args.gpu_device_id) +def download_models(): + for file_name, url in vae_approx_filenames: + load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name) + + load_file_from_url( + url='https://huggingface.co/lllyasviel/misc/resolve/main/fooocus_expansion.bin', + model_dir=config.path_fooocus_expansion, + file_name='pytorch_model.bin' + ) + + if args.disable_preset_download: + print('Skipped model download.') + return + + if not args.always_download_new_model: + if not os.path.exists(os.path.join(config.path_checkpoints, config.default_base_model_name)): + for alternative_model_name in config.previous_default_models: + if os.path.exists(os.path.join(config.path_checkpoints, alternative_model_name)): + print(f'You do not have [{config.default_base_model_name}] but you have [{alternative_model_name}].') + print(f'Fooocus will use [{alternative_model_name}] to avoid downloading new models, ' + f'but you are not using latest models.') + print('Use --always-download-new-model to avoid fallback and always get new models.') + config.checkpoint_downloads = {} + config.default_base_model_name = alternative_model_name + break + + for file_name, url in config.checkpoint_downloads.items(): + load_file_from_url(url=url, model_dir=config.path_checkpoints, file_name=file_name) + for file_name, url in config.embeddings_downloads.items(): + load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name) + for file_name, url in config.lora_downloads.items(): + load_file_from_url(url=url, model_dir=config.path_loras, file_name=file_name) + + return + + download_models() + from webui import * diff --git a/modules/config.py b/modules/config.py index c7af33db..7feae8f0 100644 --- a/modules/config.py +++ b/modules/config.py @@ -79,6 +79,13 @@ def try_load_deprecated_user_path_config(): try_load_deprecated_user_path_config() +try: + with open(os.path.abspath(f'./presets/default.json'), "r", encoding="utf-8") as json_file: + config_dict.update(json.load(json_file)) +except Exception as e: + print(f'Load default preset failed.') + print(e) + preset = args_manager.args.preset if isinstance(preset, str): @@ -153,9 +160,14 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_ default_base_model_name = get_config_item_or_set_default( key='default_model', - default_value='juggernautXL_version6Rundiffusion.safetensors', + default_value='model.safetensors', validator=lambda x: isinstance(x, str) ) +previous_default_models = get_config_item_or_set_default( + key='previous_default_models', + default_value=[], + validator=lambda x: isinstance(x, list) and all(isinstance(k, str) for k in x) +) default_refiner_model_name = get_config_item_or_set_default( key='default_refiner', default_value='None', @@ -163,15 +175,15 @@ default_refiner_model_name = get_config_item_or_set_default( ) default_refiner_switch = get_config_item_or_set_default( key='default_refiner_switch', - default_value=0.5, + default_value=0.8, validator=lambda x: isinstance(x, numbers.Number) and 0 <= x <= 1 ) default_loras = get_config_item_or_set_default( key='default_loras', default_value=[ [ - "sd_xl_offset_example-lora_1.0.safetensors", - 0.1 + "None", + 1.0 ], [ "None", @@ -194,7 +206,7 @@ 
default_loras = get_config_item_or_set_default( ) default_cfg_scale = get_config_item_or_set_default( key='default_cfg_scale', - default_value=4.0, + default_value=7.0, validator=lambda x: isinstance(x, numbers.Number) ) default_sample_sharpness = get_config_item_or_set_default( @@ -255,16 +267,12 @@ default_image_number = get_config_item_or_set_default( ) checkpoint_downloads = get_config_item_or_set_default( key='checkpoint_downloads', - default_value={ - "juggernautXL_version6Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors" - }, + default_value={}, validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()) ) lora_downloads = get_config_item_or_set_default( key='lora_downloads', - default_value={ - "sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors" - }, + default_value={}, validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()) ) embeddings_downloads = get_config_item_or_set_default( From 7b26b292260ac4f1c9acc5a95109e5e3765b9a5c Mon Sep 17 00:00:00 2001 From: lllyasviel Date: Sat, 27 Jan 2024 08:08:21 -0800 Subject: [PATCH 007/103] new model list i Update readme.md --- presets/anime.json | 30 +++++++++++------------------- presets/default.json | 13 ++++++++++--- presets/realistic.json | 7 ++++--- readme.md | 10 +++++----- 4 files changed, 30 insertions(+), 30 deletions(-) diff --git a/presets/anime.json b/presets/anime.json index 32428a71..c8110955 100644 --- a/presets/anime.json +++ b/presets/anime.json @@ -1,11 +1,11 @@ { - "default_model": "bluePencilXL_v050.safetensors", - "default_refiner": "DreamShaper_8_pruned.safetensors", - "default_refiner_switch": 0.667, + "default_model": "animaPencilXL_v100.safetensors", + "default_refiner": "None", + "default_refiner_switch": 0.5, "default_loras": [ [ - "sd_xl_offset_example-lora_1.0.safetensors", - 0.5 + "None", + 1.0 ], [ "None", @@ -30,24 +30,16 @@ "default_scheduler": "karras", "default_performance": "Speed", "default_prompt": "1girl, ", - "default_prompt_negative": "(embedding:unaestheticXLv31:0.8), low quality, watermark", + "default_prompt_negative": "", "default_styles": [ "Fooocus V2", - "Fooocus Masterpiece", - "SAI Anime", - "SAI Digital Art", - "SAI Enhance", - "SAI Fantasy Art" + "Fooocus Negative", + "Fooocus Masterpiece" ], "default_aspect_ratio": "896*1152", "checkpoint_downloads": { - "bluePencilXL_v050.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/bluePencilXL_v050.safetensors", - "DreamShaper_8_pruned.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/DreamShaper_8_pruned.safetensors" + "animaPencilXL_v100.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/animaPencilXL_v100.safetensors" }, - "embeddings_downloads": { - "unaestheticXLv31.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/unaestheticXLv31.safetensors" - }, - "lora_downloads": { - "sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors" - } + "embeddings_downloads": {}, + "lora_downloads": {} } \ No newline at end of file diff --git a/presets/default.json b/presets/default.json index bc014035..7930c92f 100644 --- a/presets/default.json 
+++ b/presets/default.json @@ -1,5 +1,5 @@ { - "default_model": "juggernautXL_version6Rundiffusion.safetensors", + "default_model": "juggernautXL_v8Rundiffusion.safetensors", "default_refiner": "None", "default_refiner_switch": 0.5, "default_loras": [ @@ -38,10 +38,17 @@ ], "default_aspect_ratio": "1152*896", "checkpoint_downloads": { - "juggernautXL_version6Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors" + "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors" }, "embeddings_downloads": {}, "lora_downloads": { "sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors" - } + }, + "previous_default_models": [ + "juggernautXL_version8Rundiffusion.safetensors", + "juggernautXL_version7Rundiffusion.safetensors", + "juggernautXL_v7Rundiffusion.safetensors", + "juggernautXL_version6Rundiffusion.safetensors", + "juggernautXL_v6Rundiffusion.safetensors" + ] } \ No newline at end of file diff --git a/presets/realistic.json b/presets/realistic.json index ed625d45..7799c96a 100644 --- a/presets/realistic.json +++ b/presets/realistic.json @@ -1,5 +1,5 @@ { - "default_model": "realisticStockPhoto_v10.safetensors", + "default_model": "realisticStockPhoto_v20.safetensors", "default_refiner": "", "default_refiner_switch": 0.5, "default_loras": [ @@ -38,10 +38,11 @@ ], "default_aspect_ratio": "896*1152", "checkpoint_downloads": { - "realisticStockPhoto_v10.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/realisticStockPhoto_v10.safetensors" + "realisticStockPhoto_v20.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/realisticStockPhoto_v20.safetensors" }, "embeddings_downloads": {}, "lora_downloads": { "SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors" - } + }, + "previous_default_models": ["realisticStockPhoto_v10.safetensors"] } \ No newline at end of file diff --git a/readme.md b/readme.md index 87c44b83..3536b2af 100644 --- a/readme.md +++ b/readme.md @@ -273,11 +273,11 @@ See the common problems [here](troubleshoot.md). 
Given different goals, the default models and configs of Fooocus are different: -| Task | Windows | Linux args | Main Model | Refiner | Config | -| --- | --- | --- | --- | --- | --- | -| General | run.bat | | [juggernautXL v6_RunDiffusion](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors) | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/modules/path.py) | -| Realistic | run_realistic.bat | --preset realistic | [realistic_stock_photo](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/realisticStockPhoto_v10.safetensors) | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/realistic.json) | -| Anime | run_anime.bat | --preset anime | [bluepencil_v50](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/bluePencilXL_v050.safetensors) | [dreamsharper_v8](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/DreamShaper_8_pruned.safetensors) (SD1.5) | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/anime.json) | +| Task | Windows | Linux args | Main Model | Refiner | Config | +| --- | --- | --- | --- | --- |--------------------------------------------------------------------------------| +| General | run.bat | | juggernautXL_v8Rundiffusion | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/default.json) | +| Realistic | run_realistic.bat | --preset realistic | realisticStockPhoto_v20 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/realistic.json) | +| Anime | run_anime.bat | --preset anime | animaPencilXL_v100 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/anime.json) | Note that the download is **automatic** - you do not need to do anything if the internet connection is okay. However, you can download them manually if you (or move them from somewhere else) have your own preparation. From 79afb3a619c2ec2a1d4f83e6617b269e66803202 Mon Sep 17 00:00:00 2001 From: lllyasviel Date: Sat, 27 Jan 2024 08:31:25 -0800 Subject: [PATCH 008/103] version version Update fooocus_version.py --- fooocus_version.py | 2 +- update_log.md | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/fooocus_version.py b/fooocus_version.py index efe3b925..ff5a8830 100644 --- a/fooocus_version.py +++ b/fooocus_version.py @@ -1 +1 @@ -version = '2.1.862' +version = '2.1.864' diff --git a/update_log.md b/update_log.md index 1e8914d1..e052d24c 100644 --- a/update_log.md +++ b/update_log.md @@ -1,7 +1,11 @@ -**(2023 Dec 21) Hi all, the feature updating of Fooocus will be paused for about two or three weeks because we have some other workloads. See you soon and we will come back in mid or late Jan. However, you may still see updates if other collaborators are fixing bugs or solving problems.** +# 2.1.864 + +* New model list. See also discussions. # 2.1.861 (requested update) +(2023 Dec 21) Hi all, the feature updating of Fooocus will be paused for about two or three weeks because we have some other workloads. See you soon and we will come back in mid or late Jan. However, you may still see updates if other collaborators are fixing bugs or solving problems. + * Show image preview in Style when mouse hover. 
# 2.1.860 (requested update) From 0cb2db9c412582ffd6493654aaebaf76842ad118 Mon Sep 17 00:00:00 2001 From: lllyasviel Date: Sat, 27 Jan 2024 09:00:46 -0800 Subject: [PATCH 009/103] Update readme.md --- readme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/readme.md b/readme.md index 3536b2af..77653816 100644 --- a/readme.md +++ b/readme.md @@ -5,7 +5,7 @@ without any parameter tweaking, without any strange prompt tags. -See also **non-cherry-picked** generalization and diversity tests [here](https://github.com/lllyasviel/Fooocus/discussions/808) and [here](https://github.com/lllyasviel/Fooocus/discussions/679) and [here](https://github.com/lllyasviel/Fooocus/discussions/679#realistic). +See also **non-cherry-picked** generalization and diversity tests [here](https://github.com/lllyasviel/Fooocus/discussions/2067) and [here](https://github.com/lllyasviel/Fooocus/discussions/808) and [here](https://github.com/lllyasviel/Fooocus/discussions/679) and [here](https://github.com/lllyasviel/Fooocus/discussions/679#realistic). In the entire open source community, only Fooocus can achieve this level of **non-cherry-picked** quality. From 31fc99d2bc844bccfe6d9254a7901c9a32e2e738 Mon Sep 17 00:00:00 2001 From: lllyasviel Date: Sat, 27 Jan 2024 09:07:30 -0800 Subject: [PATCH 010/103] fix (#2069) --- presets/anime.json | 3 ++- presets/lcm.json | 13 ++++++++++--- presets/sai.json | 3 ++- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/presets/anime.json b/presets/anime.json index c8110955..8bd2813b 100644 --- a/presets/anime.json +++ b/presets/anime.json @@ -41,5 +41,6 @@ "animaPencilXL_v100.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/animaPencilXL_v100.safetensors" }, "embeddings_downloads": {}, - "lora_downloads": {} + "lora_downloads": {}, + "previous_default_models": [] } \ No newline at end of file diff --git a/presets/lcm.json b/presets/lcm.json index 8822172d..3897f881 100644 --- a/presets/lcm.json +++ b/presets/lcm.json @@ -1,5 +1,5 @@ { - "default_model": "juggernautXL_version6Rundiffusion.safetensors", + "default_model": "juggernautXL_v8Rundiffusion.safetensors", "default_refiner": "None", "default_refiner_switch": 0.5, "default_loras": [ @@ -38,8 +38,15 @@ ], "default_aspect_ratio": "1152*896", "checkpoint_downloads": { - "juggernautXL_version6Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors" + "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors" }, "embeddings_downloads": {}, - "lora_downloads": {} + "lora_downloads": {}, + "previous_default_models": [ + "juggernautXL_version8Rundiffusion.safetensors", + "juggernautXL_version7Rundiffusion.safetensors", + "juggernautXL_v7Rundiffusion.safetensors", + "juggernautXL_version6Rundiffusion.safetensors", + "juggernautXL_v6Rundiffusion.safetensors" + ] } \ No newline at end of file diff --git a/presets/sai.json b/presets/sai.json index ac9c17d1..fecf047b 100644 --- a/presets/sai.json +++ b/presets/sai.json @@ -43,5 +43,6 @@ "embeddings_downloads": {}, "lora_downloads": { "sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors" - } + }, + "previous_default_models": [] } \ No newline at end of file From 1bcbd6501bb3e5db5674a18a8a8074034cda318b Mon Sep 17 00:00:00 2001 From: lllyasviel Date: Sat, 27 
Jan 2024 16:18:26 -0800 Subject: [PATCH 011/103] fix config --- modules/config.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/modules/config.py b/modules/config.py index 7feae8f0..58107806 100644 --- a/modules/config.py +++ b/modules/config.py @@ -16,10 +16,17 @@ config_dict = {} always_save_keys = [] visited_keys = [] +try: + with open(os.path.abspath(f'./presets/default.json'), "r", encoding="utf-8") as json_file: + config_dict.update(json.load(json_file)) +except Exception as e: + print(f'Load default preset failed.') + print(e) + try: if os.path.exists(config_path): with open(config_path, "r", encoding="utf-8") as json_file: - config_dict = json.load(json_file) + config_dict.update(json.load(json_file)) always_save_keys = list(config_dict.keys()) except Exception as e: print(f'Failed to load config file "{config_path}" . The reason is: {str(e)}') @@ -79,13 +86,6 @@ def try_load_deprecated_user_path_config(): try_load_deprecated_user_path_config() -try: - with open(os.path.abspath(f'./presets/default.json'), "r", encoding="utf-8") as json_file: - config_dict.update(json.load(json_file)) -except Exception as e: - print(f'Load default preset failed.') - print(e) - preset = args_manager.args.preset if isinstance(preset, str): From 71eb040afceb539005de7dff8c1c3c729640dc46 Mon Sep 17 00:00:00 2001 From: Justin Dhillon Date: Sat, 10 Feb 2024 08:36:56 -0800 Subject: [PATCH 012/103] Fix broken links (#2217) * https://github.com/rlaphoenix/VSGAN/blob/master/vsgan/archs/esrgan.py * https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py * https://kornia.readthedocs.io/en/latest/ --- ldm_patched/contrib/external_canny.py | 6 +++--- ldm_patched/pfn/architecture/HAT.py | 4 ++-- ldm_patched/pfn/architecture/RRDB.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ldm_patched/contrib/external_canny.py b/ldm_patched/contrib/external_canny.py index 42c22210..7347ba1e 100644 --- a/ldm_patched/contrib/external_canny.py +++ b/ldm_patched/contrib/external_canny.py @@ -78,7 +78,7 @@ def spatial_gradient(input, normalized: bool = True): Return: the derivatives of the input feature map. with shape :math:`(B, C, 2, H, W)`. .. note:: - See a working example `here `__. Examples: >>> input = torch.rand(1, 3, 4, 4) @@ -120,7 +120,7 @@ def rgb_to_grayscale(image, rgb_weights = None): grayscale version of the image with shape :math:`(*,1,H,W)`. .. note:: - See a working example `here `__. Example: @@ -176,7 +176,7 @@ def canny( - the canny edge magnitudes map, shape of :math:`(B,1,H,W)`. - the canny edge detection filtered by thresholds and hysteresis, shape of :math:`(B,1,H,W)`. .. note:: - See a working example `here `__. Example: >>> input = torch.rand(5, 3, 4, 4) diff --git a/ldm_patched/pfn/architecture/HAT.py b/ldm_patched/pfn/architecture/HAT.py index 66947421..7e12ad0f 100644 --- a/ldm_patched/pfn/architecture/HAT.py +++ b/ldm_patched/pfn/architecture/HAT.py @@ -14,7 +14,7 @@ from .timm.weight_init import trunc_normal_ def drop_path(x, drop_prob: float = 0.0, training: bool = False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
- From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py + From: https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py """ if drop_prob == 0.0 or not training: return x @@ -30,7 +30,7 @@ def drop_path(x, drop_prob: float = 0.0, training: bool = False): class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py + From: https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py """ def __init__(self, drop_prob=None): diff --git a/ldm_patched/pfn/architecture/RRDB.py b/ldm_patched/pfn/architecture/RRDB.py index b50db7c2..8d318b90 100644 --- a/ldm_patched/pfn/architecture/RRDB.py +++ b/ldm_patched/pfn/architecture/RRDB.py @@ -13,7 +13,7 @@ import torch.nn.functional as F from . import block as B -# Borrowed from https://github.com/rlaphoenix/VSGAN/blob/master/vsgan/archs/ESRGAN.py +# Borrowed from https://github.com/rlaphoenix/VSGAN/blob/master/vsgan/archs/esrgan.py # Which enhanced stuff that was already here class RRDBNet(nn.Module): def __init__( From fdc4dc1d8793e71a43c89e75e73f3e050d3dd308 Mon Sep 17 00:00:00 2001 From: rsl8 <138326583+rsl8@users.noreply.github.com> Date: Sat, 10 Feb 2024 17:42:30 +0100 Subject: [PATCH 013/103] delay importing of modules.config (#2195) --- launch.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/launch.py b/launch.py index 9dbd3b6a..36fad9e0 100644 --- a/launch.py +++ b/launch.py @@ -21,7 +21,6 @@ import fooocus_version from build_launcher import build_launcher from modules.launch_util import is_installed, run, python, run_pip, requirements_met from modules.model_loader import load_file_from_url -from modules import config REINSTALL_ALL = False @@ -84,6 +83,8 @@ if args.gpu_device_id is not None: print("Set device to:", args.gpu_device_id) +from modules import config + def download_models(): for file_name, url in vae_approx_filenames: load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name) From d1a450c581490d66fa0238c6ac69d9f020054df8 Mon Sep 17 00:00:00 2001 From: V1sionVerse <155375712+V1sionVerse@users.noreply.github.com> Date: Sat, 10 Feb 2024 17:50:41 +0100 Subject: [PATCH 014/103] Fixed mistakes in HTML generation (#2187) Added declaration instead of
malformed markup.
---
 modules/private_logger.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/modules/private_logger.py b/modules/private_logger.py
index 968bd4f5..49f17dca 100644
--- a/modules/private_logger.py
+++ b/modules/private_logger.py
@@ -68,7 +68,7 @@ def log(img, dic):
     """
     )

-    begin_part = f"<html><head><title>Fooocus Log {date_string}</title>{css_styles}{js}</head><body><p>Fooocus Log {date_string} (private)</p>\n<p>All images are clean, without any hidden data/meta, and safe to share with others.</p>\n\n"
+    begin_part = f"<!DOCTYPE html><html><head><title>Fooocus Log {date_string}</title>{css_styles}{js}</head><body><p>Fooocus Log {date_string} (private)</p>\n<p>All images are clean, without any hidden data/meta, and safe to share with others.</p>\n\n"
     end_part = f'\n</body></html>'

     middle_part = log_cache.get(html_name, "")
@@ -83,15 +83,15 @@ def log(img, dic):
         div_name = only_name.replace('.', '_')
         item = f"<div id=\"{div_name}\" class=\"image-container\"><hr><table><tr>\n"
-        item += f"<td><a href=\"{only_name}\"><img src=\"{only_name}\" onerror=\"this.closest('.image-container').style.display='none';\" loading=\"lazy\"><div>{only_name}</div></a></td>"
+        item += f"<td><a href=\"{only_name}\"><img src=\"{only_name}\" onerror=\"this.closest('.image-container').style.display='none';\" loading=\"lazy\"></img></a><div>{only_name}</div></td>"
         item += "<td><table class=\"metadata\">"
         for key, value in dic:
-            value_txt = str(value).replace('\n', ' <br> ')
+            value_txt = str(value).replace('\n', ' </br> ')
             item += f"<tr><td class=\"key\">{key}</td><td class=\"value\">{value_txt}</td></tr>\n"
         item += "</table>"

         js_txt = urllib.parse.quote(json.dumps({k: v for k, v in dic}, indent=0), safe='')
-        item += f"<br><button class=\"metadata-button\" onclick=\"to_clipboard('{js_txt}')\">Copy to Clipboard</button>"
+        item += f"</br><button class=\"metadata-button\" onclick=\"to_clipboard('{js_txt}')\">Copy to Clipboard</button>"

         item += "</td>"
         item += "</tr></table></div>\n\n"
\n\n" From 95f93a1f4bc32bc6d114fe3c314b3f69da207d5a Mon Sep 17 00:00:00 2001 From: rsl8 <138326583+rsl8@users.noreply.github.com> Date: Sat, 10 Feb 2024 17:42:30 +0100 Subject: [PATCH 015/103] delay importing of modules.config (#2195) --- launch.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/launch.py b/launch.py index 9dbd3b6a..36fad9e0 100644 --- a/launch.py +++ b/launch.py @@ -21,7 +21,6 @@ import fooocus_version from build_launcher import build_launcher from modules.launch_util import is_installed, run, python, run_pip, requirements_met from modules.model_loader import load_file_from_url -from modules import config REINSTALL_ALL = False @@ -84,6 +83,8 @@ if args.gpu_device_id is not None: print("Set device to:", args.gpu_device_id) +from modules import config + def download_models(): for file_name, url in vae_approx_filenames: load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name) From ac10e51364054a849251d6f09b8007842a33f054 Mon Sep 17 00:00:00 2001 From: Roman Schmitz Date: Sat, 10 Feb 2024 18:10:54 +0100 Subject: [PATCH 016/103] add auth to --listen and readme (#2127) * Update webui.py * Update readme.md * Update webui.py Only enable AuthN for --listen and --share Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> * docs: rephrase documentation changes for auth --------- Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Co-authored-by: Manuel Schmid --- readme.md | 7 +++++++ webui.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/readme.md b/readme.md index 77653816..7310cff4 100644 --- a/readme.md +++ b/readme.md @@ -281,6 +281,13 @@ Given different goals, the default models and configs of Fooocus are different: Note that the download is **automatic** - you do not need to do anything if the internet connection is okay. However, you can download them manually if you (or move them from somewhere else) have your own preparation. +## UI Access and Authentication +In addition to running on localhost, Fooocus can also expose its UI in two ways: +* Local UI listener: use `--listen` (specify port e.g. with `--port 8888`). +* API access: use `--share` (registers an endpoint at `.gradio.live`). + +In both ways the access is unauthenticated by default. You can add basic authentication by creating a file called `auth.json` in the main directory, which contains a list of JSON objects with the keys `user` and `pass` (see example in [auth-example.json](./auth-example.json)). 
+ ## List of "Hidden" Tricks diff --git a/webui.py b/webui.py index fadd852a..a3de1498 100644 --- a/webui.py +++ b/webui.py @@ -618,6 +618,6 @@ shared.gradio_root.launch( server_name=args_manager.args.listen, server_port=args_manager.args.port, share=args_manager.args.share, - auth=check_auth if args_manager.args.share and auth_enabled else None, + auth=check_auth if (args_manager.args.share or args_manager.args.listen) and auth_enabled else None, blocked_paths=[constants.AUTH_FILENAME] ) From b7715b0a0cf3f2b4bfceaf5f4b7aad0df928a28a Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sat, 10 Feb 2024 18:33:28 +0100 Subject: [PATCH 017/103] fix: prevents outdated history log link after midnight (#1979) * feat: update history link date after each generation prevents outdated date in link after midnight * delay importing of modules.config (#2195) * fix: disable queue for initial queue loading --------- Co-authored-by: rsl8 <138326583+rsl8@users.noreply.github.com> --- webui.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/webui.py b/webui.py index a3de1498..b9b620d2 100644 --- a/webui.py +++ b/webui.py @@ -255,8 +255,14 @@ with shared.gradio_root: seed_random.change(random_checked, inputs=[seed_random], outputs=[image_seed], queue=False, show_progress=False) - if not args_manager.args.disable_image_log: - gr.HTML(f'\U0001F4DA History Log') + def update_history_link(): + if args_manager.args.disable_image_log: + return gr.update(value='') + + return gr.update(value=f'\U0001F4DA History Log') + + history_link = gr.HTML() + shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False) with gr.Tab(label='Style'): style_sorter.try_load_sorted_styles( @@ -586,6 +592,7 @@ with shared.gradio_root: .then(fn=generate_clicked, inputs=ctrls, outputs=[progress_html, progress_window, progress_gallery, gallery]) \ .then(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), gr.update(visible=False, interactive=False), False), outputs=[generate_button, stop_button, skip_button, state_is_generating]) \ + .then(fn=update_history_link, outputs=history_link) \ .then(fn=lambda: None, _js='playNotification').then(fn=lambda: None, _js='refresh_grid_delayed') for notification_file in ['notification.ogg', 'notification.mp3']: From e4929a9ed79f8d461b387f8b013b23ad23f3cb90 Mon Sep 17 00:00:00 2001 From: rsl8 <138326583+rsl8@users.noreply.github.com> Date: Sat, 10 Feb 2024 18:42:18 +0100 Subject: [PATCH 018/103] fix: do not overwrite $GRADIO_SERVER_PORT if it is already set (#1921) --- launch.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/launch.py b/launch.py index 36fad9e0..db174f54 100644 --- a/launch.py +++ b/launch.py @@ -10,7 +10,8 @@ os.chdir(root) os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" os.environ["PYTORCH_MPS_HIGH_WATERMARK_RATIO"] = "0.0" -os.environ["GRADIO_SERVER_PORT"] = "7865" +if "GRADIO_SERVER_PORT" not in os.environ: + os.environ["GRADIO_SERVER_PORT"] = "7865" ssl._create_default_https_context = ssl._create_unverified_context From 231956065ff88da523c3472c7228a0072cffed70 Mon Sep 17 00:00:00 2001 From: "Dr. 
Christoph Mittendorf" <34183942+Cassini-chris@users.noreply.github.com> Date: Sat, 10 Feb 2024 18:51:03 +0100 Subject: [PATCH 019/103] Removing unnecessary comments / old code (#1905) --- javascript/contextMenus.js | 4 ---- 1 file changed, 4 deletions(-) diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js index 2f32af1b..7494674d 100644 --- a/javascript/contextMenus.js +++ b/javascript/contextMenus.js @@ -154,12 +154,8 @@ let cancelGenerateForever = function() { let generateOnRepeatForButtons = function() { generateOnRepeat('#generate_button', '#stop_button'); }; - appendContextMenuOption('#generate_button', 'Generate forever', generateOnRepeatForButtons); -// appendContextMenuOption('#stop_button', 'Generate forever', generateOnRepeatForButtons); -// appendContextMenuOption('#stop_button', 'Cancel generate forever', cancelGenerateForever); -// appendContextMenuOption('#generate_button', 'Cancel generate forever', cancelGenerateForever); })(); //End example Context Menu Items From 98ba1d5d475fa04bc37c88a09e7eb8f47f2cc68a Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sat, 10 Feb 2024 19:03:26 +0100 Subject: [PATCH 020/103] fix: correctly sort files, display deepest dir level first (#1784) --- modules/util.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/util.py b/modules/util.py index 052b746b..de9bd5b9 100644 --- a/modules/util.py +++ b/modules/util.py @@ -164,14 +164,14 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None): filenames = [] - for root, dirs, files in os.walk(folder_path): + for root, dirs, files in os.walk(folder_path, topdown=False): relative_path = os.path.relpath(root, folder_path) if relative_path == ".": relative_path = "" - for filename in files: + for filename in sorted(files): _, file_extension = os.path.splitext(filename) if (exensions == None or file_extension.lower() in exensions) and (name_filter == None or name_filter in _): path = os.path.join(relative_path, filename) filenames.append(path) - return sorted(filenames, key=lambda x: -1 if os.sep in x else 1) + return filenames From c32b9bdc44e8955947ba66fce4e9dfd2e1e6f259 Mon Sep 17 00:00:00 2001 From: Evgenii Date: Sat, 10 Feb 2024 21:15:10 +0300 Subject: [PATCH 021/103] fix: replace regexp to support unicode chars (#1424) --- modules/launch_util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/launch_util.py b/modules/launch_util.py index 8d92fad0..b483d515 100644 --- a/modules/launch_util.py +++ b/modules/launch_util.py @@ -15,7 +15,7 @@ from packaging.requirements import Requirement logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh... 
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) -re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*") +re_requirement = re.compile(r"\s*([-\w]+)\s*(?:==\s*([-+.\w]+))?\s*") python = sys.executable default_command_live = (os.environ.get('LAUNCH_LIVE_OUTPUT') == "1") From b9d7e77b0df97e54f7cb9670d6257ffc09b65ada Mon Sep 17 00:00:00 2001 From: Praveen Kumar Sridhar <69740366+PraveenKumarSridhar@users.noreply.github.com> Date: Sat, 10 Feb 2024 10:28:10 -0800 Subject: [PATCH 022/103] replaced the custom lcm function with math.lcm (#1122) Co-authored-by: Manuel Schmid --- ldm_patched/modules/conds.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ldm_patched/modules/conds.py b/ldm_patched/modules/conds.py index ed03bd64..0ee184bc 100644 --- a/ldm_patched/modules/conds.py +++ b/ldm_patched/modules/conds.py @@ -3,8 +3,6 @@ import math import ldm_patched.modules.utils -def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9) - return abs(a*b) // math.gcd(a, b) class CONDRegular: def __init__(self, cond): @@ -41,7 +39,7 @@ class CONDCrossAttn(CONDRegular): if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen return False - mult_min = lcm(s1[1], s2[1]) + mult_min = math.lcm(s1[1], s2[1]) diff = mult_min // min(s1[1], s2[1]) if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much return False @@ -52,7 +50,7 @@ class CONDCrossAttn(CONDRegular): crossattn_max_len = self.cond.shape[1] for x in others: c = x.cond - crossattn_max_len = lcm(crossattn_max_len, c.shape[1]) + crossattn_max_len = math.lcm(crossattn_max_len, c.shape[1]) conds.append(c) out = [] From eb3f4d745c5d8736d5f22cacfabd9856909ab00f Mon Sep 17 00:00:00 2001 From: hisk2323 <114307999+hisk2323@users.noreply.github.com> Date: Sat, 10 Feb 2024 14:39:36 -0600 Subject: [PATCH 023/103] feat: add suffix ordinals (#845) * add suffix ordinals with lambda * delay importing of modules.config (#2195) * refactor: use easier to read version to find matching ordinal suffix --------- Co-authored-by: rsl8 <138326583+rsl8@users.noreply.github.com> Co-authored-by: Manuel Schmid Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> --- modules/async_worker.py | 5 ++--- modules/util.py | 4 ++++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index b2af6712..67c2e707 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -40,7 +40,7 @@ def worker(): from modules.private_logger import log from extras.expansion import safe_str from modules.util import remove_empty_str, HWC3, resize_image, \ - get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate + get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, ordinal_suffix from modules.upscaler import perform_upscale try: @@ -732,8 +732,7 @@ def worker(): done_steps = current_task_id * steps + step async_task.yields.append(['preview', ( int(15.0 + 85.0 * float(done_steps) / float(all_steps)), - f'Step {step}/{total_steps} in the {current_task_id + 1}-th Sampling', - y)]) + f'Step {step}/{total_steps} in the {current_task_id + 1}{ordinal_suffix(current_task_id + 1)} Sampling', y)]) for current_task_id, task in enumerate(tasks): execution_start_time = time.perf_counter() diff --git a/modules/util.py b/modules/util.py index 
de9bd5b9..c309480a 100644 --- a/modules/util.py +++ b/modules/util.py @@ -175,3 +175,7 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None): filenames.append(path) return filenames + + +def ordinal_suffix(number: int) -> str: + return 'th' if 10 <= number % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(number % 10, 'th') From 2037de3fcb683434355cee4b5504046dfd9dc483 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Sat, 10 Feb 2024 21:54:50 +0100 Subject: [PATCH 024/103] chore: fix typos and adjust wording (#1521, #1644, #1691, #1772) --- .github/ISSUE_TEMPLATE/bug_report.md | 4 ++-- ldm_patched/pfn/architecture/face/codeformer.py | 2 +- readme.md | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 331426a3..624cfe3e 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -9,10 +9,10 @@ assignees: '' **Read Troubleshoot** -[x] I admit that I have read the [Troubleshoot](https://github.com/lllyasviel/Fooocus/blob/main/troubleshoot.md) before making this issue. +[x] I confirm that I have read the [Troubleshoot](https://github.com/lllyasviel/Fooocus/blob/main/troubleshoot.md) guide before making this issue. **Describe the problem** A clear and concise description of what the bug is. **Full Console Log** -Paste **full** console log here. You will make our job easier if you give a **full** log. +Paste the **full** console log here. You will make our job easier if you give a **full** log. diff --git a/ldm_patched/pfn/architecture/face/codeformer.py b/ldm_patched/pfn/architecture/face/codeformer.py index 06614007..a0e2e985 100644 --- a/ldm_patched/pfn/architecture/face/codeformer.py +++ b/ldm_patched/pfn/architecture/face/codeformer.py @@ -2,7 +2,7 @@ Modified from https://github.com/sczhou/CodeFormer VQGAN code, adapted from the original created by the Unleashing Transformers authors: https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py -This verison of the arch specifically was gathered from an old version of GFPGAN. If this is a problem, please contact me. +This version of the arch specifically was gathered from an old version of GFPGAN. If this is a problem, please contact me. """ import math from typing import Optional diff --git a/readme.md b/readme.md index 7310cff4..fa7e829c 100644 --- a/readme.md +++ b/readme.md @@ -202,7 +202,7 @@ AMD is not intensively tested, however. The AMD support is in beta. Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition. -### Windows(AMD GPUs) +### Windows (AMD GPUs) Note that the [minimal requirement](#minimal-requirement) for different platforms is different. @@ -295,7 +295,7 @@ The below things are already inside the software, and **users do not need to do 1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processsing and "raw" mode, or the LeonardoAI's Prompt Magic). 2. Native refiner swap inside one single k-sampler. The advantage is that the refiner model can now reuse the base model's momentum (or ODE's history parameters) collected from k-sampling to achieve more coherent sampling. 
In Automatic1111's high-res fix and ComfyUI's node system, the base model and refiner use two independent k-samplers, which means the momentum is largely wasted, and the sampling continuity is broken. Fooocus uses its own advanced k-diffusion sampling that ensures seamless, native, and continuous swap in a refiner setup. (Update Aug 13: Actually, I discussed this with Automatic1111 several days ago, and it seems that the “native refiner swap inside one single k-sampler” is [merged]( https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371) into the dev branch of webui. Great!)
-3. Negative ADM guidance. Because the highest resolution level of XL Base does not have cross attentions, the positive and negative signals for XL's highest resolution level cannot receive enough contrasts during the CFG sampling, causing the results to look a bit plastic or overly smooth in certain cases. Fortunately, since the XL's highest resolution level is still conditioned on image aspect ratios (ADM), we can modify the adm on the positive/negative side to compensate for the lack of CFG contrast in the highest resolution level. (Update Aug 16, the iOS App [Drawing Things](https://apps.apple.com/us/app/draw-things-ai-generation/id6444050820) will support Negative ADM Guidance. Great!) (See the sketch after this list.)
+3. Negative ADM guidance. Because the highest resolution level of XL Base does not have cross attentions, the positive and negative signals for XL's highest resolution level cannot receive enough contrasts during the CFG sampling, causing the results to look a bit plastic or overly smooth in certain cases. Fortunately, since the XL's highest resolution level is still conditioned on image aspect ratios (ADM), we can modify the adm on the positive/negative side to compensate for the lack of CFG contrast in the highest resolution level. (Update Aug 16, the iOS App [Draw Things](https://apps.apple.com/us/app/draw-things-ai-generation/id6444050820) will support Negative ADM Guidance. Great!) (See the sketch after this list.)
 4. We implemented a carefully tuned variation of Section 5.1 of ["Improving Sample Quality of Diffusion Models Using Self-Attention Guidance"](https://arxiv.org/pdf/2210.00939.pdf). The weight is set to very low, but this is Fooocus's final guarantee to make sure that the XL will never yield an overly smooth or plastic appearance (examples [here](https://github.com/lllyasviel/Fooocus/discussions/117#sharpness)). This can almost eliminate all cases for which XL still occasionally produces overly smooth results, even with negative ADM guidance. (Update 2023 Aug 18, the Gaussian kernel of SAG is changed to an anisotropic kernel for better structure preservation and fewer artifacts.)
 5. We modified the style templates a bit and added the "cinematic-default".
 6. We tested the "sd_xl_offset_example-lora_1.0.safetensors" and it seems that when the lora weight is below 0.5, the results are always better than XL without lora.
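Item 3 above describes negative ADM guidance in prose; the following minimal sketch illustrates the idea in Python. This is not the actual Fooocus implementation: the helper names are invented for this example, and only the default values (positive scale 1.5, negative scale 0.8, `adm_scaler_end` 0.3) mirror the settings that appear in `modules/patch.py` later in this patch series.

```python
# Simplified sketch of negative ADM guidance (illustrative only, names invented).
# SDXL conditions its highest resolution level on image sizes (ADM); scaling
# that size signal differently for positive and negative prompts restores the
# contrast that CFG alone cannot provide at that level.

def scale_adm_size(width, height, prompt_type,
                   positive_scale=1.5, negative_scale=0.8):
    """Scale the (width, height) conditioning depending on prompt polarity."""
    if prompt_type == "positive":
        scale = positive_scale
    elif prompt_type == "negative":
        scale = negative_scale
    else:
        scale = 1.0
    return width * scale, height * scale


def timed_adm(adm_scaled, adm_plain, timestep, adm_scaler_end=0.3):
    """Use the scaled ADM only at the start of sampling.

    SDXL timesteps run from 999 (pure noise) down to 0, so the scaled
    conditioning is kept for roughly the first `adm_scaler_end` fraction
    of the schedule and the plain conditioning is used afterwards.
    """
    if timestep > 999.0 * (1.0 - adm_scaler_end):
        return adm_scaled
    return adm_plain


pos = scale_adm_size(1024, 1024, "positive")   # (1536.0, 1536.0)
neg = scale_adm_size(1024, 1024, "negative")   # (819.2, 819.2)
```

Setting `adm_scaler_end` to 0.0 means the scaled conditioning is never selected, which is consistent with how the LCM path in patch 031 below neutralizes ADM guidance (`adm_scaler_positive = 1.0`, `adm_scaler_negative = 1.0`, `adm_scaler_end = 0.0`).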
From ee3ce9556689264cffa0151d891c12525a20cf24 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Sat, 10 Feb 2024 21:59:13 +0100
Subject: [PATCH 025/103] docs: update version

---
 fooocus_version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fooocus_version.py b/fooocus_version.py
index ff5a8830..91c2ddda 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.1.864'
+version = '2.1.865'

From 074b655dff89bfb69bcbe9a08cc947f2a0e33568 Mon Sep 17 00:00:00 2001
From: eddyizm
Date: Sun, 11 Feb 2024 04:03:10 -0800
Subject: [PATCH 026/103] fix: implement output path argument (#2074)

* added function to check output path arg and override it; otherwise use temp or fall back to config

* added function to check output path arg and override it; otherwise use temp or fall back to config #2065

* Revert to 1bcbd650

* moved path output arg handling inside config startup

* Revert "added function to check output path arg and override it; otherwise use temp or fall back to config"

This reverts commit fecb97b59ccd3bde3a5fc2d2302245611eb85d2c.

* Updated tag to uppercase

* updated docstring to standard double quotes.

Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>

* removed extra check on image log flag per feedback

* feat: update config_dict value when overriding path_outputs, change message

---------

Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Co-authored-by: Manuel Schmid
---
 modules/config.py | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/modules/config.py b/modules/config.py
index 58107806..1f4e82eb 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -102,6 +102,18 @@ if isinstance(preset, str):
         print(e)
 
 
+def get_path_output() -> str:
+    """
+    Checking output path argument and overriding default path.
+    """
+    global config_dict
+    path_output = get_dir_or_set_default('path_outputs', '../outputs/')
+    if args_manager.args.output_path:
+        print(f'[CONFIG] Overriding config value path_outputs with {args_manager.args.output_path}')
+        config_dict['path_outputs'] = path_output = args_manager.args.output_path
+    return path_output
+
+
 def get_dir_or_set_default(key, default_value):
     global config_dict, visited_keys, always_save_keys
 
@@ -132,7 +144,7 @@ path_inpaint = get_dir_or_set_default('path_inpaint', '../models/inpaint/')
 path_controlnet = get_dir_or_set_default('path_controlnet', '../models/controlnet/')
 path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vision/')
 path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion')
-path_outputs = get_dir_or_set_default('path_outputs', '../outputs/')
+path_outputs = get_path_output()
 
 
 def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False):

From f4a8bf24cf33c8315101b67098298118eacbe572 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Sun, 11 Feb 2024 15:12:41 +0100
Subject: [PATCH 027/103] fix: correctly calculate refiner switch when
 overwrite_switch is > 0 (#2165)

When custom steps are used, the refiner switch timing was calculated incorrectly.
It is now computed as "steps x timing" only after any custom step count has been
applied, as the sketch below illustrates.
By @xhoxye
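A minimal sketch of the corrected ordering; the numbers are hypothetical, while `steps`, `refiner_switch`, and the overwrite values mirror the names in the diff below.

```python
# Illustrative only: the switch point must be computed after the step override.
steps = 30              # step count from the selected performance preset
refiner_switch = 0.667  # refiner switch timing as a fraction of total steps
overwrite_step = 8      # user override (e.g. for debugging); <= 0 disables it

if overwrite_step > 0:
    steps = overwrite_step

# Old order: switch = int(round(30 * 0.667)) == 20, beyond the real 8 steps.
# New order: the switch point tracks the overridden step count.
switch = int(round(steps * refiner_switch))  # int(round(8 * 0.667)) == 5
assert 0 <= switch <= steps
```

An explicit `overwrite_switch > 0` still takes precedence afterwards, exactly as in the diff.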
---
 modules/async_worker.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index 67c2e707..40abb7fa 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -335,11 +335,11 @@ def worker():
             ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_path)
             ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_face_path)
 
-        switch = int(round(steps * refiner_switch))
-
         if advanced_parameters.overwrite_step > 0:
             steps = advanced_parameters.overwrite_step
 
+        switch = int(round(steps * refiner_switch))
+
         if advanced_parameters.overwrite_switch > 0:
             switch = advanced_parameters.overwrite_switch
 

From a78f66ffb5bf7b684c6a529314f74acc33d53775 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 12 Feb 2024 21:34:07 +0100
Subject: [PATCH 028/103] fix: sort with casefold, case insensitive

https://docs.python.org/3/library/stdtypes.html#str.casefold
---
 modules/util.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/util.py b/modules/util.py
index c309480a..9d4d0996 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -168,7 +168,7 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None):
         relative_path = os.path.relpath(root, folder_path)
         if relative_path == ".":
             relative_path = ""
-        for filename in sorted(files):
+        for filename in sorted(files, key=lambda s: s.casefold()):
             _, file_extension = os.path.splitext(filename)
             if (exensions == None or file_extension.lower() in exensions) and (name_filter == None or name_filter in _):
                 path = os.path.join(relative_path, filename)

From f8ca04a4061a0dabb420c5c271cfd115f88169cd Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 19 Feb 2024 15:22:10 +0100
Subject: [PATCH 029/103] feat: add early return for prompt expansion when no
 new tokens should be added

closes https://github.com/lllyasviel/Fooocus/issues/2278, also removes comma
at the end added before tokenizer
---
 extras/expansion.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/extras/expansion.py b/extras/expansion.py
index c1b59b8a..34c1ee8d 100644
--- a/extras/expansion.py
+++ b/extras/expansion.py
@@ -112,6 +112,9 @@ class FooocusExpansion:
         max_token_length = 75 * int(math.ceil(float(current_token_length) / 75.0))
         max_new_tokens = max_token_length - current_token_length
 
+        if max_new_tokens == 0:
+            return prompt[:-1]
+
         # https://huggingface.co/blog/introducing-csearch
         # https://huggingface.co/docs/transformers/generation_strategies
         features = self.model.generate(**tokenized_kwargs,

From 187f4a76c66ebd4281f5313533af13b6b47a5bf7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Charlie=20=E2=9A=A1=EF=B8=8F?=
Date: Tue, 20 Feb 2024 21:51:01 -0500
Subject: [PATCH 030/103] Remove mac generated invisible files

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index de2f5778..85914986 100644
--- a/.gitignore
+++ b/.gitignore
@@ -51,3 +51,4 @@ user_path_config-deprecated.txt
 /package-lock.json
 /.coverage*
 /auth.json
+.DS_Store

From 5b7ddf8b22d3c682de612218fc31245b70f492d8 Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Sat, 24 Feb 2024 18:59:57 +0100
Subject: [PATCH 031/103] feat: advanced params refactoring + prevent users
 from skipping/stopping other users' tasks in queue (#981)

* only make stop_button and skip_button interactive when rendering process starts

fix inconsistency in behaviour of stop_button and skip_button as it was possible to skip or
stop other users processes while still being in queue * use AsyncTask for last_stop handling instead of shared * Revert "only make stop_button and skip_button interactive when rendering process starts" This reverts commit d3f9156854b3d6b4c3d5d736f3b0454743203076. * introduce state for task skipping/stopping * fix return parameters of stop_clicked * code cleanup, do not disable skip/stop on stop_clicked * reset last_stop when skipping for further processing * fix: replace fcbh with ldm_patched * fix: use currentTask instead of ctrls after merging upstream * feat: extract attribute disable_preview * feat: extract attribute adm_scaler_positive * feat: extract attribute adm_scaler_negative * feat: extract attribute adm_scaler_end * feat: extract attribute adaptive_cfg * feat: extract attribute sampler_name * feat: extract attribute scheduler_name * feat: extract attribute generate_image_grid * feat: extract attribute overwrite_step * feat: extract attribute overwrite_switch * feat: extract attribute overwrite_width * feat: extract attribute overwrite_height * feat: extract attribute overwrite_vary_strength * feat: extract attribute overwrite_upscale_strength * feat: extract attribute mixing_image_prompt_and_vary_upscale * feat: extract attribute mixing_image_prompt_and_inpaint * feat: extract attribute debugging_cn_preprocessor * feat: extract attribute skipping_cn_preprocessor * feat: extract attribute canny_low_threshold * feat: extract attribute canny_high_threshold * feat: extract attribute refiner_swap_method * feat: extract freeu_ctrls attributes freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2 * feat: extract inpaint_ctrls attributes debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate * wip: add TODOs * chore: cleanup code * feat: extract attribute controlnet_softness * feat: extract remaining attributes, do not use globals in patch * fix: resolve circular import, patch_all now in async_worker * chore: cleanup pid code --- extras/preprocessors.py | 17 ++- modules/advanced_parameters.py | 33 ------ modules/async_worker.py | 203 ++++++++++++++++++++------------- modules/core.py | 10 +- modules/default_pipeline.py | 17 ++- modules/patch.py | 70 ++++++------ shared.py | 3 +- webui.py | 61 +++++----- 8 files changed, 218 insertions(+), 196 deletions(-) delete mode 100644 modules/advanced_parameters.py diff --git a/extras/preprocessors.py b/extras/preprocessors.py index 798fe15d..0aa83109 100644 --- a/extras/preprocessors.py +++ b/extras/preprocessors.py @@ -1,27 +1,26 @@ import cv2 import numpy as np -import modules.advanced_parameters as advanced_parameters -def centered_canny(x: np.ndarray): +def centered_canny(x: np.ndarray, canny_low_threshold, canny_high_threshold): assert isinstance(x, np.ndarray) assert x.ndim == 2 and x.dtype == np.uint8 - y = cv2.Canny(x, int(advanced_parameters.canny_low_threshold), int(advanced_parameters.canny_high_threshold)) + y = cv2.Canny(x, int(canny_low_threshold), int(canny_high_threshold)) y = y.astype(np.float32) / 255.0 return y -def centered_canny_color(x: np.ndarray): +def centered_canny_color(x: np.ndarray, canny_low_threshold, canny_high_threshold): assert isinstance(x, np.ndarray) assert x.ndim == 3 and x.shape[2] == 3 - result = [centered_canny(x[..., i]) for i in range(3)] + result = [centered_canny(x[..., i], canny_low_threshold, canny_high_threshold) for i in range(3)] result = np.stack(result, 
axis=2) return result -def pyramid_canny_color(x: np.ndarray): +def pyramid_canny_color(x: np.ndarray, canny_low_threshold, canny_high_threshold): assert isinstance(x, np.ndarray) assert x.ndim == 3 and x.shape[2] == 3 @@ -31,7 +30,7 @@ def pyramid_canny_color(x: np.ndarray): for k in [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: Hs, Ws = int(H * k), int(W * k) small = cv2.resize(x, (Ws, Hs), interpolation=cv2.INTER_AREA) - edge = centered_canny_color(small) + edge = centered_canny_color(small, canny_low_threshold, canny_high_threshold) if acc_edge is None: acc_edge = edge else: @@ -54,11 +53,11 @@ def norm255(x, low=4, high=96): return x * 255.0 -def canny_pyramid(x): +def canny_pyramid(x, canny_low_threshold, canny_high_threshold): # For some reasons, SAI's Control-lora Canny seems to be trained on canny maps with non-standard resolutions. # Then we use pyramid to use all resolutions to avoid missing any structure in specific resolutions. - color_canny = pyramid_canny_color(x) + color_canny = pyramid_canny_color(x, canny_low_threshold, canny_high_threshold) result = np.sum(color_canny, axis=2) return norm255(result, low=1, high=99).clip(0, 255).astype(np.uint8) diff --git a/modules/advanced_parameters.py b/modules/advanced_parameters.py deleted file mode 100644 index 0caa3eec..00000000 --- a/modules/advanced_parameters.py +++ /dev/null @@ -1,33 +0,0 @@ -disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \ - scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \ - overwrite_vary_strength, overwrite_upscale_strength, \ - mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \ - debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \ - refiner_swap_method, \ - freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \ - debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, \ - inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate = [None] * 35 - - -def set_all_advanced_parameters(*args): - global disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \ - scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \ - overwrite_vary_strength, overwrite_upscale_strength, \ - mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \ - debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \ - refiner_swap_method, \ - freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \ - debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, \ - inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate - - disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \ - scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \ - overwrite_vary_strength, overwrite_upscale_strength, \ - mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \ - debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \ - refiner_swap_method, \ - freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \ - debugging_inpaint_preprocessor, 
inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, \ - inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate = args - - return diff --git a/modules/async_worker.py b/modules/async_worker.py index 40abb7fa..d0ce4ba9 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -1,4 +1,8 @@ import threading +import os +from modules.patch import PatchSettings, patch_settings, patch_all + +patch_all() class AsyncTask: @@ -6,6 +10,8 @@ class AsyncTask: self.args = args self.yields = [] self.results = [] + self.last_stop = False + self.processing = False async_tasks = [] @@ -31,7 +37,6 @@ def worker(): import extras.preprocessors as preprocessors import modules.inpaint_worker as inpaint_worker import modules.constants as constants - import modules.advanced_parameters as advanced_parameters import extras.ip_adapter as ip_adapter import extras.face_crop import fooocus_version @@ -43,6 +48,9 @@ def worker(): get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, ordinal_suffix from modules.upscaler import perform_upscale + pid = os.getpid() + print(f'Started worker with PID {pid}') + try: async_gradio_app = shared.gradio_root flag = f'''App started successful. Use the app with {str(async_gradio_app.local_url)} or {str(async_gradio_app.server_name)}:{str(async_gradio_app.server_port)}''' @@ -69,9 +77,6 @@ def worker(): return def build_image_wall(async_task): - if not advanced_parameters.generate_image_grid: - return - results = async_task.results if len(results) < 2: @@ -115,6 +120,7 @@ def worker(): @torch.inference_mode() def handler(async_task): execution_start_time = time.perf_counter() + async_task.processing = True args = async_task.args args.reverse() @@ -140,6 +146,40 @@ def worker(): inpaint_input_image = args.pop() inpaint_additional_prompt = args.pop() inpaint_mask_image_upload = args.pop() + disable_preview = args.pop() + adm_scaler_positive = args.pop() + adm_scaler_negative = args.pop() + adm_scaler_end = args.pop() + adaptive_cfg = args.pop() + sampler_name = args.pop() + scheduler_name = args.pop() + overwrite_step = args.pop() + overwrite_switch = args.pop() + overwrite_width = args.pop() + overwrite_height = args.pop() + overwrite_vary_strength = args.pop() + overwrite_upscale_strength = args.pop() + mixing_image_prompt_and_vary_upscale = args.pop() + mixing_image_prompt_and_inpaint = args.pop() + debugging_cn_preprocessor = args.pop() + skipping_cn_preprocessor = args.pop() + canny_low_threshold = args.pop() + canny_high_threshold = args.pop() + refiner_swap_method = args.pop() + controlnet_softness = args.pop() + freeu_enabled = args.pop() + freeu_b1 = args.pop() + freeu_b2 = args.pop() + freeu_s1 = args.pop() + freeu_s2 = args.pop() + debugging_inpaint_preprocessor = args.pop() + inpaint_disable_initial_latent = args.pop() + inpaint_engine = args.pop() + inpaint_strength = args.pop() + inpaint_respective_field = args.pop() + inpaint_mask_upload_checkbox = args.pop() + invert_mask_checkbox = args.pop() + inpaint_erode_or_dilate = args.pop() cn_tasks = {x: [] for x in flags.ip_list} for _ in range(4): @@ -186,30 +226,33 @@ def worker(): print(f'Refiner disabled in LCM mode.') refiner_model_name = 'None' - sampler_name = advanced_parameters.sampler_name = 'lcm' - scheduler_name = advanced_parameters.scheduler_name = 'lcm' - modules.patch.sharpness = sharpness = 0.0 - cfg_scale = guidance_scale = 1.0 - modules.patch.adaptive_cfg = advanced_parameters.adaptive_cfg = 1.0 + 
sampler_name = 'lcm' + scheduler_name = 'lcm' + sharpness = 0.0 + guidance_scale = 1.0 + adaptive_cfg = 1.0 refiner_switch = 1.0 - modules.patch.positive_adm_scale = advanced_parameters.adm_scaler_positive = 1.0 - modules.patch.negative_adm_scale = advanced_parameters.adm_scaler_negative = 1.0 - modules.patch.adm_scaler_end = advanced_parameters.adm_scaler_end = 0.0 + adm_scaler_positive = 1.0 + adm_scaler_negative = 1.0 + adm_scaler_end = 0.0 steps = 8 - modules.patch.adaptive_cfg = advanced_parameters.adaptive_cfg - print(f'[Parameters] Adaptive CFG = {modules.patch.adaptive_cfg}') - - modules.patch.sharpness = sharpness - print(f'[Parameters] Sharpness = {modules.patch.sharpness}') - - modules.patch.positive_adm_scale = advanced_parameters.adm_scaler_positive - modules.patch.negative_adm_scale = advanced_parameters.adm_scaler_negative - modules.patch.adm_scaler_end = advanced_parameters.adm_scaler_end + print(f'[Parameters] Adaptive CFG = {adaptive_cfg}') + print(f'[Parameters] Sharpness = {sharpness}') + print(f'[Parameters] ControlNet Softness = {controlnet_softness}') print(f'[Parameters] ADM Scale = ' - f'{modules.patch.positive_adm_scale} : ' - f'{modules.patch.negative_adm_scale} : ' - f'{modules.patch.adm_scaler_end}') + f'{adm_scaler_positive} : ' + f'{adm_scaler_negative} : ' + f'{adm_scaler_end}') + + patch_settings[pid] = PatchSettings( + sharpness, + adm_scaler_end, + adm_scaler_positive, + adm_scaler_negative, + controlnet_softness, + adaptive_cfg + ) cfg_scale = float(guidance_scale) print(f'[Parameters] CFG = {cfg_scale}') @@ -222,10 +265,9 @@ def worker(): width, height = int(width), int(height) skip_prompt_processing = False - refiner_swap_method = advanced_parameters.refiner_swap_method inpaint_worker.current_task = None - inpaint_parameterized = advanced_parameters.inpaint_engine != 'None' + inpaint_parameterized = inpaint_engine != 'None' inpaint_image = None inpaint_mask = None inpaint_head_model_path = None @@ -239,15 +281,12 @@ def worker(): seed = int(image_seed) print(f'[Parameters] Seed = {seed}') - sampler_name = advanced_parameters.sampler_name - scheduler_name = advanced_parameters.scheduler_name - goals = [] tasks = [] if input_image_checkbox: if (current_tab == 'uov' or ( - current_tab == 'ip' and advanced_parameters.mixing_image_prompt_and_vary_upscale)) \ + current_tab == 'ip' and mixing_image_prompt_and_vary_upscale)) \ and uov_method != flags.disabled and uov_input_image is not None: uov_input_image = HWC3(uov_input_image) if 'vary' in uov_method: @@ -271,12 +310,12 @@ def worker(): progressbar(async_task, 1, 'Downloading upscale models ...') modules.config.downloading_upscale_model() if (current_tab == 'inpaint' or ( - current_tab == 'ip' and advanced_parameters.mixing_image_prompt_and_inpaint)) \ + current_tab == 'ip' and mixing_image_prompt_and_inpaint)) \ and isinstance(inpaint_input_image, dict): inpaint_image = inpaint_input_image['image'] inpaint_mask = inpaint_input_image['mask'][:, :, 0] - - if advanced_parameters.inpaint_mask_upload_checkbox: + + if inpaint_mask_upload_checkbox: if isinstance(inpaint_mask_image_upload, np.ndarray): if inpaint_mask_image_upload.ndim == 3: H, W, C = inpaint_image.shape @@ -285,10 +324,10 @@ def worker(): inpaint_mask_image_upload = (inpaint_mask_image_upload > 127).astype(np.uint8) * 255 inpaint_mask = np.maximum(inpaint_mask, inpaint_mask_image_upload) - if int(advanced_parameters.inpaint_erode_or_dilate) != 0: - inpaint_mask = erode_or_dilate(inpaint_mask, advanced_parameters.inpaint_erode_or_dilate) + if 
int(inpaint_erode_or_dilate) != 0: + inpaint_mask = erode_or_dilate(inpaint_mask, inpaint_erode_or_dilate) - if advanced_parameters.invert_mask_checkbox: + if invert_mask_checkbox: inpaint_mask = 255 - inpaint_mask inpaint_image = HWC3(inpaint_image) @@ -299,7 +338,7 @@ def worker(): if inpaint_parameterized: progressbar(async_task, 1, 'Downloading inpainter ...') inpaint_head_model_path, inpaint_patch_model_path = modules.config.downloading_inpaint_models( - advanced_parameters.inpaint_engine) + inpaint_engine) base_model_additional_loras += [(inpaint_patch_model_path, 1.0)] print(f'[Inpaint] Current inpaint model is {inpaint_patch_model_path}') if refiner_model_name == 'None': @@ -315,8 +354,8 @@ def worker(): prompt = inpaint_additional_prompt + '\n' + prompt goals.append('inpaint') if current_tab == 'ip' or \ - advanced_parameters.mixing_image_prompt_and_inpaint or \ - advanced_parameters.mixing_image_prompt_and_vary_upscale: + mixing_image_prompt_and_vary_upscale or \ + mixing_image_prompt_and_inpaint: goals.append('cn') progressbar(async_task, 1, 'Downloading control models ...') if len(cn_tasks[flags.cn_canny]) > 0: @@ -335,19 +374,19 @@ def worker(): ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_path) ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_face_path) - if advanced_parameters.overwrite_step > 0: - steps = advanced_parameters.overwrite_step + if overwrite_step > 0: + steps = overwrite_step switch = int(round(steps * refiner_switch)) - if advanced_parameters.overwrite_switch > 0: - switch = advanced_parameters.overwrite_switch + if overwrite_switch > 0: + switch = overwrite_switch - if advanced_parameters.overwrite_width > 0: - width = advanced_parameters.overwrite_width + if overwrite_width > 0: + width = overwrite_width - if advanced_parameters.overwrite_height > 0: - height = advanced_parameters.overwrite_height + if overwrite_height > 0: + height = overwrite_height print(f'[Parameters] Sampler = {sampler_name} - {scheduler_name}') print(f'[Parameters] Steps = {steps} - {switch}') @@ -446,8 +485,8 @@ def worker(): denoising_strength = 0.5 if 'strong' in uov_method: denoising_strength = 0.85 - if advanced_parameters.overwrite_vary_strength > 0: - denoising_strength = advanced_parameters.overwrite_vary_strength + if overwrite_vary_strength > 0: + denoising_strength = overwrite_vary_strength shape_ceil = get_image_shape_ceil(uov_input_image) if shape_ceil < 1024: @@ -518,8 +557,8 @@ def worker(): tiled = True denoising_strength = 0.382 - if advanced_parameters.overwrite_upscale_strength > 0: - denoising_strength = advanced_parameters.overwrite_upscale_strength + if overwrite_upscale_strength > 0: + denoising_strength = overwrite_upscale_strength initial_pixels = core.numpy_to_pytorch(uov_input_image) progressbar(async_task, 13, 'VAE encoding ...') @@ -563,19 +602,19 @@ def worker(): inpaint_image = np.ascontiguousarray(inpaint_image.copy()) inpaint_mask = np.ascontiguousarray(inpaint_mask.copy()) - advanced_parameters.inpaint_strength = 1.0 - advanced_parameters.inpaint_respective_field = 1.0 + inpaint_strength = 1.0 + inpaint_respective_field = 1.0 - denoising_strength = advanced_parameters.inpaint_strength + denoising_strength = inpaint_strength inpaint_worker.current_task = inpaint_worker.InpaintWorker( image=inpaint_image, mask=inpaint_mask, use_fill=denoising_strength > 0.99, - k=advanced_parameters.inpaint_respective_field + k=inpaint_respective_field ) - if advanced_parameters.debugging_inpaint_preprocessor: + if 
debugging_inpaint_preprocessor: yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing(), do_not_show_finished_images=True) return @@ -621,7 +660,7 @@ def worker(): model=pipeline.final_unet ) - if not advanced_parameters.inpaint_disable_initial_latent: + if not inpaint_disable_initial_latent: initial_latent = {'samples': latent_fill} B, C, H, W = latent_fill.shape @@ -634,24 +673,24 @@ def worker(): cn_img, cn_stop, cn_weight = task cn_img = resize_image(HWC3(cn_img), width=width, height=height) - if not advanced_parameters.skipping_cn_preprocessor: - cn_img = preprocessors.canny_pyramid(cn_img) + if not skipping_cn_preprocessor: + cn_img = preprocessors.canny_pyramid(cn_img, canny_low_threshold, canny_high_threshold) cn_img = HWC3(cn_img) task[0] = core.numpy_to_pytorch(cn_img) - if advanced_parameters.debugging_cn_preprocessor: + if debugging_cn_preprocessor: yield_result(async_task, cn_img, do_not_show_finished_images=True) return for task in cn_tasks[flags.cn_cpds]: cn_img, cn_stop, cn_weight = task cn_img = resize_image(HWC3(cn_img), width=width, height=height) - if not advanced_parameters.skipping_cn_preprocessor: + if not skipping_cn_preprocessor: cn_img = preprocessors.cpds(cn_img) cn_img = HWC3(cn_img) task[0] = core.numpy_to_pytorch(cn_img) - if advanced_parameters.debugging_cn_preprocessor: + if debugging_cn_preprocessor: yield_result(async_task, cn_img, do_not_show_finished_images=True) return for task in cn_tasks[flags.cn_ip]: @@ -662,21 +701,21 @@ def worker(): cn_img = resize_image(cn_img, width=224, height=224, resize_mode=0) task[0] = ip_adapter.preprocess(cn_img, ip_adapter_path=ip_adapter_path) - if advanced_parameters.debugging_cn_preprocessor: + if debugging_cn_preprocessor: yield_result(async_task, cn_img, do_not_show_finished_images=True) return for task in cn_tasks[flags.cn_ip_face]: cn_img, cn_stop, cn_weight = task cn_img = HWC3(cn_img) - if not advanced_parameters.skipping_cn_preprocessor: + if not skipping_cn_preprocessor: cn_img = extras.face_crop.crop_image(cn_img) # https://github.com/tencent-ailab/IP-Adapter/blob/d580c50a291566bbf9fc7ac0f760506607297e6d/README.md?plain=1#L75 cn_img = resize_image(cn_img, width=224, height=224, resize_mode=0) task[0] = ip_adapter.preprocess(cn_img, ip_adapter_path=ip_adapter_face_path) - if advanced_parameters.debugging_cn_preprocessor: + if debugging_cn_preprocessor: yield_result(async_task, cn_img, do_not_show_finished_images=True) return @@ -685,14 +724,14 @@ def worker(): if len(all_ip_tasks) > 0: pipeline.final_unet = ip_adapter.patch_model(pipeline.final_unet, all_ip_tasks) - if advanced_parameters.freeu_enabled: + if freeu_enabled: print(f'FreeU is enabled!') pipeline.final_unet = core.apply_freeu( pipeline.final_unet, - advanced_parameters.freeu_b1, - advanced_parameters.freeu_b2, - advanced_parameters.freeu_s1, - advanced_parameters.freeu_s2 + freeu_b1, + freeu_b2, + freeu_s1, + freeu_s2 ) all_steps = steps * image_number @@ -738,6 +777,8 @@ def worker(): execution_start_time = time.perf_counter() try: + if async_task.last_stop is not False: + ldm_patched.model_management.interrupt_current_processing() positive_cond, negative_cond = task['c'], task['uc'] if 'cn' in goals: @@ -765,7 +806,8 @@ def worker(): denoise=denoising_strength, tiled=tiled, cfg_scale=cfg_scale, - refiner_swap_method=refiner_swap_method + refiner_swap_method=refiner_swap_method, + disable_preview=disable_preview ) del task['c'], task['uc'], positive_cond, negative_cond # Save memory @@ -784,9 +826,9 @@ def worker(): 
('Sharpness', sharpness), ('Guidance Scale', guidance_scale), ('ADM Guidance', str(( - modules.patch.positive_adm_scale, - modules.patch.negative_adm_scale, - modules.patch.adm_scaler_end))), + modules.patch.patch_settings[pid].positive_adm_scale, + modules.patch.patch_settings[pid].negative_adm_scale, + modules.patch.patch_settings[pid].adm_scaler_end))), ('Base Model', base_model_name), ('Refiner Model', refiner_model_name), ('Refiner Switch', refiner_switch), @@ -802,8 +844,9 @@ def worker(): yield_result(async_task, imgs, do_not_show_finished_images=len(tasks) == 1) except ldm_patched.modules.model_management.InterruptProcessingException as e: - if shared.last_stop == 'skip': + if async_task.last_stop == 'skip': print('User skipped') + async_task.last_stop = False continue else: print('User stopped') @@ -811,21 +854,27 @@ def worker(): execution_time = time.perf_counter() - execution_start_time print(f'Generating and saving time: {execution_time:.2f} seconds') - + async_task.processing = False return while True: time.sleep(0.01) if len(async_tasks) > 0: task = async_tasks.pop(0) + generate_image_grid = task.args.pop(0) + try: handler(task) - build_image_wall(task) + if generate_image_grid: + build_image_wall(task) task.yields.append(['finish', task.results]) pipeline.prepare_text_encoder(async_call=True) except: traceback.print_exc() task.yields.append(['finish', task.results]) + finally: + if pid in modules.patch.patch_settings: + del modules.patch.patch_settings[pid] pass diff --git a/modules/core.py b/modules/core.py index 989b8e32..7a29d988 100644 --- a/modules/core.py +++ b/modules/core.py @@ -1,8 +1,3 @@ -from modules.patch import patch_all - -patch_all() - - import os import einops import torch @@ -16,7 +11,6 @@ import ldm_patched.modules.controlnet import modules.sample_hijack import ldm_patched.modules.samplers import ldm_patched.modules.latent_formats -import modules.advanced_parameters from ldm_patched.modules.sd import load_checkpoint_guess_config from ldm_patched.contrib.external import VAEDecode, EmptyLatentImage, VAEEncode, VAEEncodeTiled, VAEDecodeTiled, \ @@ -268,7 +262,7 @@ def get_previewer(model): def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sampler_name='dpmpp_2m_sde_gpu', scheduler='karras', denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, callback_function=None, refiner=None, refiner_switch=-1, - previewer_start=None, previewer_end=None, sigmas=None, noise_mean=None): + previewer_start=None, previewer_end=None, sigmas=None, noise_mean=None, disable_preview=False): if sigmas is not None: sigmas = sigmas.clone().to(ldm_patched.modules.model_management.get_torch_device()) @@ -299,7 +293,7 @@ def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sa def callback(step, x0, x, total_steps): ldm_patched.modules.model_management.throw_exception_if_processing_interrupted() y = None - if previewer is not None and not modules.advanced_parameters.disable_preview: + if previewer is not None and not disable_preview: y = previewer(x0, previewer_start + step, previewer_end) if callback_function is not None: callback_function(previewer_start + step, x0, x, previewer_end, y) diff --git a/modules/default_pipeline.py b/modules/default_pipeline.py index 6001d97f..2f45667c 100644 --- a/modules/default_pipeline.py +++ b/modules/default_pipeline.py @@ -315,7 +315,7 @@ def get_candidate_vae(steps, switch, denoise=1.0, refiner_swap_method='joint'): @torch.no_grad() @torch.inference_mode() 
-def process_diffusion(positive_cond, negative_cond, steps, switch, width, height, image_seed, callback, sampler_name, scheduler_name, latent=None, denoise=1.0, tiled=False, cfg_scale=7.0, refiner_swap_method='joint'): +def process_diffusion(positive_cond, negative_cond, steps, switch, width, height, image_seed, callback, sampler_name, scheduler_name, latent=None, denoise=1.0, tiled=False, cfg_scale=7.0, refiner_swap_method='joint', disable_preview=False): target_unet, target_vae, target_refiner_unet, target_refiner_vae, target_clip \ = final_unet, final_vae, final_refiner_unet, final_refiner_vae, final_clip @@ -374,6 +374,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height refiner_switch=switch, previewer_start=0, previewer_end=steps, + disable_preview=disable_preview ) decoded_latent = core.decode_vae(vae=target_vae, latent_image=sampled_latent, tiled=tiled) @@ -392,6 +393,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height scheduler=scheduler_name, previewer_start=0, previewer_end=steps, + disable_preview=disable_preview ) print('Refiner swapped by changing ksampler. Noise preserved.') @@ -414,6 +416,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height scheduler=scheduler_name, previewer_start=switch, previewer_end=steps, + disable_preview=disable_preview ) target_model = target_refiner_vae @@ -422,7 +425,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height decoded_latent = core.decode_vae(vae=target_model, latent_image=sampled_latent, tiled=tiled) if refiner_swap_method == 'vae': - modules.patch.eps_record = 'vae' + modules.patch.patch_settings[os.getpid()].eps_record = 'vae' if modules.inpaint_worker.current_task is not None: modules.inpaint_worker.current_task.unswap() @@ -440,7 +443,8 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height sampler_name=sampler_name, scheduler=scheduler_name, previewer_start=0, - previewer_end=steps + previewer_end=steps, + disable_preview=disable_preview ) print('Fooocus VAE-based swap.') @@ -459,7 +463,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height denoise=denoise)[switch:] * k_sigmas len_sigmas = len(sigmas) - 1 - noise_mean = torch.mean(modules.patch.eps_record, dim=1, keepdim=True) + noise_mean = torch.mean(modules.patch.patch_settings[os.getpid()].eps_record, dim=1, keepdim=True) if modules.inpaint_worker.current_task is not None: modules.inpaint_worker.current_task.swap() @@ -479,7 +483,8 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height previewer_start=switch, previewer_end=steps, sigmas=sigmas, - noise_mean=noise_mean + noise_mean=noise_mean, + disable_preview=disable_preview ) target_model = target_refiner_vae @@ -488,5 +493,5 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height decoded_latent = core.decode_vae(vae=target_model, latent_image=sampled_latent, tiled=tiled) images = core.pytorch_to_numpy(decoded_latent) - modules.patch.eps_record = None + modules.patch.patch_settings[os.getpid()].eps_record = None return images diff --git a/modules/patch.py b/modules/patch.py index 2e2409c5..3c2dd8f4 100644 --- a/modules/patch.py +++ b/modules/patch.py @@ -17,7 +17,6 @@ import ldm_patched.controlnet.cldm import ldm_patched.modules.model_patcher import ldm_patched.modules.samplers import ldm_patched.modules.args_parser -import modules.advanced_parameters as advanced_parameters import 
warnings import safetensors.torch import modules.constants as constants @@ -29,15 +28,25 @@ from modules.patch_precision import patch_all_precision from modules.patch_clip import patch_all_clip -sharpness = 2.0 +class PatchSettings: + def __init__(self, + sharpness=2.0, + adm_scaler_end=0.3, + positive_adm_scale=1.5, + negative_adm_scale=0.8, + controlnet_softness=0.25, + adaptive_cfg=7.0): + self.sharpness = sharpness + self.adm_scaler_end = adm_scaler_end + self.positive_adm_scale = positive_adm_scale + self.negative_adm_scale = negative_adm_scale + self.controlnet_softness = controlnet_softness + self.adaptive_cfg = adaptive_cfg + self.global_diffusion_progress = 0 + self.eps_record = None -adm_scaler_end = 0.3 -positive_adm_scale = 1.5 -negative_adm_scale = 0.8 -adaptive_cfg = 7.0 -global_diffusion_progress = 0 -eps_record = None +patch_settings = {} def calculate_weight_patched(self, patches, weight, key): @@ -201,14 +210,13 @@ class BrownianTreeNoiseSamplerPatched: def compute_cfg(uncond, cond, cfg_scale, t): - global adaptive_cfg - - mimic_cfg = float(adaptive_cfg) + pid = os.getpid() + mimic_cfg = float(patch_settings[pid].adaptive_cfg) real_cfg = float(cfg_scale) real_eps = uncond + real_cfg * (cond - uncond) - if cfg_scale > adaptive_cfg: + if cfg_scale > patch_settings[pid].adaptive_cfg: mimicked_eps = uncond + mimic_cfg * (cond - uncond) return real_eps * t + mimicked_eps * (1 - t) else: @@ -216,13 +224,13 @@ def compute_cfg(uncond, cond, cfg_scale, t): def patched_sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options=None, seed=None): - global eps_record + pid = os.getpid() if math.isclose(cond_scale, 1.0) and not model_options.get("disable_cfg1_optimization", False): final_x0 = calc_cond_uncond_batch(model, cond, None, x, timestep, model_options)[0] - if eps_record is not None: - eps_record = ((x - final_x0) / timestep).cpu() + if patch_settings[pid].eps_record is not None: + patch_settings[pid].eps_record = ((x - final_x0) / timestep).cpu() return final_x0 @@ -231,16 +239,16 @@ def patched_sampling_function(model, x, timestep, uncond, cond, cond_scale, mode positive_eps = x - positive_x0 negative_eps = x - negative_x0 - alpha = 0.001 * sharpness * global_diffusion_progress + alpha = 0.001 * patch_settings[pid].sharpness * patch_settings[pid].global_diffusion_progress positive_eps_degraded = anisotropic.adaptive_anisotropic_filter(x=positive_eps, g=positive_x0) positive_eps_degraded_weighted = positive_eps_degraded * alpha + positive_eps * (1.0 - alpha) final_eps = compute_cfg(uncond=negative_eps, cond=positive_eps_degraded_weighted, - cfg_scale=cond_scale, t=global_diffusion_progress) + cfg_scale=cond_scale, t=patch_settings[pid].global_diffusion_progress) - if eps_record is not None: - eps_record = (final_eps / timestep).cpu() + if patch_settings[pid].eps_record is not None: + patch_settings[pid].eps_record = (final_eps / timestep).cpu() return x - final_eps @@ -255,20 +263,19 @@ def round_to_64(x): def sdxl_encode_adm_patched(self, **kwargs): - global positive_adm_scale, negative_adm_scale - clip_pooled = ldm_patched.modules.model_base.sdxl_pooled(kwargs, self.noise_augmentor) width = kwargs.get("width", 1024) height = kwargs.get("height", 1024) target_width = width target_height = height + pid = os.getpid() if kwargs.get("prompt_type", "") == "negative": - width = float(width) * negative_adm_scale - height = float(height) * negative_adm_scale + width = float(width) * patch_settings[pid].negative_adm_scale + height = float(height) * 
patch_settings[pid].negative_adm_scale elif kwargs.get("prompt_type", "") == "positive": - width = float(width) * positive_adm_scale - height = float(height) * positive_adm_scale + width = float(width) * patch_settings[pid].positive_adm_scale + height = float(height) * patch_settings[pid].positive_adm_scale def embedder(number_list): h = self.embedder(torch.tensor(number_list, dtype=torch.float32)) @@ -322,7 +329,7 @@ def patched_KSamplerX0Inpaint_forward(self, x, sigma, uncond, cond, cond_scale, def timed_adm(y, timesteps): if isinstance(y, torch.Tensor) and int(y.dim()) == 2 and int(y.shape[1]) == 5632: - y_mask = (timesteps > 999.0 * (1.0 - float(adm_scaler_end))).to(y)[..., None] + y_mask = (timesteps > 999.0 * (1.0 - float(patch_settings[os.getpid()].adm_scaler_end))).to(y)[..., None] y_with_adm = y[..., :2816].clone() y_without_adm = y[..., 2816:].clone() return y_with_adm * y_mask + y_without_adm * (1.0 - y_mask) @@ -332,6 +339,7 @@ def timed_adm(y, timesteps): def patched_cldm_forward(self, x, hint, timesteps, context, y=None, **kwargs): t_emb = ldm_patched.ldm.modules.diffusionmodules.openaimodel.timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(x.dtype) emb = self.time_embed(t_emb) + pid = os.getpid() guided_hint = self.input_hint_block(hint, emb, context) @@ -357,19 +365,17 @@ def patched_cldm_forward(self, x, hint, timesteps, context, y=None, **kwargs): h = self.middle_block(h, emb, context) outs.append(self.middle_block_out(h, emb, context)) - if advanced_parameters.controlnet_softness > 0: + if patch_settings[pid].controlnet_softness > 0: for i in range(10): k = 1.0 - float(i) / 9.0 - outs[i] = outs[i] * (1.0 - advanced_parameters.controlnet_softness * k) + outs[i] = outs[i] * (1.0 - patch_settings[pid].controlnet_softness * k) return outs def patched_unet_forward(self, x, timesteps=None, context=None, y=None, control=None, transformer_options={}, **kwargs): - global global_diffusion_progress - self.current_step = 1.0 - timesteps.to(x) / 999.0 - global_diffusion_progress = float(self.current_step.detach().cpu().numpy().tolist()[0]) + patch_settings[os.getpid()].global_diffusion_progress = float(self.current_step.detach().cpu().numpy().tolist()[0]) y = timed_adm(y, timesteps) @@ -483,7 +489,7 @@ def patch_all(): if ldm_patched.modules.model_management.directml_enabled: ldm_patched.modules.model_management.lowvram_available = True ldm_patched.modules.model_management.OOM_EXCEPTION = Exception - + patch_all_precision() patch_all_clip() diff --git a/shared.py b/shared.py index 269809e3..21a2a864 100644 --- a/shared.py +++ b/shared.py @@ -1,2 +1 @@ -gradio_root = None -last_stop = None +gradio_root = None \ No newline at end of file diff --git a/webui.py b/webui.py index b9b620d2..05b7d20e 100644 --- a/webui.py +++ b/webui.py @@ -11,7 +11,6 @@ import modules.async_worker as worker import modules.constants as constants import modules.flags as flags import modules.gradio_hijack as grh -import modules.advanced_parameters as advanced_parameters import modules.style_sorter as style_sorter import modules.meta_parser import args_manager @@ -22,17 +21,19 @@ from modules.private_logger import get_current_html_path from modules.ui_gradio_extensions import reload_javascript from modules.auth import auth_enabled, check_auth +def get_task(*args): + args = list(args) + args.pop(0) -def generate_clicked(*args): + return worker.AsyncTask(args=args) + +def generate_clicked(task): import ldm_patched.modules.model_management as model_management with 
model_management.interrupt_processing_mutex: model_management.interrupt_processing = False - # outputs=[progress_html, progress_window, progress_gallery, gallery] - execution_start_time = time.perf_counter() - task = worker.AsyncTask(args=list(args)) finished = False yield gr.update(visible=True, value=modules.html.make_progress_html(1, 'Waiting for task to start ...')), \ @@ -88,6 +89,7 @@ shared.gradio_root = gr.Blocks( css=modules.html.css).queue() with shared.gradio_root: + currentTask = gr.State(worker.AsyncTask(args=[])) with gr.Row(): with gr.Column(scale=2): with gr.Row(): @@ -115,21 +117,22 @@ with shared.gradio_root: skip_button = gr.Button(label="Skip", value="Skip", elem_classes='type_row_half', visible=False) stop_button = gr.Button(label="Stop", value="Stop", elem_classes='type_row_half', elem_id='stop_button', visible=False) - def stop_clicked(): + def stop_clicked(currentTask): import ldm_patched.modules.model_management as model_management - shared.last_stop = 'stop' - model_management.interrupt_current_processing() - return [gr.update(interactive=False)] * 2 + currentTask.last_stop = 'stop' + if (currentTask.processing): + model_management.interrupt_current_processing() + return currentTask - def skip_clicked(): + def skip_clicked(currentTask): import ldm_patched.modules.model_management as model_management - shared.last_stop = 'skip' - model_management.interrupt_current_processing() - return + currentTask.last_stop = 'skip' + if (currentTask.processing): + model_management.interrupt_current_processing() + return currentTask - stop_button.click(stop_clicked, outputs=[skip_button, stop_button], - queue=False, show_progress=False, _js='cancelGenerateForever') - skip_button.click(skip_clicked, queue=False, show_progress=False) + stop_button.click(stop_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False, _js='cancelGenerateForever') + skip_button.click(skip_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False) with gr.Row(elem_classes='advanced_check_row'): input_image_checkbox = gr.Checkbox(label='Input Image', value=False, container=False, elem_classes='min_check') advanced_checkbox = gr.Checkbox(label='Advanced', value=modules.config.default_advanced_checkbox, container=False, elem_classes='min_check') @@ -435,7 +438,7 @@ with shared.gradio_root: '(default is 0, always process before any mask invert)') inpaint_mask_upload_checkbox = gr.Checkbox(label='Enable Mask Upload', value=False) invert_mask_checkbox = gr.Checkbox(label='Invert Mask', value=False) - + inpaint_ctrls = [debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate] @@ -452,15 +455,6 @@ with shared.gradio_root: freeu_s2 = gr.Slider(label='S2', minimum=0, maximum=4, step=0.01, value=0.95) freeu_ctrls = [freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2] - adps = [disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, - scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, - overwrite_vary_strength, overwrite_upscale_strength, - mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, - debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, - canny_low_threshold, canny_high_threshold, refiner_swap_method] - adps += freeu_ctrls - adps += inpaint_ctrls - def dev_mode_checked(r): 
return gr.update(visible=r) @@ -525,7 +519,8 @@ with shared.gradio_root: inpaint_strength, inpaint_respective_field ], show_progress=False, queue=False) - ctrls = [ + ctrls = [currentTask, generate_image_grid] + ctrls += [ prompt, negative_prompt, style_selections, performance_selection, aspect_ratios_selection, image_number, image_seed, sharpness, guidance_scale ] @@ -534,6 +529,14 @@ with shared.gradio_root: ctrls += [input_image_checkbox, current_tab] ctrls += [uov_method, uov_input_image] ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt, inpaint_mask_image] + ctrls += [disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg] + ctrls += [sampler_name, scheduler_name] + ctrls += [overwrite_step, overwrite_switch, overwrite_width, overwrite_height, overwrite_vary_strength] + ctrls += [overwrite_upscale_strength, mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint] + ctrls += [debugging_cn_preprocessor, skipping_cn_preprocessor, canny_low_threshold, canny_high_threshold] + ctrls += [refiner_swap_method, controlnet_softness] + ctrls += freeu_ctrls + ctrls += inpaint_ctrls ctrls += ip_ctrls state_is_generating = gr.State(False) @@ -588,8 +591,8 @@ with shared.gradio_root: generate_button.click(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), [], True), outputs=[stop_button, skip_button, generate_button, gallery, state_is_generating]) \ .then(fn=refresh_seed, inputs=[seed_random, image_seed], outputs=image_seed) \ - .then(advanced_parameters.set_all_advanced_parameters, inputs=adps) \ - .then(fn=generate_clicked, inputs=ctrls, outputs=[progress_html, progress_window, progress_gallery, gallery]) \ + .then(fn=get_task, inputs=ctrls, outputs=currentTask) \ + .then(fn=generate_clicked, inputs=currentTask, outputs=[progress_html, progress_window, progress_gallery, gallery]) \ .then(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), gr.update(visible=False, interactive=False), False), outputs=[generate_button, stop_button, skip_button, state_is_generating]) \ .then(fn=update_history_link, outputs=history_link) \ From 965364cd80e63686bd1138306995bbcea29a4d14 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sat, 24 Feb 2024 19:03:25 +0100 Subject: [PATCH 032/103] feat: add list of 100 most popular animals to wildcards (#985) --- wildcards/animal.txt | 100 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 wildcards/animal.txt diff --git a/wildcards/animal.txt b/wildcards/animal.txt new file mode 100644 index 00000000..9a6f09ba --- /dev/null +++ b/wildcards/animal.txt @@ -0,0 +1,100 @@ +Alligator +Ant +Antelope +Armadillo +Badger +Bat +Bear +Beaver +Bison +Boar +Bobcat +Bull +Camel +Chameleon +Cheetah +Chicken +Chihuahua +Chimpanzee +Chinchilla +Chipmunk +Comodo Dragon +Cow +Coyote +Crocodile +Crow +Deer +Dinosaur +Dolphin +Donkey +Duck +Eagle +Eel +Elephant +Elk +Emu +Falcon +Ferret +Flamingo +Flying Squirrel +Giraffe +Goose +Guinea pig +Hawk +Hedgehog +Hippopotamus +Horse +Hummingbird +Hyena +Jackal +Jaguar +Jellyfish +Kangaroo +King Cobra +Koala bear +Leopard +Lion +Lizard +Magpie +Marten +Meerkat +Mole +Monkey +Moose +Mouse +Octopus +Okapi +Orangutan +Ostrich +Otter +Owl +Panda +Pangolin +Panther +Penguin +Pig +Porcupine +Possum +Puma +Quokka +Rabbit +Raccoon +Raven +Reindeer +Rhinoceros +Seal 
+Shark +Sheep +Snail +Snake +Sparrow +Spider +Squirrel +Swallow +Tiger +Walrus +Whale +Wolf +Wombat +Yak +Zebra \ No newline at end of file From 7cfb5e742db2b22eab61966b4be5300bd96dc53c Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sat, 24 Feb 2024 20:07:36 +0100 Subject: [PATCH 033/103] feat: add advanced parameter for disable_intermediate_results (progress_gallery) (#1013) * add advanced parameter for disable_intermediate_results prevents gradio frontend process from clogging image output and updates in high throughput scenarios such as LCM with image number >= 4 * update disable_intermediate_results correctly based on default and selected performance * chore: add missing translations --- language/en.json | 4 ++++ modules/async_worker.py | 3 ++- webui.py | 12 +++++++++--- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/language/en.json b/language/en.json index fd40ca2f..8a782e3f 100644 --- a/language/en.json +++ b/language/en.json @@ -342,6 +342,10 @@ "Forced Overwrite of Denoising Strength of \"Vary\"": "Forced Overwrite of Denoising Strength of \"Vary\"", "Set as negative number to disable. For developer debugging.": "Set as negative number to disable. For developer debugging.", "Forced Overwrite of Denoising Strength of \"Upscale\"": "Forced Overwrite of Denoising Strength of \"Upscale\"", + "Disable Preview": "Disable Preview", + "Disable preview during generation.": "Disable preview during generation.", + "Disable Intermediate Results": "Disable Intermediate Results", + "Disable intermediate results during generation, only show final gallery.": "Disable intermediate results during generation, only show final gallery.", "Inpaint Engine": "Inpaint Engine", "v1": "v1", "Version of Fooocus inpaint model": "Version of Fooocus inpaint model", diff --git a/modules/async_worker.py b/modules/async_worker.py index d0ce4ba9..a304e697 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -147,6 +147,7 @@ def worker(): inpaint_additional_prompt = args.pop() inpaint_mask_image_upload = args.pop() disable_preview = args.pop() + disable_intermediate_results = args.pop() adm_scaler_positive = args.pop() adm_scaler_negative = args.pop() adm_scaler_end = args.pop() @@ -842,7 +843,7 @@ def worker(): d.append(('Version', 'v' + fooocus_version.version)) log(x, d) - yield_result(async_task, imgs, do_not_show_finished_images=len(tasks) == 1) + yield_result(async_task, imgs, do_not_show_finished_images=len(tasks) == 1 or disable_intermediate_results) except ldm_patched.modules.model_management.InterruptProcessingException as e: if async_task.last_stop == 'skip': print('User skipped') diff --git a/webui.py b/webui.py index 05b7d20e..0d8c3c04 100644 --- a/webui.py +++ b/webui.py @@ -390,6 +390,10 @@ with shared.gradio_root: info='Set as negative number to disable. 
For developer debugging.')
                         disable_preview = gr.Checkbox(label='Disable Preview', value=False,
                                                       info='Disable preview during generation.')
+                        disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
+                                                                   value=modules.config.default_performance == 'Extreme Speed',
+                                                                   interactive=modules.config.default_performance != 'Extreme Speed',
+                                                                   info='Disable intermediate results during generation, only show final gallery.')
 
                     with gr.Tab(label='Control'):
                         debugging_cn_preprocessor = gr.Checkbox(label='Debug Preprocessors', value=False,
@@ -474,12 +478,13 @@ with shared.gradio_root:
                                         queue=False, show_progress=False)
 
         performance_selection.change(lambda x: [gr.update(interactive=x != 'Extreme Speed')] * 11 +
-                                               [gr.update(visible=x != 'Extreme Speed')] * 1,
+                                               [gr.update(visible=x != 'Extreme Speed')] * 1 +
+                                               [gr.update(interactive=x != 'Extreme Speed', value=x == 'Extreme Speed', )] * 1,
                                     inputs=performance_selection,
                                     outputs=[
                                         guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive,
                                         adm_scaler_negative, refiner_switch, refiner_model, sampler_name,
-                                        scheduler_name, adaptive_cfg, refiner_swap_method, negative_prompt
+                                        scheduler_name, adaptive_cfg, refiner_swap_method, negative_prompt, disable_intermediate_results
                                     ], queue=False, show_progress=False)
 
         advanced_checkbox.change(lambda x: gr.update(visible=x), advanced_checkbox, advanced_column,
@@ -529,7 +534,8 @@ with shared.gradio_root:
         ctrls += [input_image_checkbox, current_tab]
         ctrls += [uov_method, uov_input_image]
         ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt, inpaint_mask_image]
-        ctrls += [disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg]
+        ctrls += [disable_preview, disable_intermediate_results]
+        ctrls += [adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg]
         ctrls += [sampler_name, scheduler_name]
         ctrls += [overwrite_step, overwrite_switch, overwrite_width, overwrite_height, overwrite_vary_strength]
         ctrls += [overwrite_upscale_strength, mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint]

From ef1999c52c8b0ae7fb26ee4563dfca9cc3b5c5c6 Mon Sep 17 00:00:00 2001
From: dooglewoogle <46539436+dooglewoogle@users.noreply.github.com>
Date: Mon, 26 Feb 2024 00:47:14 +1300
Subject: [PATCH 034/103] feat: add ability to load checkpoints and loras from multiple locations (#1256)

* Add ability to load checkpoints and loras from multiple locations
* Found another location where a default path is required
* feat: use array as default

---------

Co-authored-by: Manuel Schmid
---
 launch.py                   |  9 ++++-----
 modules/config.py           | 35 +++++++++++++++++++++++++----------
 modules/core.py             |  3 ++-
 modules/default_pipeline.py |  5 +++--
 modules/util.py             |  9 +++++++++
 5 files changed, 43 insertions(+), 18 deletions(-)

diff --git a/launch.py b/launch.py
index db174f54..4269f1fc 100644
--- a/launch.py
+++ b/launch.py
@@ -68,7 +68,6 @@ vae_approx_filenames = [
      'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors')
 ]
 
-
 def ini_args():
     from args_manager import args
     return args
@@ -101,9 +100,9 @@ def download_models():
         return
 
     if not args.always_download_new_model:
-        if not os.path.exists(os.path.join(config.path_checkpoints, config.default_base_model_name)):
+        if not os.path.exists(os.path.join(config.paths_checkpoints[0], config.default_base_model_name)):
             for alternative_model_name in config.previous_default_models:
-                if os.path.exists(os.path.join(config.path_checkpoints, alternative_model_name)):
+                if
os.path.exists(os.path.join(config.paths_checkpoints[0], alternative_model_name)): print(f'You do not have [{config.default_base_model_name}] but you have [{alternative_model_name}].') print(f'Fooocus will use [{alternative_model_name}] to avoid downloading new models, ' f'but you are not using latest models.') @@ -113,11 +112,11 @@ def download_models(): break for file_name, url in config.checkpoint_downloads.items(): - load_file_from_url(url=url, model_dir=config.path_checkpoints, file_name=file_name) + load_file_from_url(url=url, model_dir=config.paths_checkpoints[0], file_name=file_name) for file_name, url in config.embeddings_downloads.items(): load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name) for file_name, url in config.lora_downloads.items(): - load_file_from_url(url=url, model_dir=config.path_loras, file_name=file_name) + load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name) return diff --git a/modules/config.py b/modules/config.py index 1f4e82eb..d3be1f21 100644 --- a/modules/config.py +++ b/modules/config.py @@ -114,7 +114,7 @@ def get_path_output() -> str: return path_output -def get_dir_or_set_default(key, default_value): +def get_dir_or_set_default(key, default_value, as_array=False): global config_dict, visited_keys, always_save_keys if key not in visited_keys: @@ -125,18 +125,29 @@ def get_dir_or_set_default(key, default_value): v = config_dict.get(key, None) if isinstance(v, str) and os.path.exists(v) and os.path.isdir(v): + return v if not as_array else [v] + elif isinstance(v, list) and all([os.path.exists(d) and os.path.isdir(d) for d in v]): return v else: if v is not None: print(f'Failed to load config key: {json.dumps({key:v})} is invalid or does not exist; will use {json.dumps({key:default_value})} instead.') - dp = os.path.abspath(os.path.join(os.path.dirname(__file__), default_value)) - os.makedirs(dp, exist_ok=True) + if isinstance(default_value, list): + dp = [] + for path in default_value: + abs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), path)) + dp.append(abs_path) + os.makedirs(abs_path, exist_ok=True) + else: + dp = os.path.abspath(os.path.join(os.path.dirname(__file__), default_value)) + os.makedirs(dp, exist_ok=True) + if as_array: + dp = [dp] config_dict[key] = dp return dp -path_checkpoints = get_dir_or_set_default('path_checkpoints', '../models/checkpoints/') -path_loras = get_dir_or_set_default('path_loras', '../models/loras/') +paths_checkpoints = get_dir_or_set_default('path_checkpoints', ['../models/checkpoints/'], True) +paths_loras = get_dir_or_set_default('path_loras', ['../models/loras/'], True) path_embeddings = get_dir_or_set_default('path_embeddings', '../models/embeddings/') path_vae_approx = get_dir_or_set_default('path_vae_approx', '../models/vae_approx/') path_upscale_models = get_dir_or_set_default('path_upscale_models', '../models/upscale_models/') @@ -404,14 +415,18 @@ model_filenames = [] lora_filenames = [] -def get_model_filenames(folder_path, name_filter=None): - return get_files_from_folder(folder_path, ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch'], name_filter) +def get_model_filenames(folder_paths, name_filter=None): + extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch'] + files = [] + for folder in folder_paths: + files += get_files_from_folder(folder, extensions, name_filter) + return files def update_all_model_names(): global model_filenames, lora_filenames - model_filenames = 
get_model_filenames(path_checkpoints) - lora_filenames = get_model_filenames(path_loras) + model_filenames = get_model_filenames(paths_checkpoints) + lora_filenames = get_model_filenames(paths_loras) return @@ -456,7 +471,7 @@ def downloading_inpaint_models(v): def downloading_sdxl_lcm_lora(): load_file_from_url( url='https://huggingface.co/lllyasviel/misc/resolve/main/sdxl_lcm_lora.safetensors', - model_dir=path_loras, + model_dir=paths_loras[0], file_name='sdxl_lcm_lora.safetensors' ) return 'sdxl_lcm_lora.safetensors' diff --git a/modules/core.py b/modules/core.py index 7a29d988..bfc44966 100644 --- a/modules/core.py +++ b/modules/core.py @@ -18,6 +18,7 @@ from ldm_patched.contrib.external import VAEDecode, EmptyLatentImage, VAEEncode, from ldm_patched.contrib.external_freelunch import FreeU_V2 from ldm_patched.modules.sample import prepare_mask from modules.lora import match_lora +from modules.util import get_file_from_folder_list from ldm_patched.modules.lora import model_lora_keys_unet, model_lora_keys_clip from modules.config import path_embeddings from ldm_patched.contrib.external_model_advanced import ModelSamplingDiscrete @@ -79,7 +80,7 @@ class StableDiffusionModel: if os.path.exists(name): lora_filename = name else: - lora_filename = os.path.join(modules.config.path_loras, name) + lora_filename = get_file_from_folder_list(name, modules.config.paths_loras) if not os.path.exists(lora_filename): print(f'Lora file not found: {lora_filename}') diff --git a/modules/default_pipeline.py b/modules/default_pipeline.py index 2f45667c..f8edfae1 100644 --- a/modules/default_pipeline.py +++ b/modules/default_pipeline.py @@ -11,6 +11,7 @@ from extras.expansion import FooocusExpansion from ldm_patched.modules.model_base import SDXL, SDXLRefiner from modules.sample_hijack import clip_separate +from modules.util import get_file_from_folder_list model_base = core.StableDiffusionModel() @@ -60,7 +61,7 @@ def assert_model_integrity(): def refresh_base_model(name): global model_base - filename = os.path.abspath(os.path.realpath(os.path.join(modules.config.path_checkpoints, name))) + filename = get_file_from_folder_list(name, modules.config.paths_checkpoints) if model_base.filename == filename: return @@ -76,7 +77,7 @@ def refresh_base_model(name): def refresh_refiner_model(name): global model_refiner - filename = os.path.abspath(os.path.realpath(os.path.join(modules.config.path_checkpoints, name))) + filename = get_file_from_folder_list(name, modules.config.paths_checkpoints) if model_refiner.filename == filename: return diff --git a/modules/util.py b/modules/util.py index 9d4d0996..3c23a992 100644 --- a/modules/util.py +++ b/modules/util.py @@ -177,5 +177,14 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None): return filenames +def get_file_from_folder_list(name, folders): + for folder in folders: + filename = os.path.abspath(os.path.realpath(os.path.join(folder, name))) + if os.path.isfile(filename): + return filename + + return os.path.abspath(os.path.realpath(os.path.join(folders[0], name))) + + def ordinal_suffix(number: int) -> str: return 'th' if 10 <= number % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(number % 10, 'th') From 4d34f31a7207e7f2f4e2040be6a62fccd892a57d Mon Sep 17 00:00:00 2001 From: Maxim Saplin Date: Sun, 25 Feb 2024 19:14:17 +0300 Subject: [PATCH 035/103] feat: allow users to specify the number of threads when running on CPU (#1601) * CPU_NUM_THREADS * refactor: optimize code, type is already strict --------- Co-authored-by: Manuel Schmid --- 
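A note on the flag semantics this patch introduces: with nargs="?" and const=-1, --always-cpu can be passed bare (CPU mode with the default thread count) or with an explicit thread count. A minimal standalone sketch of that behavior, reusing only names that appear in the diff below:

import argparse
import torch

parser = argparse.ArgumentParser()
parser.add_argument("--always-cpu", type=int, nargs="?", metavar="CPU_NUM_THREADS", const=-1)

args = parser.parse_args(["--always-cpu", "8"])  # bare "--always-cpu" would yield -1, absent flag yields None
if args.always_cpu is not None and args.always_cpu > 0:
    torch.set_num_threads(args.always_cpu)  # cap intra-op CPU parallelism
    print(f"Running on {torch.get_num_threads()} CPU threads")

Because const is -1 and the guard only calls torch.set_num_threads for positive values, the bare flag still enables CPU mode without overriding PyTorch's default thread count.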
 ldm_patched/modules/args_parser.py      | 2 +-
 ldm_patched/modules/model_management.py | 3 +++
 readme.md                               | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/ldm_patched/modules/args_parser.py b/ldm_patched/modules/args_parser.py
index e5b84dc1..272deb83 100644
--- a/ldm_patched/modules/args_parser.py
+++ b/ldm_patched/modules/args_parser.py
@@ -100,7 +100,7 @@ vram_group.add_argument("--always-high-vram", action="store_true")
 vram_group.add_argument("--always-normal-vram", action="store_true")
 vram_group.add_argument("--always-low-vram", action="store_true")
 vram_group.add_argument("--always-no-vram", action="store_true")
-vram_group.add_argument("--always-cpu", action="store_true")
+vram_group.add_argument("--always-cpu", type=int, nargs="?", metavar="CPU_NUM_THREADS", const=-1)
 
 parser.add_argument("--always-offload-from-vram", action="store_true")

diff --git a/ldm_patched/modules/model_management.py b/ldm_patched/modules/model_management.py
index 6f88579d..840d79a0 100644
--- a/ldm_patched/modules/model_management.py
+++ b/ldm_patched/modules/model_management.py
@@ -60,6 +60,9 @@ except:
     pass
 
 if args.always_cpu:
+    if args.always_cpu > 0:
+        torch.set_num_threads(args.always_cpu)
+    print(f"Running on {torch.get_num_threads()} CPU threads")
     cpu_state = CPUState.CPU
 
 def is_intel_xpu():

diff --git a/readme.md b/readme.md
index fa7e829c..18b48f3a 100644
--- a/readme.md
+++ b/readme.md
@@ -370,7 +370,7 @@ entry_with_update.py [-h] [--listen [IP]] [--port PORT]
                      [--attention-split | --attention-quad | --attention-pytorch]
                      [--disable-xformers]
                      [--always-gpu | --always-high-vram | --always-normal-vram |
-                     --always-low-vram | --always-no-vram | --always-cpu]
+                     --always-low-vram | --always-no-vram | --always-cpu [CPU_NUM_THREADS]]
                      [--always-offload-from-vram] [--disable-server-log]
                      [--debug-mode] [--is-windows-embedded-python]
                      [--disable-server-info] [--share] [--preset PRESET]

From 9c19300a3e3ed184b0d27fbd8fc6bc52eb2d38cb Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Sun, 25 Feb 2024 18:04:46 +0100
Subject: [PATCH 036/103] feat: improve bug report and feature request issue templates (#1631)

* refactor and improve bug report and feature request issue templates
* update operating system placeholder to Windows 10 (most common usage, I assume)
* use already existing label "enhancement" instead of "feature"
* feat: add checkbox for latest version check, add triage to feature requests
* feat: add link to ask a question
* feat: use templates of stable-diffusion-webui-forge as basis
* feat: add optional hosting and operating system inputs
---
 .github/ISSUE_TEMPLATE/bug_report.md       |  18 ----
 .github/ISSUE_TEMPLATE/bug_report.yml      | 106 +++++++++++++++++++++
 .github/ISSUE_TEMPLATE/config.yml          |   5 +
 .github/ISSUE_TEMPLATE/feature_request.md  |  14 ---
 .github/ISSUE_TEMPLATE/feature_request.yml |  40 ++++++++
 5 files changed, 151 insertions(+), 32 deletions(-)
 delete mode 100644 .github/ISSUE_TEMPLATE/bug_report.md
 create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml
 create mode 100644 .github/ISSUE_TEMPLATE/config.yml
 delete mode 100644 .github/ISSUE_TEMPLATE/feature_request.md
 create mode 100644 .github/ISSUE_TEMPLATE/feature_request.yml

diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 624cfe3e..00000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-name: Bug report
-about: Describe a problem
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Read
Troubleshoot**
-
-[x] I confirm that I have read the [Troubleshoot](https://github.com/lllyasviel/Fooocus/blob/main/troubleshoot.md) guide before making this issue.
-
-**Describe the problem**
-A clear and concise description of what the bug is.
-
-**Full Console Log**
-Paste the **full** console log here. You will make our job easier if you give a **full** log.

diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 00000000..483e0de1
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,106 @@
+name: Bug Report
+description: You think something is broken in Fooocus
+title: "[Bug]: "
+labels: ["bug", "triage"]
+
+body:
+  - type: markdown
+    attributes:
+      value: |
+        > The title of the bug report should be short and descriptive.
+        > Use relevant keywords for searchability.
+        > Do not leave it blank, but also do not put an entire error log in it.
+  - type: checkboxes
+    attributes:
+      label: Checklist
+      description: |
+        Please perform basic debugging to see if your configuration is the cause of the issue.
+        Basic debug procedure
+        1. Update Fooocus - sometimes things just need to be updated
+        2. Backup and remove your config.txt - check if the issue is caused by bad configuration
+        3. Try a fresh installation of Fooocus in a different directory - see if a clean installation solves the issue
+        Before making an issue report, please check that the issue hasn't been reported recently.
+      options:
+        - label: The issue exists on a clean installation of Fooocus
+        - label: The issue exists in the current version of Fooocus
+        - label: The issue has not been reported recently
+        - label: The issue has been reported before but has not been fixed yet
+  - type: markdown
+    attributes:
+      value: |
+        > Please fill this form with as much information as possible. Don't forget to add information about "What browsers" and provide screenshots if possible.
+  - type: textarea
+    id: what-did
+    attributes:
+      label: What happened?
+      description: Tell us what happened in a very clear and simple way
+      placeholder: |
+        Image generation is not working as intended.
+    validations:
+      required: true
+  - type: textarea
+    id: steps
+    attributes:
+      label: Steps to reproduce the problem
+      description: Please provide us with precise step by step instructions on how to reproduce the bug
+      placeholder: |
+        1. Go to ...
+        2. Press ...
+        3. ...
+    validations:
+      required: true
+  - type: textarea
+    id: what-should
+    attributes:
+      label: What should have happened?
+      description: Tell us what you think the normal behavior should be
+      placeholder: |
+        Fooocus should ...
+    validations:
+      required: true
+  - type: dropdown
+    id: browsers
+    attributes:
+      label: What browsers do you use to access Fooocus?
+      multiple: true
+      options:
+        - Mozilla Firefox
+        - Google Chrome
+        - Brave
+        - Apple Safari
+        - Microsoft Edge
+        - Android
+        - iOS
+        - Other
+  - type: dropdown
+    id: hosting
+    attributes:
+      label: Where are you running Fooocus?
+      multiple: false
+      options:
+        - Locally
+        - Locally with virtualization (e.g. Docker)
+        - Cloud (Google Colab)
+        - Cloud (other)
+  - type: input
+    id: operating-system
+    attributes:
+      label: What operating system are you using?
+      placeholder: |
+        Windows 10
+  - type: textarea
+    id: logs
+    attributes:
+      label: Console logs
+      description: Please provide **full** cmd/terminal logs from the moment you started the UI to the end of it, after the bug occurred. If it's very long, provide a link to pastebin or similar service.
+ render: Shell + validations: + required: true + - type: textarea + id: misc + attributes: + label: Additional information + description: | + Please provide us with any relevant additional info or context. + Examples: +  I have updated my GPU driver recently. \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..7bbf022a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: false +contact_links: + - name: Ask a question + url: https://github.com/lllyasviel/Fooocus/discussions/new?category=q-a + about: Ask the community for help \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 8101bc36..00000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: '' -labels: '' -assignees: '' - ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -**Describe the idea you'd like** -A clear and concise description of what you want to happen. diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 00000000..90e594e4 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,40 @@ +name: Feature request +description: Suggest an idea for this project +title: "[Feature Request]: " +labels: ["enhancement", "triage"] + +body: + - type: checkboxes + attributes: + label: Is there an existing issue for this? + description: Please search to see if an issue already exists for the feature you want, and that it's not implemented in a recent build/commit. + options: + - label: I have searched the existing issues and checked the recent builds/commits + required: true + - type: markdown + attributes: + value: | + *Please fill this form with as much information as possible, provide screenshots and/or illustrations of the feature if possible* + - type: textarea + id: feature + attributes: + label: What would your feature do? + description: Tell us about your feature in a very clear and simple way, and what problem it would solve + validations: + required: true + - type: textarea + id: workflow + attributes: + label: Proposed workflow + description: Please provide us with step by step information on how you'd like the feature to be accessed and used + value: | + 1. Go to .... + 2. Press .... + 3. ... + validations: + required: true + - type: textarea + id: misc + attributes: + label: Additional information + description: Add any other context or screenshots about the feature request here. 
\ No newline at end of file From b5f019fb6237ef0074f3e930f6176e4635456ff3 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sun, 25 Feb 2024 18:41:43 +0100 Subject: [PATCH 037/103] fix: correctly create directory for path_outputs if not existing (#1668) * correctly create directory for outputs if not existing * feat: add make_directory parameter checks for list, extract make_directory to util --- modules/config.py | 55 ++++++++++++++++++++++++++--------------------- modules/util.py | 7 ++++++ 2 files changed, 37 insertions(+), 25 deletions(-) diff --git a/modules/config.py b/modules/config.py index d3be1f21..6f713916 100644 --- a/modules/config.py +++ b/modules/config.py @@ -7,7 +7,7 @@ import modules.flags import modules.sdxl_styles from modules.model_loader import load_file_from_url -from modules.util import get_files_from_folder +from modules.util import get_files_from_folder, makedirs_with_log config_path = os.path.abspath("./config.txt") @@ -107,14 +107,14 @@ def get_path_output() -> str: Checking output path argument and overriding default path. """ global config_dict - path_output = get_dir_or_set_default('path_outputs', '../outputs/') + path_output = get_dir_or_set_default('path_outputs', '../outputs/', make_directory=True) if args_manager.args.output_path: print(f'[CONFIG] Overriding config value path_outputs with {args_manager.args.output_path}') config_dict['path_outputs'] = path_output = args_manager.args.output_path return path_output -def get_dir_or_set_default(key, default_value, as_array=False): +def get_dir_or_set_default(key, default_value, as_array=False, make_directory=False): global config_dict, visited_keys, always_save_keys if key not in visited_keys: @@ -124,26 +124,34 @@ def get_dir_or_set_default(key, default_value, as_array=False): always_save_keys.append(key) v = config_dict.get(key, None) - if isinstance(v, str) and os.path.exists(v) and os.path.isdir(v): - return v if not as_array else [v] - elif isinstance(v, list) and all([os.path.exists(d) and os.path.isdir(d) for d in v]): - return v + + if isinstance(v, str): + if make_directory: + makedirs_with_log(v) + if os.path.exists(v) and os.path.isdir(v): + return v if not as_array else [v] + elif isinstance(v, list): + if make_directory: + for d in v: + makedirs_with_log(d) + if all([os.path.exists(d) and os.path.isdir(d) for d in v]): + return v + + if v is not None: + print(f'Failed to load config key: {json.dumps({key:v})} is invalid or does not exist; will use {json.dumps({key:default_value})} instead.') + if isinstance(default_value, list): + dp = [] + for path in default_value: + abs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), path)) + dp.append(abs_path) + os.makedirs(abs_path, exist_ok=True) else: - if v is not None: - print(f'Failed to load config key: {json.dumps({key:v})} is invalid or does not exist; will use {json.dumps({key:default_value})} instead.') - if isinstance(default_value, list): - dp = [] - for path in default_value: - abs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), path)) - dp.append(abs_path) - os.makedirs(abs_path, exist_ok=True) - else: - dp = os.path.abspath(os.path.join(os.path.dirname(__file__), default_value)) - os.makedirs(dp, exist_ok=True) - if as_array: - dp = [dp] - config_dict[key] = dp - return dp + dp = os.path.abspath(os.path.join(os.path.dirname(__file__), default_value)) + os.makedirs(dp, exist_ok=True) + if as_array: + dp = [dp] + config_dict[key] = dp + return dp paths_checkpoints = 
get_dir_or_set_default('path_checkpoints', ['../models/checkpoints/'], True) @@ -408,9 +416,6 @@ with open(config_example_path, "w", encoding="utf-8") as json_file: 'and there is no "," before the last "}". \n\n\n') json.dump({k: config_dict[k] for k in visited_keys}, json_file, indent=4) - -os.makedirs(path_outputs, exist_ok=True) - model_filenames = [] lora_filenames = [] diff --git a/modules/util.py b/modules/util.py index 3c23a992..1b165115 100644 --- a/modules/util.py +++ b/modules/util.py @@ -188,3 +188,10 @@ def get_file_from_folder_list(name, folders): def ordinal_suffix(number: int) -> str: return 'th' if 10 <= number % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(number % 10, 'th') + + +def makedirs_with_log(path): + try: + os.makedirs(path, exist_ok=True) + except OSError as error: + print(f'Directory {path} could not be created, reason: {error}') From eebd7752ab7aaa42e4928d1472115bb896468286 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sun, 25 Feb 2024 18:44:28 +0100 Subject: [PATCH 038/103] fix: allow path_outputs to be outside of root dir (#2332) allows Gradio to serve outputs when folder has been changed in the config --- webui.py | 1 + 1 file changed, 1 insertion(+) diff --git a/webui.py b/webui.py index 0d8c3c04..6d72c67c 100644 --- a/webui.py +++ b/webui.py @@ -635,5 +635,6 @@ shared.gradio_root.launch( server_port=args_manager.args.port, share=args_manager.args.share, auth=check_auth if (args_manager.args.share or args_manager.args.listen) and auth_enabled else None, + allowed_paths=[modules.config.path_outputs], blocked_paths=[constants.AUTH_FILENAME] ) From 468d704b299e0bf10ace1662506289ccd85be018 Mon Sep 17 00:00:00 2001 From: MindOfMatter <35126123+MindOfMatter@users.noreply.github.com> Date: Sun, 25 Feb 2024 13:59:28 -0500 Subject: [PATCH 039/103] feat: add button to enable LoRAs (#2210) * Initial commit * Update README.md * sync with original main Fooocus repo * update with my gitignore setup * add max lora config feature * Revert "add max lora config feature" This reverts commit cfe7463fe25475b6d59f36072ade410a2d8d5124. * add lora enabler feature * Update README.md * Update .gitignore * update * merge * revert changes * revert * feat: change width of LoRA columns * refactor: rename lora_enable to lora_enabled, optimize code --------- Co-authored-by: Manuel Schmid --- modules/async_worker.py | 10 +++++++++- modules/html.py | 24 ++++++++++++++++++++++++ modules/meta_parser.py | 6 ++++-- webui.py | 9 ++++++--- 4 files changed, 43 insertions(+), 6 deletions(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index a304e697..34cd2e5a 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -115,6 +115,14 @@ def worker(): # must use deep copy otherwise gradio is super laggy. Do not use list.append() . 
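For reference, the multi-location support added earlier in this series hinges on an ordered-folder lookup: get_file_from_folder_list returns the first existing match and otherwise falls back to a path inside the first folder, which launch.py also uses as the download target. A small usage sketch; the folder values here are illustrative, not from the patch:

import os

def get_file_from_folder_list(name, folders):
    # earlier folders take precedence over later ones
    for folder in folders:
        filename = os.path.abspath(os.path.realpath(os.path.join(folder, name)))
        if os.path.isfile(filename):
            return filename
    # nothing found: fall back to the first (primary) folder
    return os.path.abspath(os.path.realpath(os.path.join(folders[0], name)))

paths_loras = ['../models/loras/', '/mnt/shared/loras/']  # illustrative list
print(get_file_from_folder_list('sdxl_lcm_lora.safetensors', paths_loras))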
async_task.results = async_task.results + [wall]
         return
 
+    def apply_enabled_loras(loras):
+        enabled_loras = []
+        for lora_enabled, lora_model, lora_weight in loras:
+            if lora_enabled:
+                enabled_loras.append([lora_model, lora_weight])
+
+        return enabled_loras
 
     @torch.no_grad()
     @torch.inference_mode()
@@ -137,7 +145,7 @@ def worker():
             base_model_name = args.pop()
             refiner_model_name = args.pop()
             refiner_switch = args.pop()
-            loras = [[str(args.pop()), float(args.pop())] for _ in range(5)]
+            loras = apply_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop()), ] for _ in range(5)])
             input_image_checkbox = args.pop()
             current_tab = args.pop()
             uov_method = args.pop()

diff --git a/modules/html.py b/modules/html.py
index 3ec6f2d6..47a1483a 100644
--- a/modules/html.py
+++ b/modules/html.py
@@ -112,6 +112,30 @@ progress::after {
     margin-left: -5px !important;
 }
 
+.lora_enable {
+    flex-grow: 1 !important;
+}
+
+.lora_enable label {
+    height: 100%;
+}
+
+.lora_enable label input {
+    margin: auto;
+}
+
+.lora_enable label span {
+    display: none;
+}
+
+.lora_model {
+    flex-grow: 5 !important;
+}
+
+.lora_weight {
+    flex-grow: 5 !important;
+}
+
 '''
 
 progress_html = '''
diff --git a/modules/meta_parser.py b/modules/meta_parser.py index 07b42a16..bd8f555e 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -139,10 +139,12 @@ def load_parameter_button_click(raw_prompt_txt, is_generating): try: n, w = loaded_parameter_dict.get(f'LoRA {i}').split(' : ') w = float(w) + results.append(True) results.append(n) results.append(w) except: - results.append(gr.update()) - results.append(gr.update()) + results.append(True) + results.append("None") + results.append(1.0) return results diff --git a/webui.py b/webui.py index 6d72c67c..1463ff90 100644 --- a/webui.py +++ b/webui.py @@ -322,11 +322,14 @@ with shared.gradio_root: for i, (n, v) in enumerate(modules.config.default_loras): with gr.Row(): + lora_enabled = gr.Checkbox(label='Enable', value=True, + elem_classes=['lora_enable', 'min_check']) lora_model = gr.Dropdown(label=f'LoRA {i + 1}', - choices=['None'] + modules.config.lora_filenames, value=n) + choices=['None'] + modules.config.lora_filenames, value=n, + elem_classes='lora_model') lora_weight = gr.Slider(label='Weight', minimum=-2, maximum=2, step=0.01, value=v, elem_classes='lora_weight') - lora_ctrls += [lora_model, lora_weight] + lora_ctrls += [lora_enabled, lora_model, lora_weight] with gr.Row(): model_refresh = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button') @@ -471,7 +474,7 @@ with shared.gradio_root: results = [] results += [gr.update(choices=modules.config.model_filenames), gr.update(choices=['None'] + modules.config.model_filenames)] for i in range(5): - results += [gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()] + results += [gr.update(choices=['None'] + modules.config.lora_filenames), gr.update(), gr.update(interactive=True)] return results model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls, From 18f9f7dc313ee279fd3241784aafad9e948b402b Mon Sep 17 00:00:00 2001 From: MindOfMatter <35126123+MindOfMatter@users.noreply.github.com> Date: Sun, 25 Feb 2024 15:12:26 -0500 Subject: [PATCH 040/103] feat: make lora number editable in config (#2215) * Initial commit * Update README.md * sync with original main Fooocus repo * update with my gitignore setup * add max lora config feature * Revert "add max lora config feature" This reverts commit cfe7463fe25475b6d59f36072ade410a2d8d5124. * add max loras config feature * Update README.md * Update .gitignore * update * merge * revert * refactor: rename default_loras_max_number to default_max_lora_number, validate config for int * fix: add missing patch_all call and imports again --------- Co-authored-by: Manuel Schmid --- modules/async_worker.py | 7 +++---- modules/config.py | 8 +++++++- modules/meta_parser.py | 6 +++--- webui.py | 8 ++++---- 4 files changed, 17 insertions(+), 12 deletions(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index 34cd2e5a..47848ad6 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -4,7 +4,6 @@ from modules.patch import PatchSettings, patch_settings, patch_all patch_all() - class AsyncTask: def __init__(self, args): self.args = args @@ -115,13 +114,13 @@ def worker(): # must use deep copy otherwise gradio is super laggy. Do not use list.append() . 
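To illustrate the argument contract these hunks rely on: the webui appends one (enabled, model, weight) triplet per LoRA slot to the flat ctrls list, and the worker pops the triplets back off in the same order, with the slot count now driven by config. A self-contained sketch, assuming the worker has already reversed the list so pop() yields values front-to-back; the sample values are made up:

default_max_lora_number = 3  # comes from config.txt in the real code

args = [True, 'detail_tweaker.safetensors', 0.8,
        False, 'style_lora.safetensors', 1.0,
        True, 'None', 1.0]  # made-up (enabled, model, weight) triplets
args.reverse()  # so successive pop() calls return values front-to-back

loras = [[bool(args.pop()), str(args.pop()), float(args.pop())]
         for _ in range(default_max_lora_number)]

def apply_enabled_loras(loras):
    # keep only rows whose checkbox is ticked, dropping the flag itself
    return [[name, weight] for enabled, name, weight in loras if enabled]

print(apply_enabled_loras(loras))
# -> [['detail_tweaker.safetensors', 0.8], ['None', 1.0]]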
async_task.results = async_task.results + [wall] return - + def apply_enabled_loras(loras): enabled_loras = [] for lora_enabled, lora_model, lora_weight in loras: if lora_enabled: enabled_loras.append([lora_model, lora_weight]) - + return enabled_loras @torch.no_grad() @@ -145,7 +144,7 @@ def worker(): base_model_name = args.pop() refiner_model_name = args.pop() refiner_switch = args.pop() - loras = apply_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop()), ] for _ in range(5)]) + loras = apply_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop()), ] for _ in range(modules.config.default_max_lora_number)]) input_image_checkbox = args.pop() current_tab = args.pop() uov_method = args.pop() diff --git a/modules/config.py b/modules/config.py index 6f713916..bb1ee26c 100644 --- a/modules/config.py +++ b/modules/config.py @@ -235,6 +235,11 @@ default_loras = get_config_item_or_set_default( ], validator=lambda x: isinstance(x, list) and all(len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number) for y in x) ) +default_max_lora_number = get_config_item_or_set_default( + key='default_max_lora_number', + default_value=len(default_loras), + validator=lambda x: isinstance(x, int) and x >= 1 +) default_cfg_scale = get_config_item_or_set_default( key='default_cfg_scale', default_value=7.0, @@ -357,13 +362,14 @@ example_inpaint_prompts = get_config_item_or_set_default( example_inpaint_prompts = [[x] for x in example_inpaint_prompts] -config_dict["default_loras"] = default_loras = default_loras[:5] + [['None', 1.0] for _ in range(5 - len(default_loras))] +config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [['None', 1.0] for _ in range(default_max_lora_number - len(default_loras))] possible_preset_keys = [ "default_model", "default_refiner", "default_refiner_switch", "default_loras", + "default_max_lora_number", "default_cfg_scale", "default_sample_sharpness", "default_sampler", diff --git a/modules/meta_parser.py b/modules/meta_parser.py index bd8f555e..061e1f8a 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -135,16 +135,16 @@ def load_parameter_button_click(raw_prompt_txt, is_generating): results.append(gr.update(visible=False)) - for i in range(1, 6): + for i in range(1, modules.config.default_max_lora_number + 1): try: - n, w = loaded_parameter_dict.get(f'LoRA {i}').split(' : ') + n, w = loaded_parameter_dict.get(f'LoRA {i}', ' : ').split(' : ') w = float(w) results.append(True) results.append(n) results.append(w) except: results.append(True) - results.append("None") + results.append('None') results.append(1.0) return results diff --git a/webui.py b/webui.py index 1463ff90..270f0ffa 100644 --- a/webui.py +++ b/webui.py @@ -471,10 +471,10 @@ with shared.gradio_root: def model_refresh_clicked(): modules.config.update_all_model_names() - results = [] - results += [gr.update(choices=modules.config.model_filenames), gr.update(choices=['None'] + modules.config.model_filenames)] - for i in range(5): - results += [gr.update(choices=['None'] + modules.config.lora_filenames), gr.update(), gr.update(interactive=True)] + results = [gr.update(choices=modules.config.model_filenames)] + results += [gr.update(choices=['None'] + modules.config.model_filenames)] + for i in range(modules.config.default_max_lora_number): + results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()] return results model_refresh.click(model_refresh_clicked, [], [base_model, 
refiner_model] + lora_ctrls, From 3be76ef8a3d503273f3800fa35b8148888bb9d4d Mon Sep 17 00:00:00 2001 From: MindOfMatter <35126123+MindOfMatter@users.noreply.github.com> Date: Sun, 25 Feb 2024 15:36:25 -0500 Subject: [PATCH 041/103] feat: make lora min max weight editable in config (#2216) * Initial commit * Update README.md * sync with original main Fooocus repo * update with my gitignore setup * add min max weight configs feature * add max lora config feature * Revert "add max lora config feature" This reverts commit cfe7463fe25475b6d59f36072ade410a2d8d5124. * Update README.md * Update .gitignore * update * merge * revert --------- Co-authored-by: Manuel Schmid --- modules/config.py | 12 ++++++++++++ webui.py | 3 ++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/modules/config.py b/modules/config.py index bb1ee26c..acf19b60 100644 --- a/modules/config.py +++ b/modules/config.py @@ -209,6 +209,16 @@ default_refiner_switch = get_config_item_or_set_default( default_value=0.8, validator=lambda x: isinstance(x, numbers.Number) and 0 <= x <= 1 ) +default_loras_min_weight = get_config_item_or_set_default( + key='default_loras_min_weight', + default_value=-2, + validator=lambda x: isinstance(x, numbers.Number) and -10 <= x <= 10 +) +default_loras_max_weight = get_config_item_or_set_default( + key='default_loras_max_weight', + default_value=2, + validator=lambda x: isinstance(x, numbers.Number) and -10 <= x <= 10 +) default_loras = get_config_item_or_set_default( key='default_loras', default_value=[ @@ -368,6 +378,8 @@ possible_preset_keys = [ "default_model", "default_refiner", "default_refiner_switch", + "default_loras_min_weight", + "default_loras_max_weight", "default_loras", "default_max_lora_number", "default_cfg_scale", diff --git a/webui.py b/webui.py index 270f0ffa..a3cb45e4 100644 --- a/webui.py +++ b/webui.py @@ -327,7 +327,8 @@ with shared.gradio_root: lora_model = gr.Dropdown(label=f'LoRA {i + 1}', choices=['None'] + modules.config.lora_filenames, value=n, elem_classes='lora_model') - lora_weight = gr.Slider(label='Weight', minimum=-2, maximum=2, step=0.01, value=v, + lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight, + maximum=modules.config.default_loras_max_weight, step=0.01, value=v, elem_classes='lora_weight') lora_ctrls += [lora_enabled, lora_model, lora_weight] From c898e6a4dca5199653e0f05e0420c6d158bdd4f2 Mon Sep 17 00:00:00 2001 From: Brian Flannery Date: Sun, 25 Feb 2024 15:22:49 -0600 Subject: [PATCH 042/103] feat: add array support on main prompt (#1503) * prompt array support * update change log * update change log * docs: remove 2.1.847 change log * refactor: rename freeze_seed to disable_seed_increment, move to developer debug mode * feat: add translation for new labels * fix: use task_rng based on task_seed, not initial seed --------- Co-authored-by: Manuel Schmid --- language/en.json | 2 ++ modules/async_worker.py | 11 ++++++++--- modules/sdxl_styles.py | 36 ++++++++++++++++++++++++++++++++++++ webui.py | 5 ++++- 4 files changed, 50 insertions(+), 4 deletions(-) diff --git a/language/en.json b/language/en.json index 8a782e3f..a3e47c1a 100644 --- a/language/en.json +++ b/language/en.json @@ -48,6 +48,8 @@ "Describing what you do not want to see.": "Describing what you do not want to see.", "Random": "Random", "Seed": "Seed", + "Disable seed increment": "Disable seed increment", + "Disable automatic seed increment when image number is > 1.": "Disable automatic seed increment when image number is > 1.", "\ud83d\udcda 
History Log": "\uD83D\uDCDA History Log", "Image Style": "Image Style", "Fooocus V2": "Fooocus V2", diff --git a/modules/async_worker.py b/modules/async_worker.py index 47848ad6..4fca0966 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -40,7 +40,7 @@ def worker(): import extras.face_crop import fooocus_version - from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion + from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion, apply_arrays from modules.private_logger import log from extras.expansion import safe_str from modules.util import remove_empty_str, HWC3, resize_image, \ @@ -155,6 +155,7 @@ def worker(): inpaint_mask_image_upload = args.pop() disable_preview = args.pop() disable_intermediate_results = args.pop() + disable_seed_increment = args.pop() adm_scaler_positive = args.pop() adm_scaler_negative = args.pop() adm_scaler_end = args.pop() @@ -424,10 +425,14 @@ def worker(): progressbar(async_task, 3, 'Processing prompts ...') tasks = [] for i in range(image_number): - task_seed = (seed + i) % (constants.MAX_SEED + 1) # randint is inclusive, % is not - task_rng = random.Random(task_seed) # may bind to inpaint noise in the future + if disable_seed_increment: + task_seed = seed + else: + task_seed = (seed + i) % (constants.MAX_SEED + 1) # randint is inclusive, % is not + task_rng = random.Random(task_seed) # may bind to inpaint noise in the future task_prompt = apply_wildcards(prompt, task_rng) + task_prompt = apply_arrays(task_prompt, i) task_negative_prompt = apply_wildcards(negative_prompt, task_rng) task_extra_positive_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_positive_prompts] task_extra_negative_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_negative_prompts] diff --git a/modules/sdxl_styles.py b/modules/sdxl_styles.py index f5bb6276..71afc402 100644 --- a/modules/sdxl_styles.py +++ b/modules/sdxl_styles.py @@ -1,6 +1,7 @@ import os import re import json +import math from modules.util import get_files_from_folder @@ -80,3 +81,38 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path): print(f'[Wildcards] BFS stack overflow. 
Current text: {wildcard_text}') return wildcard_text + +def get_words(arrays, totalMult, index): + if(len(arrays) == 1): + return [arrays[0].split(',')[index]] + else: + words = arrays[0].split(',') + word = words[index % len(words)] + index -= index % len(words) + index /= len(words) + index = math.floor(index) + return [word] + get_words(arrays[1:], math.floor(totalMult/len(words)), index) + + + +def apply_arrays(text, index): + arrays = re.findall(r'\[\[([\s,\w-]+)\]\]', text) + if len(arrays) == 0: + return text + + print(f'[Arrays] processing: {text}') + mult = 1 + for arr in arrays: + words = arr.split(',') + mult *= len(words) + + index %= mult + chosen_words = get_words(arrays, mult, index) + + i = 0 + for arr in arrays: + text = text.replace(f'[[{arr}]]', chosen_words[i], 1) + i = i+1 + + return text + diff --git a/webui.py b/webui.py index a3cb45e4..14ba2a1f 100644 --- a/webui.py +++ b/webui.py @@ -398,6 +398,9 @@ with shared.gradio_root: value=modules.config.default_performance == 'Extreme Speed', interactive=modules.config.default_performance != 'Extreme Speed', info='Disable intermediate results during generation, only show final gallery.') + disable_seed_increment = gr.Checkbox(label='Disable seed increment', + info='Disable automatic seed increment when image number is > 1.', + value=False) with gr.Tab(label='Control'): debugging_cn_preprocessor = gr.Checkbox(label='Debug Preprocessors', value=False, @@ -538,7 +541,7 @@ with shared.gradio_root: ctrls += [input_image_checkbox, current_tab] ctrls += [uov_method, uov_input_image] ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt, inpaint_mask_image] - ctrls += [disable_preview, disable_intermediate_results] + ctrls += [disable_preview, disable_intermediate_results, disable_seed_increment] ctrls += [adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg] ctrls += [sampler_name, scheduler_name] ctrls += [overwrite_step, overwrite_switch, overwrite_width, overwrite_height, overwrite_vary_strength] From d3113f5c3f6aa266cf9cae498690cece6f3784c3 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sun, 25 Feb 2024 22:56:38 +0100 Subject: [PATCH 043/103] feat: use consistent file name in gradio (#1932) * feat: use consistent file name in gradio returns and uses filepaths instead of numpy image by saving to temp dir uses double the temp dir file storage on disk as it saves to temp dir and gradio temp dir when displaying the image, but reuses logged output image * feat: delete temp images after yielding to gradio * feat: use args temp path if given * chore: code cleanup, remove redundant if statement --- args_manager.py | 6 ++++++ ldm_patched/modules/args_parser.py | 1 - modules/async_worker.py | 9 +++++---- modules/private_logger.py | 16 +++++++++------- webui.py | 5 +++++ 5 files changed, 25 insertions(+), 12 deletions(-) diff --git a/args_manager.py b/args_manager.py index eeb38e1f..1675c31d 100644 --- a/args_manager.py +++ b/args_manager.py @@ -1,5 +1,7 @@ import ldm_patched.modules.args_parser as args_parser +import os +from tempfile import gettempdir args_parser.parser.add_argument("--share", action='store_true', help="Set whether to share on Gradio.") args_parser.parser.add_argument("--preset", type=str, default=None, help="Apply specified UI preset.") @@ -40,7 +42,11 @@ args_parser.args.always_offload_from_vram = not args_parser.args.disable_offload if args_parser.args.disable_analytics: import os os.environ["GRADIO_ANALYTICS_ENABLED"] = "False" 
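To make the [[...]] array expansion from the previous patch concrete: apply_arrays selects the image-index-th combination of all bracketed lists, with the first array cycling fastest. A runnable sketch of the get_words logic on an example prompt (the prompt text is illustrative):

import math
import re

def get_words(arrays, total_mult, index):
    if len(arrays) == 1:
        return [arrays[0].split(',')[index]]
    words = arrays[0].split(',')
    word = words[index % len(words)]
    index = (index - index % len(words)) // len(words)  # shift to the next array
    return [word] + get_words(arrays[1:], math.floor(total_mult / len(words)), index)

text = 'a [[red,blue]] [[cat,dog,fox]]'  # 2 * 3 = 6 combinations in total
arrays = re.findall(r'\[\[([\s,\w-]+)\]\]', text)
for i in range(6):
    print(i, get_words(arrays, 6, i))
# 0 ['red', 'cat']   1 ['blue', 'cat']   2 ['red', 'dog']
# 3 ['blue', 'dog']  4 ['red', 'fox']    5 ['blue', 'fox']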
+ if args_parser.args.disable_in_browser: args_parser.args.in_browser = False +if args_parser.args.temp_path is None: + args_parser.args.temp_path = os.path.join(gettempdir(), 'Fooocus') + args = args_parser.args diff --git a/ldm_patched/modules/args_parser.py b/ldm_patched/modules/args_parser.py index 272deb83..0c6165a7 100644 --- a/ldm_patched/modules/args_parser.py +++ b/ldm_patched/modules/args_parser.py @@ -102,7 +102,6 @@ vram_group.add_argument("--always-low-vram", action="store_true") vram_group.add_argument("--always-no-vram", action="store_true") vram_group.add_argument("--always-cpu", type=int, nargs="?", metavar="CPU_NUM_THREADS", const=-1) - parser.add_argument("--always-offload-from-vram", action="store_true") parser.add_argument("--pytorch-deterministic", action="store_true") diff --git a/modules/async_worker.py b/modules/async_worker.py index 4fca0966..2a31aae1 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -563,8 +563,8 @@ def worker(): if direct_return: d = [('Upscale (Fast)', '2x')] - log(uov_input_image, d) - yield_result(async_task, uov_input_image, do_not_show_finished_images=True) + uov_input_image_path = log(uov_input_image, d) + yield_result(async_task, uov_input_image_path, do_not_show_finished_images=True) return tiled = True @@ -828,6 +828,7 @@ def worker(): if inpaint_worker.current_task is not None: imgs = [inpaint_worker.current_task.post_process(x) for x in imgs] + img_paths = [] for x in imgs: d = [ ('Prompt', task['log_positive_prompt']), @@ -853,9 +854,9 @@ def worker(): if n != 'None': d.append((f'LoRA {li + 1}', f'{n} : {w}')) d.append(('Version', 'v' + fooocus_version.version)) - log(x, d) + img_paths.append(log(x, d)) - yield_result(async_task, imgs, do_not_show_finished_images=len(tasks) == 1 or disable_intermediate_results) + yield_result(async_task, img_paths, do_not_show_finished_images=len(tasks) == 1 or disable_intermediate_results) except ldm_patched.modules.model_management.InterruptProcessingException as e: if async_task.last_stop == 'skip': print('User skipped') diff --git a/modules/private_logger.py b/modules/private_logger.py index 49f17dca..506b1055 100644 --- a/modules/private_logger.py +++ b/modules/private_logger.py @@ -6,7 +6,7 @@ import urllib.parse from PIL import Image from modules.util import generate_temp_filename - +from tempfile import gettempdir log_cache = {} @@ -18,13 +18,15 @@ def get_current_html_path(): return html_name -def log(img, dic): - if args_manager.args.disable_image_log: - return - - date_string, local_temp_filename, only_name = generate_temp_filename(folder=modules.config.path_outputs, extension='png') +def log(img, dic) -> str: + path_outputs = args_manager.args.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs + date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension='png') os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True) Image.fromarray(img).save(local_temp_filename) + + if args_manager.args.disable_image_log: + return local_temp_filename + html_name = os.path.join(os.path.dirname(local_temp_filename), 'log.html') css_styles = ( @@ -105,4 +107,4 @@ def log(img, dic): log_cache[html_name] = middle_part - return + return local_temp_filename diff --git a/webui.py b/webui.py index 14ba2a1f..a3663dfa 100644 --- a/webui.py +++ b/webui.py @@ -72,6 +72,11 @@ def generate_clicked(task): gr.update(visible=True, value=product) finished = True + # delete Fooocus temp images, only keep gradio temp images + 
if args_manager.args.disable_image_log:
+        for filepath in product:
+            os.remove(filepath)
+
     execution_time = time.perf_counter() - execution_start_time
     print(f'Total time: {execution_time:.2f} seconds')
     return

From ba9eadbcda33839b3f6f12b21ce7b10a4c90a93c Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Mon, 26 Feb 2024 14:27:57 +0100
Subject: [PATCH 044/103] feat: add metadata to images (#1940)

* feat: add metadata logging for images inspired by https://github.com/MoonRide303/Fooocus-MRE
* feat: add config and checkbox for save_metadata_to_images
* feat: add argument disable_metadata
* feat: add support for A1111 metadata schema https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/cf2772fab0af5573da775e7437e6acdca424f26e/modules/processing.py#L672
* feat: add model hash support for a1111
* feat: use resolved prompts with included expansion and styles for a1111 metadata
* fix: code cleanup and resolved prompt fixes
* feat: add config metadata_created_by
* fix: use string instead of quote wrap for A1111 created_by
* fix: correctly hide/show metadata schema on app start
* fix: do not generate hashes when arg --disable-metadata is used
* refactor: rename metadata_schema to metadata_scheme
* fix: use pnginfo "parameters" instead of "Comments" see https://github.com/RupertAvery/DiffusionToolkit/issues/202 and https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/cf2772fab0af5573da775e7437e6acdca424f26e/modules/processing.py#L939
* feat: add resolved prompts to metadata
* fix: use correct default value in metadata check for created_by
* wip: add metadata mapping, reading and writing; applying data after reading is currently not functional for A1111
* feat: rename metadata tab and import button label
* feat: map basic information for scheme A1111
* wip: optimize handling for metadata in Gradio calls
* feat: add enums for Performance, Steps and StepsUOV; also move MetadataSchema enum to prevent circular dependency
* fix: correctly map resolution, use empty styles for A1111
* chore: code cleanup
* feat: add A1111 prompt style detection; only detects one style, as Fooocus doesn't wrap {prompt} with the whole style but has a separate prompt string for each style
* wip: add prompt style extraction for A1111 scheme
* feat: sort styles after metadata import
* refactor: use central flag for LoRA count
* refactor: use central flag for ControlNet image count
* fix: use correct LoRA mapping, add fallback for backwards compatibility
* feat: add created_by again
* feat: add prefix "Fooocus" to version
* wip: code cleanup, update todos
* fix: use correct order to read LoRA in meta parser
* wip: code cleanup, update todos
* feat: make sha256 with length 10 default
* feat: add lora handling to A1111 scheme
* feat: override existing LoRA values when importing, would cause images to differ
* fix: correctly extract prompt style when only prompt expansion is selected
* feat: allow model / LoRA loading from subfolders
* feat: code cleanup, do not queue metadata preview on image upload
* refactor: add flag for refiner_swap_method
* feat: add metadata handling for all non-img2img parameters
* refactor: code cleanup
* chore: use str as return type in calculate_sha256
* feat: add hash cache to metadata
* chore: code cleanup
* feat: add method get_scheme to Metadata
* fix: align handling for scheme Fooocus by removing lcm lora from json parsing
* refactor: add step before parsing to set data in parser
- add constructor for MetadataSchema class
- remove showable and
copyable from log output
- add functional hash cache (model hashing takes about 5 seconds, only required once per model, using hash lazy loading)
* feat: sort metadata attributes before writing to image
* feat: add translations and hint for image prompt parameters
* chore: check and remove TODOs
* refactor: merge metadata.py into meta_parser.py
* fix: add missing refiner in A1111 parse_json
* wip: add TODO for multiline prompt style resolution
* fix: remove sorting for A1111, change performance key position; fixes https://github.com/lllyasviel/Fooocus/pull/1940#issuecomment-1924444633
* fix: add workaround for multiline prompts
* feat: add sampler mapping
* feat: prevent config reset by renaming metadata_scheme to match config options
* chore: remove remaining todos after analysis; refiner is added when set; restoring multiline prompts has been resolved by using separate parameters "raw_prompt" and "raw_negative_prompt"
* chore: specify too broad exception types
* feat: add mapping for _gpu samplers to cpu samplers; gpu samplers are less deterministic than cpu but in general similar, see https://www.reddit.com/r/comfyui/comments/15hayzo/comment/juqcpep/
* feat: add better handling for image import with empty metadata
* fix: parse adaptive_cfg as float instead of string
* chore: loosen strict type for parse_json, fix indent
* chore: make steps enums more strict
* feat: only override steps if metadata value is not in steps enum, or is in steps enum and performance is not the same
* fix: handle empty strings in metadata, e.g. raw negative prompt when none is set
---
 args_manager.py           |   5 +-
 language/en.json          |   9 +-
 modules/async_worker.py   | 101 ++++----
 modules/config.py         |  22 +-
 modules/flags.py          |  91 ++++++-
 modules/meta_parser.py    | 529 ++++++++++++++++++++++++++++++++------
 modules/private_logger.py |  28 +-
 modules/util.py           | 169 +++++++++++-
 webui.py                  | 102 +++++---
 9 files changed, 871 insertions(+), 185 deletions(-)

diff --git a/args_manager.py b/args_manager.py
index 1675c31d..c7c1b7ab 100644
--- a/args_manager.py
+++ b/args_manager.py
@@ -20,7 +20,10 @@ args_parser.parser.add_argument("--disable-image-log", action='store_true',
                                 help="Prevent writing images and logs to hard drive.")
 
 args_parser.parser.add_argument("--disable-analytics", action='store_true',
-                                help="Disables analytics for Gradio", default=False)
+                                help="Disables analytics for Gradio.")
+
+args_parser.parser.add_argument("--disable-metadata", action='store_true',
+                                help="Disables saving metadata to images.")
 
 args_parser.parser.add_argument("--disable-preset-download", action='store_true',
                                 help="Disables downloading models for presets", default=False)

diff --git a/language/en.json b/language/en.json
index a3e47c1a..cb5603f9 100644
--- a/language/en.json
+++ b/language/en.json
@@ -374,5 +374,12 @@
     "* Powered by Fooocus Inpaint Engine (beta)": "* Powered by Fooocus Inpaint Engine (beta)",
     "Fooocus Enhance": "Fooocus Enhance",
     "Fooocus Cinematic": "Fooocus Cinematic",
-    "Fooocus Sharp": "Fooocus Sharp"
+    "Fooocus Sharp": "Fooocus Sharp",
+    "Drag any image generated by Fooocus here": "Drag any image generated by Fooocus here",
+    "Metadata": "Metadata",
+    "Apply Metadata": "Apply Metadata",
+    "Metadata Scheme": "Metadata Scheme",
+    "Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.": "Image Prompt parameters are not included.
Use a1111 for compatibility with Civitai.", + "fooocus (json)": "fooocus (json)", + "a1111 (plain text)": "a1111 (plain text)" } \ No newline at end of file diff --git a/modules/async_worker.py b/modules/async_worker.py index 2a31aae1..677cf469 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -19,6 +19,7 @@ async_tasks = [] def worker(): global async_tasks + import os import traceback import math import numpy as np @@ -39,6 +40,7 @@ def worker(): import extras.ip_adapter as ip_adapter import extras.face_crop import fooocus_version + import args_manager from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion, apply_arrays from modules.private_logger import log @@ -46,6 +48,8 @@ def worker(): from modules.util import remove_empty_str, HWC3, resize_image, \ get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, ordinal_suffix from modules.upscaler import perform_upscale + from modules.flags import Performance + from modules.meta_parser import get_metadata_parser, MetadataScheme pid = os.getpid() print(f'Started worker with PID {pid}') @@ -135,7 +139,7 @@ def worker(): prompt = args.pop() negative_prompt = args.pop() style_selections = args.pop() - performance_selection = args.pop() + performance_selection = Performance(args.pop()) aspect_ratios_selection = args.pop() image_number = args.pop() image_seed = args.pop() @@ -153,6 +157,7 @@ def worker(): inpaint_input_image = args.pop() inpaint_additional_prompt = args.pop() inpaint_mask_image_upload = args.pop() + disable_preview = args.pop() disable_intermediate_results = args.pop() disable_seed_increment = args.pop() @@ -190,8 +195,11 @@ def worker(): invert_mask_checkbox = args.pop() inpaint_erode_or_dilate = args.pop() + save_metadata_to_images = args.pop() if not args_manager.args.disable_metadata else False + metadata_scheme = MetadataScheme(args.pop()) if not args_manager.args.disable_metadata else MetadataScheme.FOOOCUS + cn_tasks = {x: [] for x in flags.ip_list} - for _ in range(4): + for _ in range(flags.controlnet_image_count): cn_img = args.pop() cn_stop = args.pop() cn_weight = args.pop() @@ -216,17 +224,9 @@ def worker(): print(f'Refiner disabled because base model and refiner are same.') refiner_model_name = 'None' - assert performance_selection in ['Speed', 'Quality', 'Extreme Speed'] + steps = performance_selection.steps() - steps = 30 - - if performance_selection == 'Speed': - steps = 30 - - if performance_selection == 'Quality': - steps = 60 - - if performance_selection == 'Extreme Speed': + if performance_selection == Performance.EXTREME_SPEED: print('Enter LCM mode.') progressbar(async_task, 1, 'Downloading LCM components ...') loras += [(modules.config.downloading_sdxl_lcm_lora(), 1.0)] @@ -244,7 +244,6 @@ def worker(): adm_scaler_positive = 1.0 adm_scaler_negative = 1.0 adm_scaler_end = 0.0 - steps = 8 print(f'[Parameters] Adaptive CFG = {adaptive_cfg}') print(f'[Parameters] Sharpness = {sharpness}') @@ -305,16 +304,7 @@ def worker(): if 'fast' in uov_method: skip_prompt_processing = True else: - steps = 18 - - if performance_selection == 'Speed': - steps = 18 - - if performance_selection == 'Quality': - steps = 36 - - if performance_selection == 'Extreme Speed': - steps = 8 + steps = performance_selection.steps_uov() progressbar(async_task, 1, 'Downloading upscale models ...') modules.config.downloading_upscale_model() @@ -830,31 +820,50 @@ def worker(): img_paths = [] for x in imgs: - d = [ - ('Prompt', task['log_positive_prompt']), - 
('Negative Prompt', task['log_negative_prompt']), - ('Fooocus V2 Expansion', task['expansion']), - ('Styles', str(raw_style_selections)), - ('Performance', performance_selection), - ('Resolution', str((width, height))), - ('Sharpness', sharpness), - ('Guidance Scale', guidance_scale), - ('ADM Guidance', str(( - modules.patch.patch_settings[pid].positive_adm_scale, - modules.patch.patch_settings[pid].negative_adm_scale, - modules.patch.patch_settings[pid].adm_scaler_end))), - ('Base Model', base_model_name), - ('Refiner Model', refiner_model_name), - ('Refiner Switch', refiner_switch), - ('Sampler', sampler_name), - ('Scheduler', scheduler_name), - ('Seed', task['task_seed']), - ] + d = [('Prompt', 'prompt', task['log_positive_prompt']), + ('Negative Prompt', 'negative_prompt', task['log_negative_prompt']), + ('Fooocus V2 Expansion', 'prompt_expansion', task['expansion']), + ('Styles', 'styles', str(raw_style_selections)), + ('Performance', 'performance', performance_selection.value), + ('Resolution', 'resolution', str((width, height))), + ('Guidance Scale', 'guidance_scale', guidance_scale), + ('Sharpness', 'sharpness', sharpness), + ('ADM Guidance', 'adm_guidance', str(( + modules.patch.patch_settings[pid].positive_adm_scale, + modules.patch.patch_settings[pid].negative_adm_scale, + modules.patch.patch_settings[pid].adm_scaler_end))), + ('Base Model', 'base_model', base_model_name), + ('Refiner Model', 'refiner_model', refiner_model_name), + ('Refiner Switch', 'refiner_switch', refiner_switch)] + + if refiner_model_name != 'None': + if overwrite_switch > 0: + d.append(('Overwrite Switch', 'overwrite_switch', overwrite_switch)) + if refiner_swap_method != flags.refiner_swap_method: + d.append(('Refiner Swap Method', 'refiner_swap_method', refiner_swap_method)) + if modules.patch.patch_settings[pid].adaptive_cfg != modules.config.default_cfg_tsnr: + d.append(('CFG Mimicking from TSNR', 'adaptive_cfg', modules.patch.patch_settings[pid].adaptive_cfg)) + + d.append(('Sampler', 'sampler', sampler_name)) + d.append(('Scheduler', 'scheduler', scheduler_name)) + d.append(('Seed', 'seed', task['task_seed'])) + + if freeu_enabled: + d.append(('FreeU', 'freeu', str((freeu_b1, freeu_b2, freeu_s1, freeu_s2)))) + + metadata_parser = None + if save_metadata_to_images: + metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme) + metadata_parser.set_data(task['log_positive_prompt'], task['positive'], + task['log_negative_prompt'], task['negative'], + steps, base_model_name, refiner_model_name, loras) + for li, (n, w) in enumerate(loras): if n != 'None': - d.append((f'LoRA {li + 1}', f'{n} : {w}')) - d.append(('Version', 'v' + fooocus_version.version)) - img_paths.append(log(x, d)) + d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}')) + + d.append(('Version', 'version', 'Fooocus v' + fooocus_version.version)) + img_paths.append(log(x, d, metadata_parser)) yield_result(async_task, img_paths, do_not_show_finished_images=len(tasks) == 1 or disable_intermediate_results) except ldm_patched.modules.model_management.InterruptProcessingException as e: diff --git a/modules/config.py b/modules/config.py index acf19b60..a393e24c 100644 --- a/modules/config.py +++ b/modules/config.py @@ -8,7 +8,7 @@ import modules.sdxl_styles from modules.model_loader import load_file_from_url from modules.util import get_files_from_folder, makedirs_with_log - +from modules.flags import Performance, MetadataScheme config_path = os.path.abspath("./config.txt") config_example_path = 
os.path.abspath("config_modification_tutorial.txt") @@ -293,8 +293,8 @@ default_prompt = get_config_item_or_set_default( ) default_performance = get_config_item_or_set_default( key='default_performance', - default_value='Speed', - validator=lambda x: x in modules.flags.performance_selections + default_value=Performance.SPEED.value, + validator=lambda x: x in Performance.list() ) default_advanced_checkbox = get_config_item_or_set_default( key='default_advanced_checkbox', @@ -369,6 +369,21 @@ example_inpaint_prompts = get_config_item_or_set_default( ], validator=lambda x: isinstance(x, list) and all(isinstance(v, str) for v in x) ) +default_save_metadata_to_images = get_config_item_or_set_default( + key='default_save_metadata_to_images', + default_value=False, + validator=lambda x: isinstance(x, bool) +) +default_metadata_scheme = get_config_item_or_set_default( + key='default_metadata_scheme', + default_value=MetadataScheme.FOOOCUS.value, + validator=lambda x: x in [y[1] for y in modules.flags.metadata_scheme if y[1] == x] +) +metadata_created_by = get_config_item_or_set_default( + key='metadata_created_by', + default_value='', + validator=lambda x: isinstance(x, str) +) example_inpaint_prompts = [[x] for x in example_inpaint_prompts] @@ -391,6 +406,7 @@ possible_preset_keys = [ "default_prompt_negative", "default_styles", "default_aspect_ratio", + "default_save_metadata_to_images", "checkpoint_downloads", "embeddings_downloads", "lora_downloads", diff --git a/modules/flags.py b/modules/flags.py index 27f2d716..206f5121 100644 --- a/modules/flags.py +++ b/modules/flags.py @@ -1,3 +1,5 @@ +from enum import IntEnum, Enum + disabled = 'Disabled' enabled = 'Enabled' subtle_variation = 'Vary (Subtle)' @@ -10,16 +12,49 @@ uov_list = [ disabled, subtle_variation, strong_variation, upscale_15, upscale_2, upscale_fast ] -KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "heunpp2","dpm_2", "dpm_2_ancestral", - "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", - "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"] +CIVITAI_NO_KARRAS = ["euler", "euler_ancestral", "heun", "dpm_fast", "dpm_adaptive", "ddim", "uni_pc"] + +# fooocus: a1111 (Civitai) +KSAMPLER = { + "euler": "Euler", + "euler_ancestral": "Euler a", + "heun": "Heun", + "heunpp2": "", + "dpm_2": "DPM2", + "dpm_2_ancestral": "DPM2 a", + "lms": "LMS", + "dpm_fast": "DPM fast", + "dpm_adaptive": "DPM adaptive", + "dpmpp_2s_ancestral": "DPM++ 2S a", + "dpmpp_sde": "DPM++ SDE", + "dpmpp_sde_gpu": "DPM++ SDE", + "dpmpp_2m": "DPM++ 2M", + "dpmpp_2m_sde": "DPM++ 2M SDE", + "dpmpp_2m_sde_gpu": "DPM++ 2M SDE", + "dpmpp_3m_sde": "", + "dpmpp_3m_sde_gpu": "", + "ddpm": "", + "lcm": "LCM" +} + +SAMPLER_EXTRA = { + "ddim": "DDIM", + "uni_pc": "UniPC", + "uni_pc_bh2": "" +} + +SAMPLERS = KSAMPLER | SAMPLER_EXTRA + +KSAMPLER_NAMES = list(KSAMPLER.keys()) SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo"] -SAMPLER_NAMES = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"] +SAMPLER_NAMES = KSAMPLER_NAMES + list(SAMPLER_EXTRA.keys()) sampler_list = SAMPLER_NAMES scheduler_list = SCHEDULER_NAMES +refiner_swap_method = 'joint' + cn_ip = "ImagePrompt" cn_ip_face = "FaceSwap" cn_canny = "PyraCanny" @@ -33,8 +68,6 @@ default_parameters = { } # stop, weight inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6'] -performance_selections = ['Speed', 'Quality', 'Extreme Speed'] - inpaint_option_default = 'Inpaint or Outpaint 
(default)' inpaint_option_detail = 'Improve Detail (face, hand, eyes, etc.)' inpaint_option_modify = 'Modify Content (add objects, change background, etc.)' @@ -42,3 +75,49 @@ inpaint_options = [inpaint_option_default, inpaint_option_detail, inpaint_option desc_type_photo = 'Photograph' desc_type_anime = 'Art/Anime' + + +class MetadataScheme(Enum): + FOOOCUS = 'fooocus' + A1111 = 'a1111' + + +metadata_scheme = [ + (f'{MetadataScheme.FOOOCUS.value} (json)', MetadataScheme.FOOOCUS.value), + (f'{MetadataScheme.A1111.value} (plain text)', MetadataScheme.A1111.value), +] + +lora_count = 5 + +controlnet_image_count = 4 + + +class Steps(IntEnum): + QUALITY = 60 + SPEED = 30 + EXTREME_SPEED = 8 + + +class StepsUOV(IntEnum): + QUALITY = 36 + SPEED = 18 + EXTREME_SPEED = 8 + + +class Performance(Enum): + QUALITY = 'Quality' + SPEED = 'Speed' + EXTREME_SPEED = 'Extreme Speed' + + @classmethod + def list(cls) -> list: + return list(map(lambda c: c.value, cls)) + + def steps(self) -> int | None: + return Steps[self.name].value if Steps[self.name] else None + + def steps_uov(self) -> int | None: + return StepsUOV[self.name].value if Steps[self.name] else None + + +performance_selections = Performance.list() diff --git a/modules/meta_parser.py b/modules/meta_parser.py index 061e1f8a..e9f1d033 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -1,45 +1,113 @@ import json +import os +import re +from abc import ABC, abstractmethod +from pathlib import Path + import gradio as gr +from PIL import Image + import modules.config +import modules.sdxl_styles +from modules.flags import MetadataScheme, Performance, Steps +from modules.flags import SAMPLERS, CIVITAI_NO_KARRAS +from modules.util import quote, unquote, extract_styles_from_prompt, is_json, get_file_from_folder_list, calculate_sha256 + +re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)' +re_param = re.compile(re_param_code) +re_imagesize = re.compile(r"^(\d+)x(\d+)$") + +hash_cache = {} -def load_parameter_button_click(raw_prompt_txt, is_generating): - loaded_parameter_dict = json.loads(raw_prompt_txt) +def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool): + loaded_parameter_dict = raw_metadata + if isinstance(raw_metadata, str): + loaded_parameter_dict = json.loads(raw_metadata) assert isinstance(loaded_parameter_dict, dict) - results = [True, 1] + results = [len(loaded_parameter_dict) > 0, 1] + get_str('prompt', 'Prompt', loaded_parameter_dict, results) + get_str('negative_prompt', 'Negative Prompt', loaded_parameter_dict, results) + get_list('styles', 'Styles', loaded_parameter_dict, results) + get_str('performance', 'Performance', loaded_parameter_dict, results) + get_steps('steps', 'Steps', loaded_parameter_dict, results) + get_float('overwrite_switch', 'Overwrite Switch', loaded_parameter_dict, results) + get_resolution('resolution', 'Resolution', loaded_parameter_dict, results) + get_float('guidance_scale', 'Guidance Scale', loaded_parameter_dict, results) + get_float('sharpness', 'Sharpness', loaded_parameter_dict, results) + get_adm_guidance('adm_guidance', 'ADM Guidance', loaded_parameter_dict, results) + get_str('refiner_swap_method', 'Refiner Swap Method', loaded_parameter_dict, results) + get_float('adaptive_cfg', 'CFG Mimicking from TSNR', loaded_parameter_dict, results) + get_str('base_model', 'Base Model', loaded_parameter_dict, results) + get_str('refiner_model', 'Refiner Model', loaded_parameter_dict, results) + get_float('refiner_switch', 'Refiner Switch', 
loaded_parameter_dict, results) + get_str('sampler', 'Sampler', loaded_parameter_dict, results) + get_str('scheduler', 'Scheduler', loaded_parameter_dict, results) + get_seed('seed', 'Seed', loaded_parameter_dict, results) + + if is_generating: + results.append(gr.update()) + else: + results.append(gr.update(visible=True)) + + results.append(gr.update(visible=False)) + + get_freeu('freeu', 'FreeU', loaded_parameter_dict, results) + + for i in range(modules.config.default_max_lora_number): + get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results) + + return results + + +def get_str(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: - h = loaded_parameter_dict.get('Prompt', None) + h = source_dict.get(key, source_dict.get(fallback, default)) assert isinstance(h, str) results.append(h) except: results.append(gr.update()) - try: - h = loaded_parameter_dict.get('Negative Prompt', None) - assert isinstance(h, str) - results.append(h) - except: - results.append(gr.update()) +def get_list(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: - h = loaded_parameter_dict.get('Styles', None) + h = source_dict.get(key, source_dict.get(fallback, default)) h = eval(h) assert isinstance(h, list) results.append(h) except: results.append(gr.update()) + +def get_float(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: - h = loaded_parameter_dict.get('Performance', None) - assert isinstance(h, str) + h = source_dict.get(key, source_dict.get(fallback, default)) + assert h is not None + h = float(h) results.append(h) except: results.append(gr.update()) + +def get_steps(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: - h = loaded_parameter_dict.get('Resolution', None) + h = source_dict.get(key, source_dict.get(fallback, default)) + assert h is not None + h = int(h) + # if not in steps or in steps and performance is not the same + if h not in iter(Steps) or Steps(h).name.casefold() != source_dict.get('performance', '').replace(' ', '_').casefold(): + results.append(h) + return + results.append(-1) + except: + results.append(-1) + + +def get_resolution(key: str, fallback: str | None, source_dict: dict, results: list, default=None): + try: + h = source_dict.get(key, source_dict.get(fallback, default)) width, height = eval(h) formatted = modules.config.add_ratio(f'{width}*{height}') if formatted in modules.config.available_aspect_ratios: @@ -55,24 +123,22 @@ def load_parameter_button_click(raw_prompt_txt, is_generating): results.append(gr.update()) results.append(gr.update()) + +def get_seed(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: - h = loaded_parameter_dict.get('Sharpness', None) + h = source_dict.get(key, source_dict.get(fallback, default)) assert h is not None - h = float(h) + h = int(h) + results.append(False) results.append(h) except: results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Guidance Scale', None) - assert h is not None - h = float(h) - results.append(h) - except: results.append(gr.update()) + +def get_adm_guidance(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: - h = loaded_parameter_dict.get('ADM Guidance', None) + h = source_dict.get(key, source_dict.get(fallback, default)) p, n, e = eval(h) results.append(float(p)) results.append(float(n)) @@ -82,69 +148,368 @@ def load_parameter_button_click(raw_prompt_txt, is_generating): 
results.append(gr.update()) results.append(gr.update()) - try: - h = loaded_parameter_dict.get('Base Model', None) - assert isinstance(h, str) - results.append(h) - except: - results.append(gr.update()) +def get_freeu(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: - h = loaded_parameter_dict.get('Refiner Model', None) - assert isinstance(h, str) - results.append(h) + h = source_dict.get(key, source_dict.get(fallback, default)) + b1, b2, s1, s2 = eval(h) + results.append(True) + results.append(float(b1)) + results.append(float(b2)) + results.append(float(s1)) + results.append(float(s2)) except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Refiner Switch', None) - assert h is not None - h = float(h) - results.append(h) - except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Sampler', None) - assert isinstance(h, str) - results.append(h) - except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Scheduler', None) - assert isinstance(h, str) - results.append(h) - except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Seed', None) - assert h is not None - h = int(h) results.append(False) - results.append(h) + results.append(gr.update()) + results.append(gr.update()) + results.append(gr.update()) + results.append(gr.update()) + + +def get_lora(key: str, fallback: str | None, source_dict: dict, results: list): + try: + n, w = source_dict.get(key, source_dict.get(fallback)).split(' : ') + w = float(w) + results.append(True) + results.append(n) + results.append(w) except: - results.append(gr.update()) - results.append(gr.update()) + results.append(True) + results.append('None') + results.append(1) - if is_generating: - results.append(gr.update()) - else: - results.append(gr.update(visible=True)) - - results.append(gr.update(visible=False)) - for i in range(1, modules.config.default_max_lora_number + 1): - try: - n, w = loaded_parameter_dict.get(f'LoRA {i}', ' : ').split(' : ') - w = float(w) - results.append(True) - results.append(n) - results.append(w) - except: - results.append(True) - results.append('None') - results.append(1.0) +def get_sha256(filepath): + global hash_cache - return results + if filepath not in hash_cache: + hash_cache[filepath] = calculate_sha256(filepath) + + return hash_cache[filepath] + + +class MetadataParser(ABC): + def __init__(self): + self.raw_prompt: str = '' + self.full_prompt: str = '' + self.raw_negative_prompt: str = '' + self.full_negative_prompt: str = '' + self.steps: int = 30 + self.base_model_name: str = '' + self.base_model_hash: str = '' + self.refiner_model_name: str = '' + self.refiner_model_hash: str = '' + self.loras: list = [] + + @abstractmethod + def get_scheme(self) -> MetadataScheme: + raise NotImplementedError + + @abstractmethod + def parse_json(self, metadata: dict | str) -> dict: + raise NotImplementedError + + @abstractmethod + def parse_string(self, metadata: dict) -> str: + raise NotImplementedError + + def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name, refiner_model_name, loras): + self.raw_prompt = raw_prompt + self.full_prompt = full_prompt + self.raw_negative_prompt = raw_negative_prompt + self.full_negative_prompt = full_negative_prompt + self.steps = steps + self.base_model_name = Path(base_model_name).stem + + base_model_path = get_file_from_folder_list(base_model_name, modules.config.paths_checkpoints) + self.base_model_hash = 
get_sha256(base_model_path)
+
+        if refiner_model_name not in ['', 'None']:
+            self.refiner_model_name = Path(refiner_model_name).stem
+            refiner_model_path = get_file_from_folder_list(refiner_model_name, modules.config.paths_checkpoints)
+            self.refiner_model_hash = get_sha256(refiner_model_path)
+
+        self.loras = []
+        for (lora_name, lora_weight) in loras:
+            if lora_name != 'None':
+                lora_path = get_file_from_folder_list(lora_name, modules.config.paths_loras)
+                lora_hash = get_sha256(lora_path)
+                self.loras.append((Path(lora_name).stem, lora_weight, lora_hash))
+
+
+class A1111MetadataParser(MetadataParser):
+    def get_scheme(self) -> MetadataScheme:
+        return MetadataScheme.A1111
+
+    fooocus_to_a1111 = {
+        'raw_prompt': 'Raw prompt',
+        'raw_negative_prompt': 'Raw negative prompt',
+        'negative_prompt': 'Negative prompt',
+        'styles': 'Styles',
+        'performance': 'Performance',
+        'steps': 'Steps',
+        'sampler': 'Sampler',
+        'scheduler': 'Scheduler',
+        'guidance_scale': 'CFG scale',
+        'seed': 'Seed',
+        'resolution': 'Size',
+        'sharpness': 'Sharpness',
+        'adm_guidance': 'ADM Guidance',
+        'refiner_swap_method': 'Refiner Swap Method',
+        'adaptive_cfg': 'Adaptive CFG',
+        'overwrite_switch': 'Overwrite Switch',
+        'freeu': 'FreeU',
+        'base_model': 'Model',
+        'base_model_hash': 'Model hash',
+        'refiner_model': 'Refiner',
+        'refiner_model_hash': 'Refiner hash',
+        'lora_hashes': 'Lora hashes',
+        'lora_weights': 'Lora weights',
+        'created_by': 'User',
+        'version': 'Version'
+    }
+
+    def parse_json(self, metadata: str) -> dict:
+        metadata_prompt = ''
+        metadata_negative_prompt = ''
+
+        done_with_prompt = False
+
+        *lines, lastline = metadata.strip().split("\n")
+        if len(re_param.findall(lastline)) < 3:
+            lines.append(lastline)
+            lastline = ''
+
+        for line in lines:
+            line = line.strip()
+            if line.startswith(f"{self.fooocus_to_a1111['negative_prompt']}:"):
+                done_with_prompt = True
+                line = line[len(f"{self.fooocus_to_a1111['negative_prompt']}:"):].strip()
+            if done_with_prompt:
+                metadata_negative_prompt += ('' if metadata_negative_prompt == '' else "\n") + line
+            else:
+                metadata_prompt += ('' if metadata_prompt == '' else "\n") + line
+
+        found_styles, prompt, negative_prompt = extract_styles_from_prompt(metadata_prompt, metadata_negative_prompt)
+
+        data = {
+            'prompt': prompt,
+            'negative_prompt': negative_prompt
+        }
+
+        for k, v in re_param.findall(lastline):
+            try:
+                if v != '' and v[0] == '"' and v[-1] == '"':
+                    v = unquote(v)
+
+                m = re_imagesize.match(v)
+                if m is not None:
+                    data['resolution'] = str((m.group(1), m.group(2)))
+                else:
+                    data[list(self.fooocus_to_a1111.keys())[list(self.fooocus_to_a1111.values()).index(k)]] = v
+            except Exception:
+                print(f"Error parsing \"{k}: {v}\"")
+
+        # workaround for multiline prompts
+        if 'raw_prompt' in data:
+            data['prompt'] = data['raw_prompt']
+            raw_prompt = data['raw_prompt'].replace("\n", ', ')
+            if metadata_prompt != raw_prompt and modules.sdxl_styles.fooocus_expansion not in found_styles:
+                found_styles.append(modules.sdxl_styles.fooocus_expansion)
+
+        if 'raw_negative_prompt' in data:
+            data['negative_prompt'] = data['raw_negative_prompt']
+
+        data['styles'] = str(found_styles)
+
+        # try to load performance based on steps, fallback for direct A1111 imports
+        if 'steps' in data and 'performance' not in data:
+            try:
+                data['performance'] = Performance[Steps(int(data['steps'])).name].value
+            except (ValueError, KeyError):
+                pass
+
+        if 'sampler' in data:
+            data['sampler'] = data['sampler'].replace(' Karras', '')
+            # get key
+            for k, v in SAMPLERS.items():
+                if
v == data['sampler']: + data['sampler'] = k + break + + for key in ['base_model', 'refiner_model']: + if key in data: + for filename in modules.config.model_filenames: + path = Path(filename) + if data[key] == path.stem: + data[key] = filename + break + + if 'lora_hashes' in data: + lora_filenames = modules.config.lora_filenames.copy() + lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora()) + for li, lora in enumerate(data['lora_hashes'].split(', ')): + lora_name, lora_hash, lora_weight = lora.split(': ') + for filename in lora_filenames: + path = Path(filename) + if lora_name == path.stem: + data[f'lora_combined_{li + 1}'] = f'{filename} : {lora_weight}' + break + + return data + + def parse_string(self, metadata: dict) -> str: + data = {k: v for _, k, v in metadata} + + width, height = eval(data['resolution']) + + sampler = data['sampler'] + scheduler = data['scheduler'] + if sampler in SAMPLERS and SAMPLERS[sampler] != '': + sampler = SAMPLERS[sampler] + if sampler not in CIVITAI_NO_KARRAS and scheduler == 'karras': + sampler += f' Karras' + + generation_params = { + self.fooocus_to_a1111['steps']: self.steps, + self.fooocus_to_a1111['sampler']: sampler, + self.fooocus_to_a1111['seed']: data['seed'], + self.fooocus_to_a1111['resolution']: f'{width}x{height}', + self.fooocus_to_a1111['guidance_scale']: data['guidance_scale'], + self.fooocus_to_a1111['sharpness']: data['sharpness'], + self.fooocus_to_a1111['adm_guidance']: data['adm_guidance'], + self.fooocus_to_a1111['base_model']: Path(data['base_model']).stem, + self.fooocus_to_a1111['base_model_hash']: self.base_model_hash, + + self.fooocus_to_a1111['performance']: data['performance'], + self.fooocus_to_a1111['scheduler']: scheduler, + # workaround for multiline prompts + self.fooocus_to_a1111['raw_prompt']: self.raw_prompt, + self.fooocus_to_a1111['raw_negative_prompt']: self.raw_negative_prompt, + } + + if self.refiner_model_name not in ['', 'None']: + generation_params |= { + self.fooocus_to_a1111['refiner_model']: self.refiner_model_name, + self.fooocus_to_a1111['refiner_model_hash']: self.refiner_model_hash + } + + for key in ['adaptive_cfg', 'overwrite_switch', 'refiner_swap_method', 'freeu']: + if key in data: + generation_params[self.fooocus_to_a1111[key]] = data[key] + + lora_hashes = [] + for index, (lora_name, lora_weight, lora_hash) in enumerate(self.loras): + # workaround for Fooocus not knowing LoRA name in LoRA metadata + lora_hashes.append(f'{lora_name}: {lora_hash}: {lora_weight}') + lora_hashes_string = ', '.join(lora_hashes) + + generation_params |= { + self.fooocus_to_a1111['lora_hashes']: lora_hashes_string, + self.fooocus_to_a1111['version']: data['version'] + } + + if modules.config.metadata_created_by != '': + generation_params[self.fooocus_to_a1111['created_by']] = modules.config.metadata_created_by + + generation_params_text = ", ".join( + [k if k == v else f'{k}: {quote(v)}' for k, v in generation_params.items() if + v is not None]) + positive_prompt_resolved = ', '.join(self.full_prompt) + negative_prompt_resolved = ', '.join(self.full_negative_prompt) + negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else "" + return f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip() + + +class FooocusMetadataParser(MetadataParser): + def get_scheme(self) -> MetadataScheme: + return MetadataScheme.FOOOCUS + + def parse_json(self, metadata: dict) -> dict: + model_filenames = modules.config.model_filenames.copy() + 
lora_filenames = modules.config.lora_filenames.copy() + lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora()) + + for key, value in metadata.items(): + if value in ['', 'None']: + continue + if key in ['base_model', 'refiner_model']: + metadata[key] = self.replace_value_with_filename(key, value, model_filenames) + elif key.startswith('lora_combined_'): + metadata[key] = self.replace_value_with_filename(key, value, lora_filenames) + else: + continue + + return metadata + + def parse_string(self, metadata: list) -> str: + for li, (label, key, value) in enumerate(metadata): + # remove model folder paths from metadata + if key.startswith('lora_combined_'): + name, weight = value.split(' : ') + name = Path(name).stem + value = f'{name} : {weight}' + metadata[li] = (label, key, value) + + res = {k: v for _, k, v in metadata} + + res['full_prompt'] = self.full_prompt + res['full_negative_prompt'] = self.full_negative_prompt + res['steps'] = self.steps + res['base_model'] = self.base_model_name + res['base_model_hash'] = self.base_model_hash + + if self.refiner_model_name not in ['', 'None']: + res['refiner_model'] = self.refiner_model_name + res['refiner_model_hash'] = self.refiner_model_hash + + res['loras'] = self.loras + + if modules.config.metadata_created_by != '': + res['created_by'] = modules.config.metadata_created_by + + return json.dumps(dict(sorted(res.items()))) + + @staticmethod + def replace_value_with_filename(key, value, filenames): + for filename in filenames: + path = Path(filename) + if key.startswith('lora_combined_'): + name, weight = value.split(' : ') + if name == path.stem: + return f'{filename} : {weight}' + elif value == path.stem: + return filename + + +def get_metadata_parser(metadata_scheme: MetadataScheme) -> MetadataParser: + match metadata_scheme: + case MetadataScheme.FOOOCUS: + return FooocusMetadataParser() + case MetadataScheme.A1111: + return A1111MetadataParser() + case _: + raise NotImplementedError + + +def read_info_from_image(filepath) -> tuple[str | None, dict, MetadataScheme | None]: + with Image.open(filepath) as image: + items = (image.info or {}).copy() + + parameters = items.pop('parameters', None) + if parameters is not None and is_json(parameters): + parameters = json.loads(parameters) + + try: + metadata_scheme = MetadataScheme(items.pop('fooocus_scheme', None)) + except ValueError: + metadata_scheme = None + + # broad fallback + if isinstance(parameters, dict): + metadata_scheme = MetadataScheme.FOOOCUS + + if isinstance(parameters, str): + metadata_scheme = MetadataScheme.A1111 + + return parameters, items, metadata_scheme diff --git a/modules/private_logger.py b/modules/private_logger.py index 506b1055..2213cbba 100644 --- a/modules/private_logger.py +++ b/modules/private_logger.py @@ -5,7 +5,9 @@ import json import urllib.parse from PIL import Image +from PIL.PngImagePlugin import PngInfo from modules.util import generate_temp_filename +from modules.meta_parser import MetadataParser from tempfile import gettempdir log_cache = {} @@ -18,11 +20,21 @@ def get_current_html_path(): return html_name -def log(img, dic) -> str: +def log(img, metadata, metadata_parser: MetadataParser | None = None) -> str: path_outputs = args_manager.args.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension='png') os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True) - Image.fromarray(img).save(local_temp_filename) + + 
parsed_parameters = metadata_parser.parse_string(metadata) if metadata_parser is not None else ''
+    image = Image.fromarray(img)
+
+    if parsed_parameters != '':
+        pnginfo = PngInfo()
+        pnginfo.add_text('parameters', parsed_parameters)
+        pnginfo.add_text('fooocus_scheme', metadata_parser.get_scheme().value)
+    else:
+        pnginfo = None
+    image.save(local_temp_filename, pnginfo=pnginfo)
 
     if args_manager.args.disable_image_log:
         return local_temp_filename
 
@@ -34,7 +46,7 @@ def log(img, dic) -> str:
         "body { background-color: #121212; color: #E0E0E0; } "
         "a { color: #BB86FC; } "
         ".metadata { border-collapse: collapse; width: 100%; } "
-        ".metadata .key { width: 15%; } "
+        ".metadata .label { width: 15%; } "
         ".metadata .value { width: 85%; font-weight: bold; } "
         ".metadata th, .metadata td { border: 1px solid #4d4d4d; padding: 4px; } "
         ".image-container img { height: auto; max-width: 512px; display: block; padding-right:10px; } "
@@ -87,13 +99,13 @@ def log(img, dic) -> str:
         item = f"<div id=\"{div_name}\" class=\"image-container\"><hr><table><tr>\n"
         item += f"<td><a href=\"{only_name}\" target=\"_blank\"><img src=\"{only_name}\" loading=\"lazy\"></a>"
         item += f"<div>{only_name}</div></td>"
         item += "<td><table class=\"metadata\">"
-        for key, value in dic:
-            value_txt = str(value).replace('\n', ' <br/> ')
-            item += f"<tr><td class=\"key\">{key}</td><td class=\"value\">{value_txt}</td></tr>\n"
+        for label, key, value in metadata:
+            value_txt = str(value).replace('\n', ' <br/> ')
+            item += f"<tr><td class=\"label\">{label}</td><td class=\"value\">{value_txt}</td></tr>\n"
         item += "</table>"
-        js_txt = urllib.parse.quote(json.dumps({k: v for k, v in dic}, indent=0), safe='')
-        item += f"<br/><button onclick=\"to_clipboard('{js_txt}')\">Copy to Clipboard</button></td>"
+        js_txt = urllib.parse.quote(json.dumps({k: v for _, k, v in metadata}, indent=0), safe='')
+        item += f"<br/><button onclick=\"to_clipboard('{js_txt}')\">Copy to Clipboard</button></td>"
         item += "</tr></table></div>\n\n"

diff --git a/modules/util.py b/modules/util.py
index 1b165115..29d48696 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -1,15 +1,20 @@
+import typing
+
 import numpy as np
 import datetime
 import random
 import math
 import os
 import cv2
+import json
 from PIL import Image
+from hashlib import sha256
 
+import modules.sdxl_styles
 
 LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
-
+HASH_SHA256_LENGTH = 10
 
 def erode_or_dilate(x, k):
     k = int(k)
@@ -170,13 +175,173 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None):
         relative_path = ""
         for filename in sorted(files, key=lambda s: s.casefold()):
             _, file_extension = os.path.splitext(filename)
-            if (exensions == None or file_extension.lower() in exensions) and (name_filter == None or name_filter in _):
+            if (exensions is None or file_extension.lower() in exensions) and (name_filter is None or name_filter in _):
                 path = os.path.join(relative_path, filename)
                 filenames.append(path)
 
     return filenames
 
 
+def calculate_sha256(filename, length=HASH_SHA256_LENGTH) -> str:
+    hash_sha256 = sha256()
+    blksize = 1024 * 1024
+
+    with open(filename, "rb") as f:
+        for chunk in iter(lambda: f.read(blksize), b""):
+            hash_sha256.update(chunk)
+
+    res = hash_sha256.hexdigest()
+    return res[:length] if length else res
+
+
+def quote(text):
+    if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
+        return text
+
+    return json.dumps(text, ensure_ascii=False)
+
+
+def unquote(text):
+    if len(text) == 0 or text[0] != '"' or text[-1] != '"':
+        return text
+
+    try:
+        return json.loads(text)
+    except Exception:
+        return text
+
+
+def unwrap_style_text_from_prompt(style_text, prompt):
+    """
+    Checks the prompt to see if the style text is wrapped around it. If so,
+    returns True plus the prompt text without the style text. Otherwise, returns
+    False with the original prompt.
+
+    Note that the "cleaned" version of the style text is only used for matching
+    purposes here. It isn't returned; the original style text is not modified.
+    """
+    stripped_prompt = prompt
+    stripped_style_text = style_text
+    if "{prompt}" in stripped_style_text:
+        # Work out whether the prompt is wrapped in the style text. If so, we
+        # return True and the "inner" prompt text that isn't part of the style.
+        try:
+            left, right = stripped_style_text.split("{prompt}", 2)
+        except ValueError as e:
+            # If the style text has multiple "{prompt}"s, we can't split it into
+            # two parts. This is an error, but we can't do anything about it.
+            print(f"Unable to compare style text to prompt:\n{style_text}")
+            print(f"Error: {e}")
+            return False, prompt, ''
+
+        left_pos = stripped_prompt.find(left)
+        right_pos = stripped_prompt.find(right)
+        if 0 <= left_pos < right_pos:
+            real_prompt = stripped_prompt[left_pos + len(left):right_pos]
+            prompt = stripped_prompt.replace(left + real_prompt + right, '', 1)
+            if prompt.startswith(", "):
+                prompt = prompt[2:]
+            if prompt.endswith(", "):
+                prompt = prompt[:-2]
+            return True, prompt, real_prompt
+    else:
+        # Work out whether the given prompt ends with the style text. If so, we
+        # return True and the prompt text up to where the style text starts.
+        if stripped_prompt.endswith(stripped_style_text):
+            prompt = stripped_prompt[: len(stripped_prompt) - len(stripped_style_text)]
+            if prompt.endswith(", "):
+                prompt = prompt[:-2]
+            return True, prompt, prompt
+
+    return False, prompt, ''
+
+
+def extract_original_prompts(style, prompt, negative_prompt):
+    """
+    Takes a style and compares it to the prompt and negative prompt. If the style
+    matches, returns True plus the prompt and negative prompt with the style text
+    removed. Otherwise, returns False with the original prompt and negative prompt.
+    """
+    if not style.prompt and not style.negative_prompt:
+        return False, prompt, negative_prompt, ''
+
+    match_positive, extracted_positive, real_prompt = unwrap_style_text_from_prompt(
+        style.prompt, prompt
+    )
+    if not match_positive:
+        return False, prompt, negative_prompt, ''
+
+    match_negative, extracted_negative, _ = unwrap_style_text_from_prompt(
+        style.negative_prompt, negative_prompt
+    )
+    if not match_negative:
+        return False, prompt, negative_prompt, ''
+
+    return True, extracted_positive, extracted_negative, real_prompt
+
+
+def extract_styles_from_prompt(prompt, negative_prompt):
+    extracted = []
+    applicable_styles = []
+
+    for style_name, (style_prompt, style_negative_prompt) in modules.sdxl_styles.styles.items():
+        applicable_styles.append(PromptStyle(name=style_name, prompt=style_prompt, negative_prompt=style_negative_prompt))
+
+    real_prompt = ''
+
+    while True:
+        found_style = None
+
+        for style in applicable_styles:
+            is_match, new_prompt, new_neg_prompt, new_real_prompt = extract_original_prompts(
+                style, prompt, negative_prompt
+            )
+            if is_match:
+                found_style = style
+                prompt = new_prompt
+                negative_prompt = new_neg_prompt
+                if real_prompt == '' and new_real_prompt != '' and new_real_prompt != prompt:
+                    real_prompt = new_real_prompt
+                break
+
+        if not found_style:
+            break
+
+        applicable_styles.remove(found_style)
+        extracted.append(found_style.name)
+
+    # add prompt expansion if not all styles could be resolved
+    if prompt != '':
+        if real_prompt != '':
+            extracted.append(modules.sdxl_styles.fooocus_expansion)
+        else:
+            # find real_prompt when only prompt expansion is selected
+            first_word = prompt.split(', ')[0]
+            first_word_positions = [i for i in range(len(prompt)) if prompt.startswith(first_word, i)]
+            if len(first_word_positions) > 1:
+                real_prompt = prompt[:first_word_positions[-1]]
+                extracted.append(modules.sdxl_styles.fooocus_expansion)
+                if real_prompt.endswith(', '):
+                    real_prompt = real_prompt[:-2]
+
+    return list(reversed(extracted)), real_prompt, negative_prompt
+
+
+class PromptStyle(typing.NamedTuple):
+    name: str
+    prompt: str
+    negative_prompt: str
+
+
+def is_json(data: str) -> bool:
+    try:
+        loaded_json = json.loads(data)
+        assert isinstance(loaded_json, dict)
+    except (ValueError, AssertionError):
+        return False
+    return True
+
+
 def get_file_from_folder_list(name, folders):
     for folder in folders:
         filename = os.path.abspath(os.path.realpath(os.path.join(folder, name)))

diff --git a/webui.py b/webui.py
index a3663dfa..7020438e 100644
--- a/webui.py
+++ b/webui.py
@@ -20,6 +20,7 @@ from modules.sdxl_styles import legal_style_names
 from modules.private_logger import get_current_html_path
 from modules.ui_gradio_extensions import reload_javascript
 from modules.auth import auth_enabled, check_auth
+from modules.util import is_json
 
 def get_task(*args):
     args = list(args)
@@ -158,7 +159,7 @@ with shared.gradio_root:
                 ip_weights = []
                 ip_ctrls = []
                 ip_ad_cols = []
-                for _ in range(4):
+                for _ in
range(flags.controlnet_image_count): with gr.Column(): ip_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False, height=300) ip_images.append(ip_image) @@ -216,6 +217,30 @@ with shared.gradio_root: value=flags.desc_type_photo) desc_btn = gr.Button(value='Describe this Image into Prompt') gr.HTML('\U0001F4D4 Document') + with gr.TabItem(label='Metadata') as load_tab: + with gr.Column(): + metadata_input_image = grh.Image(label='Drag any image generated by Fooocus here', source='upload', type='filepath') + metadata_json = gr.JSON(label='Metadata') + metadata_import_button = gr.Button(value='Apply Metadata') + + def trigger_metadata_preview(filepath): + parameters, items, metadata_scheme = modules.meta_parser.read_info_from_image(filepath) + + results = {} + if parameters is not None: + results['parameters'] = parameters + + if items: + results['items'] = items + + if isinstance(metadata_scheme, flags.MetadataScheme): + results['metadata_scheme'] = metadata_scheme.value + + return results + + metadata_input_image.upload(trigger_metadata_preview, inputs=metadata_input_image, + outputs=metadata_json, queue=False, show_progress=True) + switch_js = "(x) => {if(x){viewer_to_bottom(100);viewer_to_bottom(500);}else{viewer_to_top();} return x;}" down_js = "() => {viewer_to_bottom();}" @@ -359,7 +384,7 @@ with shared.gradio_root: step=0.001, value=0.3, info='When to end the guidance from positive/negative ADM. ') - refiner_swap_method = gr.Dropdown(label='Refiner swap method', value='joint', + refiner_swap_method = gr.Dropdown(label='Refiner swap method', value=flags.refiner_swap_method, choices=['joint', 'separate', 'vae']) adaptive_cfg = gr.Slider(label='CFG Mimicking from TSNR', minimum=1.0, maximum=30.0, step=0.01, @@ -407,6 +432,16 @@ with shared.gradio_root: info='Disable automatic seed increment when image number is > 1.', value=False) + if not args_manager.args.disable_metadata: + save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images, + info='Adds parameters to generated images allowing manual regeneration.') + metadata_scheme = gr.Radio(label='Metadata Scheme', choices=flags.metadata_scheme, value=modules.config.default_metadata_scheme, + info='Image Prompt parameters are not included. 
Use a1111 for compatibility with Civitai.', + visible=modules.config.default_save_metadata_to_images) + + save_metadata_to_images.change(lambda x: gr.update(visible=x), inputs=[save_metadata_to_images], outputs=[metadata_scheme], + queue=False, show_progress=False) + with gr.Tab(label='Control'): debugging_cn_preprocessor = gr.Checkbox(label='Debug Preprocessors', value=False, info='See the results from preprocessors.') @@ -484,7 +519,6 @@ with shared.gradio_root: results += [gr.update(choices=['None'] + modules.config.model_filenames)] for i in range(modules.config.default_max_lora_number): results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()] - return results model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls, queue=False, show_progress=False) @@ -555,20 +589,18 @@ with shared.gradio_root: ctrls += [refiner_swap_method, controlnet_softness] ctrls += freeu_ctrls ctrls += inpaint_ctrls + + if not args_manager.args.disable_metadata: + ctrls += [save_metadata_to_images, metadata_scheme] + ctrls += ip_ctrls state_is_generating = gr.State(False) def parse_meta(raw_prompt_txt, is_generating): loaded_json = None - try: - if '{' in raw_prompt_txt: - if '}' in raw_prompt_txt: - if ':' in raw_prompt_txt: - loaded_json = json.loads(raw_prompt_txt) - assert isinstance(loaded_json, dict) - except: - loaded_json = None + if is_json(raw_prompt_txt): + loaded_json = json.loads(raw_prompt_txt) if loaded_json is None: if is_generating: @@ -580,31 +612,29 @@ with shared.gradio_root: prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False) - load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=[ - advanced_checkbox, - image_number, - prompt, - negative_prompt, - style_selections, - performance_selection, - aspect_ratios_selection, - overwrite_width, - overwrite_height, - sharpness, - guidance_scale, - adm_scaler_positive, - adm_scaler_negative, - adm_scaler_end, - base_model, - refiner_model, - refiner_switch, - sampler_name, - scheduler_name, - seed_random, - image_seed, - generate_button, - load_parameter_button - ] + lora_ctrls, queue=False, show_progress=False) + load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections, + performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection, + overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive, + adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, base_model, + refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, image_seed, + generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls + + load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False) + + def trigger_metadata_import(filepath, state_is_generating): + parameters, items, metadata_scheme = modules.meta_parser.read_info_from_image(filepath) + if parameters is None: + print('Could not find metadata in the image!') + parsed_parameters = {} + else: + metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme) + parsed_parameters = metadata_parser.parse_json(parameters) + + return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating) + + + 
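Taken together, read_info_from_image, get_metadata_parser and load_parameter_button_click form the import pipeline that the click handlers below drive. A minimal sketch of exercising that pipeline outside Gradio, assuming a Fooocus-generated image; the path 'outputs/example.png' and the function name are illustrative only, not part of this patch:

    import modules.meta_parser

    def reparse_image_sketch(filepath='outputs/example.png'):
        # at this point in the series read_info_from_image returns (parameters, items, scheme)
        parameters, items, scheme = modules.meta_parser.read_info_from_image(filepath)
        if parameters is None or scheme is None:
            return {}  # no recognizable metadata embedded in the file
        parser = modules.meta_parser.get_metadata_parser(scheme)
        return parser.parse_json(parameters)  # dict keyed by 'prompt', 'styles', 'seed', ...
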
    metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
+        .then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)
 
     generate_button.click(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=True, interactive=True),
                                    gr.update(visible=False, interactive=False), [], True),
                           outputs=[stop_button, skip_button, generate_button, gallery, state_is_generating]) \

From b6d23670d87277f4ae237c499237d4eb3b9d9903 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 26 Feb 2024 15:31:32 +0100
Subject: [PATCH 045/103] feat: add jpg and webp support, add exif data
 handling for metadata (#1863)

* feature: added flag, config and ui update for image extension change #1789
* moved function to config module
* moved image extension to webui via async worker, passing as parameter to log and get_current_html_path functions per feedback
* check flag before displaying image extension radio button
* disabled if image log flag is passed in
* fix: add missing image_extension parameter to log call
* refactor: change label
* feat: add webp to image_extensions; supported image extensions: see https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html
* feat: use consistent file name in gradio returns and use filepaths instead of numpy image by saving to temp dir; uses double the temp dir file storage on disk as it saves to temp dir and gradio temp dir when displaying the image, but reuses logged output image
* feat: delete temp images after yielding to gradio
* feat: use args temp path if given
* chore: code cleanup, remove redundant if statement
* feat: always show image_extension element; this is now possible due to image extension support in gradio via https://github.com/lllyasviel/Fooocus/pull/1932
* refactor: rename image_extension to image_file_extension
* feat: use optimized jpg parameters when saving the image (quality=95, optimize=True, progressive=True)
* refactor: rename image_file_extension to output_format
* feat: add exif handling
* refactor: code cleanup, remove items from metadata output

---------

Co-authored-by: Manuel Schmid
Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Co-authored-by: Manuel Schmid
Co-authored-by: eddyizm
---
 modules/async_worker.py   |  7 +++--
 modules/config.py         |  5 +++
 modules/flags.py          |  2 ++
 modules/meta_parser.py    | 66 ++++++++++++++++++++++++++++++++++++---
 modules/private_logger.py | 32 ++++++++++++-------
 webui.py                  | 20 +++++++-----
 6 files changed, 104 insertions(+), 28 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index 677cf469..2c029cfb 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -1,5 +1,4 @@
 import threading
-import os
 
 from modules.patch import PatchSettings, patch_settings, patch_all
 
 patch_all()
@@ -142,6 +141,7 @@ def worker():
         performance_selection = Performance(args.pop())
         aspect_ratios_selection = args.pop()
         image_number = args.pop()
+        output_format = args.pop()
         image_seed = args.pop()
         sharpness = args.pop()
         guidance_scale = args.pop()
@@ -414,6 +414,7 @@ def worker():
         progressbar(async_task, 3, 'Processing prompts ...')
         tasks = []
+
         for i in range(image_number):
             if disable_seed_increment:
                 task_seed = seed
@@ -553,7 +554,7 @@ def worker():
 
             if direct_return:
                 d = [('Upscale (Fast)', '2x')]
-                uov_input_image_path = log(uov_input_image, d)
+                uov_input_image_path = log(uov_input_image, d, output_format=output_format)
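The exif handling this commit announces comes down to Pillow's Image.Exif mapping: tag 0x9286 (UserComment) carries the serialized parameters and 0x927C (MakerNote) the scheme name. A rough round-trip sketch with a hypothetical file name; depending on the Pillow version, values may be read back as str or bytes:

    from PIL import Image

    exif = Image.Exif()
    exif[0x9286] = '{"prompt": "example"}'  # UserComment: serialized generation parameters
    exif[0x927C] = 'fooocus'                # MakerNote: metadata scheme identifier
    Image.new('RGB', (64, 64), 'gray').save('exif_demo.jpg', exif=exif)

    restored = Image.open('exif_demo.jpg').getexif()
    print(restored.get(0x9286), restored.get(0x927C))
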
yield_result(async_task, uov_input_image_path, do_not_show_finished_images=True) return @@ -863,7 +864,7 @@ def worker(): d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}')) d.append(('Version', 'version', 'Fooocus v' + fooocus_version.version)) - img_paths.append(log(x, d, metadata_parser)) + img_paths.append(log(x, d, metadata_parser, output_format)) yield_result(async_task, img_paths, do_not_show_finished_images=len(tasks) == 1 or disable_intermediate_results) except ldm_patched.modules.model_management.InterruptProcessingException as e: diff --git a/modules/config.py b/modules/config.py index a393e24c..6800c004 100644 --- a/modules/config.py +++ b/modules/config.py @@ -306,6 +306,11 @@ default_max_image_number = get_config_item_or_set_default( default_value=32, validator=lambda x: isinstance(x, int) and x >= 1 ) +default_output_format = get_config_item_or_set_default( + key='default_output_format', + default_value='png', + validator=lambda x: x in modules.flags.output_formats +) default_image_number = get_config_item_or_set_default( key='default_image_number', default_value=2, diff --git a/modules/flags.py b/modules/flags.py index 206f5121..6f12bc8f 100644 --- a/modules/flags.py +++ b/modules/flags.py @@ -67,6 +67,8 @@ default_parameters = { cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0) } # stop, weight +output_formats = ['png', 'jpg', 'webp'] + inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6'] inpaint_option_default = 'Inpaint or Outpaint (default)' inpaint_option_detail = 'Improve Detail (face, hand, eyes, etc.)' diff --git a/modules/meta_parser.py b/modules/meta_parser.py index e9f1d033..9b2dadb3 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -7,6 +7,7 @@ from pathlib import Path import gradio as gr from PIL import Image +import fooocus_version import modules.config import modules.sdxl_styles from modules.flags import MetadataScheme, Performance, Steps @@ -181,13 +182,43 @@ def get_lora(key: str, fallback: str | None, source_dict: dict, results: list): def get_sha256(filepath): global hash_cache - if filepath not in hash_cache: hash_cache[filepath] = calculate_sha256(filepath) return hash_cache[filepath] +def parse_meta_from_preset(preset_content): + assert isinstance(preset_content, dict) + preset_prepared = {} + items = preset_content + + for settings_key, meta_key in modules.config.possible_preset_keys.items(): + if settings_key == "default_loras": + loras = getattr(modules.config, settings_key) + if settings_key in items: + loras = items[settings_key] + for index, lora in enumerate(loras[:5]): + preset_prepared[f'lora_combined_{index + 1}'] = ' : '.join(map(str, lora)) + elif settings_key == "default_aspect_ratio": + if settings_key in items and items[settings_key] is not None: + default_aspect_ratio = items[settings_key] + width, height = default_aspect_ratio.split('*') + else: + default_aspect_ratio = getattr(modules.config, settings_key) + width, height = default_aspect_ratio.split('×') + height = height[:height.index(" ")] + preset_prepared[meta_key] = (width, height) + else: + preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[ + settings_key] is not None else getattr(modules.config, settings_key) + + if settings_key == "default_styles" or settings_key == "default_aspect_ratio": + preset_prepared[meta_key] = str(preset_prepared[meta_key]) + + return preset_prepared + + class MetadataParser(ABC): def __init__(self): self.raw_prompt: str = '' @@ -213,7 +244,8 
@@ class MetadataParser(ABC):
     def parse_string(self, metadata: dict) -> str:
         raise NotImplementedError
 
-    def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name, refiner_model_name, loras):
+    def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name,
+                 refiner_model_name, loras):
         self.raw_prompt = raw_prompt
         self.full_prompt = full_prompt
         self.raw_negative_prompt = raw_negative_prompt
@@ -492,16 +524,28 @@ def get_metadata_parser(metadata_scheme: MetadataScheme) -> MetadataParser:
         raise NotImplementedError
 
 
-def read_info_from_image(filepath) -> tuple[str | None, dict, MetadataScheme | None]:
+def read_info_from_image(filepath) -> tuple[str | None, MetadataScheme | None]:
     with Image.open(filepath) as image:
         items = (image.info or {}).copy()
 
     parameters = items.pop('parameters', None)
+    metadata_scheme = items.pop('fooocus_scheme', None)
+    exif = items.pop('exif', None)
+
     if parameters is not None and is_json(parameters):
         parameters = json.loads(parameters)
+    elif exif is not None:
+        exif = image.getexif()
+        # 0x9286 = UserComment
+        parameters = exif.get(0x9286, None)
+        # 0x927C = MakerNote
+        metadata_scheme = exif.get(0x927C, None)
+
+        if is_json(parameters):
+            parameters = json.loads(parameters)
 
     try:
-        metadata_scheme = MetadataScheme(items.pop('fooocus_scheme', None))
+        metadata_scheme = MetadataScheme(metadata_scheme)
     except ValueError:
         metadata_scheme = None
@@ -512,4 +556,16 @@ def read_info_from_image(filepath) -> tuple[str | None, dict, MetadataScheme | N
     if isinstance(parameters, str):
         metadata_scheme = MetadataScheme.A1111
 
-    return parameters, items, metadata_scheme
+    return parameters, metadata_scheme
+
+
+def get_exif(metadata: str | None, metadata_scheme: str):
+    exif = Image.Exif()
+    # tags: see https://github.com/python-pillow/Pillow/blob/9.2.x/src/PIL/ExifTags.py
+    # 0x9286 = UserComment
+    exif[0x9286] = metadata
+    # 0x0131 = Software
+    exif[0x0131] = 'Fooocus v' + fooocus_version.version
+    # 0x927C = MakerNote
+    exif[0x927C] = metadata_scheme
+    return exif
\ No newline at end of file

diff --git a/modules/private_logger.py b/modules/private_logger.py
index 2213cbba..8fa5f73c 100644
--- a/modules/private_logger.py
+++ b/modules/private_logger.py
@@ -7,34 +7,42 @@ import urllib.parse
 from PIL import Image
 from PIL.PngImagePlugin import PngInfo
 from modules.util import generate_temp_filename
-from modules.meta_parser import MetadataParser
-from tempfile import gettempdir
+from modules.meta_parser import MetadataParser, get_exif
 
 log_cache = {}
 
 
-def get_current_html_path():
+def get_current_html_path(output_format=None):
+    output_format = output_format if output_format else modules.config.default_output_format
     date_string, local_temp_filename, only_name = generate_temp_filename(folder=modules.config.path_outputs,
-                                                                         extension='png')
+                                                                         extension=output_format)
     html_name = os.path.join(os.path.dirname(local_temp_filename), 'log.html')
     return html_name
 
 
-def log(img, metadata, metadata_parser: MetadataParser | None = None) -> str:
+def log(img, metadata, metadata_parser: MetadataParser | None = None, output_format=None) -> str:
     path_outputs = args_manager.args.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs
-    date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension='png')
+    output_format = output_format if output_format else modules.config.default_output_format
+    date_string, local_temp_filename,
only_name = generate_temp_filename(folder=path_outputs, extension=output_format) os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True) parsed_parameters = metadata_parser.parse_string(metadata) if metadata_parser is not None else '' image = Image.fromarray(img) - if parsed_parameters != '': - pnginfo = PngInfo() - pnginfo.add_text('parameters', parsed_parameters) - pnginfo.add_text('fooocus_scheme', metadata_parser.get_scheme().value) + if output_format == 'png': + if parsed_parameters != '': + pnginfo = PngInfo() + pnginfo.add_text('parameters', parsed_parameters) + pnginfo.add_text('fooocus_scheme', metadata_parser.get_scheme().value) + else: + pnginfo = None + image.save(local_temp_filename, pnginfo=pnginfo) + elif output_format == 'jpg': + image.save(local_temp_filename, quality=95, optimize=True, progressive=True, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif()) + elif output_format == 'webp': + image.save(local_temp_filename, quality=95, lossless=False, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif()) else: - pnginfo = None - image.save(local_temp_filename, pnginfo=pnginfo) + image.save(local_temp_filename) if args_manager.args.disable_image_log: return local_temp_filename diff --git a/webui.py b/webui.py index 7020438e..5e8853ed 100644 --- a/webui.py +++ b/webui.py @@ -224,15 +224,12 @@ with shared.gradio_root: metadata_import_button = gr.Button(value='Apply Metadata') def trigger_metadata_preview(filepath): - parameters, items, metadata_scheme = modules.meta_parser.read_info_from_image(filepath) + parameters, metadata_scheme = modules.meta_parser.read_info_from_image(filepath) results = {} if parameters is not None: results['parameters'] = parameters - if items: - results['items'] = items - if isinstance(metadata_scheme, flags.MetadataScheme): results['metadata_scheme'] = metadata_scheme.value @@ -263,6 +260,11 @@ with shared.gradio_root: value=modules.config.default_aspect_ratio, info='width × height', elem_classes='aspect_ratios') image_number = gr.Slider(label='Image Number', minimum=1, maximum=modules.config.default_max_image_number, step=1, value=modules.config.default_image_number) + + output_format = gr.Radio(label='Output Format', + choices=modules.flags.output_formats, + value=modules.config.default_output_format) + negative_prompt = gr.Textbox(label='Negative Prompt', show_label=True, placeholder="Type prompt here.", info='Describing what you do not want to see.', lines=2, elem_id='negative_prompt', @@ -292,7 +294,7 @@ with shared.gradio_root: if args_manager.args.disable_image_log: return gr.update(value='') - return gr.update(value=f'\U0001F4DA History Log') + return gr.update(value=f'\U0001F4DA History Log') history_link = gr.HTML() shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False) @@ -532,7 +534,9 @@ with shared.gradio_root: adm_scaler_negative, refiner_switch, refiner_model, sampler_name, scheduler_name, adaptive_cfg, refiner_swap_method, negative_prompt, disable_intermediate_results ], queue=False, show_progress=False) - + + output_format.input(lambda x: gr.update(output_format=x), inputs=output_format) + advanced_checkbox.change(lambda x: gr.update(visible=x), advanced_checkbox, advanced_column, queue=False, show_progress=False) \ .then(fn=lambda: None, _js='refresh_grid_delayed', queue=False, show_progress=False) @@ -573,7 +577,7 @@ with shared.gradio_root: ctrls = [currentTask, 
generate_image_grid] ctrls += [ prompt, negative_prompt, style_selections, - performance_selection, aspect_ratios_selection, image_number, image_seed, sharpness, guidance_scale + performance_selection, aspect_ratios_selection, image_number, output_format, image_seed, sharpness, guidance_scale ] ctrls += [base_model, refiner_model, refiner_switch] + lora_ctrls @@ -622,7 +626,7 @@ with shared.gradio_root: load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False) def trigger_metadata_import(filepath, state_is_generating): - parameters, items, metadata_scheme = modules.meta_parser.read_info_from_image(filepath) + parameters, metadata_scheme = modules.meta_parser.read_info_from_image(filepath) if parameters is None: print('Could not find metadata in the image!') parsed_parameters = {} From f4a6350300e03fb0aa7619d02a4704434dd5e89c Mon Sep 17 00:00:00 2001 From: whitehara <58582589+whitehara@users.noreply.github.com> Date: Mon, 26 Feb 2024 18:30:05 +0200 Subject: [PATCH 046/103] feat: add docker files (#1418) * Add docker files * Add python precompiled cache file in the image * Add Notes in docker.md * Create docker-publish.yml * Modify docker-compose.yml not to use the bind mount * Update torch version * Change --share to --listen * Update torch version * Change '--share' to '--listen` * adjust code comments * Update requirements-docker.txt * chore: code cleanup - default_model env var isn't necessary as model is included in default preset, same for speed - ENV CMDARGS --listen is now synched with docker-compose.yml file - remove * Change entry_with_update.py to launch.py in entrypoint.sh * Change CMD in Dockerfile * Change default CMDARGS to --listen in Dockerfile * Modify CMD in Dockerfile * Fix docker-compose.yml * Import files from models,outputs * docs: change wording in docker.md, change git clone URL, add quotes to port mapping * docs: remove docker publish github action, remove pre-built image from docs * Modify modules versions for linux/arm64 * docs: update docker readme --------- Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Co-authored-by: Manuel Schmid Co-authored-by: Manuel Schmid --- .dockerignore | 1 + Dockerfile | 29 ++++++++++++++++++ docker-compose.yml | 38 ++++++++++++++++++++++++ docker.md | 66 +++++++++++++++++++++++++++++++++++++++++ entrypoint.sh | 33 +++++++++++++++++++++ modules/config.py | 25 +++++++++++++--- modules/util.py | 2 +- readme.md | 4 +++ requirements_docker.txt | 5 ++++ 9 files changed, 198 insertions(+), 5 deletions(-) create mode 100644 .dockerignore create mode 100644 Dockerfile create mode 100644 docker-compose.yml create mode 100644 docker.md create mode 100755 entrypoint.sh create mode 100644 requirements_docker.txt diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..485dee64 --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +.idea diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..2aea2810 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,29 @@ +FROM nvidia/cuda:12.3.1-base-ubuntu22.04 +ENV DEBIAN_FRONTEND noninteractive +ENV CMDARGS --listen + +RUN apt-get update -y && \ + apt-get install -y curl libgl1 libglib2.0-0 python3-pip python-is-python3 git && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +COPY requirements_docker.txt requirements_versions.txt /tmp/ +RUN pip install --no-cache-dir -r /tmp/requirements_docker.txt -r /tmp/requirements_versions.txt && 
\ + rm -f /tmp/requirements_docker.txt /tmp/requirements_versions.txt +RUN pip install --no-cache-dir xformers==0.0.22 --no-dependencies +RUN curl -fsL -o /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2 https://cdn-media.huggingface.co/frpc-gradio-0.2/frpc_linux_amd64 && \ + chmod +x /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2 + +RUN adduser --disabled-password --gecos '' user && \ + mkdir -p /content/app /content/data + +COPY entrypoint.sh /content/ +RUN chown -R user:user /content + +WORKDIR /content +USER user + +RUN git clone https://github.com/lllyasviel/Fooocus /content/app +RUN mv /content/app/models /content/app/models.org + +CMD [ "sh", "-c", "/content/entrypoint.sh ${CMDARGS}" ] diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..dee7b3e7 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,38 @@ +version: '3.9' + +volumes: + fooocus-data: + +services: + app: + build: . + image: fooocus + ports: + - "7865:7865" + environment: + - CMDARGS=--listen # Arguments for launch.py. + - DATADIR=/content/data # Directory which stores models, outputs dir + - config_path=/content/data/config.txt + - config_example_path=/content/data/config_modification_tutorial.txt + - path_checkpoints=/content/data/models/checkpoints/ + - path_loras=/content/data/models/loras/ + - path_embeddings=/content/data/models/embeddings/ + - path_vae_approx=/content/data/models/vae_approx/ + - path_upscale_models=/content/data/models/upscale_models/ + - path_inpaint=/content/data/models/inpaint/ + - path_controlnet=/content/data/models/controlnet/ + - path_clip_vision=/content/data/models/clip_vision/ + - path_fooocus_expansion=/content/data/models/prompt_expansion/fooocus_expansion/ + - path_outputs=/content/app/outputs/ # Warning: If it is not located under '/content/app', you can't see history log! + volumes: + - fooocus-data:/content/data + #- ./models:/import/models # Once you import files, you don't need to mount again. + #- ./outputs:/import/outputs # Once you import files, you don't need to mount again. + tty: true + deploy: + resources: + reservations: + devices: + - driver: nvidia + device_ids: ['0'] + capabilities: [compute, utility] diff --git a/docker.md b/docker.md new file mode 100644 index 00000000..36cfa632 --- /dev/null +++ b/docker.md @@ -0,0 +1,66 @@ +# Fooocus on Docker + +The docker image is based on NVIDIA CUDA 12.3 and PyTorch 2.0, see [Dockerfile](Dockerfile) and [requirements_docker.txt](requirements_docker.txt) for details. + +## Quick start + +**This is just an easy way for testing. Please find more information in the [notes](#notes).** + +1. Clone this repository +2. Build the image with `docker compose build` +3. Run the docker container with `docker compose up`. Building the image takes some time. + +When you see the message `Use the app with http://0.0.0.0:7865/` in the console, you can access the URL in your browser. + +Your models and outputs are stored in the `fooocus-data` volume, which, depending on OS, is stored in `/var/lib/docker/volumes`. + +## Details + +### Update the container manually + +When you are using `docker compose up` continuously, the container is not updated to the latest version of Fooocus automatically. +Run `git pull` before executing `docker compose build --no-cache` to build an image with the latest Fooocus version. 
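For example, updating could look like this (a minimal sketch, assuming the repository was cloned into a local `Fooocus` directory; the commands are the ones named above):

```sh
# Fetch the latest Fooocus sources, then rebuild the image without the cache
cd Fooocus
git pull
docker compose build --no-cache
```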
+You can then start it with `docker compose up` + +### Import models, outputs +If you want to import files from models or the outputs folder, you can uncomment the following settings in the [docker-compose.yml](docker-compose.yml): +``` +#- ./models:/import/models # Once you import files, you don't need to mount again. +#- ./outputs:/import/outputs # Once you import files, you don't need to mount again. +``` +After running `docker compose up`, your files will be copied into `/content/data/models` and `/content/data/outputs` +Since `/content/data` is a persistent volume folder, your files will be persisted even when you re-run `docker compose up --build` without above volume settings. + + +### Paths inside the container + +|Path|Details| +|-|-| +|/content/app|The application stored folder| +|/content/app/models.org|Original 'models' folder.
Files are copied to the '/content/app/models' which is symlinked to '/content/data/models' every time the container boots. (Existing files will not be overwritten.) | +|/content/data|Persistent volume mount point| +|/content/data/models|The folder is symlinked to '/content/app/models'| +|/content/data/outputs|The folder is symlinked to '/content/app/outputs'| + +### Environments + +You can change `config.txt` parameters by using environment variables. +**The priority of using the environments is higher than the values defined in `config.txt`, and they will be saved to the `config_modification_tutorial.txt`** + +Docker specified environments are there. They are used by 'entrypoint.sh' +|Environment|Details| +|-|-| +|DATADIR|'/content/data' location.| +|CMDARGS|Arguments for [entry_with_update.py](entry_with_update.py) which is called by [entrypoint.sh](entrypoint.sh)| +|config_path|'config.txt' location| +|config_example_path|'config_modification_tutorial.txt' location| + +You can also use the same json key names and values explained in the 'config_modification_tutorial.txt' as the environments. +See examples in the [docker-compose.yml](docker-compose.yml) + +## Notes + +- Please keep 'path_outputs' under '/content/app'. Otherwise, you may get an error when you open the history log. +- Docker on Mac/Windows still has issues in the form of slow volume access when you use "bind mount" volumes. Please refer to [this article](https://docs.docker.com/storage/volumes/#use-a-volume-with-docker-compose) for not using "bind mount". +- The MPS backend (Metal Performance Shaders, Apple Silicon M1/M2/etc.) is not yet supported in Docker, see https://github.com/pytorch/pytorch/issues/81224 +- You can also use `docker compose up -d` to start the container detached and connect to the logs with `docker compose logs -f`. This way you can also close the terminal and keep the container running. \ No newline at end of file diff --git a/entrypoint.sh b/entrypoint.sh new file mode 100755 index 00000000..d0dba09c --- /dev/null +++ b/entrypoint.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +ORIGINALDIR=/content/app +# Use predefined DATADIR if it is defined +[[ x"${DATADIR}" == "x" ]] && DATADIR=/content/data + +# Make persistent dir from original dir +function mklink () { + mkdir -p $DATADIR/$1 + ln -s $DATADIR/$1 $ORIGINALDIR +} + +# Copy old files from import dir +function import () { + (test -d /import/$1 && cd /import/$1 && cp -Rpn . $DATADIR/$1/) +} + +cd $ORIGINALDIR + +# models +mklink models +# Copy original files +(cd $ORIGINALDIR/models.org && cp -Rpn . 
$ORIGINALDIR/models/) +# Import old files +import models + +# outputs +mklink outputs +# Import old files +import outputs + +# Start application +python launch.py $* diff --git a/modules/config.py b/modules/config.py index 6800c004..328878cc 100644 --- a/modules/config.py +++ b/modules/config.py @@ -10,8 +10,16 @@ from modules.model_loader import load_file_from_url from modules.util import get_files_from_folder, makedirs_with_log from modules.flags import Performance, MetadataScheme -config_path = os.path.abspath("./config.txt") -config_example_path = os.path.abspath("config_modification_tutorial.txt") +def get_config_path(key, default_value): + env = os.getenv(key) + if env is not None and isinstance(env, str): + print(f"Environment: {key} = {env}") + return env + else: + return os.path.abspath(default_value) + +config_path = get_config_path('config_path', "./config.txt") +config_example_path = get_config_path('config_example_path', "config_modification_tutorial.txt") config_dict = {} always_save_keys = [] visited_keys = [] @@ -123,7 +131,12 @@ def get_dir_or_set_default(key, default_value, as_array=False, make_directory=Fa if key not in always_save_keys: always_save_keys.append(key) - v = config_dict.get(key, None) + v = os.getenv(key) + if v is not None: + print(f"Environment: {key} = {v}") + config_dict[key] = v + else: + v = config_dict.get(key, None) if isinstance(v, str): if make_directory: @@ -165,13 +178,17 @@ path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vi path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion') path_outputs = get_path_output() - def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False): global config_dict, visited_keys if key not in visited_keys: visited_keys.append(key) + v = os.getenv(key) + if v is not None: + print(f"Environment: {key} = {v}") + config_dict[key] = v + if key not in config_dict: config_dict[key] = default_value return default_value diff --git a/modules/util.py b/modules/util.py index 29d48696..c7923ec8 100644 --- a/modules/util.py +++ b/modules/util.py @@ -160,7 +160,7 @@ def generate_temp_filename(folder='./outputs/', extension='png'): random_number = random.randint(1000, 9999) filename = f"{time_string}_{random_number}.{extension}" result = os.path.join(folder, date_string, filename) - return date_string, os.path.abspath(os.path.realpath(result)), filename + return date_string, os.path.abspath(result), filename def get_files_from_folder(folder_path, exensions=None, name_filter=None): diff --git a/readme.md b/readme.md index 18b48f3a..a1e62fa4 100644 --- a/readme.md +++ b/readme.md @@ -237,6 +237,10 @@ You can install Fooocus on Apple Mac silicon (M1 or M2) with macOS 'Catalina' or Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition. +### Docker + +See [docker.md](docker.md) + ### Download Previous Version See the guidelines [here](https://github.com/lllyasviel/Fooocus/discussions/1405). 
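As a usage sketch of the environment override mechanism added to `modules/config.py` above (a hedged example: the service name `app` comes from `docker-compose.yml`, and `default_image_number` is just an illustrative config key — any key from `config_modification_tutorial.txt` should work the same way):

```sh
# Override a config.txt key for a single containerized run; values passed
# this way take priority over config.txt and are echoed by modules/config.py
# as "Environment: <key> = <value>" on startup.
docker compose run --rm --service-ports -e default_image_number=4 app
```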
diff --git a/requirements_docker.txt b/requirements_docker.txt new file mode 100644 index 00000000..3cf4aa89 --- /dev/null +++ b/requirements_docker.txt @@ -0,0 +1,5 @@ +torch==2.0.1 +torchvision==0.15.2 +torchaudio==2.0.2 +torchtext==0.15.2 +torchdata==0.6.1 From 4e526e255ea52ea7420b2b85897bd36430915b57 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 26 Feb 2024 17:39:29 +0100 Subject: [PATCH 047/103] docs: add missing release notes for 2.1.865 --- update_log.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/update_log.md b/update_log.md index e052d24c..79b523a4 100644 --- a/update_log.md +++ b/update_log.md @@ -1,3 +1,8 @@ +# 2.1.865 + +* Various bugfixes +* Add authentication to --listen + # 2.1.864 * New model list. See also discussions. From 692beadbdcf36e4e9f04d21eafba3090448915ee Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 26 Feb 2024 17:41:29 +0100 Subject: [PATCH 048/103] docs: bump version number to 2.2.0-rc1 easier debugging and issue handling --- fooocus_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fooocus_version.py b/fooocus_version.py index 91c2ddda..de51863a 100644 --- a/fooocus_version.py +++ b/fooocus_version.py @@ -1 +1 @@ -version = '2.1.865' +version = '2.2.0-rc1' From 9c30961efda2c63726c0aede238b16f666b1dfaa Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 26 Feb 2024 21:12:27 +0100 Subject: [PATCH 049/103] fix: add missing return statement in model_refresh_clicked --- webui.py | 1 + 1 file changed, 1 insertion(+) diff --git a/webui.py b/webui.py index 5e8853ed..180c7d2b 100644 --- a/webui.py +++ b/webui.py @@ -521,6 +521,7 @@ with shared.gradio_root: results += [gr.update(choices=['None'] + modules.config.model_filenames)] for i in range(modules.config.default_max_lora_number): results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()] + return results model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls, queue=False, show_progress=False) From 4f4d23f4e3e6896daeb16025b483f9de50f8cdc6 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 26 Feb 2024 21:14:11 +0100 Subject: [PATCH 050/103] fix: use filename instead of download function call for lcm lora do not require lcm lora to be downloaded for metadata parsing --- modules/config.py | 5 +++-- modules/meta_parser.py | 6 ++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/modules/config.py b/modules/config.py index 328878cc..09c8fd7c 100644 --- a/modules/config.py +++ b/modules/config.py @@ -474,6 +474,7 @@ with open(config_example_path, "w", encoding="utf-8") as json_file: model_filenames = [] lora_filenames = [] +sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors' def get_model_filenames(folder_paths, name_filter=None): @@ -533,9 +534,9 @@ def downloading_sdxl_lcm_lora(): load_file_from_url( url='https://huggingface.co/lllyasviel/misc/resolve/main/sdxl_lcm_lora.safetensors', model_dir=paths_loras[0], - file_name='sdxl_lcm_lora.safetensors' + file_name=sdxl_lcm_lora ) - return 'sdxl_lcm_lora.safetensors' + return sdxl_lcm_lora def downloading_controlnet_canny(): diff --git a/modules/meta_parser.py b/modules/meta_parser.py index 9b2dadb3..da8c70b2 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -379,7 +379,8 @@ class A1111MetadataParser(MetadataParser): if 'lora_hashes' in data: lora_filenames = modules.config.lora_filenames.copy() - lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora()) + if modules.config.sdxl_lcm_lora 
in lora_filenames: + lora_filenames.remove(modules.config.sdxl_lcm_lora) for li, lora in enumerate(data['lora_hashes'].split(', ')): lora_name, lora_hash, lora_weight = lora.split(': ') for filename in lora_filenames: @@ -460,7 +461,8 @@ class FooocusMetadataParser(MetadataParser): def parse_json(self, metadata: dict) -> dict: model_filenames = modules.config.model_filenames.copy() lora_filenames = modules.config.lora_filenames.copy() - lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora()) + if modules.config.sdxl_lcm_lora in lora_filenames: + lora_filenames.remove(modules.config.sdxl_lcm_lora) for key, value in metadata.items(): if value in ['', 'None']: From 41e88a4e8d9acd20dd088e2a13e09d3ae2fd0500 Mon Sep 17 00:00:00 2001 From: Gianluca Teti <51110452+gteti@users.noreply.github.com> Date: Thu, 29 Feb 2024 16:10:34 +0100 Subject: [PATCH 051/103] docs: fix typo in readme (#2368) --- readme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/readme.md b/readme.md index a1e62fa4..0bfee5b4 100644 --- a/readme.md +++ b/readme.md @@ -297,7 +297,7 @@ In both ways the access is unauthenticated by default. You can add basic authent The below things are already inside the software, and **users do not need to do anything about these**. -1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processsing and "raw" mode, or the LeonardoAI's Prompt Magic). +1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processing and "raw" mode, or the LeonardoAI's Prompt Magic). 2. Native refiner swap inside one single k-sampler. The advantage is that the refiner model can now reuse the base model's momentum (or ODE's history parameters) collected from k-sampling to achieve more coherent sampling. In Automatic1111's high-res fix and ComfyUI's node system, the base model and refiner use two independent k-samplers, which means the momentum is largely wasted, and the sampling continuity is broken. Fooocus uses its own advanced k-diffusion sampling that ensures seamless, native, and continuous swap in a refiner setup. (Update Aug 13: Actually, I discussed this with Automatic1111 several days ago, and it seems that the “native refiner swap inside one single k-sampler” is [merged]( https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371) into the dev branch of webui. Great!) 3. Negative ADM guidance. Because the highest resolution level of XL Base does not have cross attentions, the positive and negative signals for XL's highest resolution level cannot receive enough contrasts during the CFG sampling, causing the results to look a bit plastic or overly smooth in certain cases. Fortunately, since the XL's highest resolution level is still conditioned on image aspect ratios (ADM), we can modify the adm on the positive/negative side to compensate for the lack of CFG contrast in the highest resolution level. (Update Aug 16, the IOS App [Draw Things](https://apps.apple.com/us/app/draw-things-ai-generation/id6444050820) will support Negative ADM Guidance. Great!) 4. We implemented a carefully tuned variation of Section 5.1 of ["Improving Sample Quality of Diffusion Models Using Self-Attention Guidance"](https://arxiv.org/pdf/2210.00939.pdf). 
The weight is set to very low, but this is Fooocus's final guarantee to make sure that the XL will never yield an overly smooth or plastic appearance (examples [here](https://github.com/lllyasviel/Fooocus/discussions/117#sharpness)). This can almost eliminate all cases for which XL still occasionally produces overly smooth results, even with negative ADM guidance. (Update 2023 Aug 18, the Gaussian kernel of SAG is changed to an anisotropic kernel for better structure preservation and fewer artifacts.) From 6db14acf8e2c383d6e33f689646ebf9599f83e9a Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Sat, 2 Mar 2024 16:25:31 +0100 Subject: [PATCH 052/103] docs: update version and changelog --- fooocus_version.py | 2 +- update_log.md | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/fooocus_version.py b/fooocus_version.py index de51863a..d4b750f9 100644 --- a/fooocus_version.py +++ b/fooocus_version.py @@ -1 +1 @@ -version = '2.2.0-rc1' +version = '2.2.0' diff --git a/update_log.md b/update_log.md index 79b523a4..b0192d0d 100644 --- a/update_log.md +++ b/update_log.md @@ -1,4 +1,12 @@ -# 2.1.865 +# [2.2.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.0) + +* Isolate every image generation to truly allow multi-user usage +* Add array support, changes the main prompt when increasing the image number. Syntax: `[[red, green, blue]] flower` +* Add optional metadata to images, allowing you to regenerate and modify them later with the same parameters +* Now supports native PNG, JPG and WEBP image generation +* Add Docker support + +# [2.1.865](https://github.com/lllyasviel/Fooocus/releases/tag/2.1.865) * Various bugfixes * Add authentication to --listen From 90839430da4cd0badff2f718f1365fd81e00b673 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sat, 2 Mar 2024 19:05:11 +0100 Subject: [PATCH 053/103] fix: adjust parameters for upscale fast 2x (#2411) --- modules/async_worker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index 2c029cfb..908cc8c2 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -553,8 +553,8 @@ def worker(): direct_return = False if direct_return: - d = [('Upscale (Fast)', '2x')] - uov_input_image_path = log(uov_input_image, d, output_format) + d = [('Upscale (Fast)', 'upscale_fast', '2x')] + uov_input_image_path = log(uov_input_image, d, output_format=output_format) yield_result(async_task, uov_input_image_path, do_not_show_finished_images=True) return From 4ea3baff501f876f1e1e6628491b56fb5a3832ee Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sun, 3 Mar 2024 00:21:59 +0100 Subject: [PATCH 054/103] fix: add handling for filepaths to image grid (#2414) previously skipped due to not being in np.ndarray format but string --- modules/async_worker.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index 908cc8c2..fd785f07 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -22,6 +22,7 @@ def worker(): import traceback import math import numpy as np + import cv2 import torch import time import shared @@ -79,16 +80,20 @@ def worker(): return def build_image_wall(async_task): - results = async_task.results + results = [] - if len(results) < 2: + if len(async_task.results) < 2: return - for img in results: + for img in async_task.results: + if isinstance(img, str) and 
os.path.exists(img): + img = cv2.imread(img) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) if not isinstance(img, np.ndarray): return if img.ndim != 3: return + results.append(img) H, W, C = results[0].shape From fb94394b10807b4ae23d21fe69e703dc85abae94 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sun, 3 Mar 2024 18:46:26 +0100 Subject: [PATCH 055/103] fix: add fallback value for default_max_lora_number when default_loras is empty (#2430) --- modules/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/config.py b/modules/config.py index 09c8fd7c..a68bd218 100644 --- a/modules/config.py +++ b/modules/config.py @@ -264,7 +264,7 @@ default_loras = get_config_item_or_set_default( ) default_max_lora_number = get_config_item_or_set_default( key='default_max_lora_number', - default_value=len(default_loras), + default_value=len(default_loras) if isinstance(default_loras, list) and len(default_loras) > 0 else 5, validator=lambda x: isinstance(x, int) and x >= 1 ) default_cfg_scale = get_config_item_or_set_default( From c3fd57acb9dc29383a81542b07ae8c2ac863a1ea Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sun, 3 Mar 2024 19:34:38 +0100 Subject: [PATCH 056/103] feat: add metadata flag and steps override to history log (#2425) * feat: add metadata hint to history log * feat: add actual metadata_scheme to log instead of only boolean * feat: add steps to log if they were overridden * fix: pass copy of metadata prevents LoRA file extension removal in history log caused by passing reference to meta_parser fooocus scheme --- modules/async_worker.py | 36 ++++++++++++++++++++---------------- modules/private_logger.py | 4 ++-- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index fd785f07..a8661f4d 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -830,17 +830,21 @@ def worker(): ('Negative Prompt', 'negative_prompt', task['log_negative_prompt']), ('Fooocus V2 Expansion', 'prompt_expansion', task['expansion']), ('Styles', 'styles', str(raw_style_selections)), - ('Performance', 'performance', performance_selection.value), - ('Resolution', 'resolution', str((width, height))), - ('Guidance Scale', 'guidance_scale', guidance_scale), - ('Sharpness', 'sharpness', sharpness), - ('ADM Guidance', 'adm_guidance', str(( - modules.patch.patch_settings[pid].positive_adm_scale, - modules.patch.patch_settings[pid].negative_adm_scale, - modules.patch.patch_settings[pid].adm_scaler_end))), - ('Base Model', 'base_model', base_model_name), - ('Refiner Model', 'refiner_model', refiner_model_name), - ('Refiner Switch', 'refiner_switch', refiner_switch)] + ('Performance', 'performance', performance_selection.value)] + + if performance_selection.steps() != steps: + d.append(('Steps', 'steps', steps)) + + d += [('Resolution', 'resolution', str((width, height))), + ('Guidance Scale', 'guidance_scale', guidance_scale), + ('Sharpness', 'sharpness', sharpness), + ('ADM Guidance', 'adm_guidance', str(( + modules.patch.patch_settings[pid].positive_adm_scale, + modules.patch.patch_settings[pid].negative_adm_scale, + modules.patch.patch_settings[pid].adm_scaler_end))), + ('Base Model', 'base_model', base_model_name), + ('Refiner Model', 'refiner_model', refiner_model_name), + ('Refiner Switch', 'refiner_switch', refiner_switch)] if refiner_model_name != 'None': if overwrite_switch > 0: @@ -857,17 +861,17 @@ def worker(): if 
freeu_enabled: d.append(('FreeU', 'freeu', str((freeu_b1, freeu_b2, freeu_s1, freeu_s2)))) + for li, (n, w) in enumerate(loras): + if n != 'None': + d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}')) + metadata_parser = None if save_metadata_to_images: metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme) metadata_parser.set_data(task['log_positive_prompt'], task['positive'], task['log_negative_prompt'], task['negative'], steps, base_model_name, refiner_model_name, loras) - - for li, (n, w) in enumerate(loras): - if n != 'None': - d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}')) - + d.append(('Metadata Scheme', 'metadata_scheme', metadata_scheme.value if save_metadata_to_images else save_metadata_to_images)) d.append(('Version', 'version', 'Fooocus v' + fooocus_version.version)) img_paths.append(log(x, d, metadata_parser, output_format)) diff --git a/modules/private_logger.py b/modules/private_logger.py index 8fa5f73c..01e570a7 100644 --- a/modules/private_logger.py +++ b/modules/private_logger.py @@ -26,7 +26,7 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format) os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True) - parsed_parameters = metadata_parser.parse_string(metadata) if metadata_parser is not None else '' + parsed_parameters = metadata_parser.parse_string(metadata.copy()) if metadata_parser is not None else '' image = Image.fromarray(img) if output_format == 'png': @@ -90,7 +90,7 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for """ ) - begin_part = f"Fooocus Log {date_string}{css_styles}{js}
Fooocus Log {date_string} (private)\nAll images are clean, without any hidden data/meta, and safe to share with others.\n\n" + begin_part = f"<html><head><title>Fooocus Log {date_string}</title>{css_styles}</head><body>{js}<p>Fooocus Log {date_string} (private)</p>\n<p>Metadata is embedded if enabled in the config or developer debug mode. You can find the information for each image in line <b>Metadata Scheme</b>.</p>
\n\n" end_part = f'\n' middle_part = log_cache.get(html_name, "") From e241c53f0e20df66135625f8751dec58a4ca6cb5 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sun, 3 Mar 2024 21:15:42 +0100 Subject: [PATCH 057/103] feat: adjust width of lora_weight for firefox (#2431) --- modules/html.py | 14 ++++---------- webui.py | 6 +++--- 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/modules/html.py b/modules/html.py index 47a1483a..769151a9 100644 --- a/modules/html.py +++ b/modules/html.py @@ -112,10 +112,6 @@ progress::after { margin-left: -5px !important; } -.lora_enable { - flex-grow: 1 !important; -} - .lora_enable label { height: 100%; } @@ -128,12 +124,10 @@ progress::after { display: none; } -.lora_model { - flex-grow: 5 !important; -} - -.lora_weight { - flex-grow: 5 !important; +@-moz-document url-prefix() { + .lora_weight input[type=number] { + width: 80px; + } } ''' diff --git a/webui.py b/webui.py index 180c7d2b..944f49b7 100644 --- a/webui.py +++ b/webui.py @@ -355,13 +355,13 @@ with shared.gradio_root: for i, (n, v) in enumerate(modules.config.default_loras): with gr.Row(): lora_enabled = gr.Checkbox(label='Enable', value=True, - elem_classes=['lora_enable', 'min_check']) + elem_classes=['lora_enable', 'min_check'], scale=1) lora_model = gr.Dropdown(label=f'LoRA {i + 1}', choices=['None'] + modules.config.lora_filenames, value=n, - elem_classes='lora_model') + elem_classes='lora_model', scale=5) lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight, maximum=modules.config.default_loras_max_weight, step=0.01, value=v, - elem_classes='lora_weight') + elem_classes='lora_weight', scale=5) lora_ctrls += [lora_enabled, lora_model, lora_weight] with gr.Row(): From e965bfc39caaef96a08f2198633a50815afb2b02 Mon Sep 17 00:00:00 2001 From: eddyizm Date: Sun, 3 Mar 2024 15:22:47 -0800 Subject: [PATCH 058/103] fix: add hint for png to metadata scheme selection (#2434) --- language/en.json | 2 +- webui.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/language/en.json b/language/en.json index cb5603f9..f61255c9 100644 --- a/language/en.json +++ b/language/en.json @@ -379,7 +379,7 @@ "Metadata": "Metadata", "Apply Metadata": "Apply Metadata", "Metadata Scheme": "Metadata Scheme", - "Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.", + "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.", "fooocus (json)": "fooocus (json)", "a1111 (plain text)": "a1111 (plain text)" } \ No newline at end of file diff --git a/webui.py b/webui.py index 944f49b7..42dd890f 100644 --- a/webui.py +++ b/webui.py @@ -438,7 +438,7 @@ with shared.gradio_root: save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images, info='Adds parameters to generated images allowing manual regeneration.') metadata_scheme = gr.Radio(label='Metadata Scheme', choices=flags.metadata_scheme, value=modules.config.default_metadata_scheme, - info='Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.', + info='Image Prompt parameters are not included. 
Use png and a1111 for compatibility with Civitai.', visible=modules.config.default_save_metadata_to_images) save_metadata_to_images.change(lambda x: gr.update(visible=x), inputs=[save_metadata_to_images], outputs=[metadata_scheme], From e54fb54f914575dc3b9da5f8b81facdb295fb879 Mon Sep 17 00:00:00 2001 From: nbs Date: Mon, 4 Mar 2024 02:19:49 -0700 Subject: [PATCH 059/103] fix: typo in wildcards/animal.txt (#2433) * Fix typo in animal wildcards * Update animal.txt --- wildcards/animal.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wildcards/animal.txt b/wildcards/animal.txt index 9a6f09ba..3c479daa 100644 --- a/wildcards/animal.txt +++ b/wildcards/animal.txt @@ -18,7 +18,7 @@ Chihuahua Chimpanzee Chinchilla Chipmunk -Comodo Dragon +Komodo Dragon Cow Coyote Crocodile @@ -97,4 +97,4 @@ Whale Wolf Wombat Yak -Zebra \ No newline at end of file +Zebra From 9155d940674d8f90c927920b60e0ce066ae0cf16 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Mon, 4 Mar 2024 11:22:24 +0100 Subject: [PATCH 060/103] feat: match anything in array syntax, not only words and whitespace (#2438) allows e.g. [[ (red:1.1), (blue:1.2) ]] and enables same seed checks for different prompt weight --- modules/sdxl_styles.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/sdxl_styles.py b/modules/sdxl_styles.py index 71afc402..2a310024 100644 --- a/modules/sdxl_styles.py +++ b/modules/sdxl_styles.py @@ -94,9 +94,8 @@ def get_words(arrays, totalMult, index): return [word] + get_words(arrays[1:], math.floor(totalMult/len(words)), index) - def apply_arrays(text, index): - arrays = re.findall(r'\[\[([\s,\w-]+)\]\]', text) + arrays = re.findall(r'\[\[(.*?)\]\]', text) if len(arrays) == 0: return text From ee96b854d973d16fbf69c2c251578c9d74782152 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 4 Mar 2024 11:33:49 +0100 Subject: [PATCH 061/103] docs: update version and changelog --- fooocus_version.py | 2 +- update_log.md | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/fooocus_version.py b/fooocus_version.py index d4b750f9..6c3c2c90 100644 --- a/fooocus_version.py +++ b/fooocus_version.py @@ -1 +1 @@ -version = '2.2.0' +version = '2.2.1' diff --git a/update_log.md b/update_log.md index b0192d0d..322c19c1 100644 --- a/update_log.md +++ b/update_log.md @@ -1,3 +1,9 @@ +# [2.2.1](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.1) + +* Fix some small bugs (e.g. 
image grid, upscale fast 2x, LoRA weight width in Firefox) +* Allow prompt weights in array syntax +* Add steps override and metadata scheme to history log + # [2.2.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.0) * Isolate every image generation to truly allow multi-user usage From 6cfcc620004be58830a55e9f5ee67ca11cfba12c Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Tue, 5 Mar 2024 18:18:47 +0100 Subject: [PATCH 062/103] fix: parse width and height as int when applying metadata (#2452) fixes an issue with A1111 metadata scheme where width and height are strings after splitting resolution --- modules/meta_parser.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/meta_parser.py b/modules/meta_parser.py index da8c70b2..546c093f 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -117,8 +117,8 @@ def get_resolution(key: str, fallback: str | None, source_dict: dict, results: l results.append(-1) else: results.append(gr.update()) - results.append(width) - results.append(height) + results.append(int(width)) + results.append(int(height)) except: results.append(gr.update()) results.append(gr.update()) From 3a64fe3eb376d8c89dda839f169733a79edcca43 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Tue, 5 Mar 2024 21:16:21 +0100 Subject: [PATCH 063/103] fix: do not attempt to remove non-existing image grid file (#2456) image grid is actually not an image here but a numpy array, as the grid isn't saved by default --- webui.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 42dd890f..80b1a3d8 100644 --- a/webui.py +++ b/webui.py @@ -76,7 +76,8 @@ def generate_clicked(task): # delete Fooocus temp images, only keep gradio temp images if args_manager.args.disable_image_log: for filepath in product: - os.remove(filepath) + if isinstance(filepath, str) and os.path.exists(filepath): + os.remove(filepath) execution_time = time.perf_counter() - execution_start_time print(f'Total time: {execution_time:.2f} seconds') From 831c6b93cc95048da8a37f15c3babe2f894db2fb Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sat, 9 Mar 2024 14:13:16 +0100 Subject: [PATCH 064/103] feat: add troubleshooting guide to bug report template again (#2489) --- .github/ISSUE_TEMPLATE/bug_report.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 483e0de1..5b9cded6 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -21,6 +21,7 @@ body:  5. Try a fresh installation of Fooocus in a different directory - see if a clean installation solves the issue Before making a issue report please, check that the issue hasn't been reported recently. 
options: + - label: The issue has not been resolved by following the [troubleshooting guide](https://github.com/lllyasviel/Fooocus/blob/main/troubleshoot.md) - label: The issue exists on a clean installation of Fooocus - label: The issue exists in the current version of Fooocus - label: The issue has not been reported before recently From b6e4bb86f4dc39119f069657b3dd502af7251378 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sat, 9 Mar 2024 16:00:25 +0100 Subject: [PATCH 065/103] feat: use jpeg instead of jpg, use enums instead of strings (#2453) * fix: parse width and height as int when applying metadata (#2452) fixes an issue with A1111 metadata scheme where width and height are strings after splitting resolution * feat: use jpeg instead of jpg, use enums instead of strings --- modules/config.py | 4 ++-- modules/flags.py | 17 +++++++++++------ modules/meta_parser.py | 4 ++-- modules/private_logger.py | 9 +++++---- webui.py | 14 +++++++------- 5 files changed, 27 insertions(+), 21 deletions(-) diff --git a/modules/config.py b/modules/config.py index a68bd218..ef6de2ae 100644 --- a/modules/config.py +++ b/modules/config.py @@ -8,7 +8,7 @@ import modules.sdxl_styles from modules.model_loader import load_file_from_url from modules.util import get_files_from_folder, makedirs_with_log -from modules.flags import Performance, MetadataScheme +from modules.flags import OutputFormat, Performance, MetadataScheme def get_config_path(key, default_value): env = os.getenv(key) @@ -326,7 +326,7 @@ default_max_image_number = get_config_item_or_set_default( default_output_format = get_config_item_or_set_default( key='default_output_format', default_value='png', - validator=lambda x: x in modules.flags.output_formats + validator=lambda x: x in OutputFormat.list() ) default_image_number = get_config_item_or_set_default( key='default_image_number', diff --git a/modules/flags.py b/modules/flags.py index 6f12bc8f..95621c2b 100644 --- a/modules/flags.py +++ b/modules/flags.py @@ -67,7 +67,7 @@ default_parameters = { cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0) } # stop, weight -output_formats = ['png', 'jpg', 'webp'] +output_formats = ['png', 'jpeg', 'webp'] inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6'] inpaint_option_default = 'Inpaint or Outpaint (default)' @@ -89,11 +89,19 @@ metadata_scheme = [ (f'{MetadataScheme.A1111.value} (plain text)', MetadataScheme.A1111.value), ] -lora_count = 5 - controlnet_image_count = 4 +class OutputFormat(Enum): + PNG = 'png' + JPEG = 'jpeg' + WEBP = 'webp' + + @classmethod + def list(cls) -> list: + return list(map(lambda c: c.value, cls)) + + class Steps(IntEnum): QUALITY = 60 SPEED = 30 @@ -120,6 +128,3 @@ class Performance(Enum): def steps_uov(self) -> int | None: return StepsUOV[self.name].value if Steps[self.name] else None - - -performance_selections = Performance.list() diff --git a/modules/meta_parser.py b/modules/meta_parser.py index da8c70b2..546c093f 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -117,8 +117,8 @@ def get_resolution(key: str, fallback: str | None, source_dict: dict, results: l results.append(-1) else: results.append(gr.update()) - results.append(width) - results.append(height) + results.append(int(width)) + results.append(int(height)) except: results.append(gr.update()) results.append(gr.update()) diff --git a/modules/private_logger.py b/modules/private_logger.py index 01e570a7..916d7bf0 100644 --- a/modules/private_logger.py +++ 
b/modules/private_logger.py @@ -6,8 +6,9 @@ import urllib.parse from PIL import Image from PIL.PngImagePlugin import PngInfo -from modules.util import generate_temp_filename +from modules.flags import OutputFormat from modules.meta_parser import MetadataParser, get_exif +from modules.util import generate_temp_filename log_cache = {} @@ -29,7 +30,7 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for parsed_parameters = metadata_parser.parse_string(metadata.copy()) if metadata_parser is not None else '' image = Image.fromarray(img) - if output_format == 'png': + if output_format == OutputFormat.PNG.value: if parsed_parameters != '': pnginfo = PngInfo() pnginfo.add_text('parameters', parsed_parameters) @@ -37,9 +38,9 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for else: pnginfo = None image.save(local_temp_filename, pnginfo=pnginfo) - elif output_format == 'jpg': + elif output_format == OutputFormat.JPEG.value: image.save(local_temp_filename, quality=95, optimize=True, progressive=True, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif()) - elif output_format == 'webp': + elif output_format == OutputFormat.WEBP.value: image.save(local_temp_filename, quality=95, lossless=False, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif()) else: image.save(local_temp_filename) diff --git a/webui.py b/webui.py index 42dd890f..5dab79d0 100644 --- a/webui.py +++ b/webui.py @@ -254,7 +254,7 @@ with shared.gradio_root: with gr.Column(scale=1, visible=modules.config.default_advanced_checkbox) as advanced_column: with gr.Tab(label='Setting'): performance_selection = gr.Radio(label='Performance', - choices=modules.flags.performance_selections, + choices=flags.Performance.list(), value=modules.config.default_performance) aspect_ratios_selection = gr.Radio(label='Aspect Ratios', choices=modules.config.available_aspect_ratios, value=modules.config.default_aspect_ratio, info='width × height', @@ -262,7 +262,7 @@ with shared.gradio_root: image_number = gr.Slider(label='Image Number', minimum=1, maximum=modules.config.default_max_image_number, step=1, value=modules.config.default_image_number) output_format = gr.Radio(label='Output Format', - choices=modules.flags.output_formats, + choices=flags.OutputFormat.list(), value=modules.config.default_output_format) negative_prompt = gr.Textbox(label='Negative Prompt', show_label=True, placeholder="Type prompt here.", @@ -427,8 +427,8 @@ with shared.gradio_root: disable_preview = gr.Checkbox(label='Disable Preview', value=False, info='Disable preview during generation.') disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results', - value=modules.config.default_performance == 'Extreme Speed', - interactive=modules.config.default_performance != 'Extreme Speed', + value=modules.config.default_performance == flags.Performance.EXTREME_SPEED.value, + interactive=modules.config.default_performance != flags.Performance.EXTREME_SPEED.value, info='Disable intermediate results during generation, only show final gallery.') disable_seed_increment = gr.Checkbox(label='Disable seed increment', info='Disable automatic seed increment when image number is > 1.', @@ -526,9 +526,9 @@ with shared.gradio_root: model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls, queue=False, show_progress=False) - performance_selection.change(lambda x: [gr.update(interactive=x != 
'Extreme Speed')] * 11 + - [gr.update(visible=x != 'Extreme Speed')] * 1 + - [gr.update(interactive=x != 'Extreme Speed', value=x == 'Extreme Speed', )] * 1, + performance_selection.change(lambda x: [gr.update(interactive=x != flags.Performance.EXTREME_SPEED.value)] * 11 + + [gr.update(visible=x != flags.Performance.EXTREME_SPEED.value)] * 1 + + [gr.update(interactive=x != flags.Performance.EXTREME_SPEED.value, value=x == flags.Performance.EXTREME_SPEED.value, )] * 1, inputs=performance_selection, outputs=[ guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive, From 25650b4bc4a9e6103c1d384d31c61aae13391de5 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sun, 10 Mar 2024 14:34:48 +0100 Subject: [PATCH 066/103] feat: add performance lightning with 4 step LoRA (#2415) * feat: add performance sdxl lightning based on https://huggingface.co/ByteDance/SDXL-Lightning/blob/main/sdxl_lightning_4step_lora.safetensors * feat: add method for centralized restriction of features for specific performance modes * feat: add lightning preset --- modules/async_worker.py | 19 +++++++++++++++ modules/config.py | 9 +++++++ modules/flags.py | 9 +++++++ presets/lightning.json | 52 +++++++++++++++++++++++++++++++++++++++++ webui.py | 6 ++--- 5 files changed, 92 insertions(+), 3 deletions(-) create mode 100644 presets/lightning.json diff --git a/modules/async_worker.py b/modules/async_worker.py index a8661f4d..17c2645c 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -250,6 +250,25 @@ def worker(): adm_scaler_negative = 1.0 adm_scaler_end = 0.0 + elif performance_selection == Performance.LIGHTNING: + print('Enter Lightning mode.') + progressbar(async_task, 1, 'Downloading Lightning components ...') + loras += [(modules.config.downloading_sdxl_lightning_lora(), 1.0)] + + if refiner_model_name != 'None': + print(f'Refiner disabled in Lightning mode.') + + refiner_model_name = 'None' + sampler_name = 'euler' + scheduler_name = 'sgm_uniform' + sharpness = 0.0 + guidance_scale = 1.0 + adaptive_cfg = 1.0 + refiner_switch = 1.0 + adm_scaler_positive = 1.0 + adm_scaler_negative = 1.0 + adm_scaler_end = 0.0 + print(f'[Parameters] Adaptive CFG = {adaptive_cfg}') print(f'[Parameters] Sharpness = {sharpness}') print(f'[Parameters] ControlNet Softness = {controlnet_softness}') diff --git a/modules/config.py b/modules/config.py index ef6de2ae..0d4156c7 100644 --- a/modules/config.py +++ b/modules/config.py @@ -475,6 +475,7 @@ with open(config_example_path, "w", encoding="utf-8") as json_file: model_filenames = [] lora_filenames = [] sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors' +sdxl_lightning_lora = 'sdxl_lightning_4step_lora.safetensors' def get_model_filenames(folder_paths, name_filter=None): @@ -538,6 +539,14 @@ def downloading_sdxl_lcm_lora(): ) return sdxl_lcm_lora +def downloading_sdxl_lightning_lora(): + load_file_from_url( + url='https://huggingface.co/ByteDance/SDXL-Lightning/resolve/main/sdxl_lightning_4step_lora.safetensors', + model_dir=paths_loras[0], + file_name=sdxl_lightning_lora + ) + return sdxl_lightning_lora + def downloading_controlnet_canny(): load_file_from_url( diff --git a/modules/flags.py b/modules/flags.py index 95621c2b..c9d13fd8 100644 --- a/modules/flags.py +++ b/modules/flags.py @@ -106,23 +106,32 @@ class Steps(IntEnum): QUALITY = 60 SPEED = 30 EXTREME_SPEED = 8 + LIGHTNING = 4 class StepsUOV(IntEnum): QUALITY = 36 SPEED = 18 EXTREME_SPEED = 8 + LIGHTNING = 4 class Performance(Enum): QUALITY = 'Quality' SPEED = 'Speed' 
EXTREME_SPEED = 'Extreme Speed' + LIGHTNING = 'Lightning' @classmethod def list(cls) -> list: return list(map(lambda c: c.value, cls)) + @classmethod + def has_restricted_features(cls, x) -> bool: + if isinstance(x, Performance): + x = x.value + return x in [cls.EXTREME_SPEED.value, cls.LIGHTNING.value] + def steps(self) -> int | None: return Steps[self.name].value if Steps[self.name] else None diff --git a/presets/lightning.json b/presets/lightning.json new file mode 100644 index 00000000..64249358 --- /dev/null +++ b/presets/lightning.json @@ -0,0 +1,52 @@ +{ + "default_model": "juggernautXL_v8Rundiffusion.safetensors", + "default_refiner": "None", + "default_refiner_switch": 0.5, + "default_loras": [ + [ + "None", + 1.0 + ], + [ + "None", + 1.0 + ], + [ + "None", + 1.0 + ], + [ + "None", + 1.0 + ], + [ + "None", + 1.0 + ] + ], + "default_cfg_scale": 4.0, + "default_sample_sharpness": 2.0, + "default_sampler": "dpmpp_2m_sde_gpu", + "default_scheduler": "karras", + "default_performance": "Lightning", + "default_prompt": "", + "default_prompt_negative": "", + "default_styles": [ + "Fooocus V2", + "Fooocus Enhance", + "Fooocus Sharp" + ], + "default_aspect_ratio": "1152*896", + "checkpoint_downloads": { + "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors" + }, + "embeddings_downloads": {}, + "lora_downloads": {}, + "previous_default_models": [ + "juggernautXL_version8Rundiffusion.safetensors", + "juggernautXL_version7Rundiffusion.safetensors", + "juggernautXL_v7Rundiffusion.safetensors", + "juggernautXL_version6Rundiffusion.safetensors", + "juggernautXL_v6Rundiffusion.safetensors" + ] +} \ No newline at end of file diff --git a/webui.py b/webui.py index 5dab79d0..bcd2a5fd 100644 --- a/webui.py +++ b/webui.py @@ -526,9 +526,9 @@ with shared.gradio_root: model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls, queue=False, show_progress=False) - performance_selection.change(lambda x: [gr.update(interactive=x != flags.Performance.EXTREME_SPEED.value)] * 11 + - [gr.update(visible=x != flags.Performance.EXTREME_SPEED.value)] * 1 + - [gr.update(interactive=x != flags.Performance.EXTREME_SPEED.value, value=x == flags.Performance.EXTREME_SPEED.value, )] * 1, + performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 + + [gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 + + [gr.update(interactive=not flags.Performance.has_restricted_features(x), value=flags.Performance.has_restricted_features(x))] * 1, inputs=performance_selection, outputs=[ guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive, From db7d2018ca6d34757e1a6b97fab22c4c0ef3cd19 Mon Sep 17 00:00:00 2001 From: xhoxye <129571231+xhoxye@users.noreply.github.com> Date: Sun, 10 Mar 2024 21:42:03 +0800 Subject: [PATCH 067/103] fix: change synthetic refiner switch from 0.5 to 0.8 (#2165) * fix problem 1. In partial redrawing, when refiner is empty, enable use_synthetic_refiner. The default switching timing of 0.5 is too early, which is now modified to SDXL default of 0.8. 2. When using custom steps, the calculation of switching timing is wrong. Now it is modified to calculate "steps x timing" after custom steps are used. 
* fix: parse width and height as int when applying metadata (#2452) fixes an issue with A1111 metadata scheme where width and height are strings after splitting resolution * fix: do not attempt to remove non-existing image grid file (#2456) image grid is actually not an image here but a numpy array, as the grid isn't saved by default * feat: add troubleshooting guide to bug report template again (#2489) --------- Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Co-authored-by: Manuel Schmid --- modules/async_worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index 17c2645c..d4fbd95d 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -366,7 +366,7 @@ def worker(): print(f'[Inpaint] Current inpaint model is {inpaint_patch_model_path}') if refiner_model_name == 'None': use_synthetic_refiner = True - refiner_switch = 0.5 + refiner_switch = 0.8 else: inpaint_head_model_path, inpaint_patch_model_path = None, None print(f'[Inpaint] Parameterized inpaint is disabled.') From 85e8aa8ce20dad2f87ebc4eafbc97707174ba64b Mon Sep 17 00:00:00 2001 From: Magee Date: Sun, 10 Mar 2024 16:06:08 -0400 Subject: [PATCH 068/103] feat: add config for temp path and temp path cleanup on launch (#1992) * Added options to set the Gradio cache path and clear cache on launch. * Renamed cache to temp * clear temp * feat: do not delete temp folder but only clean content also use fallback to system temp dir see https://github.com/gradio-app/gradio/blob/6683ab2589f9d8658e1f51acc1b7526edce988d3/gradio/utils.py#L1151 * refactor: code cleanup * feat: unify arg --temp-path and new temp_path config value * feat: change default temp dir from gradio to fooocus * refactor: move temp path method definition and configs * feat: rename get_temp_path to init_temp_path --------- Co-authored-by: steveyourcreativepeople Co-authored-by: Manuel Schmid --- args_manager.py | 3 --- launch.py | 21 ++++++++++++++------- modules/config.py | 36 +++++++++++++++++++++++++++++++++++- modules/launch_util.py | 20 +++++++++++++++++--- modules/private_logger.py | 2 +- 5 files changed, 67 insertions(+), 15 deletions(-) diff --git a/args_manager.py b/args_manager.py index c7c1b7ab..8c3e1918 100644 --- a/args_manager.py +++ b/args_manager.py @@ -49,7 +49,4 @@ if args_parser.args.disable_analytics: if args_parser.args.disable_in_browser: args_parser.args.in_browser = False -if args_parser.args.temp_path is None: - args_parser.args.temp_path = os.path.join(gettempdir(), 'Fooocus') - args = args_parser.args diff --git a/launch.py b/launch.py index 4269f1fc..b3b06d6e 100644 --- a/launch.py +++ b/launch.py @@ -1,6 +1,6 @@ import os -import sys import ssl +import sys print('[System ARGV] ' + str(sys.argv)) @@ -15,15 +15,13 @@ if "GRADIO_SERVER_PORT" not in os.environ: ssl._create_default_https_context = ssl._create_unverified_context - import platform import fooocus_version from build_launcher import build_launcher -from modules.launch_util import is_installed, run, python, run_pip, requirements_met +from modules.launch_util import is_installed, run, python, run_pip, requirements_met, delete_folder_content from modules.model_loader import load_file_from_url - REINSTALL_ALL = False TRY_INSTALL_XFORMERS = False @@ -68,6 +66,7 @@ vae_approx_filenames = [ 'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors') ] + def ini_args(): from args_manager import args return args @@ -77,14 +76,23 @@ prepare_environment() 
build_launcher() args = ini_args() - if args.gpu_device_id is not None: os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id) print("Set device to:", args.gpu_device_id) - from modules import config +os.environ['GRADIO_TEMP_DIR'] = config.temp_path + +if config.temp_path_cleanup_on_launch: + print(f'[Cleanup] Attempting to delete content of temp dir {config.temp_path}') + result = delete_folder_content(config.temp_path, '[Cleanup] ') + if result: + print("[Cleanup] Cleanup successful") + else: + print(f"[Cleanup] Failed to delete content of temp dir.") + + def download_models(): for file_name, url in vae_approx_filenames: load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name) @@ -123,5 +131,4 @@ def download_models(): download_models() - from webui import * diff --git a/modules/config.py b/modules/config.py index a68bd218..543bba5e 100644 --- a/modules/config.py +++ b/modules/config.py @@ -3,6 +3,7 @@ import json import math import numbers import args_manager +import tempfile import modules.flags import modules.sdxl_styles @@ -10,6 +11,7 @@ from modules.model_loader import load_file_from_url from modules.util import get_files_from_folder, makedirs_with_log from modules.flags import Performance, MetadataScheme + def get_config_path(key, default_value): env = os.getenv(key) if env is not None and isinstance(env, str): @@ -18,6 +20,7 @@ def get_config_path(key, default_value): else: return os.path.abspath(default_value) + config_path = get_config_path('config_path', "./config.txt") config_example_path = get_config_path('config_example_path', "config_modification_tutorial.txt") config_dict = {} @@ -117,7 +120,7 @@ def get_path_output() -> str: global config_dict path_output = get_dir_or_set_default('path_outputs', '../outputs/', make_directory=True) if args_manager.args.output_path: - print(f'[CONFIG] Overriding config value path_outputs with {args_manager.args.output_path}') + print(f'Overriding config value path_outputs with {args_manager.args.output_path}') config_dict['path_outputs'] = path_output = args_manager.args.output_path return path_output @@ -178,6 +181,7 @@ path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vi path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion') path_outputs = get_path_output() + def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False): global config_dict, visited_keys @@ -206,6 +210,36 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_ return default_value +def init_temp_path(path: str | None, default_path: str) -> str: + if args_manager.args.temp_path: + path = args_manager.args.temp_path + + if path != '' and path != default_path: + try: + if not os.path.isabs(path): + path = os.path.abspath(path) + os.makedirs(path, exist_ok=True) + print(f'Using temp path {path}') + return path + except Exception as e: + print(f'Could not create temp path {path}. 
Reason: {e}') + print(f'Using default temp path {default_path} instead.') + + os.makedirs(default_path, exist_ok=True) + return default_path + + +default_temp_path = os.path.join(tempfile.gettempdir(), 'fooocus') +temp_path = init_temp_path(get_config_item_or_set_default( + key='temp_path', + default_value=default_temp_path, + validator=lambda x: isinstance(x, str), +), default_temp_path) +temp_path_cleanup_on_launch = get_config_item_or_set_default( + key='temp_path_cleanup_on_launch', + default_value=True, + validator=lambda x: isinstance(x, bool) +) default_base_model_name = get_config_item_or_set_default( key='default_model', default_value='model.safetensors', diff --git a/modules/launch_util.py b/modules/launch_util.py index b483d515..370dc048 100644 --- a/modules/launch_util.py +++ b/modules/launch_util.py @@ -1,6 +1,7 @@ import os import importlib import importlib.util +import shutil import subprocess import sys import re @@ -9,9 +10,6 @@ import importlib.metadata import packaging.version from packaging.requirements import Requirement - - - logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh... logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) @@ -101,3 +99,19 @@ def requirements_met(requirements_file): return True + +def delete_folder_content(folder, prefix=None): + result = True + + for filename in os.listdir(folder): + file_path = os.path.join(folder, filename) + try: + if os.path.isfile(file_path) or os.path.islink(file_path): + os.unlink(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + except Exception as e: + print(f'{prefix}Failed to delete {file_path}. Reason: {e}') + result = False + + return result \ No newline at end of file diff --git a/modules/private_logger.py b/modules/private_logger.py index 01e570a7..73fdda54 100644 --- a/modules/private_logger.py +++ b/modules/private_logger.py @@ -21,7 +21,7 @@ def get_current_html_path(output_format=None): def log(img, metadata, metadata_parser: MetadataParser | None = None, output_format=None) -> str: - path_outputs = args_manager.args.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs + path_outputs = modules.config.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs output_format = output_format if output_format else modules.config.default_output_format date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format) os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True) From 5409bfdb2610bc50804b6bedf36d152339aaadc2 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sun, 10 Mar 2024 21:08:55 +0100 Subject: [PATCH 069/103] Revert "feat: add config for temp path and temp path cleanup on launch (#1992)" (#2502) This reverts commit 85e8aa8ce20dad2f87ebc4eafbc97707174ba64b. 
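Patch 068 above and the two patches that follow are a revert-and-reapply sequence around the same temp-path feature, which lands for good in patch 070. The resolution logic in `init_temp_path` condenses to the following sketch (prints trimmed, behavior as in the diff):

```python
import os
import tempfile

DEFAULT_TEMP_PATH = os.path.join(tempfile.gettempdir(), 'fooocus')

def resolve_temp_path(cli_temp_path: str | None, config_temp_path: str) -> str:
    # The --temp-path argument overrides the config value; any non-default
    # path is created if possible, with the system temp dir as the fallback.
    path = cli_temp_path or config_temp_path
    if path != '' and path != DEFAULT_TEMP_PATH:
        try:
            path = os.path.abspath(path)
            os.makedirs(path, exist_ok=True)
            return path
        except OSError as e:
            print(f'Could not create temp path {path}. Reason: {e}')
    os.makedirs(DEFAULT_TEMP_PATH, exist_ok=True)
    return DEFAULT_TEMP_PATH
```

launch.py then exports the result as `GRADIO_TEMP_DIR` and, when `temp_path_cleanup_on_launch` is set, empties the directory with `delete_folder_content` instead of deleting it, so Gradio always keeps a valid working directory.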
--- args_manager.py | 3 +++ launch.py | 21 +++++++-------------- modules/config.py | 36 +----------------------------------- modules/launch_util.py | 20 +++----------------- modules/private_logger.py | 2 +- 5 files changed, 15 insertions(+), 67 deletions(-) diff --git a/args_manager.py b/args_manager.py index 8c3e1918..c7c1b7ab 100644 --- a/args_manager.py +++ b/args_manager.py @@ -49,4 +49,7 @@ if args_parser.args.disable_analytics: if args_parser.args.disable_in_browser: args_parser.args.in_browser = False +if args_parser.args.temp_path is None: + args_parser.args.temp_path = os.path.join(gettempdir(), 'Fooocus') + args = args_parser.args diff --git a/launch.py b/launch.py index b3b06d6e..4269f1fc 100644 --- a/launch.py +++ b/launch.py @@ -1,6 +1,6 @@ import os -import ssl import sys +import ssl print('[System ARGV] ' + str(sys.argv)) @@ -15,13 +15,15 @@ if "GRADIO_SERVER_PORT" not in os.environ: ssl._create_default_https_context = ssl._create_unverified_context + import platform import fooocus_version from build_launcher import build_launcher -from modules.launch_util import is_installed, run, python, run_pip, requirements_met, delete_folder_content +from modules.launch_util import is_installed, run, python, run_pip, requirements_met from modules.model_loader import load_file_from_url + REINSTALL_ALL = False TRY_INSTALL_XFORMERS = False @@ -66,7 +68,6 @@ vae_approx_filenames = [ 'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors') ] - def ini_args(): from args_manager import args return args @@ -76,23 +77,14 @@ prepare_environment() build_launcher() args = ini_args() + if args.gpu_device_id is not None: os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id) print("Set device to:", args.gpu_device_id) + from modules import config -os.environ['GRADIO_TEMP_DIR'] = config.temp_path - -if config.temp_path_cleanup_on_launch: - print(f'[Cleanup] Attempting to delete content of temp dir {config.temp_path}') - result = delete_folder_content(config.temp_path, '[Cleanup] ') - if result: - print("[Cleanup] Cleanup successful") - else: - print(f"[Cleanup] Failed to delete content of temp dir.") - - def download_models(): for file_name, url in vae_approx_filenames: load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name) @@ -131,4 +123,5 @@ def download_models(): download_models() + from webui import * diff --git a/modules/config.py b/modules/config.py index 543bba5e..a68bd218 100644 --- a/modules/config.py +++ b/modules/config.py @@ -3,7 +3,6 @@ import json import math import numbers import args_manager -import tempfile import modules.flags import modules.sdxl_styles @@ -11,7 +10,6 @@ from modules.model_loader import load_file_from_url from modules.util import get_files_from_folder, makedirs_with_log from modules.flags import Performance, MetadataScheme - def get_config_path(key, default_value): env = os.getenv(key) if env is not None and isinstance(env, str): @@ -20,7 +18,6 @@ def get_config_path(key, default_value): else: return os.path.abspath(default_value) - config_path = get_config_path('config_path', "./config.txt") config_example_path = get_config_path('config_example_path', "config_modification_tutorial.txt") config_dict = {} @@ -120,7 +117,7 @@ def get_path_output() -> str: global config_dict path_output = get_dir_or_set_default('path_outputs', '../outputs/', make_directory=True) if args_manager.args.output_path: - print(f'Overriding config value path_outputs with {args_manager.args.output_path}') + print(f'[CONFIG] 
Overriding config value path_outputs with {args_manager.args.output_path}') config_dict['path_outputs'] = path_output = args_manager.args.output_path return path_output @@ -181,7 +178,6 @@ path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vi path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion') path_outputs = get_path_output() - def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False): global config_dict, visited_keys @@ -210,36 +206,6 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_ return default_value -def init_temp_path(path: str | None, default_path: str) -> str: - if args_manager.args.temp_path: - path = args_manager.args.temp_path - - if path != '' and path != default_path: - try: - if not os.path.isabs(path): - path = os.path.abspath(path) - os.makedirs(path, exist_ok=True) - print(f'Using temp path {path}') - return path - except Exception as e: - print(f'Could not create temp path {path}. Reason: {e}') - print(f'Using default temp path {default_path} instead.') - - os.makedirs(default_path, exist_ok=True) - return default_path - - -default_temp_path = os.path.join(tempfile.gettempdir(), 'fooocus') -temp_path = init_temp_path(get_config_item_or_set_default( - key='temp_path', - default_value=default_temp_path, - validator=lambda x: isinstance(x, str), -), default_temp_path) -temp_path_cleanup_on_launch = get_config_item_or_set_default( - key='temp_path_cleanup_on_launch', - default_value=True, - validator=lambda x: isinstance(x, bool) -) default_base_model_name = get_config_item_or_set_default( key='default_model', default_value='model.safetensors', diff --git a/modules/launch_util.py b/modules/launch_util.py index 370dc048..b483d515 100644 --- a/modules/launch_util.py +++ b/modules/launch_util.py @@ -1,7 +1,6 @@ import os import importlib import importlib.util -import shutil import subprocess import sys import re @@ -10,6 +9,9 @@ import importlib.metadata import packaging.version from packaging.requirements import Requirement + + + logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh... logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) @@ -99,19 +101,3 @@ def requirements_met(requirements_file): return True - -def delete_folder_content(folder, prefix=None): - result = True - - for filename in os.listdir(folder): - file_path = os.path.join(folder, filename) - try: - if os.path.isfile(file_path) or os.path.islink(file_path): - os.unlink(file_path) - elif os.path.isdir(file_path): - shutil.rmtree(file_path) - except Exception as e: - print(f'{prefix}Failed to delete {file_path}. 
Reason: {e}') - result = False - - return result \ No newline at end of file diff --git a/modules/private_logger.py b/modules/private_logger.py index 73fdda54..01e570a7 100644 --- a/modules/private_logger.py +++ b/modules/private_logger.py @@ -21,7 +21,7 @@ def get_current_html_path(output_format=None): def log(img, metadata, metadata_parser: MetadataParser | None = None, output_format=None) -> str: - path_outputs = modules.config.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs + path_outputs = args_manager.args.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs output_format = output_format if output_format else modules.config.default_output_format date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format) os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True) From 400471f7afa68d6ee90b4cfc4f181e5bc9cf0a6a Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Sun, 10 Mar 2024 21:09:49 +0100 Subject: [PATCH 070/103] feat: add config for temp path and temp path cleanup on launch (#1992) * Added options to set the Gradio cache path and clear cache on launch. * Renamed cache to temp * clear temp * feat: do not delete temp folder but only clean content also use fallback to system temp dir see https://github.com/gradio-app/gradio/blob/6683ab2589f9d8658e1f51acc1b7526edce988d3/gradio/utils.py#L1151 * refactor: code cleanup * feat: unify arg --temp-path and new temp_path config value * feat: change default temp dir from gradio to fooocus * refactor: move temp path method definition and configs * feat: rename get_temp_path to init_temp_path --------- Co-authored-by: Magee Co-authored-by: steveyourcreativepeople Co-authored-by: Manuel Schmid --- args_manager.py | 3 --- launch.py | 21 ++++++++++++++------- modules/config.py | 36 +++++++++++++++++++++++++++++++++++- modules/launch_util.py | 20 +++++++++++++++++--- modules/private_logger.py | 2 +- 5 files changed, 67 insertions(+), 15 deletions(-) diff --git a/args_manager.py b/args_manager.py index c7c1b7ab..8c3e1918 100644 --- a/args_manager.py +++ b/args_manager.py @@ -49,7 +49,4 @@ if args_parser.args.disable_analytics: if args_parser.args.disable_in_browser: args_parser.args.in_browser = False -if args_parser.args.temp_path is None: - args_parser.args.temp_path = os.path.join(gettempdir(), 'Fooocus') - args = args_parser.args diff --git a/launch.py b/launch.py index 4269f1fc..b3b06d6e 100644 --- a/launch.py +++ b/launch.py @@ -1,6 +1,6 @@ import os -import sys import ssl +import sys print('[System ARGV] ' + str(sys.argv)) @@ -15,15 +15,13 @@ if "GRADIO_SERVER_PORT" not in os.environ: ssl._create_default_https_context = ssl._create_unverified_context - import platform import fooocus_version from build_launcher import build_launcher -from modules.launch_util import is_installed, run, python, run_pip, requirements_met +from modules.launch_util import is_installed, run, python, run_pip, requirements_met, delete_folder_content from modules.model_loader import load_file_from_url - REINSTALL_ALL = False TRY_INSTALL_XFORMERS = False @@ -68,6 +66,7 @@ vae_approx_filenames = [ 'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors') ] + def ini_args(): from args_manager import args return args @@ -77,14 +76,23 @@ prepare_environment() build_launcher() args = ini_args() - if args.gpu_device_id is not None: os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id) print("Set device to:", 
args.gpu_device_id) - from modules import config +os.environ['GRADIO_TEMP_DIR'] = config.temp_path + +if config.temp_path_cleanup_on_launch: + print(f'[Cleanup] Attempting to delete content of temp dir {config.temp_path}') + result = delete_folder_content(config.temp_path, '[Cleanup] ') + if result: + print("[Cleanup] Cleanup successful") + else: + print(f"[Cleanup] Failed to delete content of temp dir.") + + def download_models(): for file_name, url in vae_approx_filenames: load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name) @@ -123,5 +131,4 @@ def download_models(): download_models() - from webui import * diff --git a/modules/config.py b/modules/config.py index 0d4156c7..66904096 100644 --- a/modules/config.py +++ b/modules/config.py @@ -3,6 +3,7 @@ import json import math import numbers import args_manager +import tempfile import modules.flags import modules.sdxl_styles @@ -10,6 +11,7 @@ from modules.model_loader import load_file_from_url from modules.util import get_files_from_folder, makedirs_with_log from modules.flags import OutputFormat, Performance, MetadataScheme + def get_config_path(key, default_value): env = os.getenv(key) if env is not None and isinstance(env, str): @@ -18,6 +20,7 @@ def get_config_path(key, default_value): else: return os.path.abspath(default_value) + config_path = get_config_path('config_path', "./config.txt") config_example_path = get_config_path('config_example_path', "config_modification_tutorial.txt") config_dict = {} @@ -117,7 +120,7 @@ def get_path_output() -> str: global config_dict path_output = get_dir_or_set_default('path_outputs', '../outputs/', make_directory=True) if args_manager.args.output_path: - print(f'[CONFIG] Overriding config value path_outputs with {args_manager.args.output_path}') + print(f'Overriding config value path_outputs with {args_manager.args.output_path}') config_dict['path_outputs'] = path_output = args_manager.args.output_path return path_output @@ -178,6 +181,7 @@ path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vi path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion') path_outputs = get_path_output() + def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False): global config_dict, visited_keys @@ -206,6 +210,36 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_ return default_value +def init_temp_path(path: str | None, default_path: str) -> str: + if args_manager.args.temp_path: + path = args_manager.args.temp_path + + if path != '' and path != default_path: + try: + if not os.path.isabs(path): + path = os.path.abspath(path) + os.makedirs(path, exist_ok=True) + print(f'Using temp path {path}') + return path + except Exception as e: + print(f'Could not create temp path {path}. 
Reason: {e}') + print(f'Using default temp path {default_path} instead.') + + os.makedirs(default_path, exist_ok=True) + return default_path + + +default_temp_path = os.path.join(tempfile.gettempdir(), 'fooocus') +temp_path = init_temp_path(get_config_item_or_set_default( + key='temp_path', + default_value=default_temp_path, + validator=lambda x: isinstance(x, str), +), default_temp_path) +temp_path_cleanup_on_launch = get_config_item_or_set_default( + key='temp_path_cleanup_on_launch', + default_value=True, + validator=lambda x: isinstance(x, bool) +) default_base_model_name = get_config_item_or_set_default( key='default_model', default_value='model.safetensors', diff --git a/modules/launch_util.py b/modules/launch_util.py index b483d515..370dc048 100644 --- a/modules/launch_util.py +++ b/modules/launch_util.py @@ -1,6 +1,7 @@ import os import importlib import importlib.util +import shutil import subprocess import sys import re @@ -9,9 +10,6 @@ import importlib.metadata import packaging.version from packaging.requirements import Requirement - - - logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh... logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) @@ -101,3 +99,19 @@ def requirements_met(requirements_file): return True + +def delete_folder_content(folder, prefix=None): + result = True + + for filename in os.listdir(folder): + file_path = os.path.join(folder, filename) + try: + if os.path.isfile(file_path) or os.path.islink(file_path): + os.unlink(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + except Exception as e: + print(f'{prefix}Failed to delete {file_path}. Reason: {e}') + result = False + + return result \ No newline at end of file diff --git a/modules/private_logger.py b/modules/private_logger.py index 916d7bf0..edd9457d 100644 --- a/modules/private_logger.py +++ b/modules/private_logger.py @@ -22,7 +22,7 @@ def get_current_html_path(output_format=None): def log(img, metadata, metadata_parser: MetadataParser | None = None, output_format=None) -> str: - path_outputs = args_manager.args.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs + path_outputs = modules.config.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs output_format = output_format if output_format else modules.config.default_output_format date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format) os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True) From f6117180d4e02fa90e356755f86ca661af628542 Mon Sep 17 00:00:00 2001 From: Cruxial Date: Sun, 10 Mar 2024 21:35:41 +0100 Subject: [PATCH 071/103] feat: scan wildcard subdirectories (#2466) * Fix typo * Scan wildcards recursively Adds a method for getting the top-most occurrence of a given file in a directory tree * Use already existing method for locating files * Fix issue with incorrect files being loaded When using the `name-filter` parameter in `get_model_filenames`, it doesn't guarantee the best match to be in the first index. This change adds a step to ensure the correct wildcard is being loaded. 
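The last fix bullet above is the subtle part of patch 071: `get_files_from_folder` now scans the wildcard folder recursively, so a substring name filter can surface a file from a subdirectory ahead of the intended one. The `apply_wildcards` diff below therefore keeps only exact basename matches; restated in isolation:

```python
import os

def resolve_wildcard_file(placeholder: str, wildcard_filenames: list) -> str | None:
    # wildcard_filenames holds paths relative to the wildcards folder,
    # possibly nested; only an exact basename match may serve __placeholder__.
    matches = [x for x in wildcard_filenames
               if os.path.splitext(os.path.basename(x))[0] == placeholder]
    return matches[0] if matches else None

# Hypothetical listing: the nested near-miss does not shadow the exact match.
print(resolve_wildcard_file('color', ['nested/colorful.txt', 'color.txt']))  # color.txt
```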
* feat: make path for wildcards configurable, cache filenames on refresh files, rename button variable * Fix formatting --------- Co-authored-by: Manuel Schmid --- modules/config.py | 15 ++++++++++----- modules/sdxl_styles.py | 11 ++++++----- modules/util.py | 4 ++-- webui.py | 9 ++++----- 4 files changed, 22 insertions(+), 17 deletions(-) diff --git a/modules/config.py b/modules/config.py index 66904096..83590a24 100644 --- a/modules/config.py +++ b/modules/config.py @@ -179,6 +179,7 @@ path_inpaint = get_dir_or_set_default('path_inpaint', '../models/inpaint/') path_controlnet = get_dir_or_set_default('path_controlnet', '../models/controlnet/') path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vision/') path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion') +path_wildcards = get_dir_or_set_default('path_wildcards', '../wildcards/') path_outputs = get_path_output() @@ -508,22 +509,26 @@ with open(config_example_path, "w", encoding="utf-8") as json_file: model_filenames = [] lora_filenames = [] +wildcard_filenames = [] + sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors' sdxl_lightning_lora = 'sdxl_lightning_4step_lora.safetensors' -def get_model_filenames(folder_paths, name_filter=None): - extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch'] +def get_model_filenames(folder_paths, extensions=None, name_filter=None): + if extensions is None: + extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch'] files = [] for folder in folder_paths: files += get_files_from_folder(folder, extensions, name_filter) return files -def update_all_model_names(): - global model_filenames, lora_filenames +def update_files(): + global model_filenames, lora_filenames, wildcard_filenames model_filenames = get_model_filenames(paths_checkpoints) lora_filenames = get_model_filenames(paths_loras) + wildcard_filenames = get_files_from_folder(path_wildcards, ['.txt']) return @@ -647,4 +652,4 @@ def downloading_upscale_model(): return os.path.join(path_upscale_models, 'fooocus_upscaler_s409985e5.bin') -update_all_model_names() +update_files() diff --git a/modules/sdxl_styles.py b/modules/sdxl_styles.py index 2a310024..0b07339c 100644 --- a/modules/sdxl_styles.py +++ b/modules/sdxl_styles.py @@ -2,13 +2,12 @@ import os import re import json import math +import modules.config from modules.util import get_files_from_folder - # cannot use modules.config - validators causing circular imports styles_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../sdxl_styles/')) -wildcards_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../wildcards/')) wildcards_max_bfs_depth = 64 @@ -60,7 +59,7 @@ def apply_style(style, positive): return p.replace('{prompt}', positive).splitlines(), n.splitlines() -def apply_wildcards(wildcard_text, rng, directory=wildcards_path): +def apply_wildcards(wildcard_text, rng): for _ in range(wildcards_max_bfs_depth): placeholders = re.findall(r'__([\w-]+)__', wildcard_text) if len(placeholders) == 0: @@ -69,7 +68,8 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path): print(f'[Wildcards] processing: {wildcard_text}') for placeholder in placeholders: try: - words = open(os.path.join(directory, f'{placeholder}.txt'), encoding='utf-8').read().splitlines() + matches = [x for x in modules.config.wildcard_filenames if os.path.splitext(os.path.basename(x))[0] == placeholder] + words = open(os.path.join(modules.config.path_wildcards, matches[0]), 
encoding='utf-8').read().splitlines() words = [x for x in words if x != ''] assert len(words) > 0 wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1) @@ -82,8 +82,9 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path): print(f'[Wildcards] BFS stack overflow. Current text: {wildcard_text}') return wildcard_text + def get_words(arrays, totalMult, index): - if(len(arrays) == 1): + if len(arrays) == 1: return [arrays[0].split(',')[index]] else: words = arrays[0].split(',') diff --git a/modules/util.py b/modules/util.py index c7923ec8..9c432eb6 100644 --- a/modules/util.py +++ b/modules/util.py @@ -163,7 +163,7 @@ def generate_temp_filename(folder='./outputs/', extension='png'): return date_string, os.path.abspath(result), filename -def get_files_from_folder(folder_path, exensions=None, name_filter=None): +def get_files_from_folder(folder_path, extensions=None, name_filter=None): if not os.path.isdir(folder_path): raise ValueError("Folder path is not a valid directory.") @@ -175,7 +175,7 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None): relative_path = "" for filename in sorted(files, key=lambda s: s.casefold()): _, file_extension = os.path.splitext(filename) - if (exensions is None or file_extension.lower() in exensions) and (name_filter is None or name_filter in _): + if (extensions is None or file_extension.lower() in extensions) and (name_filter is None or name_filter in _): path = os.path.join(relative_path, filename) filenames.append(path) diff --git a/webui.py b/webui.py index c0f1ec91..808db72d 100644 --- a/webui.py +++ b/webui.py @@ -366,7 +366,7 @@ with shared.gradio_root: lora_ctrls += [lora_enabled, lora_model, lora_weight] with gr.Row(): - model_refresh = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button') + refresh_files = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button') with gr.Tab(label='Advanced'): guidance_scale = gr.Slider(label='Guidance Scale', minimum=1.0, maximum=30.0, step=0.01, value=modules.config.default_cfg_scale, @@ -512,19 +512,18 @@ with shared.gradio_root: def dev_mode_checked(r): return gr.update(visible=r) - dev_mode.change(dev_mode_checked, inputs=[dev_mode], outputs=[dev_tools], queue=False, show_progress=False) - def model_refresh_clicked(): - modules.config.update_all_model_names() + def refresh_files_clicked(): + modules.config.update_files() results = [gr.update(choices=modules.config.model_filenames)] results += [gr.update(choices=['None'] + modules.config.model_filenames)] for i in range(modules.config.default_max_lora_number): results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()] return results - model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls, + refresh_files.click(refresh_files_clicked, [], [base_model, refiner_model] + lora_ctrls, queue=False, show_progress=False) performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 + From bc9c58608291592504613129e74eabdd57339847 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sun, 10 Mar 2024 23:13:09 +0100 Subject: [PATCH 072/103] fix: use correct method call for interrupt_current_processing (#2506) actually achieves the same result, stopping the task --- modules/async_worker.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index a8661f4d..83fc3912 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -787,7 +787,7 @@ def worker(): try: if async_task.last_stop is not False: - ldm_patched.model_management.interrupt_current_processing() + ldm_patched.modules.model_management.interrupt_current_processing() positive_cond, negative_cond = task['c'], task['uc'] if 'cn' in goals: From ead24c9361337a1ab52720c85b3daab431b00f24 Mon Sep 17 00:00:00 2001 From: xhoxye <129571231+xhoxye@users.noreply.github.com> Date: Mon, 11 Mar 2024 06:18:36 +0800 Subject: [PATCH 073/103] feat: read wildcards in order (wildcard enhancement, switchable sequential reading) (#1761) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Wildcard enhancement, switchable sequential reading: a checkbox toggles the wildcard reading method; unchecked (default) reads a random line, checked reads lines in order, reusing the same seed. * Code contributed by 刁璐璐 * update * Update async_worker.py * refactor: rename read_wildcard_in_order_checkbox to read_wildcard_in_order * fix: use correct method call for interrupt_current_processing actually achieves the same result, stopping the task * refactor: move checkbox to developer debug mode, rename to plural below disable seed increment * refactor: code cleanup, separate code for disable_seed_increment * i18n: add translation for checkbox text --------- Co-authored-by: Manuel Schmid --- language/en.json | 1 + modules/async_worker.py | 12 +++++++----- modules/sdxl_styles.py | 7 +++++-- webui.py | 4 +++- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/language/en.json b/language/en.json index f61255c9..241c5d54 100644 --- a/language/en.json +++ b/language/en.json @@ -50,6 +50,7 @@ "Seed": "Seed", "Disable seed increment": "Disable seed increment", "Disable automatic seed increment when image number is > 1.": "Disable automatic seed increment when image number is > 1.", + "Read wildcards in order": "Read wildcards in order", "\ud83d\udcda History Log": "\uD83D\uDCDA History Log", "Image Style": "Image Style", "Fooocus V2": "Fooocus V2", diff --git a/modules/async_worker.py b/modules/async_worker.py index c7df14f5..c5953a58 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -1,4 +1,5 @@ import threading +import re from modules.patch import PatchSettings, patch_settings, patch_all patch_all() @@ -148,6 +149,7 @@ def worker(): image_number = args.pop() output_format = args.pop() image_seed = args.pop() + read_wildcards_in_order = args.pop() sharpness = args.pop() guidance_scale = args.pop() base_model_name = args.pop() @@ -441,16 +443,16 @@ def worker(): for i in range(image_number): if disable_seed_increment: - task_seed = seed + task_seed = seed % (constants.MAX_SEED + 1) else: task_seed = (seed + i) % (constants.MAX_SEED + 1) # randint is inclusive, % is not task_rng = random.Random(task_seed) # may bind to inpaint noise in the future - task_prompt = apply_wildcards(prompt, task_rng) + task_prompt = apply_wildcards(prompt, task_rng, i, read_wildcards_in_order) task_prompt = apply_arrays(task_prompt, i) - task_negative_prompt = apply_wildcards(negative_prompt, task_rng) - task_extra_positive_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_positive_prompts] - task_extra_negative_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_negative_prompts] + task_negative_prompt = apply_wildcards(negative_prompt, task_rng, i,
read_wildcards_in_order) + task_extra_positive_prompts = [apply_wildcards(pmt, task_rng, i, read_wildcards_in_order) for pmt in extra_positive_prompts] + task_extra_negative_prompts = [apply_wildcards(pmt, task_rng, i, read_wildcards_in_order) for pmt in extra_negative_prompts] positive_basic_workloads = [] negative_basic_workloads = [] diff --git a/modules/sdxl_styles.py b/modules/sdxl_styles.py index 0b07339c..77ad6b57 100644 --- a/modules/sdxl_styles.py +++ b/modules/sdxl_styles.py @@ -59,7 +59,7 @@ def apply_style(style, positive): return p.replace('{prompt}', positive).splitlines(), n.splitlines() -def apply_wildcards(wildcard_text, rng): +def apply_wildcards(wildcard_text, rng, i, read_wildcards_in_order): for _ in range(wildcards_max_bfs_depth): placeholders = re.findall(r'__([\w-]+)__', wildcard_text) if len(placeholders) == 0: @@ -72,7 +72,10 @@ def apply_wildcards(wildcard_text, rng): words = open(os.path.join(modules.config.path_wildcards, matches[0]), encoding='utf-8').read().splitlines() words = [x for x in words if x != ''] assert len(words) > 0 - wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1) + if read_wildcards_in_order: + wildcard_text = wildcard_text.replace(f'__{placeholder}__', words[i % len(words)], 1) + else: + wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1) except: print(f'[Wildcards] Warning: {placeholder}.txt missing or empty. ' f'Using "{placeholder}" as a normal word.') diff --git a/webui.py b/webui.py index 808db72d..ee7edc2d 100644 --- a/webui.py +++ b/webui.py @@ -434,6 +434,7 @@ with shared.gradio_root: disable_seed_increment = gr.Checkbox(label='Disable seed increment', info='Disable automatic seed increment when image number is > 1.', value=False) + read_wildcards_in_order = gr.Checkbox(label="Read wildcards in order", value=False) if not args_manager.args.disable_metadata: save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images, @@ -578,7 +579,8 @@ with shared.gradio_root: ctrls = [currentTask, generate_image_grid] ctrls += [ prompt, negative_prompt, style_selections, - performance_selection, aspect_ratios_selection, image_number, output_format, image_seed, sharpness, guidance_scale + performance_selection, aspect_ratios_selection, image_number, output_format, image_seed, + read_wildcards_in_order, sharpness, guidance_scale ] ctrls += [base_model, refiner_model, refiner_switch] + lora_ctrls From 84e3124c37e26acb39371b73e00edbb611335cd9 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 11 Mar 2024 00:47:31 +0100 Subject: [PATCH 074/103] i18n: add translation for lightning --- language/en.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/language/en.json b/language/en.json index 241c5d54..3e42fff0 100644 --- a/language/en.json +++ b/language/en.json @@ -41,6 +41,8 @@ "Performance": "Performance", "Speed": "Speed", "Quality": "Quality", + "Extreme Speed": "Extreme Speed", + "Lightning": "Lightning", "Aspect Ratios": "Aspect Ratios", "width \u00d7 height": "width \u00d7 height", "Image Number": "Image Number", @@ -368,7 +370,6 @@ "B2": "B2", "S1": "S1", "S2": "S2", - "Extreme Speed": "Extreme Speed", "\uD83D\uDD0E Type here to search styles ...": "\uD83D\uDD0E Type here to search styles ...", "Type prompt here.": "Type prompt here.", "Outpaint Expansion Direction:": "Outpaint Expansion Direction:", From 2831dc70a7fb772f077776d7e92208af2ff62c7b Mon Sep 17 00:00:00 2001 From: hswlab Date: Mon, 11 
Mar 2024 16:35:03 +0100 Subject: [PATCH 075/103] feat: use scrollable 2 column layout for styles (#1883) * Styles Grouping/Sorting #1770 * Update css/style.css Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> * Update javascript/script.js Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> * feat: use standard padding again --------- Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Co-authored-by: Manuel Schmid --- css/style.css | 45 ++++++++++++++++++++++++++++++++++++++++++++ javascript/script.js | 16 +++++++++++----- webui.py | 2 +- 3 files changed, 57 insertions(+), 6 deletions(-) diff --git a/css/style.css b/css/style.css index 010c8e7f..3cc1e5e5 100644 --- a/css/style.css +++ b/css/style.css @@ -218,3 +218,48 @@ #stylePreviewOverlay.lower-half { transform: translate(-140px, -140px); } + +/* scrollable box for style selections */ +.contain .tabs { + height: 100%; +} + +.contain .tabs .tabitem.style_selections_tab { + height: 100%; +} + +.contain .tabs .tabitem.style_selections_tab > div:first-child { + height: 100%; +} + +.contain .tabs .tabitem.style_selections_tab .style_selections { + min-height: 200px; + height: 100%; +} + +.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] { + position: absolute; /* remove this to disable scrolling within the checkbox-group */ + overflow: auto; + padding-right: 2px; + max-height: 100%; +} + +.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] label { + /* max-width: calc(35% - 15px) !important; */ /* add this to enable 3 columns layout */ + flex: calc(50% - 5px) !important; +} + +.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] label span { + /* white-space:nowrap; */ /* add this to disable text wrapping (better choice for 3 columns layout) */ + overflow: hidden; + text-overflow: ellipsis; +} + +/* styles preview tooltip */ +.preview-tooltip { + background-color: #fff8; + font-family: monospace; + text-align: center; + border-radius-top: 5px; + display: none; /* remove this to enable tooltip in preview image */ +} \ No newline at end of file diff --git a/javascript/script.js b/javascript/script.js index 8f4cac58..9aa0b5c1 100644 --- a/javascript/script.js +++ b/javascript/script.js @@ -150,9 +150,12 @@ function initStylePreviewOverlay() { let overlayVisible = false; const samplesPath = document.querySelector("meta[name='samples-path']").getAttribute("content") const overlay = document.createElement('div'); + const tooltip = document.createElement('div'); + tooltip.className = 'preview-tooltip'; + overlay.appendChild(tooltip); overlay.id = 'stylePreviewOverlay'; document.body.appendChild(overlay); - document.addEventListener('mouseover', function(e) { + document.addEventListener('mouseover', function (e) { const label = e.target.closest('.style_selections label'); if (!label) return; label.removeEventListener("mouseout", onMouseLeave); @@ -162,9 +165,12 @@ function initStylePreviewOverlay() { const originalText = label.querySelector("span").getAttribute("data-original-text"); const name = originalText || label.querySelector("span").textContent; overlay.style.backgroundImage = `url("${samplesPath.replace( - "fooocus_v2", - name.toLowerCase().replaceAll(" ", "_") + "fooocus_v2", + name.toLowerCase().replaceAll(" ", "_") ).replaceAll("\\", "\\\\")}")`; + + tooltip.textContent = name; + function onMouseLeave() { overlayVisible = false; 
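Looking past the styling diff for a moment, patch 073 above changed two worker behaviors that are easy to miss: the fixed seed is now normalized into the valid range even when seed increment is disabled, and the image index `i` drives deterministic wildcard selection when "Read wildcards in order" is checked. Both rules in isolation; the `MAX_SEED` value below is an assumption, the real constant is defined in modules/constants.py:

```python
import random

MAX_SEED = 2 ** 63 - 1  # assumed value; see modules/constants.py

def pick_task_seed(seed: int, i: int, disable_seed_increment: bool) -> int:
    if disable_seed_increment:
        return seed % (MAX_SEED + 1)    # every image reuses the same seed
    return (seed + i) % (MAX_SEED + 1)  # randint is inclusive, % is not

def pick_wildcard_word(words: list, rng: random.Random, i: int, in_order: bool) -> str:
    # Checked: step through the wildcard file line by line across the batch;
    # unchecked (default): draw a random line from the seeded RNG.
    return words[i % len(words)] if in_order else rng.choice(words)
```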
overlay.style.opacity = "0"; @@ -172,8 +178,8 @@ function initStylePreviewOverlay() { label.removeEventListener("mouseout", onMouseLeave); } }); - document.addEventListener('mousemove', function(e) { - if(!overlayVisible) return; + document.addEventListener('mousemove', function (e) { + if (!overlayVisible) return; overlay.style.left = `${e.clientX}px`; overlay.style.top = `${e.clientY}px`; overlay.className = e.clientY > window.innerHeight / 2 ? "lower-half" : "upper-half"; diff --git a/webui.py b/webui.py index ee7edc2d..832cc194 100644 --- a/webui.py +++ b/webui.py @@ -300,7 +300,7 @@ with shared.gradio_root: history_link = gr.HTML() shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False) - with gr.Tab(label='Style'): + with gr.Tab(label='Style', elem_classes=['style_selections_tab']): style_sorter.try_load_sorted_styles( style_names=legal_style_names, default_selected=modules.config.default_styles) From 39669453cda5bbbbdb322246beda195d0ae46af6 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Mon, 11 Mar 2024 17:59:58 +0100 Subject: [PATCH 076/103] feat: allow to add disabled LoRAs in config on application start (#2507) add LoRA checkbox enable/disable handling to all necessary occurrences --- modules/config.py | 7 ++++++- modules/core.py | 13 ++++++++----- presets/anime.json | 5 +++++ presets/default.json | 5 +++++ presets/lcm.json | 5 +++++ presets/realistic.json | 5 +++++ presets/sai.json | 5 +++++ webui.py | 8 ++++---- 8 files changed, 43 insertions(+), 10 deletions(-) diff --git a/modules/config.py b/modules/config.py index 83590a24..8fec8e05 100644 --- a/modules/config.py +++ b/modules/config.py @@ -275,27 +275,32 @@ default_loras = get_config_item_or_set_default( key='default_loras', default_value=[ [ + True, "None", 1.0 ], [ + True, "None", 1.0 ], [ + True, "None", 1.0 ], [ + True, "None", 1.0 ], [ + True, "None", 1.0 ] ], - validator=lambda x: isinstance(x, list) and all(len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number) for y in x) + validator=lambda x: isinstance(x, list) and all(len(y) == 3 and isinstance(y[0], bool) and isinstance(y[1], str) and isinstance(y[2], numbers.Number) for y in x) ) default_max_lora_number = get_config_item_or_set_default( key='default_max_lora_number', diff --git a/modules/core.py b/modules/core.py index bfc44966..e8e19397 100644 --- a/modules/core.py +++ b/modules/core.py @@ -73,14 +73,17 @@ class StableDiffusionModel: loras_to_load = [] - for name, weight in loras: - if name == 'None': + for enabled, filename, weight in loras: + if not enabled: continue - if os.path.exists(name): - lora_filename = name + if filename == 'None': + continue + + if os.path.exists(filename): + lora_filename = filename else: - lora_filename = get_file_from_folder_list(name, modules.config.paths_loras) + lora_filename = get_file_from_folder_list(filename, modules.config.paths_loras) if not os.path.exists(lora_filename): print(f'Lora file not found: {lora_filename}') diff --git a/presets/anime.json b/presets/anime.json index 8bd2813b..1f2b26a9 100644 --- a/presets/anime.json +++ b/presets/anime.json @@ -4,22 +4,27 @@ "default_refiner_switch": 0.5, "default_loras": [ [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ] diff --git a/presets/default.json b/presets/default.json index 7930c92f..963f7a63 100644 --- a/presets/default.json +++ b/presets/default.json @@ -4,22 +4,27 @@ 
"default_refiner_switch": 0.5, "default_loras": [ [ + false, "sd_xl_offset_example-lora_1.0.safetensors", 0.1 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ] diff --git a/presets/lcm.json b/presets/lcm.json index 3897f881..6713fdd5 100644 --- a/presets/lcm.json +++ b/presets/lcm.json @@ -4,22 +4,27 @@ "default_refiner_switch": 0.5, "default_loras": [ [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ] diff --git a/presets/realistic.json b/presets/realistic.json index 7799c96a..95f8b6e0 100644 --- a/presets/realistic.json +++ b/presets/realistic.json @@ -4,22 +4,27 @@ "default_refiner_switch": 0.5, "default_loras": [ [ + true, "SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors", 0.25 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ] diff --git a/presets/sai.json b/presets/sai.json index fecf047b..918028f3 100644 --- a/presets/sai.json +++ b/presets/sai.json @@ -4,22 +4,27 @@ "default_refiner_switch": 0.75, "default_loras": [ [ + true, "sd_xl_offset_example-lora_1.0.safetensors", 0.5 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ] diff --git a/webui.py b/webui.py index 832cc194..7fe10f15 100644 --- a/webui.py +++ b/webui.py @@ -353,15 +353,15 @@ with shared.gradio_root: with gr.Group(): lora_ctrls = [] - for i, (n, v) in enumerate(modules.config.default_loras): + for i, (enabled, filename, weight) in enumerate(modules.config.default_loras): with gr.Row(): - lora_enabled = gr.Checkbox(label='Enable', value=True, + lora_enabled = gr.Checkbox(label='Enable', value=enabled, elem_classes=['lora_enable', 'min_check'], scale=1) lora_model = gr.Dropdown(label=f'LoRA {i + 1}', - choices=['None'] + modules.config.lora_filenames, value=n, + choices=['None'] + modules.config.lora_filenames, value=filename, elem_classes='lora_model', scale=5) lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight, - maximum=modules.config.default_loras_max_weight, step=0.01, value=v, + maximum=modules.config.default_loras_max_weight, step=0.01, value=weight, elem_classes='lora_weight', scale=5) lora_ctrls += [lora_enabled, lora_model, lora_weight] From d57afc88a48359bc1642c2ae30a091f0426eff43 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 11 Mar 2024 18:26:04 +0100 Subject: [PATCH 077/103] feat: merge webui css into one file --- css/style.css | 131 +++++++++++++++++++++++++++++++++++++++++++++++ modules/html.py | 133 ------------------------------------------------ webui.py | 4 +- 3 files changed, 132 insertions(+), 136 deletions(-) diff --git a/css/style.css b/css/style.css index 3cc1e5e5..c702a725 100644 --- a/css/style.css +++ b/css/style.css @@ -1,5 +1,136 @@ /* based on https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/v1.6.0/style.css */ +.loader-container { + display: flex; /* Use flex to align items horizontally */ + align-items: center; /* Center items vertically within the container */ + white-space: nowrap; /* Prevent line breaks within the container */ +} + +.loader { + border: 8px solid #f3f3f3; /* Light grey */ + border-top: 8px solid #3498db; /* Blue */ + border-radius: 50%; + width: 30px; + height: 30px; + animation: spin 2s linear infinite; +} + +@keyframes spin { + 0% { transform: rotate(0deg); } + 100% { transform: rotate(360deg); } +} + +/* Style the progress bar */ +progress { + appearance: none; /* Remove default styling 
*/ + height: 20px; /* Set the height of the progress bar */ + border-radius: 5px; /* Round the corners of the progress bar */ + background-color: #f3f3f3; /* Light grey background */ + width: 100%; +} + +/* Style the progress bar container */ +.progress-container { + margin-left: 20px; + margin-right: 20px; + flex-grow: 1; /* Allow the progress container to take up remaining space */ +} + +/* Set the color of the progress bar fill */ +progress::-webkit-progress-value { + background-color: #3498db; /* Blue color for the fill */ +} + +progress::-moz-progress-bar { + background-color: #3498db; /* Blue color for the fill in Firefox */ +} + +/* Style the text on the progress bar */ +progress::after { + content: attr(value '%'); /* Display the progress value followed by '%' */ + position: absolute; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); + color: white; /* Set text color */ + font-size: 14px; /* Set font size */ +} + +/* Style other texts */ +.loader-container > span { + margin-left: 5px; /* Add spacing between the progress bar and the text */ +} + +.progress-bar > .generating { + display: none !important; +} + +.progress-bar{ + height: 30px !important; +} + +.type_row{ + height: 80px !important; +} + +.type_row_half{ + height: 32px !important; +} + +.scroll-hide{ + resize: none !important; +} + +.refresh_button{ + border: none !important; + background: none !important; + font-size: none !important; + box-shadow: none !important; +} + +.advanced_check_row{ + width: 250px !important; +} + +.min_check{ + min-width: min(1px, 100%) !important; +} + +.resizable_area { + resize: vertical; + overflow: auto !important; +} + +.aspect_ratios label { + width: 140px !important; +} + +.aspect_ratios label span { + white-space: nowrap !important; +} + +.aspect_ratios label input { + margin-left: -5px !important; +} + +.lora_enable label { + height: 100%; +} + +.lora_enable label input { + margin: auto; +} + +.lora_enable label span { + display: none; +} + +@-moz-document url-prefix() { + .lora_weight input[type=number] { + width: 80px; + } +} + #context-menu{ z-index:9999; position:absolute; diff --git a/modules/html.py b/modules/html.py index 769151a9..25771cb9 100644 --- a/modules/html.py +++ b/modules/html.py @@ -1,136 +1,3 @@ -css = ''' -.loader-container { - display: flex; /* Use flex to align items horizontally */ - align-items: center; /* Center items vertically within the container */ - white-space: nowrap; /* Prevent line breaks within the container */ -} - -.loader { - border: 8px solid #f3f3f3; /* Light grey */ - border-top: 8px solid #3498db; /* Blue */ - border-radius: 50%; - width: 30px; - height: 30px; - animation: spin 2s linear infinite; -} - -@keyframes spin { - 0% { transform: rotate(0deg); } - 100% { transform: rotate(360deg); } -} - -/* Style the progress bar */ -progress { - appearance: none; /* Remove default styling */ - height: 20px; /* Set the height of the progress bar */ - border-radius: 5px; /* Round the corners of the progress bar */ - background-color: #f3f3f3; /* Light grey background */ - width: 100%; -} - -/* Style the progress bar container */ -.progress-container { - margin-left: 20px; - margin-right: 20px; - flex-grow: 1; /* Allow the progress container to take up remaining space */ -} - -/* Set the color of the progress bar fill */ -progress::-webkit-progress-value { - background-color: #3498db; /* Blue color for the fill */ -} - -progress::-moz-progress-bar { - background-color: #3498db; /* Blue color for the fill in Firefox */ -} - -/* Style the 
text on the progress bar */ -progress::after { - content: attr(value '%'); /* Display the progress value followed by '%' */ - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); - color: white; /* Set text color */ - font-size: 14px; /* Set font size */ -} - -/* Style other texts */ -.loader-container > span { - margin-left: 5px; /* Add spacing between the progress bar and the text */ -} - -.progress-bar > .generating { - display: none !important; -} - -.progress-bar{ - height: 30px !important; -} - -.type_row{ - height: 80px !important; -} - -.type_row_half{ - height: 32px !important; -} - -.scroll-hide{ - resize: none !important; -} - -.refresh_button{ - border: none !important; - background: none !important; - font-size: none !important; - box-shadow: none !important; -} - -.advanced_check_row{ - width: 250px !important; -} - -.min_check{ - min-width: min(1px, 100%) !important; -} - -.resizable_area { - resize: vertical; - overflow: auto !important; -} - -.aspect_ratios label { - width: 140px !important; -} - -.aspect_ratios label span { - white-space: nowrap !important; -} - -.aspect_ratios label input { - margin-left: -5px !important; -} - -.lora_enable label { - height: 100%; -} - -.lora_enable label input { - margin: auto; -} - -.lora_enable label span { - display: none; -} - -@-moz-document url-prefix() { - .lora_weight input[type=number] { - width: 80px; - } -} - -''' progress_html = '''
diff --git a/webui.py b/webui.py index 7fe10f15..d68ade62 100644 --- a/webui.py +++ b/webui.py @@ -91,9 +91,7 @@ title = f'Fooocus {fooocus_version.version}' if isinstance(args_manager.args.preset, str): title += ' ' + args_manager.args.preset -shared.gradio_root = gr.Blocks( - title=title, - css=modules.html.css).queue() +shared.gradio_root = gr.Blocks(title=title).queue() with shared.gradio_root: currentTask = gr.State(worker.AsyncTask(args=[])) From 532401df766af637488e194f39fe1cec1ddd4739 Mon Sep 17 00:00:00 2001 From: Giuseppe Speranza Date: Mon, 11 Mar 2024 19:58:25 +0100 Subject: [PATCH 078/103] fix: prioritize VRAM over RAM in Colab, preventing out of memory issues (#1710) * colab: balance the use of RAM enables the use of VRAM memory so as not to saturate the system RAM * feat: use --always-high-vram by default for Colab, adjust readme --------- Co-authored-by: Manuel Schmid --- fooocus_colab.ipynb | 2 +- readme.md | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/fooocus_colab.ipynb b/fooocus_colab.ipynb index 205dac55..7fa98879 100644 --- a/fooocus_colab.ipynb +++ b/fooocus_colab.ipynb @@ -12,7 +12,7 @@ "%cd /content\n", "!git clone https://github.com/lllyasviel/Fooocus.git\n", "%cd /content/Fooocus\n", - "!python entry_with_update.py --share\n" + "!python entry_with_update.py --share --always-high-vram\n" ] } ], diff --git a/readme.md b/readme.md index 0bfee5b4..4e47ac08 100644 --- a/readme.md +++ b/readme.md @@ -115,16 +115,18 @@ See also the common problems and troubleshoots [here](troubleshoot.md). ### Colab -(Last tested - 2023 Dec 12) +(Last tested - 2024 Mar 11) | Colab | Info | --- | --- | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lllyasviel/Fooocus/blob/main/fooocus_colab.ipynb) | Fooocus Official -In Colab, you can modify the last line to `!python entry_with_update.py --share` or `!python entry_with_update.py --preset anime --share` or `!python entry_with_update.py --preset realistic --share` for Fooocus Default/Anime/Realistic Edition. +In Colab, you can modify the last line to `!python entry_with_update.py --share --always-high-vram` or `!python entry_with_update.py --share --always-high-vram --preset anime` or `!python entry_with_update.py --share --always-high-vram --preset realistic` for Fooocus Default/Anime/Realistic Edition. Note that this Colab will disable refiner by default because Colab free's resources are relatively limited (and some "big" features like image prompt may cause free-tier Colab to disconnect). We make sure that basic text-to-image is always working on free-tier Colab. +Using `--always-high-vram` shifts resource allocation from RAM to VRAM and achieves the overall best balance between performance, flexibility and stability on the default T4 instance. + Thanks to [camenduru](https://github.com/camenduru)! 
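Patch 079 below extracts the LoRA triplet filtering introduced in patch 076 into a shared helper in modules/util.py, so the worker and the default pipeline both consume plain `[filename, weight]` pairs. The helper as added, with a hypothetical input for illustration:

```python
def get_enabled_loras(loras: list) -> list:
    # [enabled, filename, weight] triplets -> [filename, weight] pairs
    return [[lora[1], lora[2]] for lora in loras if lora[0]]

default_loras = [
    [True, 'sd_xl_offset_example-lora_1.0.safetensors', 0.1],
    [False, 'None', 1.0],
]
assert get_enabled_loras(default_loras) == [['sd_xl_offset_example-lora_1.0.safetensors', 0.1]]
```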
### Linux (Using Anaconda) From 57a01865b99e3334fc83da25adc48ab989d853ab Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 11 Mar 2024 23:49:45 +0100 Subject: [PATCH 079/103] refactor: only use LoRA activate on handover to async worker, extract method --- modules/async_worker.py | 14 +++----------- modules/core.py | 5 +---- modules/default_pipeline.py | 4 ++-- modules/util.py | 4 ++++ presets/lightning.json | 5 +++++ 5 files changed, 15 insertions(+), 17 deletions(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index c5953a58..ee997852 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -46,8 +46,8 @@ def worker(): from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion, apply_arrays from modules.private_logger import log from extras.expansion import safe_str - from modules.util import remove_empty_str, HWC3, resize_image, \ - get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, ordinal_suffix + from modules.util import remove_empty_str, HWC3, resize_image, get_image_shape_ceil, set_image_shape_ceil, \ + get_shape_ceil, resample_image, erode_or_dilate, ordinal_suffix, get_enabled_loras from modules.upscaler import perform_upscale from modules.flags import Performance from modules.meta_parser import get_metadata_parser, MetadataScheme @@ -124,14 +124,6 @@ def worker(): async_task.results = async_task.results + [wall] return - def apply_enabled_loras(loras): - enabled_loras = [] - for lora_enabled, lora_model, lora_weight in loras: - if lora_enabled: - enabled_loras.append([lora_model, lora_weight]) - - return enabled_loras - @torch.no_grad() @torch.inference_mode() def handler(async_task): @@ -155,7 +147,7 @@ def worker(): base_model_name = args.pop() refiner_model_name = args.pop() refiner_switch = args.pop() - loras = apply_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop()), ] for _ in range(modules.config.default_max_lora_number)]) + loras = get_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop())] for _ in range(modules.config.default_max_lora_number)]) input_image_checkbox = args.pop() current_tab = args.pop() uov_method = args.pop() diff --git a/modules/core.py b/modules/core.py index e8e19397..38ee8e8d 100644 --- a/modules/core.py +++ b/modules/core.py @@ -73,10 +73,7 @@ class StableDiffusionModel: loras_to_load = [] - for enabled, filename, weight in loras: - if not enabled: - continue - + for filename, weight in loras: if filename == 'None': continue diff --git a/modules/default_pipeline.py b/modules/default_pipeline.py index f8edfae1..190601ec 100644 --- a/modules/default_pipeline.py +++ b/modules/default_pipeline.py @@ -11,7 +11,7 @@ from extras.expansion import FooocusExpansion from ldm_patched.modules.model_base import SDXL, SDXLRefiner from modules.sample_hijack import clip_separate -from modules.util import get_file_from_folder_list +from modules.util import get_file_from_folder_list, get_enabled_loras model_base = core.StableDiffusionModel() @@ -254,7 +254,7 @@ def refresh_everything(refiner_model_name, base_model_name, loras, refresh_everything( refiner_model_name=modules.config.default_refiner_model_name, base_model_name=modules.config.default_base_model_name, - loras=modules.config.default_loras + loras=get_enabled_loras(modules.config.default_loras) ) diff --git a/modules/util.py b/modules/util.py index 9c432eb6..7c46d946 100644 --- a/modules/util.py +++ b/modules/util.py @@ -360,3 +360,7 @@ def makedirs_with_log(path): 
os.makedirs(path, exist_ok=True) except OSError as error: print(f'Directory {path} could not be created, reason: {error}') + + +def get_enabled_loras(loras: list) -> list: + return [[lora[1], lora[2]] for lora in loras if lora[0]] diff --git a/presets/lightning.json b/presets/lightning.json index 64249358..d1466c10 100644 --- a/presets/lightning.json +++ b/presets/lightning.json @@ -4,22 +4,27 @@ "default_refiner_switch": 0.5, "default_loras": [ [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ] From 6da0441cc75d2e40df76780109a2ea4bcb58c1c1 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Tue, 12 Mar 2024 23:13:38 +0100 Subject: [PATCH 080/103] fix: update xformers to 0.0.23 (#2517) WARNING[XFORMERS]: xFormers can't load C++/CUDA extensions. xFormers was built for: PyTorch 2.0.1+cu118 with CUDA 1108 (you have 2.1.0+cu121) Python 3.10.11 (you have 3.10.9) --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index 4269f1fc..f545c39e 100644 --- a/launch.py +++ b/launch.py @@ -42,7 +42,7 @@ def prepare_environment(): if TRY_INSTALL_XFORMERS: if REINSTALL_ALL or not is_installed("xformers"): - xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20') + xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.23') if platform.system() == "Windows": if platform.python_version().startswith("3.10"): run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True) From 4363dbc303f6c022bfeccb43c2b55f4a19fc96a5 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Wed, 13 Mar 2024 00:32:54 +0100 Subject: [PATCH 081/103] fix: revert testing change to default lora activation --- presets/default.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/presets/default.json b/presets/default.json index 963f7a63..d02bb8a4 100644 --- a/presets/default.json +++ b/presets/default.json @@ -4,7 +4,7 @@ "default_refiner_switch": 0.5, "default_loras": [ [ - false, + true, "sd_xl_offset_example-lora_1.0.safetensors", 0.1 ], From f51e0138e64a05b1d6ebe47ee1d5716fb700f7e4 Mon Sep 17 00:00:00 2001 From: josephrocca <1167575+josephrocca@users.noreply.github.com> Date: Wed, 13 Mar 2024 22:12:06 +0800 Subject: [PATCH 082/103] feat: update xformers to 0.0.23 in Dockerfile (#2519) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 2aea2810..b969cd0e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,7 +10,7 @@ RUN apt-get update -y && \ COPY requirements_docker.txt requirements_versions.txt /tmp/ RUN pip install --no-cache-dir -r /tmp/requirements_docker.txt -r /tmp/requirements_versions.txt && \ rm -f /tmp/requirements_docker.txt /tmp/requirements_versions.txt -RUN pip install --no-cache-dir xformers==0.0.22 --no-dependencies +RUN pip install --no-cache-dir xformers==0.0.23 --no-dependencies RUN curl -fsL -o /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2 https://cdn-media.huggingface.co/frpc-gradio-0.2/frpc_linux_amd64 && \ chmod +x /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2 From 9cd0366d300e6a70258c15e6cce6ae19c0f9b36b Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Fri, 15 Mar 2024 20:38:21 +0100 Subject: [PATCH 083/103] fix: parse seed as string to display correctly in metadata preview (#2536) --- modules/async_worker.py | 2 +- 1 file changed, 1 
From 6da0441cc75d2e40df76780109a2ea4bcb58c1c1 Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Tue, 12 Mar 2024 23:13:38 +0100
Subject: [PATCH 080/103] fix: update xformers to 0.0.23 (#2517)

WARNING[XFORMERS]: xFormers can't load C++/CUDA extensions.
xFormers was built for: PyTorch 2.0.1+cu118 with CUDA 1108 (you have 2.1.0+cu121)
Python 3.10.11 (you have 3.10.9)
---
 launch.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/launch.py b/launch.py
index 4269f1fc..f545c39e 100644
--- a/launch.py
+++ b/launch.py
@@ -42,7 +42,7 @@ def prepare_environment():

     if TRY_INSTALL_XFORMERS:
         if REINSTALL_ALL or not is_installed("xformers"):
-            xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
+            xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.23')
             if platform.system() == "Windows":
                 if platform.python_version().startswith("3.10"):
                     run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True)

From 4363dbc303f6c022bfeccb43c2b55f4a19fc96a5 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Wed, 13 Mar 2024 00:32:54 +0100
Subject: [PATCH 081/103] fix: revert testing change to default lora activation
---
 presets/default.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/presets/default.json b/presets/default.json
index 963f7a63..d02bb8a4 100644
--- a/presets/default.json
+++ b/presets/default.json
@@ -4,7 +4,7 @@
     "default_refiner_switch": 0.5,
     "default_loras": [
         [
-            false,
+            true,
             "sd_xl_offset_example-lora_1.0.safetensors",
             0.1
         ],

From f51e0138e64a05b1d6ebe47ee1d5716fb700f7e4 Mon Sep 17 00:00:00 2001
From: josephrocca <1167575+josephrocca@users.noreply.github.com>
Date: Wed, 13 Mar 2024 22:12:06 +0800
Subject: [PATCH 082/103] feat: update xformers to 0.0.23 in Dockerfile (#2519)
---
 Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index 2aea2810..b969cd0e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -10,7 +10,7 @@ RUN apt-get update -y && \
 COPY requirements_docker.txt requirements_versions.txt /tmp/
 RUN pip install --no-cache-dir -r /tmp/requirements_docker.txt -r /tmp/requirements_versions.txt && \
     rm -f /tmp/requirements_docker.txt /tmp/requirements_versions.txt
-RUN pip install --no-cache-dir xformers==0.0.22 --no-dependencies
+RUN pip install --no-cache-dir xformers==0.0.23 --no-dependencies
 RUN curl -fsL -o /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2 https://cdn-media.huggingface.co/frpc-gradio-0.2/frpc_linux_amd64 && \
     chmod +x /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2

From 9cd0366d300e6a70258c15e6cce6ae19c0f9b36b Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Fri, 15 Mar 2024 20:38:21 +0100
Subject: [PATCH 083/103] fix: parse seed as string to display correctly in metadata preview (#2536)
---
 modules/async_worker.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index 83fc3912..62480f3f 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -856,7 +856,7 @@ def worker():

             d.append(('Sampler', 'sampler', sampler_name))
             d.append(('Scheduler', 'scheduler', scheduler_name))
-            d.append(('Seed', 'seed', task['task_seed']))
+            d.append(('Seed', 'seed', str(task['task_seed'])))

             if freeu_enabled:
                 d.append(('FreeU', 'freeu', str((freeu_b1, freeu_b2, freeu_s1, freeu_s2))))

From 0da614f7e1072ac5dd4528273a624c09d01323ef Mon Sep 17 00:00:00 2001
From: Zxilly
Date: Sat, 16 Mar 2024 03:51:10 +0800
Subject: [PATCH 084/103] feat: allow users to add custom preset without blocking automatic update (#2520)
---
 presets/.gitignore | 6 ++++++
 1 file changed, 6 insertions(+)
 create mode 100644 presets/.gitignore

diff --git a/presets/.gitignore b/presets/.gitignore
new file mode 100644
index 00000000..481930c5
--- /dev/null
+++ b/presets/.gitignore
@@ -0,0 +1,6 @@
+*.json
+!anime.json
+!default.json
+!lcm.json
+!realistic.json
+!sai.json
\ No newline at end of file

From 4a44be36fd61aeb2e20fd7f2e2f639a1acab7d20 Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Fri, 15 Mar 2024 22:04:27 +0100
Subject: [PATCH 085/103] feat: add preset selection to Gradio UI (session based) (#1570)

* add preset selection
uses meta parsing to set presets in user session (UI elements only)
* add LoRA handling
* use default config as fallback value
* add preset refresh on "Refresh All Files" click
* add special handling for default_styles and default_aspect_ratio
* sort styles after preset change
* code cleanup
* download missing models from preset
* set default refiner to "None" in preset realistic
* use state_is_generating for preset selection change
* DRY output parameter handling
* feat: add argument --disable-preset-selection
useful for cloud provisioning to prevent model switches and keep models loaded
* feat: keep prompt when not set in preset, use more robust syntax
* fix: add default return values when preset download is disabled
https://github.com/mashb1t/Fooocus/issues/20
* feat: add translation for preset label
* refactor: unify preset loading methods in config
* refactor: code cleanup
---
 args_manager.py        |   3 ++
 language/en.json       |   1 +
 launch.py              |  28 ++++++-----
 modules/config.py      | 105 +++++++++++++++++++++++++----------------
 modules/meta_parser.py |   7 ++-
 presets/realistic.json |   2 +-
 webui.py               |  57 +++++++++++++++++-----
 7 files changed, 133 insertions(+), 70 deletions(-)

diff --git a/args_manager.py b/args_manager.py
index 8c3e1918..6a3ae9dc 100644
--- a/args_manager.py
+++ b/args_manager.py
@@ -4,7 +4,10 @@ import os
 from tempfile import gettempdir

 args_parser.parser.add_argument("--share", action='store_true', help="Set whether to share on Gradio.")
+
 args_parser.parser.add_argument("--preset", type=str, default=None, help="Apply specified UI preset.")
+args_parser.parser.add_argument("--disable-preset-selection", action='store_true',
+                                help="Disables preset selection in Gradio.")

 args_parser.parser.add_argument("--language", type=str, default='default',
                                 help="Translate UI using json files in [language] folder. "
" diff --git a/language/en.json b/language/en.json index 3e42fff0..0f97e6e9 100644 --- a/language/en.json +++ b/language/en.json @@ -38,6 +38,7 @@ "* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)": "* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)", "Setting": "Setting", "Style": "Style", + "Preset": "Preset", "Performance": "Performance", "Speed": "Speed", "Quality": "Quality", diff --git a/launch.py b/launch.py index 3cee7f9c..afa66705 100644 --- a/launch.py +++ b/launch.py @@ -93,7 +93,7 @@ if config.temp_path_cleanup_on_launch: print(f"[Cleanup] Failed to delete content of temp dir.") -def download_models(): +def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads): for file_name, url in vae_approx_filenames: load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name) @@ -105,30 +105,32 @@ def download_models(): if args.disable_preset_download: print('Skipped model download.') - return + return default_model, checkpoint_downloads if not args.always_download_new_model: - if not os.path.exists(os.path.join(config.paths_checkpoints[0], config.default_base_model_name)): - for alternative_model_name in config.previous_default_models: + if not os.path.exists(os.path.join(config.paths_checkpoints[0], default_model)): + for alternative_model_name in previous_default_models: if os.path.exists(os.path.join(config.paths_checkpoints[0], alternative_model_name)): - print(f'You do not have [{config.default_base_model_name}] but you have [{alternative_model_name}].') + print(f'You do not have [{default_model}] but you have [{alternative_model_name}].') print(f'Fooocus will use [{alternative_model_name}] to avoid downloading new models, ' - f'but you are not using latest models.') + f'but you are not using the latest models.') print('Use --always-download-new-model to avoid fallback and always get new models.') - config.checkpoint_downloads = {} - config.default_base_model_name = alternative_model_name + checkpoint_downloads = {} + default_model = alternative_model_name break - for file_name, url in config.checkpoint_downloads.items(): + for file_name, url in checkpoint_downloads.items(): load_file_from_url(url=url, model_dir=config.paths_checkpoints[0], file_name=file_name) - for file_name, url in config.embeddings_downloads.items(): + for file_name, url in embeddings_downloads.items(): load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name) - for file_name, url in config.lora_downloads.items(): + for file_name, url in lora_downloads.items(): load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name) - return + return default_model, checkpoint_downloads -download_models() +config.default_base_model_name, config.checkpoint_downloads = download_models( + config.default_base_model_name, config.previous_default_models, config.checkpoint_downloads, + config.embeddings_downloads, config.lora_downloads) from webui import * diff --git a/modules/config.py b/modules/config.py index 8fec8e05..c82f61c2 100644 --- a/modules/config.py +++ b/modules/config.py @@ -97,21 +97,44 @@ def try_load_deprecated_user_path_config(): try_load_deprecated_user_path_config() + +def get_presets(): + preset_folder = 'presets' + presets = ['initial'] + if not os.path.exists(preset_folder): + print('No presets found.') + return presets + + return presets + 
[f[:f.index('.json')] for f in os.listdir(preset_folder) if f.endswith('.json')] + + +def try_get_preset_content(preset): + if isinstance(preset, str): + preset_path = os.path.abspath(f'./presets/{preset}.json') + try: + if os.path.exists(preset_path): + with open(preset_path, "r", encoding="utf-8") as json_file: + json_content = json.load(json_file) + print(f'Loaded preset: {preset_path}') + return json_content + else: + raise FileNotFoundError + except Exception as e: + print(f'Load preset [{preset_path}] failed') + print(e) + return {} + + +try: + with open(os.path.abspath(f'./presets/default.json'), "r", encoding="utf-8") as json_file: + config_dict.update(json.load(json_file)) +except Exception as e: + print(f'Load default preset failed.') + print(e) + +available_presets = get_presets() preset = args_manager.args.preset - -if isinstance(preset, str): - preset_path = os.path.abspath(f'./presets/{preset}.json') - try: - if os.path.exists(preset_path): - with open(preset_path, "r", encoding="utf-8") as json_file: - config_dict.update(json.load(json_file)) - print(f'Loaded preset: {preset_path}') - else: - raise FileNotFoundError - except Exception as e: - print(f'Load preset [{preset_path}] failed') - print(e) - +config_dict.update(try_get_preset_content(preset)) def get_path_output() -> str: """ @@ -241,7 +264,7 @@ temp_path_cleanup_on_launch = get_config_item_or_set_default( default_value=True, validator=lambda x: isinstance(x, bool) ) -default_base_model_name = get_config_item_or_set_default( +default_base_model_name = default_model = get_config_item_or_set_default( key='default_model', default_value='model.safetensors', validator=lambda x: isinstance(x, str) @@ -251,7 +274,7 @@ previous_default_models = get_config_item_or_set_default( default_value=[], validator=lambda x: isinstance(x, list) and all(isinstance(k, str) for k in x) ) -default_refiner_model_name = get_config_item_or_set_default( +default_refiner_model_name = default_refiner = get_config_item_or_set_default( key='default_refiner', default_value='None', validator=lambda x: isinstance(x, str) @@ -451,29 +474,30 @@ example_inpaint_prompts = [[x] for x in example_inpaint_prompts] config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [['None', 1.0] for _ in range(default_max_lora_number - len(default_loras))] -possible_preset_keys = [ - "default_model", - "default_refiner", - "default_refiner_switch", - "default_loras_min_weight", - "default_loras_max_weight", - "default_loras", - "default_max_lora_number", - "default_cfg_scale", - "default_sample_sharpness", - "default_sampler", - "default_scheduler", - "default_performance", - "default_prompt", - "default_prompt_negative", - "default_styles", - "default_aspect_ratio", - "default_save_metadata_to_images", - "checkpoint_downloads", - "embeddings_downloads", - "lora_downloads", -] - +# mapping config to meta parameter +possible_preset_keys = { + "default_model": "base_model", + "default_refiner": "refiner_model", + "default_refiner_switch": "refiner_switch", + "previous_default_models": "previous_default_models", + "default_loras_min_weight": "default_loras_min_weight", + "default_loras_max_weight": "default_loras_max_weight", + "default_loras": "", + "default_cfg_scale": "guidance_scale", + "default_sample_sharpness": "sharpness", + "default_sampler": "sampler", + "default_scheduler": "scheduler", + "default_overwrite_step": "steps", + "default_performance": "performance", + "default_prompt": "prompt", + "default_prompt_negative": 
"negative_prompt", + "default_styles": "styles", + "default_aspect_ratio": "resolution", + "default_save_metadata_to_images": "default_save_metadata_to_images", + "checkpoint_downloads": "checkpoint_downloads", + "embeddings_downloads": "embeddings_downloads", + "lora_downloads": "lora_downloads" +} REWRITE_PRESET = False @@ -530,10 +554,11 @@ def get_model_filenames(folder_paths, extensions=None, name_filter=None): def update_files(): - global model_filenames, lora_filenames, wildcard_filenames + global model_filenames, lora_filenames, wildcard_filenames, available_presets model_filenames = get_model_filenames(paths_checkpoints) lora_filenames = get_model_filenames(paths_loras) wildcard_filenames = get_files_from_folder(path_wildcards, ['.txt']) + available_presets = get_presets() return diff --git a/modules/meta_parser.py b/modules/meta_parser.py index 546c093f..0cdbdf1f 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -210,9 +210,8 @@ def parse_meta_from_preset(preset_content): height = height[:height.index(" ")] preset_prepared[meta_key] = (width, height) else: - preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[ - settings_key] is not None else getattr(modules.config, settings_key) - + preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[settings_key] is not None else getattr(modules.config, settings_key) + if settings_key == "default_styles" or settings_key == "default_aspect_ratio": preset_prepared[meta_key] = str(preset_prepared[meta_key]) @@ -570,4 +569,4 @@ def get_exif(metadata: str | None, metadata_scheme: str): exif[0x0131] = 'Fooocus v' + fooocus_version.version # 0x927C = MakerNote exif[0x927C] = metadata_scheme - return exif \ No newline at end of file + return exif diff --git a/presets/realistic.json b/presets/realistic.json index 95f8b6e0..6db6d0b7 100644 --- a/presets/realistic.json +++ b/presets/realistic.json @@ -1,6 +1,6 @@ { "default_model": "realisticStockPhoto_v20.safetensors", - "default_refiner": "", + "default_refiner": "None", "default_refiner_switch": 0.5, "default_loras": [ [ diff --git a/webui.py b/webui.py index d68ade62..01c828df 100644 --- a/webui.py +++ b/webui.py @@ -15,6 +15,7 @@ import modules.style_sorter as style_sorter import modules.meta_parser import args_manager import copy +import launch from modules.sdxl_styles import legal_style_names from modules.private_logger import get_current_html_path @@ -252,6 +253,11 @@ with shared.gradio_root: with gr.Column(scale=1, visible=modules.config.default_advanced_checkbox) as advanced_column: with gr.Tab(label='Setting'): + if not args_manager.args.disable_preset_selection: + preset_selection = gr.Radio(label='Preset', + choices=modules.config.available_presets, + value=args_manager.args.preset if args_manager.args.preset else "initial", + interactive=True) performance_selection = gr.Radio(label='Performance', choices=flags.Performance.list(), value=modules.config.default_performance) @@ -518,13 +524,50 @@ with shared.gradio_root: modules.config.update_files() results = [gr.update(choices=modules.config.model_filenames)] results += [gr.update(choices=['None'] + modules.config.model_filenames)] + if not args_manager.args.disable_preset_selection: + results += [gr.update(choices=modules.config.available_presets)] for i in range(modules.config.default_max_lora_number): - results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()] + results += [gr.update(interactive=True), + 
gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()] return results - refresh_files.click(refresh_files_clicked, [], [base_model, refiner_model] + lora_ctrls, + refresh_files_output = [base_model, refiner_model] + if not args_manager.args.disable_preset_selection: + refresh_files_output += [preset_selection] + refresh_files.click(refresh_files_clicked, [], refresh_files_output + lora_ctrls, queue=False, show_progress=False) + state_is_generating = gr.State(False) + + load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections, + performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection, + overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive, + adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, base_model, + refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, image_seed, + generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls + + if not args_manager.args.disable_preset_selection: + def preset_selection_change(preset, is_generating): + preset_content = modules.config.try_get_preset_content(preset) if preset != 'initial' else {} + preset_prepared = modules.meta_parser.parse_meta_from_preset(preset_content) + + default_model = preset_prepared.get('base_model') + previous_default_models = preset_prepared.get('previous_default_models', []) + checkpoint_downloads = preset_prepared.get('checkpoint_downloads', {}) + embeddings_downloads = preset_prepared.get('embeddings_downloads', {}) + lora_downloads = preset_prepared.get('lora_downloads', {}) + + preset_prepared['base_model'], preset_prepared['lora_downloads'] = launch.download_models( + default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads) + + if 'prompt' in preset_prepared and preset_prepared.get('prompt') == '': + del preset_prepared['prompt'] + + return modules.meta_parser.load_parameter_button_click(json.dumps(preset_prepared), is_generating) + + preset_selection.change(preset_selection_change, inputs=[preset_selection, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \ + .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \ + performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 + [gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 + [gr.update(interactive=not flags.Performance.has_restricted_features(x), value=flags.Performance.has_restricted_features(x))] * 1, @@ -600,8 +643,6 @@ with shared.gradio_root: ctrls += ip_ctrls - state_is_generating = gr.State(False) - def parse_meta(raw_prompt_txt, is_generating): loaded_json = None if is_json(raw_prompt_txt): @@ -617,13 +658,6 @@ with shared.gradio_root: prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False) - load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections, - performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection, - overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive, - adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, base_model, - refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, image_seed, - generate_button, load_parameter_button] + freeu_ctrls + 
lora_ctrls - load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False) def trigger_metadata_import(filepath, state_is_generating): @@ -637,7 +671,6 @@ with shared.gradio_root: return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating) - metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \ .then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) From 55e23a9374cbe09d70f182b7a73a7885411822db Mon Sep 17 00:00:00 2001 From: Spencer Hayes-Laverdiere Date: Fri, 15 Mar 2024 17:30:29 -0400 Subject: [PATCH 086/103] fix: add error output for unsupported images (#2537) * Raise Error on bad decode * Move task arg pop to try block * fix: prevent empty task from getting queued --------- Co-authored-by: Manuel Schmid --- modules/gradio_hijack.py | 7 +++++-- webui.py | 6 +++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/modules/gradio_hijack.py b/modules/gradio_hijack.py index 181429ec..35df81c0 100644 --- a/modules/gradio_hijack.py +++ b/modules/gradio_hijack.py @@ -17,7 +17,7 @@ from gradio_client.documentation import document, set_documentation_group from gradio_client.serializing import ImgSerializable from PIL import Image as _Image # using _ to minimize namespace pollution -from gradio import processing_utils, utils +from gradio import processing_utils, utils, Error from gradio.components.base import IOComponent, _Keywords, Block from gradio.deprecation import warn_style_method_deprecation from gradio.events import ( @@ -275,7 +275,10 @@ class Image( x, mask = x["image"], x["mask"] assert isinstance(x, str) - im = processing_utils.decode_base64_to_image(x) + try: + im = processing_utils.decode_base64_to_image(x) + except PIL.UnidentifiedImageError: + raise Error("Unsupported image type in input") with warnings.catch_warnings(): warnings.simplefilter("ignore") im = im.convert(self.image_mode) diff --git a/webui.py b/webui.py index 01c828df..98780bff 100644 --- a/webui.py +++ b/webui.py @@ -29,12 +29,16 @@ def get_task(*args): return worker.AsyncTask(args=args) -def generate_clicked(task): +def generate_clicked(task: worker.AsyncTask): import ldm_patched.modules.model_management as model_management with model_management.interrupt_processing_mutex: model_management.interrupt_processing = False # outputs=[progress_html, progress_window, progress_gallery, gallery] + + if len(task.args) == 0: + return + execution_start_time = time.perf_counter() finished = False From 37274c652a044783c63f0966c087a4a062f09790 Mon Sep 17 00:00:00 2001 From: David Sage <162500231+DavidDragonsage@users.noreply.github.com> Date: Fri, 15 Mar 2024 14:52:27 -0700 Subject: [PATCH 087/103] feat: improve anime preset by adding style Fooocus Semi Realistic (#2492) * Add files via upload In anime.json, at Line 36, replace "Fooocus Negative" with "Fooocus Semi Realistic" * Add files via upload In sdxl_styles_fooocus.json, insert this text at Line 6: { "name": "Fooocus Semi Realistic", "negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), 
(blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)"
},

* Add files via upload

Popup image for the new "Fooocus Semi Realistic" style

* Update sdxl_styles_fooocus.json

Removed "grayscale, bw" from the proposed Fooocus Realistic entry at Line 6 of sdxl_styles_fooocus.json

* refactor: cleanup files

* feat: use default model to create thumbnail

juggernautv8, seed 0, 1024x1024, no LoRAs, only this style, positive prompt "cat"

---------

Co-authored-by: Manuel Schmid
Co-authored-by: Manuel Schmid
---
 presets/anime.json                             |   2 +-
 sdxl_styles/samples/fooocus_semi_realistic.jpg | Bin 0 -> 8565 bytes
 sdxl_styles/sdxl_styles_fooocus.json           |   4 ++++
 3 files changed, 5 insertions(+), 1 deletion(-)
 create mode 100644 sdxl_styles/samples/fooocus_semi_realistic.jpg

diff --git a/presets/anime.json b/presets/anime.json
index 1f2b26a9..6fe6e4ba 100644
--- a/presets/anime.json
+++ b/presets/anime.json
@@ -38,7 +38,7 @@
     "default_prompt_negative": "",
     "default_styles": [
         "Fooocus V2",
-        "Fooocus Negative",
+        "Fooocus Semi Realistic",
         "Fooocus Masterpiece"
     ],
     "default_aspect_ratio": "896*1152",
diff --git a/sdxl_styles/samples/fooocus_semi_realistic.jpg b/sdxl_styles/samples/fooocus_semi_realistic.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b07555a7becce71aa5e2722450e2fea328724615
GIT binary patch
literal 8565

[8,565 bytes of base85-encoded JPEG thumbnail data omitted for readability]

literal 0
HcmV?d00001

diff --git a/sdxl_styles/sdxl_styles_fooocus.json b/sdxl_styles/sdxl_styles_fooocus.json
index 81d6442e..cf64eab4 100644
--- a/sdxl_styles/sdxl_styles_fooocus.json
+++ b/sdxl_styles/sdxl_styles_fooocus.json
@@ -3,6 +3,10 @@
         "name": "Fooocus Enhance",
         "negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)"
     },
+    {
+        "name": "Fooocus Semi Realistic",
+        "negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)"
+    },
     {
         "name": "Fooocus Sharp",
         "prompt": "cinematic still {prompt} . 
emotional, harmonious, vignette, 4k epic detailed, shot on kodak, 35mm photo, sharp focus, high budget, cinemascope, moody, epic, gorgeous, film grain, grainy", From 86cba3f223720245269b96d86560d8ef16806c2a Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Fri, 15 Mar 2024 23:11:26 +0100 Subject: [PATCH 088/103] feat: add translation for unsupported image error (#2537) --- language/en.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/language/en.json b/language/en.json index 0f97e6e9..fefc79c4 100644 --- a/language/en.json +++ b/language/en.json @@ -384,5 +384,6 @@ "Metadata Scheme": "Metadata Scheme", "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.", "fooocus (json)": "fooocus (json)", - "a1111 (plain text)": "a1111 (plain text)" + "a1111 (plain text)": "a1111 (plain text)", + "Unsupported image type in input": "Unsupported image type in input" } \ No newline at end of file From d057f2fae9c8222cb30a1eb20c549a7cf2c96bda Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sun, 17 Mar 2024 14:01:10 +0100 Subject: [PATCH 089/103] fix: correctly handle empty lora array in a1111 metadata log scheme (#2551) --- modules/meta_parser.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/modules/meta_parser.py b/modules/meta_parser.py index 546c093f..e518a9cf 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -377,7 +377,7 @@ class A1111MetadataParser(MetadataParser): data[key] = filename break - if 'lora_hashes' in data: + if 'lora_hashes' in data and data['lora_hashes'] != '': lora_filenames = modules.config.lora_filenames.copy() if modules.config.sdxl_lcm_lora in lora_filenames: lora_filenames.remove(modules.config.sdxl_lcm_lora) @@ -431,16 +431,15 @@ class A1111MetadataParser(MetadataParser): if key in data: generation_params[self.fooocus_to_a1111[key]] = data[key] - lora_hashes = [] - for index, (lora_name, lora_weight, lora_hash) in enumerate(self.loras): - # workaround for Fooocus not knowing LoRA name in LoRA metadata - lora_hashes.append(f'{lora_name}: {lora_hash}: {lora_weight}') - lora_hashes_string = ', '.join(lora_hashes) + if len(self.loras) > 0: + lora_hashes = [] + for index, (lora_name, lora_weight, lora_hash) in enumerate(self.loras): + # workaround for Fooocus not knowing LoRA name in LoRA metadata + lora_hashes.append(f'{lora_name}: {lora_hash}: {lora_weight}') + lora_hashes_string = ', '.join(lora_hashes) + generation_params[self.fooocus_to_a1111['lora_hashes']] = lora_hashes_string - generation_params |= { - self.fooocus_to_a1111['lora_hashes']: lora_hashes_string, - self.fooocus_to_a1111['version']: data['version'] - } + generation_params[self.fooocus_to_a1111['version']] = data['version'] if modules.config.metadata_created_by != '': generation_params[self.fooocus_to_a1111['created_by']] = modules.config.metadata_created_by From 6b44c101dbfe742fd912456c200ad1bfa4a88473 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 18 Mar 2024 12:30:39 +0100 Subject: [PATCH 090/103] feat: update changelog and readme --- readme.md | 10 +++++++--- update_log.md | 11 +++++++++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/readme.md b/readme.md index 4e47ac08..6ec24eed 100644 --- a/readme.md +++ b/readme.md @@ -84,6 +84,10 @@ The first time you launch the software, it will automatically download models: After 
Fooocus 2.1.60, you will also have `run_anime.bat` and `run_realistic.bat`. They are different model presets (and require different models, but they will be automatically downloaded). [Check here for more details](https://github.com/lllyasviel/Fooocus/discussions/679). +After Fooocus 2.3.0 you can also switch presets directly in the browser. Keep in mind to add these arguments if you want to change the default behavior: +* Use `--disable-preset-selection` to disable preset selection in the browser. +* Use `--always-download-new-model` to download missing models on preset switch. Default is fallback to `previous_default_models` defined in the corresponding preset, also see terminal output. + ![image](https://github.com/lllyasviel/Fooocus/assets/19834515/d386f817-4bd7-490c-ad89-c1e228c23447) If you already have these files, you can copy them to the above locations to speed up installation. @@ -115,7 +119,7 @@ See also the common problems and troubleshoots [here](troubleshoot.md). ### Colab -(Last tested - 2024 Mar 11) +(Last tested - 2024 Mar 18 - @mashb1t) | Colab | Info | --- | --- | @@ -125,9 +129,9 @@ In Colab, you can modify the last line to `!python entry_with_update.py --share Note that this Colab will disable refiner by default because Colab free's resources are relatively limited (and some "big" features like image prompt may cause free-tier Colab to disconnect). We make sure that basic text-to-image is always working on free-tier Colab. -Using `--always-high-vram` shifts resource allocation from RAM to VRAM and achieves the overall best balance between performance, flexibility and stability on the default T4 instance. +Using `--always-high-vram` shifts resource allocation from RAM to VRAM and achieves the overall best balance between performance, flexibility and stability on the default T4 instance. Please find more information [here](https://github.com/lllyasviel/Fooocus/pull/1710#issuecomment-1989185346). -Thanks to [camenduru](https://github.com/camenduru)! +Thanks to [camenduru](https://github.com/camenduru) for the template! ### Linux (Using Anaconda) diff --git a/update_log.md b/update_log.md index 322c19c1..4e22db0a 100644 --- a/update_log.md +++ b/update_log.md @@ -1,3 +1,14 @@ +# [2.3.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.3.0) + +* Add performance "lightning" (based on [SDXL-Lightning 4 step LoRA](https://huggingface.co/ByteDance/SDXL-Lightning/blob/main/sdxl_lightning_4step_lora.safetensors)) +* Add preset selection to UI, disable with argument `--disable-preset-selection`. Use `--always-download-new-model` to download missing models on preset switch. +* Improve face swap consistency by switching later in the process to (synthetic) refiner +* Add temp path cleanup on startup +* Add support for wildcard subdirectories +* Add scrollable 2 column layout for styles for better structure +* Improve Colab resource needs for T4 instances (default), positively tested with all image prompt features +* Improve anime preset, now uses style `Fooocus Semi Realistic` instead of `Fooocus Negative` (less wet look images) + # [2.2.1](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.1) * Fix some small bugs (e.g. 
image grid, upscale fast 2x, LoRA weight width in Firefox) From c08518abae926d596ac5095ba8af7c3c3d88cc4d Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 18 Mar 2024 17:40:37 +0100 Subject: [PATCH 091/103] feat: add backwards compatibility for presets without disable/enable LoRA boolean https://github.com/lllyasviel/Fooocus/pull/2507 --- modules/config.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/config.py b/modules/config.py index c82f61c2..f3bf7f1f 100644 --- a/modules/config.py +++ b/modules/config.py @@ -323,8 +323,12 @@ default_loras = get_config_item_or_set_default( 1.0 ] ], - validator=lambda x: isinstance(x, list) and all(len(y) == 3 and isinstance(y[0], bool) and isinstance(y[1], str) and isinstance(y[2], numbers.Number) for y in x) + validator=lambda x: isinstance(x, list) and all( + len(y) == 3 and isinstance(y[0], bool) and isinstance(y[1], str) and isinstance(y[2], numbers.Number) + or len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number) + for y in x) ) +default_loras = [(y[0], y[1], y[2]) if len(y) == 3 else (True, y[0], y[1]) for y in default_loras] default_max_lora_number = get_config_item_or_set_default( key='default_max_lora_number', default_value=len(default_loras) if isinstance(default_loras, list) and len(default_loras) > 0 else 5, From ee361715afb7dab10ff266fcf8d8c6abcebfd81a Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 18 Mar 2024 18:04:15 +0100 Subject: [PATCH 092/103] docs: bump version number to 2.3.0 --- fooocus_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fooocus_version.py b/fooocus_version.py index 6c3c2c90..a4b8895b 100644 --- a/fooocus_version.py +++ b/fooocus_version.py @@ -1 +1 @@ -version = '2.2.1' +version = '2.3.0' From 3efce581cac1df4441980710f55c28fbde3ac3d7 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 18 Mar 2024 18:13:15 +0100 Subject: [PATCH 093/103] docs: add hint for colab preset timeout to readme --- readme.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/readme.md b/readme.md index 6ec24eed..5f66e02a 100644 --- a/readme.md +++ b/readme.md @@ -119,7 +119,7 @@ See also the common problems and troubleshoots [here](troubleshoot.md). ### Colab -(Last tested - 2024 Mar 18 - @mashb1t) +(Last tested - 2024 Mar 18 by [mashb1t](https://github.com/mashb1t)) | Colab | Info | --- | --- | @@ -127,6 +127,8 @@ See also the common problems and troubleshoots [here](troubleshoot.md). In Colab, you can modify the last line to `!python entry_with_update.py --share --always-high-vram` or `!python entry_with_update.py --share --always-high-vram --preset anime` or `!python entry_with_update.py --share --always-high-vram --preset realistic` for Fooocus Default/Anime/Realistic Edition. +You can also change the preset in the UI. Please be aware that this may lead to timeouts after 60 seconds. If this is the case, please wait until the download has finished, change the preset to initial and back to the one you've selected or reload the page. + Note that this Colab will disable refiner by default because Colab free's resources are relatively limited (and some "big" features like image prompt may cause free-tier Colab to disconnect). We make sure that basic text-to-image is always working on free-tier Colab. Using `--always-high-vram` shifts resource allocation from RAM to VRAM and achieves the overall best balance between performance, flexibility and stability on the default T4 instance. 
Please find more information [here](https://github.com/lllyasviel/Fooocus/pull/1710#issuecomment-1989185346).

From 532a6e2e67634a8b33fb218a44da498c4f689db5 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Tue, 19 Mar 2024 19:10:14 +0100
Subject: [PATCH 094/103] fix: remove positive prompt from anime prefix

prevents the prompt from getting overridden when switching presets in browser
---
 presets/anime.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/presets/anime.json b/presets/anime.json
index 6fe6e4ba..2610677c 100644
--- a/presets/anime.json
+++ b/presets/anime.json
@@ -34,7 +34,7 @@
     "default_sampler": "dpmpp_2m_sde_gpu",
     "default_scheduler": "karras",
     "default_performance": "Speed",
-    "default_prompt": "1girl, ",
+    "default_prompt": "",
     "default_prompt_negative": "",
     "default_styles": [
         "Fooocus V2",

From 856eb750ab515a3b5b28b7d35360fca1411dd933 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Tue, 19 Mar 2024 23:08:38 +0100
Subject: [PATCH 095/103] fix: add enabled value to LoRA when setting default_max_lora_number
---
 modules/config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/config.py b/modules/config.py
index f3bf7f1f..ba2a76fb 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -476,7 +476,7 @@ metadata_created_by = get_config_item_or_set_default(

 example_inpaint_prompts = [[x] for x in example_inpaint_prompts]

-config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [['None', 1.0] for _ in range(default_max_lora_number - len(default_loras))]
+config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [[True, 'None', 1.0] for _ in range(default_max_lora_number - len(default_loras))]

From 978267f461e204c6c4359a79ed818ee2e3e1af39 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Wed, 20 Mar 2024 21:12:21 +0100
Subject: [PATCH 096/103] fix: correctly set preset config and loras in meta parser
---
 modules/config.py      |  8 --------
 modules/meta_parser.py | 19 ++++++++++++++-----
 2 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/modules/config.py b/modules/config.py
index ba2a76fb..76ffd348 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -124,14 +124,6 @@ def try_get_preset_content(preset):
         print(e)
     return {}

-
-try:
-    with open(os.path.abspath(f'./presets/default.json'), "r", encoding="utf-8") as json_file:
-        config_dict.update(json.load(json_file))
-except Exception as e:
-    print(f'Load default preset failed.')
-    print(e)
-
 available_presets = get_presets()
 preset = args_manager.args.preset
 config_dict.update(try_get_preset_content(preset))
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 15f0ad7b..10bc6896 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -169,11 +169,20 @@ def get_freeu(key: str, fallback: str | None, source_dict: dict, results: list,

 def get_lora(key: str, fallback: str | None, source_dict: dict, results: list):
     try:
-        n, w = source_dict.get(key, source_dict.get(fallback)).split(' : ')
-        w = float(w)
-        results.append(True)
-        results.append(n)
-        results.append(w)
+        split_data = source_dict.get(key, source_dict.get(fallback)).split(' : ')
+        enabled = True
+        name = split_data[0]
+        weight = split_data[1]
+
+        if len(split_data) == 3:
+            enabled = split_data[0] == 'True'
+            name = split_data[1]
+            weight = split_data[2]
+
+        weight = float(weight)
+        results.append(enabled)
+        results.append(name)
+        results.append(weight)
     except:
         results.append(True)
         results.append('None')
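[Editor's note — `get_lora` above now accepts two serializations: the legacy `name : weight` form and the newer `enabled : name : weight` form. A standalone sketch of that parsing logic under those assumptions; `parse_lora_field` is an illustrative name, not a function in the codebase, and the separator `' : '` and field order come from the diff.]

```python
# Sketch of the two LoRA field serializations handled after this patch.
def parse_lora_field(raw: str) -> tuple[bool, str, float]:
    split_data = raw.split(' : ')
    enabled, name, weight = True, split_data[0], split_data[1]
    if len(split_data) == 3:
        # newer three-part form carries an explicit enabled flag up front
        enabled = split_data[0] == 'True'
        name = split_data[1]
        weight = split_data[2]
    return enabled, name, float(weight)


# legacy two-part form defaults to enabled=True
assert parse_lora_field('my_lora.safetensors : 0.8') == (True, 'my_lora.safetensors', 0.8)
# newer three-part form round-trips the flag
assert parse_lora_field('False : my_lora.safetensors : 0.8') == (False, 'my_lora.safetensors', 0.8)
```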
From 7564dd5131ebef2b62b34ea88215f05d29bcdd60 Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Sat, 23 Mar 2024 12:49:20 +0100
Subject: [PATCH 097/103] fix: load image number from preset (#2611)

* fix: add default_image_number to preset handling
* fix: use minimum image number of preset and config to prevent UI overflow
---
 modules/config.py      |  1 +
 modules/meta_parser.py | 14 +++++++++++++-
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/modules/config.py b/modules/config.py
index 76ffd348..6c02ca13 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -485,6 +485,7 @@
     "default_scheduler": "scheduler",
     "default_overwrite_step": "steps",
     "default_performance": "performance",
+    "default_image_number": "image_number",
     "default_prompt": "prompt",
     "default_prompt_negative": "negative_prompt",
     "default_styles": "styles",
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 10bc6896..8cd21cbc 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -27,8 +27,9 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
         loaded_parameter_dict = json.loads(raw_metadata)
     assert isinstance(loaded_parameter_dict, dict)

-    results = [len(loaded_parameter_dict) > 0, 1]
+    results = [len(loaded_parameter_dict) > 0]

+    get_image_number('image_number', 'Image Number', loaded_parameter_dict, results)
     get_str('prompt', 'Prompt', loaded_parameter_dict, results)
     get_str('negative_prompt', 'Negative Prompt', loaded_parameter_dict, results)
     get_list('styles', 'Styles', loaded_parameter_dict, results)
@@ -92,6 +93,17 @@ def get_float(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
         results.append(gr.update())


+def get_image_number(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
+    try:
+        h = source_dict.get(key, source_dict.get(fallback, default))
+        assert h is not None
+        h = int(h)
+        h = min(h, modules.config.default_max_image_number)
+        results.append(h)
+    except:
+        results.append(1)
+
+
 def get_steps(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
     try:
         h = source_dict.get(key, source_dict.get(fallback, default))

From 9aaa40055334978742295b6187c90bac72a81f84 Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Sat, 23 Mar 2024 13:10:21 +0100
Subject: [PATCH 098/103] fix: use correct base dimensions for outpaint mask padding (#2612)
---
 modules/async_worker.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index fa959361..d8a1e072 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -614,12 +614,12 @@ def worker():

                 H, W, C = inpaint_image.shape
                 if 'left' in outpaint_selections:
-                    inpaint_image = np.pad(inpaint_image, [[0, 0], [int(H * 0.3), 0], [0, 0]], mode='edge')
-                    inpaint_mask = np.pad(inpaint_mask, [[0, 0], [int(H * 0.3), 0]], mode='constant',
+                    inpaint_image = np.pad(inpaint_image, [[0, 0], [int(W * 0.3), 0], [0, 0]], mode='edge')
+                    inpaint_mask = np.pad(inpaint_mask, [[0, 0], [int(W * 0.3), 0]], mode='constant',
                                           constant_values=255)
                 if 'right' in outpaint_selections:
-                    inpaint_image = np.pad(inpaint_image, [[0, 0], [0, int(H * 0.3)], [0, 0]], mode='edge')
-                    inpaint_mask = np.pad(inpaint_mask, [[0, 0], [0, int(H * 0.3)]], mode='constant',
+                    inpaint_image = np.pad(inpaint_image, [[0, 0], [0, int(W * 0.3)], [0, 0]], mode='edge')
+                    inpaint_mask = np.pad(inpaint_mask, [[0, 0], [0, int(W * 0.3)]], mode='constant',
                                           constant_values=255)

                 inpaint_image = np.ascontiguousarray(inpaint_image.copy())
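[Editor's note — the one-character fix above is easiest to verify on a concrete array. For an image of shape (H, W, C), axis 0 is height and axis 1 is width, and `np.pad`'s pad_width lists one (before, after) pair per axis, so left/right outpainting must scale the axis-1 padding by W, not H. The dimensions and 0.3 ratio below mirror the patch but the array itself is a made-up toy example.]

```python
import numpy as np

# Toy image: height 100, width 200, 3 channels.
H, W = 100, 200
image = np.zeros((H, W, 3), dtype=np.uint8)

# Extending 30% to the right must scale with the width W, as in the fix:
# pad_width is [[top, bottom], [left, right], [channel_before, channel_after]].
padded = np.pad(image, [[0, 0], [0, int(W * 0.3)], [0, 0]], mode='edge')
assert padded.shape == (100, 260, 3)  # width grew by 60 pixels

# The pre-fix code used int(H * 0.3) here, which only coincidentally works
# for square images; on this 100x200 image it extends by 30 pixels, not 60.
wrong = np.pad(image, [[0, 0], [0, int(H * 0.3)], [0, 0]], mode='edge')
assert wrong.shape == (100, 230, 3)
```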
From 523ef5c70e527c817a08dfd4ee975e00ddfca0f2 Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Sat, 23 Mar 2024 16:37:18 +0100
Subject: [PATCH 099/103] fix: add Civitai compatibility for LoRAs in a1111 metadata scheme by switching schema (#2615)

* feat: update sha256 generation functions
https://github.com/lllyasviel/stable-diffusion-webui-forge/blob/29be1da7cf2b5dccfc70fbdd33eb35c56a31ffb7/modules/hashes.py
* feat: add compatibility for LoRAs in a1111 metadata scheme
* feat: add backwards compatibility
* refactor: extract remove_special_loras
* fix: correctly apply LoRA weight for legacy schema
---
 modules/config.py      |  1 +
 modules/meta_parser.py | 47 ++++++++++++++++++++++++++++--------------
 modules/util.py        | 38 +++++++++++++++++++++++++++++-----
 3 files changed, 66 insertions(+), 20 deletions(-)

diff --git a/modules/config.py b/modules/config.py
index 6c02ca13..b81e218a 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -539,6 +539,7 @@ wildcard_filenames = []

 sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors'
 sdxl_lightning_lora = 'sdxl_lightning_4step_lora.safetensors'
+loras_metadata_remove = [sdxl_lcm_lora, sdxl_lightning_lora]


 def get_model_filenames(folder_paths, extensions=None, name_filter=None):
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 8cd21cbc..70ab8860 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -1,5 +1,4 @@
 import json
-import os
 import re
 from abc import ABC, abstractmethod
 from pathlib import Path
@@ -12,7 +11,7 @@ import modules.config
 import modules.sdxl_styles
 from modules.flags import MetadataScheme, Performance, Steps
 from modules.flags import SAMPLERS, CIVITAI_NO_KARRAS
-from modules.util import quote, unquote, extract_styles_from_prompt, is_json, get_file_from_folder_list, calculate_sha256
+from modules.util import quote, unquote, extract_styles_from_prompt, is_json, get_file_from_folder_list, sha256

 re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)'
 re_param = re.compile(re_param_code)
@@ -110,7 +109,8 @@ def get_steps(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
         assert h is not None
         h = int(h)
         # if not in steps or in steps and performance is not the same
-        if h not in iter(Steps) or Steps(h).name.casefold() != source_dict.get('performance', '').replace(' ', '_').casefold():
+        if h not in iter(Steps) or Steps(h).name.casefold() != source_dict.get('performance', '').replace(' ',
+                                                                                                          '_').casefold():
             results.append(h)
             return
     results.append(-1)
@@ -204,7 +204,8 @@ def get_sha256(filepath):
     global hash_cache
     if filepath not in hash_cache:
+        # is_safetensors = os.path.splitext(filepath)[1].lower() == '.safetensors'
         hash_cache[filepath] = sha256(filepath)
     return hash_cache[filepath]
@@ -231,8 +232,9 @@
         height = height[:height.index(" ")]
         preset_prepared[meta_key] = (width, height)
     else:
-        preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[settings_key] is not None else getattr(modules.config, settings_key)
-
+        preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[
+            settings_key] is not None else getattr(modules.config, settings_key)
+
         if settings_key == "default_styles" or settings_key == "default_aspect_ratio":
             preset_prepared[meta_key] = str(preset_prepared[meta_key])
@@ -288,6 +290,12 @@ class MetadataParser(ABC):
                 lora_hash = get_sha256(lora_path)
                 self.loras.append((Path(lora_name).stem, lora_weight, lora_hash))

+    @staticmethod
+    def remove_special_loras(lora_filenames):
+        for lora_to_remove in modules.config.loras_metadata_remove:
+            if lora_to_remove in lora_filenames:
+                lora_filenames.remove(lora_to_remove)
+

 class A1111MetadataParser(MetadataParser):
     def get_scheme(self) -> MetadataScheme:
@@ -397,12 +405,19 @@
             data[key] = filename
             break

-        if 'lora_hashes' in data and data['lora_hashes'] != '':
+        lora_data = ''
+        if 'lora_weights' in data and data['lora_weights'] != '':
+            lora_data = data['lora_weights']
+        elif 'lora_hashes' in data and data['lora_hashes'] != '' and data['lora_hashes'].split(', ')[0].count(':') == 2:
+            lora_data = data['lora_hashes']
+
+        if lora_data != '':
             lora_filenames = modules.config.lora_filenames.copy()
-            if modules.config.sdxl_lcm_lora in lora_filenames:
-                lora_filenames.remove(modules.config.sdxl_lcm_lora)
-            for li, lora in enumerate(data['lora_hashes'].split(', ')):
-                lora_name, lora_hash, lora_weight = lora.split(': ')
+            self.remove_special_loras(lora_filenames)
+            for li, lora in enumerate(lora_data.split(', ')):
+                lora_split = lora.split(': ')
+                lora_name = lora_split[0]
+                lora_weight = lora_split[2] if len(lora_split) == 3 else lora_split[1]
                 for filename in lora_filenames:
                     path = Path(filename)
                     if lora_name == path.stem:
@@ -453,11 +468,15 @@
         if len(self.loras) > 0:
             lora_hashes = []
+            lora_weights = []
             for index, (lora_name, lora_weight, lora_hash) in enumerate(self.loras):
                 # workaround for Fooocus not knowing LoRA name in LoRA metadata
-                lora_hashes.append(f'{lora_name}: {lora_hash}: {lora_weight}')
+                lora_hashes.append(f'{lora_name}: {lora_hash}')
+                lora_weights.append(f'{lora_name}: {lora_weight}')
             lora_hashes_string = ', '.join(lora_hashes)
+            lora_weights_string = ', '.join(lora_weights)
             generation_params[self.fooocus_to_a1111['lora_hashes']] = lora_hashes_string
+            generation_params[self.fooocus_to_a1111['lora_weights']] = lora_weights_string

             generation_params[self.fooocus_to_a1111['version']] = data['version']

@@ -480,9 +499,7 @@ class FooocusMetadataParser(MetadataParser):
     def parse_json(self, metadata: dict) -> dict:
         model_filenames = modules.config.model_filenames.copy()
         lora_filenames = modules.config.lora_filenames.copy()
-        if modules.config.sdxl_lcm_lora in lora_filenames:
-            lora_filenames.remove(modules.config.sdxl_lcm_lora)
-
+        self.remove_special_loras(lora_filenames)
         for key, value in metadata.items():
             if value in ['', 'None']:
                 continue
diff --git a/modules/util.py b/modules/util.py
index 7c46d946..9e0fb294 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -7,9 +7,9 @@ import math
 import os
 import cv2
 import json
+import hashlib

 from PIL import Image
-from hashlib import sha256

 import modules.sdxl_styles

@@ -182,16 +182,44 @@ def get_files_from_folder(folder_path, extensions=None, name_filter=None):

     return filenames


-def calculate_sha256(filename, length=HASH_SHA256_LENGTH) -> str:
-    hash_sha256 = sha256()
+def sha256(filename, use_addnet_hash=False, length=HASH_SHA256_LENGTH):
+    print(f"Calculating sha256 for {filename}: ", end='')
+    if use_addnet_hash:
+        with open(filename, "rb") as file:
+            sha256_value = addnet_hash_safetensors(file)
+    else:
+        sha256_value = 
calculate_sha256(filename) + print(f"{sha256_value}") + + return sha256_value[:length] if length is not None else sha256_value + + +def addnet_hash_safetensors(b): + """kohya-ss hash for safetensors from https://github.com/kohya-ss/sd-scripts/blob/main/library/train_util.py""" + hash_sha256 = hashlib.sha256() + blksize = 1024 * 1024 + + b.seek(0) + header = b.read(8) + n = int.from_bytes(header, "little") + + offset = n + 8 + b.seek(offset) + for chunk in iter(lambda: b.read(blksize), b""): + hash_sha256.update(chunk) + + return hash_sha256.hexdigest() + + +def calculate_sha256(filename) -> str: + hash_sha256 = hashlib.sha256() blksize = 1024 * 1024 with open(filename, "rb") as f: for chunk in iter(lambda: f.read(blksize), b""): hash_sha256.update(chunk) - res = hash_sha256.hexdigest() - return res[:length] if length else res + return hash_sha256.hexdigest() def quote(text): From e2f9bcb11d06216d6800676c48d8d74d6fd77a4b Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sat, 23 Mar 2024 16:57:11 +0100 Subject: [PATCH 100/103] docs: bump version number to 2.3.1, add changelog (#2616) --- fooocus_version.py | 2 +- update_log.md | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/fooocus_version.py b/fooocus_version.py index a4b8895b..b2050196 100644 --- a/fooocus_version.py +++ b/fooocus_version.py @@ -1 +1 @@ -version = '2.3.0' +version = '2.3.1' diff --git a/update_log.md b/update_log.md index 4e22db0a..62c4882b 100644 --- a/update_log.md +++ b/update_log.md @@ -1,3 +1,10 @@ +# [2.3.1](https://github.com/lllyasviel/Fooocus/releases/tag/2.3.1) + +* Remove positive prompt from anime prefix to not reset prompt after switching presets +* Fix image number being reset to 1 when switching preset, now doesn't reset anymore +* Fix outpainting dimension calculation when extending left/right +* Fix LoRA compatibility for LoRAs in a1111 metadata scheme + # [2.3.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.3.0) * Add performance "lightning" (based on [SDXL-Lightning 4 step LoRA](https://huggingface.co/ByteDance/SDXL-Lightning/blob/main/sdxl_lightning_4step_lora.safetensors)) From d16a54edd69f82158ae7ffe5669618db33a01ac7 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Wed, 1 May 2024 14:11:38 +0200 Subject: [PATCH 101/103] fix: use LF as line breaks for Docker entrypoint.sh (#2843) adjusted for Linux again, see https://github.com/lllyasviel/Fooocus/discussions/2836 --- entrypoint.sh | 34 +--------------------------------- 1 file changed, 1 insertion(+), 33 deletions(-) diff --git a/entrypoint.sh b/entrypoint.sh index d0dba09c..57b06c6b 100755 --- a/entrypoint.sh +++ b/entrypoint.sh @@ -1,33 +1 @@ -#!/bin/bash - -ORIGINALDIR=/content/app -# Use predefined DATADIR if it is defined -[[ x"${DATADIR}" == "x" ]] && DATADIR=/content/data - -# Make persistent dir from original dir -function mklink () { - mkdir -p $DATADIR/$1 - ln -s $DATADIR/$1 $ORIGINALDIR -} - -# Copy old files from import dir -function import () { - (test -d /import/$1 && cd /import/$1 && cp -Rpn . $DATADIR/$1/) -} - -cd $ORIGINALDIR - -# models -mklink models -# Copy original files -(cd $ORIGINALDIR/models.org && cp -Rpn . 
From c36e951781b17b36657369854a10664b5c09b118 Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Sat, 4 May 2024 14:37:40 +0200
Subject: [PATCH 102/103] Revert "fix: use LF as line breaks for Docker
 entrypoint.sh (#2843)" (#2865)

False alarm, worked as intended before. Sorry for the fuss.

This reverts commit d16a54edd69f82158ae7ffe5669618db33a01ac7.

---
 entrypoint.sh | 34 +++++++++++++++++++++++++++++++++-
 1 file changed, 33 insertions(+), 1 deletion(-)

diff --git a/entrypoint.sh b/entrypoint.sh
index 57b06c6b..d0dba09c 100755
--- a/entrypoint.sh
+++ b/entrypoint.sh
@@ -1 +1,33 @@
-#!/bin/bash ORIGINALDIR=/content/app # Use predefined DATADIR if it is defined [[ x"${DATADIR}" == "x" ]] && DATADIR=/content/data # Make persistent dir from original dir function mklink () { mkdir -p $DATADIR/$1 ln -s $DATADIR/$1 $ORIGINALDIR } # Copy old files from import dir function import () { (test -d /import/$1 && cd /import/$1 && cp -Rpn . $DATADIR/$1/) } cd $ORIGINALDIR # models mklink models # Copy original files (cd $ORIGINALDIR/models.org && cp -Rpn . $ORIGINALDIR/models/) # Import old files import models # outputs mklink outputs # Import old files import outputs # Start application python launch.py $*
\ No newline at end of file
+#!/bin/bash
+
+ORIGINALDIR=/content/app
+# Use predefined DATADIR if it is defined
+[[ x"${DATADIR}" == "x" ]] && DATADIR=/content/data
+
+# Make persistent dir from original dir
+function mklink () {
+    mkdir -p $DATADIR/$1
+    ln -s $DATADIR/$1 $ORIGINALDIR
+}
+
+# Copy old files from import dir
+function import () {
+    (test -d /import/$1 && cd /import/$1 && cp -Rpn . $DATADIR/$1/)
+}
+
+cd $ORIGINALDIR
+
+# models
+mklink models
+# Copy original files
+(cd $ORIGINALDIR/models.org && cp -Rpn . $ORIGINALDIR/models/)
+# Import old files
+import models
+
+# outputs
+mklink outputs
+# Import old files
+import outputs
+
+# Start application
+python launch.py $*
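[Editor's note: the `[[ x"${DATADIR}" == "x" ]]` comparison in the restored script is a legacy idiom from shells whose `test` builtin mis-parsed empty operands or operands starting with `-`; inside bash's `[[ ]]` it is unnecessary. Illustrative equivalents only, not part of the patch:]

```bash
# Three equivalent ways to default DATADIR when it is unset or empty:
[[ x"${DATADIR}" == "x" ]] && DATADIR=/content/data   # legacy idiom used above
[[ -z "${DATADIR}" ]] && DATADIR=/content/data        # explicit emptiness test
DATADIR="${DATADIR:-/content/data}"                   # default via parameter expansion
```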
From 6308fb8b54f62e61711aa57b086b30466ebbb857 Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Thu, 9 May 2024 19:03:30 +0200
Subject: [PATCH 103/103] feat: update anime from animaPencilXL_v100 to
 animaPencilXL_v310 (#2454)

* feat: update anime from animaPencilXL_v100 to animaPencilXL_v200

* feat: update animaPencilXL from 2.0.0 to 2.6.0

* feat: update animaPencilXL from 2.6.0 to 3.1.0

* feat: reduce cfg as suggested by vendor from 3.0.0

https://civitai.com/models/261336?modelVersionId=435001
"recommend to decrease CFG scale."
+ all examples are in CFG 6

---
 presets/anime.json | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/presets/anime.json b/presets/anime.json
index 2610677c..78607edb 100644
--- a/presets/anime.json
+++ b/presets/anime.json
@@ -1,5 +1,5 @@
 {
-    "default_model": "animaPencilXL_v100.safetensors",
+    "default_model": "animaPencilXL_v310.safetensors",
     "default_refiner": "None",
     "default_refiner_switch": 0.5,
     "default_loras": [
@@ -29,7 +29,7 @@
             1.0
         ]
     ],
-    "default_cfg_scale": 7.0,
+    "default_cfg_scale": 6.0,
     "default_sample_sharpness": 2.0,
     "default_sampler": "dpmpp_2m_sde_gpu",
     "default_scheduler": "karras",
@@ -43,9 +43,15 @@
     ],
     "default_aspect_ratio": "896*1152",
     "checkpoint_downloads": {
-        "animaPencilXL_v100.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/animaPencilXL_v100.safetensors"
+        "animaPencilXL_v310.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/animaPencilXL_v310.safetensors"
     },
     "embeddings_downloads": {},
     "lora_downloads": {},
-    "previous_default_models": []
+    "previous_default_models": [
+        "animaPencilXL_v300.safetensors",
+        "animaPencilXL_v260.safetensors",
+        "animaPencilXL_v210.safetensors",
+        "animaPencilXL_v200.safetensors",
+        "animaPencilXL_v100.safetensors"
+    ]
 }
\ No newline at end of file
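[Editor's note: a hedged sketch of what a `previous_default_models` list enables. This is not Fooocus's actual loader logic, just an illustration of the migration such a list makes possible: a launcher can detect that a user's configured model is merely an outdated default of the preset and upgrade it, while leaving deliberate custom choices alone. Function name and usage below are assumptions:]

```python
import json


def resolve_default_model(preset_path: str, configured_model: str) -> str:
    """If the user's configured model is an outdated default of this preset,
    upgrade it to the current default; otherwise keep the user's choice."""
    with open(preset_path, "r", encoding="utf-8") as f:
        preset = json.load(f)
    if configured_model in preset.get("previous_default_models", []):
        return preset["default_model"]  # outdated default -> current default
    return configured_model             # custom model -> respect it


# Hypothetical usage:
# resolve_default_model("presets/anime.json", "animaPencilXL_v100.safetensors")
# -> "animaPencilXL_v310.safetensors"
```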