diff --git a/.github/workflows/build_container.yml b/.github/workflows/build_container.yml index eb70cda3..16056db8 100644 --- a/.github/workflows/build_container.yml +++ b/.github/workflows/build_container.yml @@ -38,7 +38,7 @@ jobs: type=edge,branch=main - name: Build and push Docker image - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . file: ./Dockerfile diff --git a/.gitignore b/.gitignore index 85914986..5bf633a8 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ __pycache__ *.partial *.onnx sorted_styles.json +hash_cache.txt /input /cache /language/default.json diff --git a/args_manager.py b/args_manager.py index 08e4dc6e..71cbf09d 100644 --- a/args_manager.py +++ b/args_manager.py @@ -32,7 +32,10 @@ args_parser.parser.add_argument("--enable-auto-describe-image", action='store_tr help="Enables automatic description of uov and enhance image when prompt is empty", default=False) args_parser.parser.add_argument("--always-download-new-model", action='store_true', - help="Always download newer models ", default=False) + help="Always download newer models", default=False) + +args_parser.parser.add_argument("--rebuild-hash-cache", action='store_true', + help="Generates missing model and LoRA hashes.", default=False) args_parser.parser.set_defaults( disable_cuda_malloc=True, diff --git a/fooocus_version.py b/fooocus_version.py index 36fac3bb..a37e9c40 100644 --- a/fooocus_version.py +++ b/fooocus_version.py @@ -1 +1 @@ -version = '2.5.2 (mashb1t)' +version = '2.6.0-rc1 (mashb1t)' diff --git a/launch.py b/launch.py index e333e287..937c0cdb 100644 --- a/launch.py +++ b/launch.py @@ -98,7 +98,8 @@ if config.temp_path_cleanup_on_launch: else: print(f"[Cleanup] Failed to delete content of temp dir.") -def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads): + +def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads, vae_downloads): for file_name, url in vae_approx_filenames: load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name) @@ -130,12 +131,14 @@ def download_models(default_model, previous_default_models, checkpoint_downloads load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name) for file_name, url in lora_downloads.items(): load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name) + for file_name, url in vae_downloads.items(): + load_file_from_url(url=url, model_dir=config.path_vae, file_name=file_name) return default_model, checkpoint_downloads config.default_base_model_name, config.checkpoint_downloads = download_models( config.default_base_model_name, config.previous_default_models, config.checkpoint_downloads, - config.embeddings_downloads, config.lora_downloads) + config.embeddings_downloads, config.lora_downloads, config.vae_downloads) from webui import * diff --git a/ldm_patched/k_diffusion/sampling.py b/ldm_patched/k_diffusion/sampling.py index ea5540a4..4d9d4ea6 100644 --- a/ldm_patched/k_diffusion/sampling.py +++ b/ldm_patched/k_diffusion/sampling.py @@ -835,4 +835,74 @@ def sample_tcd(model, x, sigmas, extra_args=None, callback=None, disable=None, n else: x *= torch.sqrt(1.0 + sigmas[i + 1] ** 2) + return x + + +@torch.no_grad() +def sample_restart(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list=None): + """Implements restart sampling in Restart Sampling for Improving Generative Processes 
(2023) + Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]} + If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list + """ + extra_args = {} if extra_args is None else extra_args + s_in = x.new_ones([x.shape[0]]) + step_id = 0 + + def heun_step(x, old_sigma, new_sigma, second_order=True): + nonlocal step_id + denoised = model(x, old_sigma * s_in, **extra_args) + d = to_d(x, old_sigma, denoised) + if callback is not None: + callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised}) + dt = new_sigma - old_sigma + if new_sigma == 0 or not second_order: + # Euler method + x = x + d * dt + else: + # Heun's method + x_2 = x + d * dt + denoised_2 = model(x_2, new_sigma * s_in, **extra_args) + d_2 = to_d(x_2, new_sigma, denoised_2) + d_prime = (d + d_2) / 2 + x = x + d_prime * dt + step_id += 1 + return x + + steps = sigmas.shape[0] - 1 + if restart_list is None: + if steps >= 20: + restart_steps = 9 + restart_times = 1 + if steps >= 36: + restart_steps = steps // 4 + restart_times = 2 + sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device) + restart_list = {0.1: [restart_steps + 1, restart_times, 2]} + else: + restart_list = {} + + restart_list = {int(torch.argmin(abs(sigmas - key), dim=0)): value for key, value in restart_list.items()} + + step_list = [] + for i in range(len(sigmas) - 1): + step_list.append((sigmas[i], sigmas[i + 1])) + if i + 1 in restart_list: + restart_steps, restart_times, restart_max = restart_list[i + 1] + min_idx = i + 1 + max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0)) + if max_idx < min_idx: + sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1] + while restart_times > 0: + restart_times -= 1 + step_list.extend(zip(sigma_restart[:-1], sigma_restart[1:])) + + last_sigma = None + for old_sigma, new_sigma in tqdm(step_list, disable=disable): + if last_sigma is None: + last_sigma = old_sigma + elif last_sigma < old_sigma: + x = x + torch.randn_like(x) * s_noise * (old_sigma ** 2 - last_sigma ** 2) ** 0.5 + x = heun_step(x, old_sigma, new_sigma) + last_sigma = new_sigma + return x \ No newline at end of file diff --git a/ldm_patched/modules/samplers.py b/ldm_patched/modules/samplers.py index 9ed1fcd2..05b4b317 100644 --- a/ldm_patched/modules/samplers.py +++ b/ldm_patched/modules/samplers.py @@ -523,7 +523,7 @@ class UNIPCBH2(Sampler): KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "heunpp2","dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", - "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "tcd", "edm_playground_v2.5"] + "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "tcd", "edm_playground_v2.5", "restart"] class KSAMPLER(Sampler): def __init__(self, sampler_function, extra_options={}, inpaint_options={}): diff --git a/modules/config.py b/modules/config.py index da061aef..2df7013c 100644 --- a/modules/config.py +++ b/modules/config.py @@ -7,6 +7,7 @@ import args_manager import tempfile import modules.flags import modules.sdxl_styles +from modules.hash_cache import load_cache_from_file, save_cache_to_file from modules.model_loader import load_file_from_url from modules.extra_utils import makedirs_with_log, get_files_from_folder, 
try_eval_env_var @@ -445,6 +446,12 @@ embeddings_downloads = get_config_item_or_set_default( validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()), expected_type=dict ) +vae_downloads = get_config_item_or_set_default( + key='vae_downloads', + default_value={}, + validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()), + expected_type=dict +) available_aspect_ratios = get_config_item_or_set_default( key='available_aspect_ratios', default_value=modules.flags.sdxl_aspect_ratios, @@ -463,6 +470,12 @@ default_inpaint_engine_version = get_config_item_or_set_default( validator=lambda x: x in modules.flags.inpaint_engine_versions, expected_type=str ) +default_inpaint_method = get_config_item_or_set_default( + key='default_inpaint_method', + default_value=modules.flags.inpaint_option_default, + validator=lambda x: x in modules.flags.inpaint_options, + expected_type=str +) default_cfg_tsnr = get_config_item_or_set_default( key='default_cfg_tsnr', default_value=7.0, @@ -602,7 +615,7 @@ default_inpaint_mask_sam_model = get_config_item_or_set_default( config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [[True, 'None', 1.0] for _ in range(default_max_lora_number - len(default_loras))] -# mapping config to meta parameter +# mapping config to meta parameter possible_preset_keys = { "default_model": "base_model", "default_refiner": "refiner_model", @@ -618,6 +631,7 @@ possible_preset_keys = { "default_sampler": "sampler", "default_scheduler": "scheduler", "default_overwrite_step": "steps", + "default_overwrite_switch": "overwrite_switch", "default_performance": "performance", "default_image_number": "image_number", "default_prompt": "prompt", @@ -628,7 +642,10 @@ possible_preset_keys = { "checkpoint_downloads": "checkpoint_downloads", "embeddings_downloads": "embeddings_downloads", "lora_downloads": "lora_downloads", - "default_vae": "vae" + "vae_downloads": "vae_downloads", + "default_vae": "vae", + # "default_inpaint_method": "inpaint_method", # disabled so inpaint mode doesn't refresh after every preset change + "default_inpaint_engine_version": "inpaint_engine_version", } REWRITE_PRESET = False @@ -875,3 +892,20 @@ def downloading_sam_vit_h(): update_files() +load_cache_from_file() + +if args_manager.args.rebuild_hash_cache: + from modules.hash_cache import sha256_from_cache + from modules.util import get_file_from_folder_list + + print('[Cache] Rebuilding hash cache') + for filename in model_filenames: + filepath = get_file_from_folder_list(filename, paths_checkpoints) + sha256_from_cache(filepath) + for filename in lora_filenames: + filepath = get_file_from_folder_list(filename, paths_loras) + sha256_from_cache(filepath) + print('[Cache] Done') + +# write cache to file again for sorting and cleanup of invalid cache entries +save_cache_to_file() diff --git a/modules/flags.py b/modules/flags.py index c8ea0a0c..5b224818 100644 --- a/modules/flags.py +++ b/modules/flags.py @@ -41,7 +41,8 @@ KSAMPLER = { "dpmpp_3m_sde_gpu": "", "ddpm": "", "lcm": "LCM", - "tcd": "TCD" + "tcd": "TCD", + "restart": "Restart" } SAMPLER_EXTRA = { diff --git a/modules/hash_cache.py b/modules/hash_cache.py new file mode 100644 index 00000000..10566560 --- /dev/null +++ b/modules/hash_cache.py @@ -0,0 +1,53 @@ +import json +import os + +from modules.util import sha256, HASH_SHA256_LENGTH + +hash_cache_filename = 'hash_cache.txt' +hash_cache = {} + + +def 
sha256_from_cache(filepath): + global hash_cache + if filepath not in hash_cache: + hash_value = sha256(filepath) + hash_cache[filepath] = hash_value + save_cache_to_file(filepath, hash_value) + + return hash_cache[filepath] + + +def load_cache_from_file(): + global hash_cache + + try: + if os.path.exists(hash_cache_filename): + with open(hash_cache_filename, 'rt', encoding='utf-8') as fp: + for line in fp: + entry = json.loads(line) + for filepath, hash_value in entry.items(): + if not os.path.exists(filepath) or not isinstance(hash_value, str) and len(hash_value) != HASH_SHA256_LENGTH: + print(f'[Cache] Skipping invalid cache entry: {filepath}') + continue + hash_cache[filepath] = hash_value + except Exception as e: + print(f'[Cache] Loading failed: {e}') + + +def save_cache_to_file(filename=None, hash_value=None): + global hash_cache + + if filename is not None and hash_value is not None: + items = [(filename, hash_value)] + mode = 'at' + else: + items = sorted(hash_cache.items()) + mode = 'wt' + + try: + with open(hash_cache_filename, mode, encoding='utf-8') as fp: + for filepath, hash_value in items: + json.dump({filepath: hash_value}, fp) + fp.write('\n') + except Exception as e: + print(f'[Cache] Saving failed: {e}') diff --git a/modules/meta_parser.py b/modules/meta_parser.py index 8997a878..ac0dff38 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -9,18 +9,18 @@ from PIL import Image import fooocus_version import modules.config import modules.sdxl_styles +from modules import hash_cache from modules.flags import MetadataScheme, Performance, Steps from modules.flags import SAMPLERS, CIVITAI_NO_KARRAS -from modules.util import quote, unquote, extract_styles_from_prompt, is_json, get_file_from_folder_list, sha256 +from modules.hash_cache import sha256_from_cache +from modules.util import quote, unquote, extract_styles_from_prompt, is_json, get_file_from_folder_list re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)' re_param = re.compile(re_param_code) re_imagesize = re.compile(r"^(\d+)x(\d+)$") -hash_cache = {} - -def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool): +def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool, inpaint_mode: str): loaded_parameter_dict = raw_metadata if isinstance(raw_metadata, str): loaded_parameter_dict = json.loads(raw_metadata) @@ -49,6 +49,8 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool): get_str('scheduler', 'Scheduler', loaded_parameter_dict, results) get_str('vae', 'VAE', loaded_parameter_dict, results) get_seed('seed', 'Seed', loaded_parameter_dict, results) + get_inpaint_engine_version('inpaint_engine_version', 'Inpaint Engine Version', loaded_parameter_dict, results, inpaint_mode) + get_inpaint_method('inpaint_method', 'Inpaint Mode', loaded_parameter_dict, results) if is_generating: results.append(gr.update()) @@ -160,6 +162,36 @@ def get_seed(key: str, fallback: str | None, source_dict: dict, results: list, d results.append(gr.update()) +def get_inpaint_engine_version(key: str, fallback: str | None, source_dict: dict, results: list, inpaint_mode: str, default=None) -> str | None: + try: + h = source_dict.get(key, source_dict.get(fallback, default)) + assert isinstance(h, str) and h in modules.flags.inpaint_engine_versions + if inpaint_mode != modules.flags.inpaint_option_detail: + results.append(h) + else: + results.append(gr.update()) + results.append(h) + return h + except: + results.append(gr.update()) + 
results.append('empty') + return None + + +def get_inpaint_method(key: str, fallback: str | None, source_dict: dict, results: list, default=None) -> str | None: + try: + h = source_dict.get(key, source_dict.get(fallback, default)) + assert isinstance(h, str) and h in modules.flags.inpaint_options + results.append(h) + for i in range(modules.config.default_enhance_tabs): + results.append(h) + return h + except: + results.append(gr.update()) + for i in range(modules.config.default_enhance_tabs): + results.append(gr.update()) + + def get_adm_guidance(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: h = source_dict.get(key, source_dict.get(fallback, default)) @@ -215,14 +247,6 @@ def get_lora(key: str, fallback: str | None, source_dict: dict, results: list, p results.append(1) -def get_sha256(filepath): - global hash_cache - if filepath not in hash_cache: - hash_cache[filepath] = sha256(filepath) - - return hash_cache[filepath] - - def parse_meta_from_preset(preset_content): assert isinstance(preset_content, dict) preset_prepared = {} @@ -289,18 +313,18 @@ class MetadataParser(ABC): self.base_model_name = Path(base_model_name).stem base_model_path = get_file_from_folder_list(base_model_name, modules.config.paths_checkpoints) - self.base_model_hash = get_sha256(base_model_path) + self.base_model_hash = sha256_from_cache(base_model_path) if refiner_model_name not in ['', 'None']: self.refiner_model_name = Path(refiner_model_name).stem refiner_model_path = get_file_from_folder_list(refiner_model_name, modules.config.paths_checkpoints) - self.refiner_model_hash = get_sha256(refiner_model_path) + self.refiner_model_hash = sha256_from_cache(refiner_model_path) self.loras = [] for (lora_name, lora_weight) in loras: if lora_name != 'None': lora_path = get_file_from_folder_list(lora_name, modules.config.paths_loras) - lora_hash = get_sha256(lora_path) + lora_hash = sha256_from_cache(lora_path) self.loras.append((Path(lora_name).stem, lora_weight, lora_hash)) self.vae_name = Path(vae_name).stem diff --git a/presets/.gitignore b/presets/.gitignore index 27e74136..e39511a9 100644 --- a/presets/.gitignore +++ b/presets/.gitignore @@ -3,5 +3,6 @@ !default.json !lcm.json !playground_v2.5.json +!pony_v6.json !realistic.json !sai.json \ No newline at end of file diff --git a/presets/anime.json b/presets/anime.json index 68958b8e..bf735c9b 100644 --- a/presets/anime.json +++ b/presets/anime.json @@ -1,5 +1,5 @@ { - "default_model": "animaPencilXL_v400.safetensors", + "default_model": "animaPencilXL_v500.safetensors", "default_refiner": "None", "default_refiner_switch": 0.5, "default_loras": [ @@ -44,11 +44,12 @@ "default_aspect_ratio": "896*1152", "default_overwrite_step": -1, "checkpoint_downloads": { - "animaPencilXL_v400.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/animaPencilXL_v400.safetensors" + "animaPencilXL_v500.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/animaPencilXL_v500.safetensors" }, "embeddings_downloads": {}, "lora_downloads": {}, "previous_default_models": [ + "animaPencilXL_v400.safetensors", "animaPencilXL_v310.safetensors", "animaPencilXL_v300.safetensors", "animaPencilXL_v260.safetensors", diff --git a/presets/hyper-sd.json b/presets/hyper-sd.json index 700b694d..67e06739 100644 --- a/presets/hyper-sd.json +++ b/presets/hyper-sd.json @@ -43,6 +43,7 @@ ], "default_aspect_ratio": "1152*896", "default_overwrite_step": -1, + "default_overwrite_switch": -1, "checkpoint_downloads": { 
"juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors" }, diff --git a/presets/playground_v2.5.json b/presets/playground_v2.5.json index 5bc6059e..ac28c260 100644 --- a/presets/playground_v2.5.json +++ b/presets/playground_v2.5.json @@ -40,6 +40,8 @@ "Fooocus V2" ], "default_aspect_ratio": "1024*1024", + "default_overwrite_step": -1, + "default_inpaint_engine_version": "None", "checkpoint_downloads": { "playground-v2.5-1024px-aesthetic.fp16.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/playground-v2.5-1024px-aesthetic.fp16.safetensors" }, diff --git a/presets/pony_v6.json b/presets/pony_v6.json new file mode 100644 index 00000000..549d7c52 --- /dev/null +++ b/presets/pony_v6.json @@ -0,0 +1,54 @@ +{ + "default_model": "ponyDiffusionV6XL.safetensors", + "default_refiner": "None", + "default_refiner_switch": 0.5, + "default_vae": "ponyDiffusionV6XL_vae.safetensors", + "default_loras": [ + [ + true, + "None", + 1.0 + ], + [ + true, + "None", + 1.0 + ], + [ + true, + "None", + 1.0 + ], + [ + true, + "None", + 1.0 + ], + [ + true, + "None", + 1.0 + ] + ], + "default_cfg_scale": 7.0, + "default_sample_sharpness": 2.0, + "default_sampler": "dpmpp_2m_sde_gpu", + "default_scheduler": "karras", + "default_performance": "Speed", + "default_prompt": "", + "default_prompt_negative": "", + "default_styles": [ + "Fooocus Pony" + ], + "default_aspect_ratio": "896*1152", + "default_overwrite_step": -1, + "default_inpaint_engine_version": "None", + "checkpoint_downloads": { + "ponyDiffusionV6XL.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/ponyDiffusionV6XL.safetensors" + }, + "embeddings_downloads": {}, + "lora_downloads": {}, + "vae_downloads": { + "ponyDiffusionV6XL_vae.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/ponyDiffusionV6XL_vae.safetensors" + } +} diff --git a/presets/realistic.json b/presets/realistic.json index f2d4773f..8402bbc1 100644 --- a/presets/realistic.json +++ b/presets/realistic.json @@ -5,7 +5,7 @@ "default_loras": [ [ true, - "SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors", + "SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors", 0.25 ], [ @@ -48,7 +48,7 @@ }, "embeddings_downloads": {}, "lora_downloads": { - "SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors" + "SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors" }, "previous_default_models": ["realisticStockPhoto_v10.safetensors"] } \ No newline at end of file diff --git a/presets/turbo.json b/presets/turbo.json index 95050d41..f8629b2e 100644 --- a/presets/turbo.json +++ b/presets/turbo.json @@ -1,5 +1,5 @@ { - "default_model": "DreamShaperXL_Turbo_dpmppSdeKarras_half_pruned_6.safetensors", + "default_model": "DreamShaperXL_Turbo_v2_1.safetensors", "default_refiner": "None", "default_refiner_switch": 0.5, "default_loras": [ @@ -39,8 +39,11 @@ "default_aspect_ratio": "1024*1024", "default_overwrite_step": 6, "checkpoint_downloads": { - "DreamShaperXL_Turbo_dpmppSdeKarras_half_pruned_6.safetensors": "https://huggingface.co/Lykon/dreamshaper-xl-turbo/resolve/main/DreamShaperXL_Turbo_dpmppSdeKarras_half_pruned_6.safetensors" + "DreamShaperXL_Turbo_v2_1.safetensors": 
"https://huggingface.co/mashb1t/fav_models/resolve/main/fav/DreamShaperXL_Turbo_v2_1.safetensors" }, "embeddings_downloads": {}, - "lora_downloads": {} + "lora_downloads": {}, + "previous_default_models": [ + "DreamShaperXL_Turbo_dpmppSdeKarras_half_pruned_6.safetensors" + ] } \ No newline at end of file diff --git a/readme.md b/readme.md index dd341db6..190708a4 100644 --- a/readme.md +++ b/readme.md @@ -487,11 +487,11 @@ See the common problems [here](troubleshoot.md). Given different goals, the default models and configs of Fooocus are different: -| Task | Windows | Linux args | Main Model | Refiner | Config | -| --- | --- | --- | --- | --- |--------------------------------------------------------------------------------| -| General | run.bat | | juggernautXL_v8Rundiffusion | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/default.json) | -| Realistic | run_realistic.bat | --preset realistic | realisticStockPhoto_v20 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/realistic.json) | -| Anime | run_anime.bat | --preset anime | animaPencilXL_v100 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/anime.json) | +| Task | Windows | Linux args | Main Model | Refiner | Config | +|-----------| --- | --- |-----------------------------| --- |--------------------------------------------------------------------------------| +| General | run.bat | | juggernautXL_v8Rundiffusion | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/default.json) | +| Realistic | run_realistic.bat | --preset realistic | realisticStockPhoto_v20 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/realistic.json) | +| Anime | run_anime.bat | --preset anime | animaPencilXL_v500 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/anime.json) | Note that the download is **automatic** - you do not need to do anything if the internet connection is okay. However, you can download them manually if you (or move them from somewhere else) have your own preparation. diff --git a/sdxl_styles/samples/fooocus_pony.jpg b/sdxl_styles/samples/fooocus_pony.jpg new file mode 100644 index 00000000..b34e5c76 Binary files /dev/null and b/sdxl_styles/samples/fooocus_pony.jpg differ diff --git a/sdxl_styles/sdxl_styles_fooocus.json b/sdxl_styles/sdxl_styles_fooocus.json index cf64eab4..c589f686 100644 --- a/sdxl_styles/sdxl_styles_fooocus.json +++ b/sdxl_styles/sdxl_styles_fooocus.json @@ -14,7 +14,7 @@ }, { "name": "Fooocus Masterpiece", - "prompt": "(masterpiece), (best quality), (ultra-detailed), {prompt}, illustration, disheveled hair, detailed eyes, perfect composition, moist skin, intricate details, earrings, by wlop", + "prompt": "(masterpiece), (best quality), (ultra-detailed), {prompt}, illustration, disheveled hair, detailed eyes, perfect composition, moist skin, intricate details, earrings", "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, pubic hair,extra digit, fewer digits, cropped, worst quality, low quality" }, { @@ -30,5 +30,10 @@ "name": "Fooocus Cinematic", "prompt": "cinematic still {prompt} . 
emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy", "negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured" + }, + { + "name": "Fooocus Pony", + "prompt": "score_9, score_8_up, score_7_up, {prompt}", + "negative_prompt": "score_6, score_5, score_4" } ] diff --git a/update_log.md b/update_log.md index 2ba79f48..03b8b9fb 100644 --- a/update_log.md +++ b/update_log.md @@ -1,3 +1,17 @@ +# [2.6.0-rc1](https://github.com/mashb1t/Fooocus/releases/tag/v2.6.0-rc1) + +* Update default models to latest versions + * animaPencilXL_v400 => animaPencilXL_v500 + * DreamShaperXL_Turbo_dpmppSdeKarras => DreamShaperXL_Turbo_v2_1 + * SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4 => SDXL_FILM_PHOTOGRAPHY_STYLE_V1 +* Add preset for pony_v6 (using ponyDiffusionV6XL) +* Add style `Fooocus Pony` +* Add restart sampler ([paper](https://arxiv.org/abs/2306.14878)) +* Add config option for default_inpaint_engine_version, sets inpaint engine for pony_v6 and playground_v2.5 to None for improved results (incompatible with inpaint engine) +* Add image editor functionality to mask upload (same as for inpaint, now correctly resizes and allows more detailed mask creation) +* Add persistent model cache for metadata. Use `--rebuild-hash-cache` to manually rebuild the cache for all non-cached hashes +* Rename `--enable-describe-uov-image` to `--enable-auto-describe-image` to better reflect its purpose (now also works for enhance image upload) + # [2.5.2](https://github.com/mashb1t/Fooocus/releases/tag/v2.5.2) * Add missing prompt return values for `Upscale (Fast 2x)` in enhance diff --git a/webui.py b/webui.py index 72f170a8..cb0f48d5 100644 --- a/webui.py +++ b/webui.py @@ -90,7 +90,7 @@ def generate_clicked(task: worker.AsyncTask): return -def inpaint_mode_change(mode): +def inpaint_mode_change(mode, inpaint_engine_version): assert mode in modules.flags.inpaint_options # inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts, @@ -104,17 +104,20 @@ def inpaint_mode_change(mode): False, 'None', 0.5, 0.0 ] + if inpaint_engine_version == 'empty': + inpaint_engine_version = modules.config.default_inpaint_engine_version + if mode == modules.flags.inpaint_option_modify: return [ gr.update(visible=True), gr.update(visible=False, value=[]), gr.Dataset.update(visible=False, samples=modules.config.example_inpaint_prompts), - True, modules.config.default_inpaint_engine_version, 1.0, 0.0 + True, inpaint_engine_version, 1.0, 0.0 ] return [ gr.update(visible=False, value=''), gr.update(visible=True), gr.Dataset.update(visible=False, samples=modules.config.example_inpaint_prompts), - False, modules.config.default_inpaint_engine_version, 1.0, 0.618 + False, inpaint_engine_version, 1.0, 0.618 ] @@ -129,6 +132,7 @@ shared.gradio_root = gr.Blocks(title=title).queue() with shared.gradio_root: currentTask = gr.State(worker.AsyncTask(args=[])) + inpaint_engine_state = gr.State('empty') with gr.Row(): with gr.Column(scale=2): with gr.Row(): @@ -235,7 +239,7 @@ with shared.gradio_root: with gr.Column(): inpaint_input_image = grh.Image(label='Image', source='upload', type='numpy', tool='sketch', height=500, brush_color="#FFFFFF", elem_id='inpaint_canvas', show_label=False) inpaint_advanced_masking_checkbox = gr.Checkbox(label='Enable Advanced Masking Features', value=False) - inpaint_mode = gr.Dropdown(choices=modules.flags.inpaint_options, value=modules.flags.inpaint_option_default, 
label='Method') + inpaint_mode = gr.Dropdown(choices=modules.flags.inpaint_options, value=modules.config.default_inpaint_method, label='Method') inpaint_additional_prompt = gr.Textbox(placeholder="Describe what you want to inpaint.", elem_id='inpaint_additional_prompt', label='Inpaint Additional Prompt', visible=False) outpaint_selections = gr.CheckboxGroup(choices=['Left', 'Right', 'Top', 'Bottom'], value=[], label='Outpaint Direction') example_inpaint_prompts = gr.Dataset(samples=modules.config.example_inpaint_prompts, @@ -377,6 +381,9 @@ with shared.gradio_root: queue=False, show_progress=False) gr.HTML('\U0001F4D4 Document') enhance_ctrls = [] + enhance_inpaint_mode_ctrls = [] + enhance_inpaint_engine_ctrls = [] + enhance_inpaint_update_ctrls = [] for index in range(modules.config.default_enhance_tabs): with gr.TabItem(label=f'#{index + 1}') as enhance_tab_item: enhance_enabled = gr.Checkbox(label='Enable', value=False, elem_classes='min_check', @@ -435,7 +442,7 @@ with shared.gradio_root: with gr.Accordion("Inpaint", visible=True, open=False): enhance_inpaint_mode = gr.Dropdown(choices=modules.flags.inpaint_options, - value=modules.flags.inpaint_option_default, + value=modules.config.default_inpaint_method, label='Method', interactive=True) enhance_inpaint_disable_initial_latent = gr.Checkbox( label='Disable initial latent in inpaint', value=False) @@ -485,7 +492,15 @@ with shared.gradio_root: enhance_mask_invert ] - enhance_inpaint_mode.input(inpaint_mode_change, inputs=enhance_inpaint_mode, outputs=[ + enhance_inpaint_mode_ctrls += [enhance_inpaint_mode] + enhance_inpaint_engine_ctrls += [enhance_inpaint_engine] + + enhance_inpaint_update_ctrls += [[ + enhance_inpaint_mode, enhance_inpaint_disable_initial_latent, enhance_inpaint_engine, + enhance_inpaint_strength, enhance_inpaint_respective_field + ]] + + enhance_inpaint_mode.change(inpaint_mode_change, inputs=[enhance_inpaint_mode, inpaint_engine_state], outputs=[ inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts, enhance_inpaint_disable_initial_latent, enhance_inpaint_engine, enhance_inpaint_strength, enhance_inpaint_respective_field @@ -874,10 +889,12 @@ with shared.gradio_root: overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, clip_skip, base_model, refiner_model, refiner_switch, sampler_name, scheduler_name, vae_name, - seed_random, image_seed, generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls + seed_random, image_seed, inpaint_engine, inpaint_engine_state, + inpaint_mode] + enhance_inpaint_mode_ctrls + [generate_button, + load_parameter_button] + freeu_ctrls + lora_ctrls if not args_manager.args.disable_preset_selection: - def preset_selection_change(preset, is_generating): + def preset_selection_change(preset, is_generating, inpaint_mode): preset_content = modules.config.try_get_preset_content(preset) if preset != 'initial' else {} preset_prepared = modules.meta_parser.parse_meta_from_preset(preset_content) @@ -886,18 +903,35 @@ with shared.gradio_root: checkpoint_downloads = preset_prepared.get('checkpoint_downloads', {}) embeddings_downloads = preset_prepared.get('embeddings_downloads', {}) lora_downloads = preset_prepared.get('lora_downloads', {}) + vae_downloads = preset_prepared.get('vae_downloads', {}) - preset_prepared['base_model'], preset_prepared['lora_downloads'] = launch.download_models( - default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, 
lora_downloads) + preset_prepared['base_model'], preset_prepared['checkpoint_downloads'] = launch.download_models( + default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads, + vae_downloads) if 'prompt' in preset_prepared and preset_prepared.get('prompt') == '': del preset_prepared['prompt'] - return modules.meta_parser.load_parameter_button_click(json.dumps(preset_prepared), is_generating) + return modules.meta_parser.load_parameter_button_click(json.dumps(preset_prepared), is_generating, inpaint_mode) - preset_selection.change(preset_selection_change, inputs=[preset_selection, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \ + + def inpaint_engine_state_change(inpaint_engine_version, *args): + if inpaint_engine_version == 'empty': + inpaint_engine_version = modules.config.default_inpaint_engine_version + + result = [] + for inpaint_mode in args: + if inpaint_mode != modules.flags.inpaint_option_detail: + result.append(gr.update(value=inpaint_engine_version)) + else: + result.append(gr.update()) + + return result + + preset_selection.change(preset_selection_change, inputs=[preset_selection, state_is_generating, inpaint_mode], outputs=load_data_outputs, queue=False, show_progress=True) \ .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \ - .then(lambda: None, _js='()=>{refresh_style_localization();}') + .then(lambda: None, _js='()=>{refresh_style_localization();}') \ + .then(inpaint_engine_state_change, inputs=[inpaint_engine_state] + enhance_inpaint_mode_ctrls, outputs=enhance_inpaint_engine_ctrls, queue=False, show_progress=False) performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 + [gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 + @@ -915,12 +949,20 @@ with shared.gradio_root: queue=False, show_progress=False) \ .then(fn=lambda: None, _js='refresh_grid_delayed', queue=False, show_progress=False) - inpaint_mode.input(inpaint_mode_change, inputs=inpaint_mode, outputs=[ + inpaint_mode.change(inpaint_mode_change, inputs=[inpaint_mode, inpaint_engine_state], outputs=[ inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field ], show_progress=False, queue=False) + # load configured default_inpaint_method + default_inpaint_ctrls = [inpaint_mode, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field] + for mode, disable_initial_latent, engine, strength, respective_field in [default_inpaint_ctrls] + enhance_inpaint_update_ctrls: + shared.gradio_root.load(inpaint_mode_change, inputs=[mode, inpaint_engine_state], outputs=[ + inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts, disable_initial_latent, + engine, strength, respective_field + ], show_progress=False, queue=False) + generate_mask_button.click(fn=generate_mask, inputs=[inpaint_input_image, inpaint_mask_model, inpaint_mask_cloth_category, inpaint_mask_dino_prompt_text, inpaint_mask_sam_model, @@ -973,7 +1015,7 @@ with shared.gradio_root: prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False) - load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, 
queue=False, show_progress=False) + load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating, inpaint_mode], outputs=load_data_outputs, queue=False, show_progress=False) def trigger_metadata_import(filepath, state_is_generating): parameters, metadata_scheme = modules.meta_parser.read_info_from_image(filepath) @@ -984,7 +1026,7 @@ with shared.gradio_root: metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme) parsed_parameters = metadata_parser.to_json(parameters) - return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating) + return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating, inpaint_mode) metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \ .then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)
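
Note on the restart sampler added in ldm_patched/k_diffusion/sampling.py: restart sampling (arXiv:2306.14878) denoises down to a low sigma, re-injects noise to jump back up to a higher sigma, and then re-denoises that interval with Heun steps. The Python sketch below isolates only the noise re-injection step; the tensor shape and the helper name reinject_noise are illustrative and not part of this change, while the 0.1 -> 2.0 sigma jump mirrors the default restart_list {0.1: [restart_steps + 1, restart_times, 2]} used by sample_restart.

import torch

def reinject_noise(x: torch.Tensor, sigma_low: float, sigma_high: float, s_noise: float = 1.0) -> torch.Tensor:
    # The added variance is sigma_high**2 - sigma_low**2, matching the
    # x = x + torch.randn_like(x) * s_noise * (old_sigma ** 2 - last_sigma ** 2) ** 0.5
    # line in sample_restart.
    assert sigma_high > sigma_low >= 0
    return x + torch.randn_like(x) * s_noise * (sigma_high ** 2 - sigma_low ** 2) ** 0.5

# Illustrative usage: a latent denoised to sigma 0.1 is pushed back up to sigma 2.0
# before the restart segment is denoised again with Heun steps.
latent = torch.zeros(1, 4, 128, 128)
latent = reinject_noise(latent, sigma_low=0.1, sigma_high=2.0)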
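
Note on the persistent hash cache added in modules/hash_cache.py: hash_cache.txt (now git-ignored) is line-delimited JSON, one {filepath: sha256} object per line; newly computed hashes are appended as they are encountered, and the whole file is rewritten sorted on startup, which also drops invalid entries. The new --rebuild-hash-cache launch flag populates missing entries for all installed checkpoints and LoRAs. Below is a minimal sketch of reading that on-disk format, assuming hypothetical file paths and placeholder hash strings (not real data):

import json

EXAMPLE_CACHE = (
    '{"/models/checkpoints/juggernautXL_v8Rundiffusion.safetensors": "<sha256 placeholder>"}\n'
    '{"/models/loras/SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors": "<sha256 placeholder>"}\n'
)

def read_cache_lines(text: str) -> dict:
    # Each non-empty line is an independent JSON object mapping a model path to its hash.
    cache = {}
    for line in text.splitlines():
        if line.strip():
            cache.update(json.loads(line))
    return cache

print(read_cache_lines(EXAMPLE_CACHE))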