diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..485dee64 --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +.idea diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 00000000..483e0de1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,106 @@ +name: Bug Report +description: You think something is broken in Fooocus +title: "[Bug]: " +labels: ["bug", "triage"] + +body: + - type: markdown + attributes: + value: | + > The title of the bug report should be short and descriptive. + > Use relevant keywords for searchability. + > Do not leave it blank, but also do not put an entire error log in it. + - type: checkboxes + attributes: + label: Checklist + description: | + Please perform basic debugging to see if your configuration is the cause of the issue. + Basic debug procedure: + 1. Update Fooocus - sometimes things just need to be updated + 2. Backup and remove your config.txt - check if the issue is caused by bad configuration + 3. Try a fresh installation of Fooocus in a different directory - see if a clean installation solves the issue + Before making an issue report, please check that the issue hasn't been reported recently. + options: + - label: The issue exists on a clean installation of Fooocus + - label: The issue exists in the current version of Fooocus + - label: The issue has not been reported recently + - label: The issue has been reported before but has not been fixed yet + - type: markdown + attributes: + value: | + > Please fill this form with as much information as possible. Don't forget to add information about "What browsers" and provide screenshots if possible. + - type: textarea + id: what-did + attributes: + label: What happened? + description: Tell us what happened in a very clear and simple way + placeholder: | + Image generation is not working as intended. + validations: + required: true + - type: textarea + id: steps + attributes: + label: Steps to reproduce the problem + description: Please provide us with precise step-by-step instructions on how to reproduce the bug + placeholder: | + 1. Go to ... + 2. Press ... + 3. ... + validations: + required: true + - type: textarea + id: what-should + attributes: + label: What should have happened? + description: Tell us what you think the normal behavior should be + placeholder: | + Fooocus should ... + validations: + required: true + - type: dropdown + id: browsers + attributes: + label: What browsers do you use to access Fooocus? + multiple: true + options: + - Mozilla Firefox + - Google Chrome + - Brave + - Apple Safari + - Microsoft Edge + - Android + - iOS + - Other + - type: dropdown + id: hosting + attributes: + label: Where are you running Fooocus? + multiple: false + options: + - Locally + - Locally with virtualization (e.g. Docker) + - Cloud (Google Colab) + - Cloud (other) + - type: input + id: operating-system + attributes: + label: What operating system are you using? + placeholder: | + Windows 10 + - type: textarea + id: logs + attributes: + label: Console logs + description: Please provide **full** cmd/terminal logs from the moment you started the UI to the end of it, after the bug occurred. If it's very long, provide a link to pastebin or a similar service. + render: Shell + validations: + required: true + - type: textarea + id: misc + attributes: + label: Additional information + description: | + Please provide us with any relevant additional info or context.
+ Examples: + I have updated my GPU driver recently. \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..7bbf022a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: false +contact_links: + - name: Ask a question + url: https://github.com/lllyasviel/Fooocus/discussions/new?category=q-a + about: Ask the community for help \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 00000000..90e594e4 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,40 @@ +name: Feature request +description: Suggest an idea for this project +title: "[Feature Request]: " +labels: ["enhancement", "triage"] + +body: + - type: checkboxes + attributes: + label: Is there an existing issue for this? + description: Please search to see if an issue already exists for the feature you want, and that it's not implemented in a recent build/commit. + options: + - label: I have searched the existing issues and checked the recent builds/commits + required: true + - type: markdown + attributes: + value: | + *Please fill this form with as much information as possible; provide screenshots and/or illustrations of the feature if possible* + - type: textarea + id: feature + attributes: + label: What would your feature do? + description: Tell us about your feature in a very clear and simple way, and what problem it would solve + validations: + required: true + - type: textarea + id: workflow + attributes: + label: Proposed workflow + description: Please provide us with step-by-step information on how you'd like the feature to be accessed and used + value: | + 1. Go to .... + 2. Press .... + 3. ... + validations: + required: true + - type: textarea + id: misc + attributes: + label: Additional information + description: Add any other context or screenshots about the feature request here.
\ No newline at end of file diff --git a/.gitignore b/.gitignore index 00906a79..e423ef81 100644 --- a/.gitignore +++ b/.gitignore @@ -52,3 +52,4 @@ user_path_config-deprecated.txt /package-lock.json /.coverage* /auth.json +.DS_Store diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..2aea2810 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,29 @@ +FROM nvidia/cuda:12.3.1-base-ubuntu22.04 +ENV DEBIAN_FRONTEND noninteractive +ENV CMDARGS --listen + +RUN apt-get update -y && \ + apt-get install -y curl libgl1 libglib2.0-0 python3-pip python-is-python3 git && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +COPY requirements_docker.txt requirements_versions.txt /tmp/ +RUN pip install --no-cache-dir -r /tmp/requirements_docker.txt -r /tmp/requirements_versions.txt && \ + rm -f /tmp/requirements_docker.txt /tmp/requirements_versions.txt +RUN pip install --no-cache-dir xformers==0.0.22 --no-dependencies +RUN curl -fsL -o /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2 https://cdn-media.huggingface.co/frpc-gradio-0.2/frpc_linux_amd64 && \ + chmod +x /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2 + +RUN adduser --disabled-password --gecos '' user && \ + mkdir -p /content/app /content/data + +COPY entrypoint.sh /content/ +RUN chown -R user:user /content + +WORKDIR /content +USER user + +RUN git clone https://github.com/lllyasviel/Fooocus /content/app +RUN mv /content/app/models /content/app/models.org + +CMD [ "sh", "-c", "/content/entrypoint.sh ${CMDARGS}" ] diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..dee7b3e7 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,38 @@ +version: '3.9' + +volumes: + fooocus-data: + +services: + app: + build: . + image: fooocus + ports: + - "7865:7865" + environment: + - CMDARGS=--listen # Arguments for launch.py. + - DATADIR=/content/data # Directory which stores models and outputs + - config_path=/content/data/config.txt + - config_example_path=/content/data/config_modification_tutorial.txt + - path_checkpoints=/content/data/models/checkpoints/ + - path_loras=/content/data/models/loras/ + - path_embeddings=/content/data/models/embeddings/ + - path_vae_approx=/content/data/models/vae_approx/ + - path_upscale_models=/content/data/models/upscale_models/ + - path_inpaint=/content/data/models/inpaint/ + - path_controlnet=/content/data/models/controlnet/ + - path_clip_vision=/content/data/models/clip_vision/ + - path_fooocus_expansion=/content/data/models/prompt_expansion/fooocus_expansion/ + - path_outputs=/content/app/outputs/ # Warning: If it is not located under '/content/app', you can't see the history log! + volumes: + - fooocus-data:/content/data + #- ./models:/import/models # Once you import files, you don't need to mount again. + #- ./outputs:/import/outputs # Once you import files, you don't need to mount again. + tty: true + deploy: + resources: + reservations: + devices: + - driver: nvidia + device_ids: ['0'] + capabilities: [compute, utility] diff --git a/docker.md b/docker.md new file mode 100644 index 00000000..36cfa632 --- /dev/null +++ b/docker.md @@ -0,0 +1,66 @@ +# Fooocus on Docker + +The Docker image is based on NVIDIA CUDA 12.3 and PyTorch 2.0; see [Dockerfile](Dockerfile) and [requirements_docker.txt](requirements_docker.txt) for details. + +## Quick start + +**This is just an easy way for testing. Please find more information in the [notes](#notes).** + +1. Clone this repository +2. Build the image with `docker compose build` +3. 
Run the docker container with `docker compose up`. Building the image takes some time. + +When you see the message `Use the app with http://0.0.0.0:7865/` in the console, you can access the URL in your browser. + +Your models and outputs are stored in the `fooocus-data` volume, which, depending on your OS, is stored in `/var/lib/docker/volumes`. + +## Details + +### Update the container manually + +When you are using `docker compose up` continuously, the container is not updated to the latest version of Fooocus automatically. +Run `git pull` before executing `docker compose build --no-cache` to build an image with the latest Fooocus version. +You can then start it with `docker compose up`. + +### Import models and outputs +If you want to import files from the models or outputs folder, you can uncomment the following settings in [docker-compose.yml](docker-compose.yml): +``` +#- ./models:/import/models # Once you import files, you don't need to mount again. +#- ./outputs:/import/outputs # Once you import files, you don't need to mount again. +``` +After running `docker compose up`, your files will be copied into `/content/data/models` and `/content/data/outputs`. +Since `/content/data` is a persistent volume folder, your files will be persisted even when you re-run `docker compose up --build` without the above volume settings. + + +### Paths inside the container + +|Path|Details| +|-|-| +|/content/app|Folder where the application is stored| +|/content/app/models.org|Original 'models' folder.
Files are copied to '/content/app/models', which is symlinked to '/content/data/models' every time the container boots. (Existing files will not be overwritten.) | +|/content/data|Persistent volume mount point| +|/content/data/models|The folder is symlinked to '/content/app/models'| +|/content/data/outputs|The folder is symlinked to '/content/app/outputs'| + +### Environments + +You can change `config.txt` parameters by using environment variables. +**Environment variables take priority over the values defined in `config.txt`, and the resulting values will be saved to `config_modification_tutorial.txt`.** + +The following Docker-specific environment variables are used by 'entrypoint.sh': +|Environment|Details| +|-|-| +|DATADIR|'/content/data' location.| +|CMDARGS|Arguments for [entry_with_update.py](entry_with_update.py) which is called by [entrypoint.sh](entrypoint.sh)| +|config_path|'config.txt' location| +|config_example_path|'config_modification_tutorial.txt' location| + +You can also use the same JSON key names and values explained in 'config_modification_tutorial.txt' as environment variables. +See the examples in [docker-compose.yml](docker-compose.yml). + +## Notes + +- Please keep 'path_outputs' under '/content/app'. Otherwise, you may get an error when you open the history log. +- Docker on Mac/Windows still has issues in the form of slow volume access when you use "bind mount" volumes. Please refer to [this article](https://docs.docker.com/storage/volumes/#use-a-volume-with-docker-compose) on how to avoid "bind mount" volumes. +- The MPS backend (Metal Performance Shaders, Apple Silicon M1/M2/etc.) is not yet supported in Docker; see https://github.com/pytorch/pytorch/issues/81224 +- You can also use `docker compose up -d` to start the container detached and connect to the logs with `docker compose logs -f`. This way you can also close the terminal and keep the container running. \ No newline at end of file diff --git a/entrypoint.sh b/entrypoint.sh new file mode 100755 index 00000000..d0dba09c --- /dev/null +++ b/entrypoint.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +ORIGINALDIR=/content/app +# Use predefined DATADIR if it is defined +[[ x"${DATADIR}" == "x" ]] && DATADIR=/content/data + +# Make persistent dir from original dir +function mklink () { + mkdir -p $DATADIR/$1 + ln -s $DATADIR/$1 $ORIGINALDIR +} + +# Copy old files from import dir +function import () { + (test -d /import/$1 && cd /import/$1 && cp -Rpn . $DATADIR/$1/) +} + +cd $ORIGINALDIR + +# models +mklink models +# Copy original files +(cd $ORIGINALDIR/models.org && cp -Rpn . 
$ORIGINALDIR/models/) +# Import old files +import models + +# outputs +mklink outputs +# Import old files +import outputs + +# Start application +python launch.py $* diff --git a/extras/expansion.py b/extras/expansion.py index c1b59b8a..34c1ee8d 100644 --- a/extras/expansion.py +++ b/extras/expansion.py @@ -112,6 +112,9 @@ class FooocusExpansion: max_token_length = 75 * int(math.ceil(float(current_token_length) / 75.0)) max_new_tokens = max_token_length - current_token_length + if max_new_tokens == 0: + return prompt[:-1] + # https://huggingface.co/blog/introducing-csearch # https://huggingface.co/docs/transformers/generation_strategies features = self.model.generate(**tokenized_kwargs, diff --git a/fooocus_version.py b/fooocus_version.py index e6525665..6e0a04af 100644 --- a/fooocus_version.py +++ b/fooocus_version.py @@ -1 +1 @@ -version = '2.1.865 (mashb1t)' +version = '2.2.0 (mashb1t)' diff --git a/language/en.json b/language/en.json index 2ff9fc5f..fc241fc2 100644 --- a/language/en.json +++ b/language/en.json @@ -49,6 +49,8 @@ "Uses the internet to translate prompts to English.": "Uses the internet to translate prompts to English.", "Random": "Random", "Seed": "Seed", + "Disable seed increment": "Disable seed increment", + "Disable automatic seed increment when image number is > 1.": "Disable automatic seed increment when image number is > 1.", "\ud83d\udcda History Log": "\uD83D\uDCDA History Log", "Image Style": "Image Style", "Fooocus V2": "Fooocus V2", @@ -343,6 +345,10 @@ "Forced Overwrite of Denoising Strength of \"Vary\"": "Forced Overwrite of Denoising Strength of \"Vary\"", "Set as negative number to disable. For developer debugging.": "Set as negative number to disable. For developer debugging.", "Forced Overwrite of Denoising Strength of \"Upscale\"": "Forced Overwrite of Denoising Strength of \"Upscale\"", + "Disable Preview": "Disable Preview", + "Disable preview during generation.": "Disable preview during generation.", + "Disable Intermediate Results": "Disable Intermediate Results", + "Disable intermediate results during generation, only show final gallery.": "Disable intermediate results during generation, only show final gallery.", "Inpaint Engine": "Inpaint Engine", "v1": "v1", "Version of Fooocus inpaint model": "Version of Fooocus inpaint model", diff --git a/launch.py b/launch.py index 51c75fdb..0c101510 100644 --- a/launch.py +++ b/launch.py @@ -67,7 +67,6 @@ vae_approx_filenames = [ 'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors') ] - def ini_args(): from args_manager import args return args @@ -100,10 +99,10 @@ def download_models(default_model, previous_default_models, checkpoint_downloads return default_model, checkpoint_downloads if not args.always_download_new_model: - if not os.path.exists(os.path.join(config.path_checkpoints, default_model)): - for alternative_model_name in previous_default_models: - if os.path.exists(os.path.join(config.path_checkpoints, alternative_model_name)): - print(f'You do not have [{default_model}] but you have [{alternative_model_name}].') + if not os.path.exists(os.path.join(config.paths_checkpoints[0], config.default_base_model_name)): + for alternative_model_name in config.previous_default_models: + if os.path.exists(os.path.join(config.paths_checkpoints[0], alternative_model_name)): + print(f'You do not have [{config.default_base_model_name}] but you have [{alternative_model_name}].') print(f'Fooocus will use [{alternative_model_name}] to avoid downloading new models, ' f'but you 
are not using the latest models.') print('Use --always-download-new-model to avoid fallback and always get new models.') @@ -111,12 +110,12 @@ def download_models(default_model, previous_default_models, checkpoint_downloads default_model = alternative_model_name break - for file_name, url in checkpoint_downloads.items(): - load_file_from_url(url=url, model_dir=config.path_checkpoints, file_name=file_name) - for file_name, url in embeddings_downloads.items(): + for file_name, url in config.checkpoint_downloads.items(): + load_file_from_url(url=url, model_dir=config.paths_checkpoints[0], file_name=file_name) + for file_name, url in config.embeddings_downloads.items(): load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name) - for file_name, url in lora_downloads.items(): - load_file_from_url(url=url, model_dir=config.path_loras, file_name=file_name) + for file_name, url in config.lora_downloads.items(): + load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name) return default_model, checkpoint_downloads diff --git a/ldm_patched/modules/args_parser.py b/ldm_patched/modules/args_parser.py index e5b84dc1..0c6165a7 100644 --- a/ldm_patched/modules/args_parser.py +++ b/ldm_patched/modules/args_parser.py @@ -100,8 +100,7 @@ vram_group.add_argument("--always-high-vram", action="store_true") vram_group.add_argument("--always-normal-vram", action="store_true") vram_group.add_argument("--always-low-vram", action="store_true") vram_group.add_argument("--always-no-vram", action="store_true") -vram_group.add_argument("--always-cpu", action="store_true") - +vram_group.add_argument("--always-cpu", type=int, nargs="?", metavar="CPU_NUM_THREADS", const=-1) parser.add_argument("--always-offload-from-vram", action="store_true") parser.add_argument("--pytorch-deterministic", action="store_true") diff --git a/ldm_patched/modules/model_management.py b/ldm_patched/modules/model_management.py index 6f88579d..840d79a0 100644 --- a/ldm_patched/modules/model_management.py +++ b/ldm_patched/modules/model_management.py @@ -60,6 +60,9 @@ except: pass if args.always_cpu: + if args.always_cpu > 0: + torch.set_num_threads(args.always_cpu) + print(f"Running on {torch.get_num_threads()} CPU threads") cpu_state = CPUState.CPU def is_intel_xpu(): diff --git a/modules/async_worker.py b/modules/async_worker.py index 19a4da6f..bc3233da 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -45,14 +45,13 @@ def worker(): import args_manager from modules.censor import censor_batch, censor_single - - from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion + from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion, apply_arrays from modules.private_logger import log from extras.expansion import safe_str from modules.util import remove_empty_str, HWC3, resize_image, \ get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate from modules.upscaler import perform_upscale - from modules.flags import Performance, lora_count + from modules.flags import Performance from modules.meta_parser import get_metadata_parser, MetadataScheme pid = os.getpid() @@ -127,6 +126,14 @@ def worker(): async_task.results = async_task.results + [wall] return + def apply_enabled_loras(loras): + enabled_loras = [] + for lora_enabled, lora_model, lora_weight in loras: + if lora_enabled: + enabled_loras.append([lora_model, lora_weight]) + + return enabled_loras + @torch.no_grad() @torch.inference_mode() def 
handler(async_task): @@ -150,7 +157,7 @@ def worker(): base_model_name = args.pop() refiner_model_name = args.pop() refiner_switch = args.pop() - loras = [[str(args.pop()), float(args.pop())] for _ in range(lora_count)] + loras = apply_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop()), ] for _ in range(modules.config.default_max_lora_number)]) input_image_checkbox = args.pop() current_tab = args.pop() uov_method = args.pop() @@ -162,6 +169,7 @@ def worker(): disable_preview = args.pop() disable_intermediate_results = args.pop() + disable_seed_increment = args.pop() black_out_nsfw = args.pop() adm_scaler_positive = args.pop() adm_scaler_negative = args.pop() @@ -423,10 +431,14 @@ def worker(): tasks = [] for i in range(image_number): - task_seed = (seed + i) % (constants.MAX_SEED + 1) # randint is inclusive, % is not - task_rng = random.Random(task_seed) # may bind to inpaint noise in the future + if disable_seed_increment: + task_seed = seed + else: + task_seed = (seed + i) % (constants.MAX_SEED + 1) # randint is inclusive, % is not + task_rng = random.Random(task_seed) # may bind to inpaint noise in the future task_prompt = apply_wildcards(prompt, task_rng) + task_prompt = apply_arrays(task_prompt, i) task_negative_prompt = apply_wildcards(negative_prompt, task_rng) task_extra_positive_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_positive_prompts] task_extra_negative_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_negative_prompts] @@ -625,8 +637,7 @@ def worker(): ) if debugging_inpaint_preprocessor: - yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing(), black_out_nsfw, - do_not_show_finished_images=True) + yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing(), black_out_nsfw, do_not_show_finished_images=True) return progressbar(async_task, 13, 'VAE Inpaint encoding ...') diff --git a/modules/config.py b/modules/config.py index 1cec6fa5..71708555 100644 --- a/modules/config.py +++ b/modules/config.py @@ -7,11 +7,19 @@ import modules.flags import modules.sdxl_styles from modules.model_loader import load_file_from_url -from modules.util import get_files_from_folder -from modules.flags import Performance, MetadataScheme, lora_count +from modules.util import get_files_from_folder, makedirs_with_log +from modules.flags import Performance, MetadataScheme -config_path = os.path.abspath("./config.txt") -config_example_path = os.path.abspath("config_modification_tutorial.txt") +def get_config_path(key, default_value): + env = os.getenv(key) + if env is not None and isinstance(env, str): + print(f"Environment: {key} = {env}") + return env + else: + return os.path.abspath(default_value) + +config_path = get_config_path('config_path', "./config.txt") +config_example_path = get_config_path('config_example_path', "config_modification_tutorial.txt") config_dict = {} always_save_keys = [] visited_keys = [] @@ -137,19 +145,20 @@ def try_load_preset_global(preset): preset = args_manager.args.preset try_load_preset_global(preset) -def get_path_output(make_directory=False) -> str: + +def get_path_output() -> str: """ Checking output path argument and overriding default path. 
""" global config_dict - path_output = get_dir_or_set_default('path_outputs', '../outputs/', make_directory) + path_output = get_dir_or_set_default('path_outputs', '../outputs/', make_directory=True) if args_manager.args.output_path: print(f'[CONFIG] Overriding config value path_outputs with {args_manager.args.output_path}') config_dict['path_outputs'] = path_output = args_manager.args.output_path return path_output -def get_dir_or_set_default(key, default_value, make_directory=False): +def get_dir_or_set_default(key, default_value, as_array=False, make_directory=False): global config_dict, visited_keys, always_save_keys if key not in visited_keys: @@ -158,26 +167,44 @@ def get_dir_or_set_default(key, default_value, make_directory=False): if key not in always_save_keys: always_save_keys.append(key) - v = config_dict.get(key, None) + v = os.getenv(key) + if v is not None: + print(f"Environment: {key} = {v}") + config_dict[key] = v + else: + v = config_dict.get(key, None) + if isinstance(v, str): if make_directory: - try: - os.makedirs(v, exist_ok=True) - except OSError as error: - print(f'Directory {v} could not be created, reason: {error}') + makedirs_with_log(v) if os.path.exists(v) and os.path.isdir(v): + return v if not as_array else [v] + elif isinstance(v, list): + if make_directory: + for d in v: + makedirs_with_log(d) + if all([os.path.exists(d) and os.path.isdir(d) for d in v]): return v if v is not None: print(f'Failed to load config key: {json.dumps({key:v})} is invalid or does not exist; will use {json.dumps({key:default_value})} instead.') - dp = os.path.abspath(os.path.join(os.path.dirname(__file__), default_value)) - os.makedirs(dp, exist_ok=True) + if isinstance(default_value, list): + dp = [] + for path in default_value: + abs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), path)) + dp.append(abs_path) + os.makedirs(abs_path, exist_ok=True) + else: + dp = os.path.abspath(os.path.join(os.path.dirname(__file__), default_value)) + os.makedirs(dp, exist_ok=True) + if as_array: + dp = [dp] config_dict[key] = dp return dp -path_checkpoints = get_dir_or_set_default('path_checkpoints', '../models/checkpoints/') -path_loras = get_dir_or_set_default('path_loras', '../models/loras/') +paths_checkpoints = get_dir_or_set_default('path_checkpoints', ['../models/checkpoints/'], True) +paths_loras = get_dir_or_set_default('path_loras', ['../models/loras/'], True) path_embeddings = get_dir_or_set_default('path_embeddings', '../models/embeddings/') path_vae_approx = get_dir_or_set_default('path_vae_approx', '../models/vae_approx/') path_upscale_models = get_dir_or_set_default('path_upscale_models', '../models/upscale_models/') @@ -186,8 +213,7 @@ path_controlnet = get_dir_or_set_default('path_controlnet', '../models/controlne path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vision/') path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion') path_safety_checker_models = get_dir_or_set_default('path_safety_checker_models', '../models/safety_checker_models/') -path_outputs = get_path_output(True) - +path_outputs = get_path_output() def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False): global config_dict, visited_keys @@ -195,6 +221,11 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_ if key not in visited_keys: visited_keys.append(key) + v = os.getenv(key) + if v is not None: + print(f"Environment: {key} = {v}") + 
config_dict[key] = v + if key not in config_dict: config_dict[key] = default_value return default_value @@ -232,6 +263,16 @@ default_refiner_switch = get_config_item_or_set_default( default_value=0.8, validator=lambda x: isinstance(x, numbers.Number) and 0 <= x <= 1 ) +default_loras_min_weight = get_config_item_or_set_default( + key='default_loras_min_weight', + default_value=-2, + validator=lambda x: isinstance(x, numbers.Number) and -10 <= x <= 10 +) +default_loras_max_weight = get_config_item_or_set_default( + key='default_loras_max_weight', + default_value=2, + validator=lambda x: isinstance(x, numbers.Number) and -10 <= x <= 10 +) default_loras = get_config_item_or_set_default( key='default_loras', default_value=[ @@ -258,6 +299,11 @@ default_loras = get_config_item_or_set_default( ], validator=lambda x: isinstance(x, list) and all(len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number) for y in x) ) +default_max_lora_number = get_config_item_or_set_default( + key='default_max_lora_number', + default_value=len(default_loras), + validator=lambda x: isinstance(x, int) and x >= 1 +) default_cfg_scale = get_config_item_or_set_default( key='default_cfg_scale', default_value=7.0, @@ -302,7 +348,7 @@ default_prompt = get_config_item_or_set_default( default_performance = get_config_item_or_set_default( key='default_performance', default_value=Performance.SPEED.value, - validator=lambda x: x in [y[1] for y in modules.flags.performance_selections if y[1] == x] + validator=lambda x: x in Performance.list() ) default_advanced_checkbox = get_config_item_or_set_default( key='default_advanced_checkbox', @@ -428,7 +474,7 @@ default_inpaint_mask_sam_model = get_config_item_or_set_default( validator=lambda x: x in modules.flags.inpaint_mask_sam_model ) -config_dict["default_loras"] = default_loras = default_loras[:lora_count] + [['None', 1.0] for _ in range(lora_count - len(default_loras))] +config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [['None', 1.0] for _ in range(default_max_lora_number - len(default_loras))] # mapping config to meta parameter possible_preset_keys = { @@ -436,6 +482,8 @@ possible_preset_keys = { "default_refiner": "refiner_model", "default_refiner_switch": "refiner_switch", "previous_default_models": "previous_default_models", + "default_loras_min_weight": "default_loras_min_weight", + "default_loras_max_weight": "default_loras_max_weight", "default_loras": "", "default_cfg_scale": "guidance_scale", "default_sample_sharpness": "sharpness", @@ -447,6 +495,7 @@ possible_preset_keys = { "default_prompt_negative": "negative_prompt", "default_styles": "styles", "default_aspect_ratio": "resolution", + "default_save_metadata_to_images": "default_save_metadata_to_images", "checkpoint_downloads": "checkpoint_downloads", "embeddings_downloads": "embeddings_downloads", "lora_downloads": "lora_downloads" @@ -491,16 +540,21 @@ with open(config_example_path, "w", encoding="utf-8") as json_file: model_filenames = [] lora_filenames = [] +sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors' -def get_model_filenames(folder_path, name_filter=None): - return get_files_from_folder(folder_path, ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch'], name_filter) +def get_model_filenames(folder_paths, name_filter=None): + extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch'] + files = [] + for folder in folder_paths: + files += get_files_from_folder(folder, extensions, name_filter) + return files def update_all_model_names(): 
global model_filenames, lora_filenames - model_filenames = get_model_filenames(path_checkpoints) - lora_filenames = get_model_filenames(path_loras) + model_filenames = get_model_filenames(paths_checkpoints) + lora_filenames = get_model_filenames(paths_loras) return @@ -545,10 +599,10 @@ def downloading_inpaint_models(v): def downloading_sdxl_lcm_lora(): load_file_from_url( url='https://huggingface.co/lllyasviel/misc/resolve/main/sdxl_lcm_lora.safetensors', - model_dir=path_loras, - file_name='sdxl_lcm_lora.safetensors' + model_dir=paths_loras[0], + file_name=sdxl_lcm_lora ) - return 'sdxl_lcm_lora.safetensors' + return sdxl_lcm_lora def downloading_controlnet_canny(): diff --git a/modules/core.py b/modules/core.py index 7a29d988..bfc44966 100644 --- a/modules/core.py +++ b/modules/core.py @@ -18,6 +18,7 @@ from ldm_patched.contrib.external import VAEDecode, EmptyLatentImage, VAEEncode, from ldm_patched.contrib.external_freelunch import FreeU_V2 from ldm_patched.modules.sample import prepare_mask from modules.lora import match_lora +from modules.util import get_file_from_folder_list from ldm_patched.modules.lora import model_lora_keys_unet, model_lora_keys_clip from modules.config import path_embeddings from ldm_patched.contrib.external_model_advanced import ModelSamplingDiscrete @@ -79,7 +80,7 @@ class StableDiffusionModel: if os.path.exists(name): lora_filename = name else: - lora_filename = os.path.join(modules.config.path_loras, name) + lora_filename = get_file_from_folder_list(name, modules.config.paths_loras) if not os.path.exists(lora_filename): print(f'Lora file not found: {lora_filename}') diff --git a/modules/default_pipeline.py b/modules/default_pipeline.py index 2f45667c..f8edfae1 100644 --- a/modules/default_pipeline.py +++ b/modules/default_pipeline.py @@ -11,6 +11,7 @@ from extras.expansion import FooocusExpansion from ldm_patched.modules.model_base import SDXL, SDXLRefiner from modules.sample_hijack import clip_separate +from modules.util import get_file_from_folder_list model_base = core.StableDiffusionModel() @@ -60,7 +61,7 @@ def assert_model_integrity(): def refresh_base_model(name): global model_base - filename = os.path.abspath(os.path.realpath(os.path.join(modules.config.path_checkpoints, name))) + filename = get_file_from_folder_list(name, modules.config.paths_checkpoints) if model_base.filename == filename: return @@ -76,7 +77,7 @@ def refresh_base_model(name): def refresh_refiner_model(name): global model_refiner - filename = os.path.abspath(os.path.realpath(os.path.join(modules.config.path_checkpoints, name))) + filename = get_file_from_folder_list(name, modules.config.paths_checkpoints) if model_refiner.filename == filename: return diff --git a/modules/flags.py b/modules/flags.py index 5180ef13..aecd7f41 100644 --- a/modules/flags.py +++ b/modules/flags.py @@ -67,18 +67,13 @@ default_parameters = { cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0) } # stop, weight -inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6'] - output_formats = ['png', 'jpg', 'webp'] -inpaint_mask_models = [ - 'u2net', 'u2netp', 'u2net_human_seg', 'u2net_cloth_seg', 'silueta', 'isnet-general-use', 'isnet-anime', 'sam' -] - +inpaint_mask_models = ['u2net', 'u2netp', 'u2net_human_seg', 'u2net_cloth_seg', 'silueta', 'isnet-general-use', 'isnet-anime', 'sam'] inpaint_mask_cloth_category = ['full', 'upper', 'lower'] - inpaint_mask_sam_model = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195'] +inpaint_engine_versions = ['None', 'v1', 'v2.5', 
'v2.6'] inpaint_option_default = 'Inpaint or Outpaint (default)' inpaint_option_detail = 'Improve Detail (face, hand, eyes, etc.)' inpaint_option_modify = 'Modify Content (add objects, change background, etc.)' @@ -98,9 +93,6 @@ metadata_scheme = [ (f'{MetadataScheme.A1111.value} (plain text)', MetadataScheme.A1111.value), ] -lora_count = 5 -lora_count_with_lcm = lora_count + 1 - controlnet_image_count = 4 @@ -133,7 +125,7 @@ class Performance(Enum): performance_selections = [ - ('Quality \U00002223 60 steps', Performance.QUALITY.value), - ('Speed \U00002223 30 steps', Performance.SPEED.value), - ('Extreme Speed (LCM) \U00002223 8 steps, intermediate results disabled', Performance.EXTREME_SPEED.value) -] \ No newline at end of file + (f'Quality \U00002223 {Steps.QUALITY.value} steps', Performance.QUALITY.value), + (f'Speed \U00002223 {Steps.SPEED.value} steps', Performance.SPEED.value), + (f'Extreme Speed (LCM) \U00002223 {Steps.EXTREME_SPEED.value} steps, intermediate results disabled', Performance.EXTREME_SPEED.value) +] diff --git a/modules/html.py b/modules/html.py index 3ec6f2d6..47a1483a 100644 --- a/modules/html.py +++ b/modules/html.py @@ -112,6 +112,30 @@ progress::after { margin-left: -5px !important; } +.lora_enable { + flex-grow: 1 !important; +} + +.lora_enable label { + height: 100%; +} + +.lora_enable label input { + margin: auto; +} + +.lora_enable label span { + display: none; +} + +.lora_model { + flex-grow: 5 !important; +} + +.lora_weight { + flex-grow: 5 !important; +} + ''' progress_html = '''
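The flags.py hunk above replaces the hard-coded step counts in `performance_selections` with values read from a `Steps` enum, and the config validator in modules/config.py now checks `default_performance` against `Performance.list()`. Below is a minimal, self-contained sketch of how the two enums cooperate; the step values 60/30/8 match the Fooocus defaults, but the exact class layout here is an assumption for illustration:

```python
from enum import Enum

class Steps(Enum):
    # Assumed default step counts per performance mode
    QUALITY = 60
    SPEED = 30
    EXTREME_SPEED = 8

class Performance(Enum):
    QUALITY = 'Quality'
    SPEED = 'Speed'
    EXTREME_SPEED = 'Extreme Speed'

    @classmethod
    def list(cls):
        # Used by the config validator: default_performance must be one of these
        return [member.value for member in cls]

# UI labels now stay in sync with the step counts automatically
performance_selections = [
    (f'Quality \U00002223 {Steps.QUALITY.value} steps', Performance.QUALITY.value),
    (f'Speed \U00002223 {Steps.SPEED.value} steps', Performance.SPEED.value),
    (f'Extreme Speed (LCM) \U00002223 {Steps.EXTREME_SPEED.value} steps, '
     f'intermediate results disabled', Performance.EXTREME_SPEED.value),
]
```

Deriving the labels from the enum means a future change to a default step count only has to be made in one place.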
diff --git a/modules/meta_parser.py b/modules/meta_parser.py index 66e40dd3..b5b1dbe0 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -11,8 +11,8 @@ import fooocus_version import modules.config import modules.sdxl_styles from modules.flags import MetadataScheme, Performance, Steps -from modules.flags import lora_count, SAMPLERS, CIVITAI_NO_KARRAS -from modules.util import quote, unquote, extract_styles_from_prompt, is_json, calculate_sha256 +from modules.flags import SAMPLERS, CIVITAI_NO_KARRAS +from modules.util import quote, unquote, extract_styles_from_prompt, is_json, get_file_from_folder_list, calculate_sha256 re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)' re_param = re.compile(re_param_code) @@ -57,7 +57,7 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool): get_freeu('freeu', 'FreeU', loaded_parameter_dict, results) - for i in range(lora_count): + for i in range(modules.config.default_max_lora_number): get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results) return results @@ -171,9 +171,11 @@ def get_lora(key: str, fallback: str | None, source_dict: dict, results: list): try: n, w = source_dict.get(key, source_dict.get(fallback)).split(' : ') w = float(w) + results.append(True) results.append(n) results.append(w) except: + results.append(True) results.append('None') results.append(1) @@ -209,7 +211,7 @@ def parse_meta_from_preset(preset_content): preset_prepared[meta_key] = (width, height) else: preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[settings_key] is not None else getattr(modules.config, settings_key) - + if settings_key == "default_styles" or settings_key == "default_aspect_ratio": preset_prepared[meta_key] = str(preset_prepared[meta_key]) @@ -241,7 +243,8 @@ class MetadataParser(ABC): def parse_string(self, metadata: dict) -> str: raise NotImplementedError - def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name, refiner_model_name, loras): + def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name, + refiner_model_name, loras): self.raw_prompt = raw_prompt self.full_prompt = full_prompt self.raw_negative_prompt = raw_negative_prompt @@ -249,18 +252,18 @@ class MetadataParser(ABC): self.steps = steps self.base_model_name = Path(base_model_name).stem - base_model_path = os.path.join(modules.config.path_checkpoints, base_model_name) + base_model_path = get_file_from_folder_list(base_model_name, modules.config.paths_checkpoints) self.base_model_hash = get_sha256(base_model_path) if refiner_model_name not in ['', 'None']: self.refiner_model_name = Path(refiner_model_name).stem - refiner_model_path = os.path.join(modules.config.path_checkpoints, refiner_model_name) + refiner_model_path = get_file_from_folder_list(refiner_model_name, modules.config.paths_checkpoints) self.refiner_model_hash = get_sha256(refiner_model_path) self.loras = [] for (lora_name, lora_weight) in loras: if lora_name != 'None': - lora_path = os.path.join(modules.config.path_loras, lora_name) + lora_path = get_file_from_folder_list(lora_name, modules.config.paths_loras) lora_hash = get_sha256(lora_path) self.loras.append((Path(lora_name).stem, lora_weight, lora_hash)) @@ -327,7 +330,7 @@ class A1111MetadataParser(MetadataParser): for k, v in re_param.findall(lastline): try: - if v[0] == '"' and v[-1] == '"': + if v != '' and v[0] == '"' and v[-1] == '"': v = 
unquote(v) m = re_imagesize.match(v) @@ -375,7 +378,8 @@ class A1111MetadataParser(MetadataParser): if 'lora_hashes' in data: lora_filenames = modules.config.lora_filenames.copy() - lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora()) + if modules.config.sdxl_lcm_lora in lora_filenames: + lora_filenames.remove(modules.config.sdxl_lcm_lora) for li, lora in enumerate(data['lora_hashes'].split(', ')): lora_name, lora_hash, lora_weight = lora.split(': ') for filename in lora_filenames: @@ -456,7 +460,8 @@ class FooocusMetadataParser(MetadataParser): def parse_json(self, metadata: dict) -> dict: model_filenames = modules.config.model_filenames.copy() lora_filenames = modules.config.lora_filenames.copy() - lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora()) + if modules.config.sdxl_lcm_lora in lora_filenames: + lora_filenames.remove(modules.config.sdxl_lcm_lora) for key, value in metadata.items(): if value in ['', 'None']: diff --git a/modules/sdxl_styles.py b/modules/sdxl_styles.py index f5bb6276..71afc402 100644 --- a/modules/sdxl_styles.py +++ b/modules/sdxl_styles.py @@ -1,6 +1,7 @@ import os import re import json +import math from modules.util import get_files_from_folder @@ -80,3 +81,38 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path): print(f'[Wildcards] BFS stack overflow. Current text: {wildcard_text}') return wildcard_text + +def get_words(arrays, totalMult, index): + if(len(arrays) == 1): + return [arrays[0].split(',')[index]] + else: + words = arrays[0].split(',') + word = words[index % len(words)] + index -= index % len(words) + index /= len(words) + index = math.floor(index) + return [word] + get_words(arrays[1:], math.floor(totalMult/len(words)), index) + + + +def apply_arrays(text, index): + arrays = re.findall(r'\[\[([\s,\w-]+)\]\]', text) + if len(arrays) == 0: + return text + + print(f'[Arrays] processing: {text}') + mult = 1 + for arr in arrays: + words = arr.split(',') + mult *= len(words) + + index %= mult + chosen_words = get_words(arrays, mult, index) + + i = 0 + for arr in arrays: + text = text.replace(f'[[{arr}]]', chosen_words[i], 1) + i = i+1 + + return text + diff --git a/modules/util.py b/modules/util.py index 4a919a1a..c7923ec8 100644 --- a/modules/util.py +++ b/modules/util.py @@ -160,7 +160,7 @@ def generate_temp_filename(folder='./outputs/', extension='png'): random_number = random.randint(1000, 9999) filename = f"{time_string}_{random_number}.{extension}" result = os.path.join(folder, date_string, filename) - return date_string, os.path.abspath(os.path.realpath(result)), filename + return date_string, os.path.abspath(result), filename def get_files_from_folder(folder_path, exensions=None, name_filter=None): @@ -341,5 +341,22 @@ def is_json(data: str) -> bool: return False return True + +def get_file_from_folder_list(name, folders): + for folder in folders: + filename = os.path.abspath(os.path.realpath(os.path.join(folder, name))) + if os.path.isfile(filename): + return filename + + return os.path.abspath(os.path.realpath(os.path.join(folders[0], name))) + + def ordinal_suffix(number: int) -> str: return 'th' if 10 <= number % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(number % 10, 'th') + + +def makedirs_with_log(path): + try: + os.makedirs(path, exist_ok=True) + except OSError as error: + print(f'Directory {path} could not be created, reason: {error}') diff --git a/readme.md b/readme.md index 02a5231b..c61d1f7c 100644 --- a/readme.md +++ b/readme.md @@ -433,6 +433,10 @@ You can install Fooocus on Apple Mac 
silicon (M1 or M2) with macOS 'Catalina' or Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition. +### Docker + +See [docker.md](docker.md) + ### Download Previous Version See the guidelines [here](https://github.com/lllyasviel/Fooocus/discussions/1405). @@ -489,7 +493,7 @@ In both ways the access is unauthenticated by default. You can add basic authent The below things are already inside the software, and **users do not need to do anything about these**. -1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processsing and "raw" mode, or the LeonardoAI's Prompt Magic). +1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processing and "raw" mode, or the LeonardoAI's Prompt Magic). 2. Native refiner swap inside one single k-sampler. The advantage is that the refiner model can now reuse the base model's momentum (or ODE's history parameters) collected from k-sampling to achieve more coherent sampling. In Automatic1111's high-res fix and ComfyUI's node system, the base model and refiner use two independent k-samplers, which means the momentum is largely wasted, and the sampling continuity is broken. Fooocus uses its own advanced k-diffusion sampling that ensures seamless, native, and continuous swap in a refiner setup. (Update Aug 13: Actually, I discussed this with Automatic1111 several days ago, and it seems that the “native refiner swap inside one single k-sampler” is [merged]( https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371) into the dev branch of webui. Great!) 3. Negative ADM guidance. Because the highest resolution level of XL Base does not have cross attentions, the positive and negative signals for XL's highest resolution level cannot receive enough contrasts during the CFG sampling, causing the results to look a bit plastic or overly smooth in certain cases. Fortunately, since the XL's highest resolution level is still conditioned on image aspect ratios (ADM), we can modify the adm on the positive/negative side to compensate for the lack of CFG contrast in the highest resolution level. (Update Aug 16, the IOS App [Draw Things](https://apps.apple.com/us/app/draw-things-ai-generation/id6444050820) will support Negative ADM Guidance. Great!) 4. We implemented a carefully tuned variation of Section 5.1 of ["Improving Sample Quality of Diffusion Models Using Self-Attention Guidance"](https://arxiv.org/pdf/2210.00939.pdf). The weight is set to very low, but this is Fooocus's final guarantee to make sure that the XL will never yield an overly smooth or plastic appearance (examples [here](https://github.com/lllyasviel/Fooocus/discussions/117#sharpness)). This can almost eliminate all cases for which XL still occasionally produces overly smooth results, even with negative ADM guidance. (Update 2023 Aug 18, the Gaussian kernel of SAG is changed to an anisotropic kernel for better structure preservation and fewer artifacts.) 
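Several hunks above (modules/config.py, modules/core.py, modules/default_pipeline.py, modules/meta_parser.py) switch from the single `path_checkpoints`/`path_loras` strings to `paths_checkpoints`/`paths_loras` lists, resolved through the new `get_file_from_folder_list` helper added in modules/util.py. The helper is reproduced below together with a usage example; the folder names in the example are hypothetical:

```python
import os

def get_file_from_folder_list(name, folders):
    # Return the path in the first folder that actually contains the file;
    # otherwise fall back to a path in the first folder so callers still get
    # a sensible location to report as missing.
    for folder in folders:
        filename = os.path.abspath(os.path.realpath(os.path.join(folder, name)))
        if os.path.isfile(filename):
            return filename
    return os.path.abspath(os.path.realpath(os.path.join(folders[0], name)))

# Hypothetical config: checkpoints split across a local and a shared directory
paths_checkpoints = ['../models/checkpoints/', '/mnt/shared/checkpoints/']
print(get_file_from_folder_list('my_model.safetensors', paths_checkpoints))
```

This keeps existing single-directory configs working: `get_dir_or_set_default(..., as_array=True)` wraps a lone string in a list, so a one-element list behaves exactly as before.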
@@ -566,7 +570,7 @@ entry_with_update.py [-h] [--listen [IP]] [--port PORT] [--attention-split | --attention-quad | --attention-pytorch] [--disable-xformers] [--always-gpu | --always-high-vram | --always-normal-vram | - --always-low-vram | --always-no-vram | --always-cpu] + --always-low-vram | --always-no-vram | --always-cpu [CPU_NUM_THREADS]] [--always-offload-from-vram] [--disable-server-log] [--debug-mode] [--is-windows-embedded-python] [--disable-server-info] [--share] [--preset PRESET] diff --git a/requirements_docker.txt b/requirements_docker.txt new file mode 100644 index 00000000..3cf4aa89 --- /dev/null +++ b/requirements_docker.txt @@ -0,0 +1,5 @@ +torch==2.0.1 +torchvision==0.15.2 +torchaudio==2.0.2 +torchtext==0.15.2 +torchdata==0.6.1 diff --git a/update_log.md b/update_log.md index e052d24c..b0192d0d 100644 --- a/update_log.md +++ b/update_log.md @@ -1,3 +1,16 @@ +# [2.2.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.0) + +* Isolate every image generation to truly allow multi-user usage +* Add array support, which changes the main prompt when increasing the image number. Syntax: `[[red, green, blue]] flower` +* Add optional metadata to images, allowing you to regenerate and modify them later with the same parameters +* Now supports native PNG, JPG and WEBP image generation +* Add Docker support + +# [2.1.865](https://github.com/lllyasviel/Fooocus/releases/tag/2.1.865) + +* Various bugfixes +* Add authentication to --listen + # 2.1.864 * New model list. See also discussions. diff --git a/webui.py b/webui.py index 514a6a65..67d84650 100644 --- a/webui.py +++ b/webui.py @@ -357,7 +357,7 @@ with shared.gradio_root: if args_manager.args.disable_image_log: return gr.update(value='') - return gr.update(value=f'\U0001F4DA History Log') + return gr.update(value=f'\U0001F4DA History Log') history_link = gr.HTML() shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False) @@ -417,11 +417,15 @@ with shared.gradio_root: for i, (n, v) in enumerate(modules.config.default_loras): with gr.Row(): + lora_enabled = gr.Checkbox(label='Enable', value=True, + elem_classes=['lora_enable', 'min_check']) lora_model = gr.Dropdown(label=f'LoRA {i + 1}', - choices=['None'] + modules.config.lora_filenames, value=n) - lora_weight = gr.Slider(label='Weight', minimum=-2, maximum=2, step=0.01, value=v, + choices=['None'] + modules.config.lora_filenames, value=n, + elem_classes='lora_model') + lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight, + maximum=modules.config.default_loras_max_weight, step=0.01, value=v, elem_classes='lora_weight') - lora_ctrls += [lora_model, lora_weight] + lora_ctrls += [lora_enabled, lora_model, lora_weight] with gr.Row(): model_refresh = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button') @@ -493,6 +497,10 @@ with shared.gradio_root: interactive=modules.config.default_performance != 'Extreme Speed', info='Disable intermediate results during generation, only show final gallery.') + disable_seed_increment = gr.Checkbox(label='Disable seed increment', + info='Disable automatic seed increment when image number is > 1.', + value=False) + black_out_nsfw = gr.Checkbox(label='Black Out NSFW', value=modules.config.default_black_out_nsfw, interactive=not modules.config.default_black_out_nsfw, info='Use black image if NSFW is detected.') @@ -590,8 +598,8 @@ with shared.gradio_root: gr.update(choices=['None'] + modules.config.model_filenames)] if 
not args_manager.args.disable_preset_selection: results += [gr.update(choices=modules.config.available_presets)] - for i in range(flags.lora_count): - results += [gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()] + for i in range(modules.config.default_max_lora_number): + results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()] return results model_refresh_output = [base_model, refiner_model] @@ -710,7 +718,7 @@ with shared.gradio_root: ctrls += [input_image_checkbox, current_tab] ctrls += [uov_method, uov_input_image] ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt, inpaint_mask_image] - ctrls += [disable_preview, disable_intermediate_results, black_out_nsfw] + ctrls += [disable_preview, disable_intermediate_results, disable_seed_increment, black_out_nsfw] ctrls += [adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg] ctrls += [sampler_name, scheduler_name] ctrls += [overwrite_step, overwrite_switch, overwrite_width, overwrite_height, overwrite_vary_strength] @@ -720,6 +728,11 @@ with shared.gradio_root: ctrls += freeu_ctrls ctrls += inpaint_ctrls + if not args_manager.args.disable_metadata: + ctrls += [save_metadata_to_images, metadata_scheme] + + ctrls += ip_ctrls + if not args_manager.args.disable_metadata: ctrls += [save_metadata_to_images, metadata_scheme]
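The array support announced in the 2.2.0 update log is implemented by `apply_arrays` in the modules/sdxl_styles.py hunk above. A condensed, runnable version of those helpers follows; the driver loop at the end is an illustrative usage, not part of the patch:

```python
import math
import re

def get_words(arrays, total_mult, index):
    # Treat the arrays like mixed-radix digits: each recursion consumes one
    # array and reduces the index accordingly.
    if len(arrays) == 1:
        return [arrays[0].split(',')[index]]
    words = arrays[0].split(',')
    word = words[index % len(words)]
    index = math.floor(index / len(words))
    return [word] + get_words(arrays[1:], math.floor(total_mult / len(words)), index)

def apply_arrays(text, index):
    # Find every [[a,b,c]] group and substitute one combination per image index.
    arrays = re.findall(r'\[\[([\s,\w-]+)\]\]', text)
    if len(arrays) == 0:
        return text
    mult = 1
    for arr in arrays:
        mult *= len(arr.split(','))
    index %= mult  # wrap around when the image number exceeds the combinations
    chosen_words = get_words(arrays, mult, index)
    for i, arr in enumerate(arrays):
        text = text.replace(f'[[{arr}]]', chosen_words[i], 1)
    return text

# With an image number of 3, indices 0..2 yield "red flower", "green flower"
# and "blue flower"; combine with disable_seed_increment to keep the seed
# constant across the variations.
for i in range(3):
    print(apply_arrays('[[red,green,blue]] flower', i))
```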