diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..485dee64
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1 @@
+.idea
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 624cfe3e..00000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-name: Bug report
-about: Describe a problem
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Read Troubleshoot**
-
-[x] I confirm that I have read the [Troubleshoot](https://github.com/lllyasviel/Fooocus/blob/main/troubleshoot.md) guide before making this issue.
-
-**Describe the problem**
-A clear and concise description of what the bug is.
-
-**Full Console Log**
-Paste the **full** console log here. You will make our job easier if you give a **full** log.
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 00000000..483e0de1
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,106 @@
+name: Bug Report
+description: You think something is broken in Fooocus
+title: "[Bug]: "
+labels: ["bug", "triage"]
+
+body:
+ - type: markdown
+ attributes:
+ value: |
+ > The title of the bug report should be short and descriptive.
+ > Use relevant keywords for searchability.
+ > Do not leave it blank, but also do not put an entire error log in it.
+ - type: checkboxes
+ attributes:
+ label: Checklist
+ description: |
+ Please perform basic debugging to see if your configuration is the cause of the issue.
+ Basic debug procedure:
+ 1. Update Fooocus - sometimes things just need to be updated
+ 2. Backup and remove your config.txt - check if the issue is caused by bad configuration
+ 3. Try a fresh installation of Fooocus in a different directory - see if a clean installation solves the issue
+ Before making an issue report, please check that the issue hasn't been reported recently.
+ options:
+ - label: The issue exists on a clean installation of Fooocus
+ - label: The issue exists in the current version of Fooocus
+ - label: The issue has not been reported recently
+ - label: The issue has been reported before but has not been fixed yet
+ - type: markdown
+ attributes:
+ value: |
+ > Please fill this form with as much information as possible. Don't forget to fill in the "What browsers" field and to provide screenshots if possible.
+ - type: textarea
+ id: what-did
+ attributes:
+ label: What happened?
+ description: Tell us what happened in a very clear and simple way
+ placeholder: |
+ Image generation is not working as intended.
+ validations:
+ required: true
+ - type: textarea
+ id: steps
+ attributes:
+ label: Steps to reproduce the problem
+ description: Please provide us with precise step-by-step instructions on how to reproduce the bug
+ placeholder: |
+ 1. Go to ...
+ 2. Press ...
+ 3. ...
+ validations:
+ required: true
+ - type: textarea
+ id: what-should
+ attributes:
+ label: What should have happened?
+ description: Tell us what you think the normal behavior should be
+ placeholder: |
+ Fooocus should ...
+ validations:
+ required: true
+ - type: dropdown
+ id: browsers
+ attributes:
+ label: What browsers do you use to access Fooocus?
+ multiple: true
+ options:
+ - Mozilla Firefox
+ - Google Chrome
+ - Brave
+ - Apple Safari
+ - Microsoft Edge
+ - Android
+ - iOS
+ - Other
+ - type: dropdown
+ id: hosting
+ attributes:
+ label: Where are you running Fooocus?
+ multiple: false
+ options:
+ - Locally
+ - Locally with virtualization (e.g. Docker)
+ - Cloud (Google Colab)
+ - Cloud (other)
+ - type: input
+ id: operating-system
+ attributes:
+ label: What operating system are you using?
+ placeholder: |
+ Windows 10
+ - type: textarea
+ id: logs
+ attributes:
+ label: Console logs
+ description: Please provide **full** cmd/terminal logs from the moment you started the UI until the end, after the bug occurred. If it's very long, provide a link to pastebin or a similar service.
+ render: Shell
+ validations:
+ required: true
+ - type: textarea
+ id: misc
+ attributes:
+ label: Additional information
+ description: |
+ Please provide us with any relevant additional info or context.
+ Examples:
+ I have updated my GPU driver recently.
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 00000000..7bbf022a
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: false
+contact_links:
+ - name: Ask a question
+ url: https://github.com/lllyasviel/Fooocus/discussions/new?category=q-a
+ about: Ask the community for help
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index 8101bc36..00000000
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the idea you'd like**
-A clear and concise description of what you want to happen.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 00000000..90e594e4
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,40 @@
+name: Feature request
+description: Suggest an idea for this project
+title: "[Feature Request]: "
+labels: ["enhancement", "triage"]
+
+body:
+ - type: checkboxes
+ attributes:
+ label: Is there an existing issue for this?
+ description: Please search to see if an issue already exists for the feature you want, and check that it has not already been implemented in a recent build/commit.
+ options:
+ - label: I have searched the existing issues and checked the recent builds/commits
+ required: true
+ - type: markdown
+ attributes:
+ value: |
+ *Please fill this form with as much information as possible; provide screenshots and/or illustrations of the feature if you can*
+ - type: textarea
+ id: feature
+ attributes:
+ label: What would your feature do?
+ description: Tell us about your feature in a very clear and simple way, and what problem it would solve
+ validations:
+ required: true
+ - type: textarea
+ id: workflow
+ attributes:
+ label: Proposed workflow
+ description: Please provide us with step-by-step information on how you'd like the feature to be accessed and used
+ value: |
+ 1. Go to ....
+ 2. Press ....
+ 3. ...
+ validations:
+ required: true
+ - type: textarea
+ id: misc
+ attributes:
+ label: Additional information
+ description: Add any other context or screenshots about the feature request here.
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index de2f5778..85914986 100644
--- a/.gitignore
+++ b/.gitignore
@@ -51,3 +51,4 @@ user_path_config-deprecated.txt
/package-lock.json
/.coverage*
/auth.json
+.DS_Store
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..2aea2810
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,29 @@
+FROM nvidia/cuda:12.3.1-base-ubuntu22.04
+ENV DEBIAN_FRONTEND=noninteractive
+ENV CMDARGS=--listen
+
+RUN apt-get update -y && \
+ apt-get install -y curl libgl1 libglib2.0-0 python3-pip python-is-python3 git && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+COPY requirements_docker.txt requirements_versions.txt /tmp/
+RUN pip install --no-cache-dir -r /tmp/requirements_docker.txt -r /tmp/requirements_versions.txt && \
+ rm -f /tmp/requirements_docker.txt /tmp/requirements_versions.txt
+RUN pip install --no-cache-dir xformers==0.0.22 --no-dependencies
+RUN curl -fsL -o /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2 https://cdn-media.huggingface.co/frpc-gradio-0.2/frpc_linux_amd64 && \
+ chmod +x /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2
+
+RUN adduser --disabled-password --gecos '' user && \
+ mkdir -p /content/app /content/data
+
+COPY entrypoint.sh /content/
+RUN chown -R user:user /content
+
+WORKDIR /content
+USER user
+
+RUN git clone https://github.com/lllyasviel/Fooocus /content/app
+RUN mv /content/app/models /content/app/models.org
+
+CMD [ "sh", "-c", "/content/entrypoint.sh ${CMDARGS}" ]
diff --git a/args_manager.py b/args_manager.py
index eeb38e1f..c7c1b7ab 100644
--- a/args_manager.py
+++ b/args_manager.py
@@ -1,5 +1,7 @@
import ldm_patched.modules.args_parser as args_parser
+import os
+from tempfile import gettempdir
args_parser.parser.add_argument("--share", action='store_true', help="Set whether to share on Gradio.")
args_parser.parser.add_argument("--preset", type=str, default=None, help="Apply specified UI preset.")
@@ -18,7 +20,10 @@ args_parser.parser.add_argument("--disable-image-log", action='store_true',
help="Prevent writing images and logs to hard drive.")
args_parser.parser.add_argument("--disable-analytics", action='store_true',
- help="Disables analytics for Gradio", default=False)
+ help="Disables analytics for Gradio.")
+
+args_parser.parser.add_argument("--disable-metadata", action='store_true',
+ help="Disables saving metadata to images.")
args_parser.parser.add_argument("--disable-preset-download", action='store_true',
help="Disables downloading models for presets", default=False)
@@ -40,7 +45,11 @@ args_parser.args.always_offload_from_vram = not args_parser.args.disable_offload
if args_parser.args.disable_analytics:
import os
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
+
if args_parser.args.disable_in_browser:
args_parser.args.in_browser = False
+if args_parser.args.temp_path is None:
+ args_parser.args.temp_path = os.path.join(gettempdir(), 'Fooocus')
+
args = args_parser.args
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 00000000..dee7b3e7
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,38 @@
+version: '3.9'
+
+volumes:
+ fooocus-data:
+
+services:
+ app:
+ build: .
+ image: fooocus
+ ports:
+ - "7865:7865"
+ environment:
+ - CMDARGS=--listen # Arguments for launch.py.
+ - DATADIR=/content/data # Directory which stores models and outputs
+ - config_path=/content/data/config.txt
+ - config_example_path=/content/data/config_modification_tutorial.txt
+ - path_checkpoints=/content/data/models/checkpoints/
+ - path_loras=/content/data/models/loras/
+ - path_embeddings=/content/data/models/embeddings/
+ - path_vae_approx=/content/data/models/vae_approx/
+ - path_upscale_models=/content/data/models/upscale_models/
+ - path_inpaint=/content/data/models/inpaint/
+ - path_controlnet=/content/data/models/controlnet/
+ - path_clip_vision=/content/data/models/clip_vision/
+ - path_fooocus_expansion=/content/data/models/prompt_expansion/fooocus_expansion/
+ - path_outputs=/content/app/outputs/ # Warning: If it is not located under '/content/app', you can't see the history log!
+ volumes:
+ - fooocus-data:/content/data
+ #- ./models:/import/models # Once you import files, you don't need to mount again.
+ #- ./outputs:/import/outputs # Once you import files, you don't need to mount again.
+ tty: true
+ deploy:
+ resources:
+ reservations:
+ devices:
+ - driver: nvidia
+ device_ids: ['0']
+ capabilities: [compute, utility]
diff --git a/docker.md b/docker.md
new file mode 100644
index 00000000..36cfa632
--- /dev/null
+++ b/docker.md
@@ -0,0 +1,66 @@
+# Fooocus on Docker
+
+The Docker image is based on NVIDIA CUDA 12.3 and PyTorch 2.0; see [Dockerfile](Dockerfile) and [requirements_docker.txt](requirements_docker.txt) for details.
+
+## Quick start
+
+**This is just an easy way to test Fooocus. Please find more information in the [notes](#notes).**
+
+1. Clone this repository
+2. Build the image with `docker compose build` (building the image takes some time)
+3. Run the Docker container with `docker compose up`
+
+When you see the message `Use the app with http://0.0.0.0:7865/` in the console, you can access the URL in your browser.
+
+Your models and outputs are stored in the `fooocus-data` volume, which, depending on your OS, is usually located under `/var/lib/docker/volumes/`.
+
+## Details
+
+### Update the container manually
+
+When you keep using `docker compose up`, the container is not automatically updated to the latest version of Fooocus.
+Run `git pull` and then `docker compose build --no-cache` to build an image with the latest Fooocus version.
+You can then start it with `docker compose up`.
+
+### Import models, outputs
+If you want to import files from existing models or outputs folders, you can uncomment the following settings in [docker-compose.yml](docker-compose.yml):
+```
+#- ./models:/import/models # Once you import files, you don't need to mount again.
+#- ./outputs:/import/outputs # Once you import files, you don't need to mount again.
+```
+After running `docker compose up`, your files will be copied into `/content/data/models` and `/content/data/outputs`.
+Since `/content/data` is a persistent volume, your files will be kept even when you re-run `docker compose up --build` without the above volume settings.
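+
+For reference, with both lines uncommented, the `volumes` section of the `app` service in [docker-compose.yml](docker-compose.yml) would look roughly like this (a sketch; adjust the host paths to wherever your model and output files live):
+```
+    volumes:
+      - fooocus-data:/content/data
+      - ./models:/import/models
+      - ./outputs:/import/outputs
+```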
+
+
+### Paths inside the container
+
+|Path|Details|
+|-|-|
+|/content/app|Folder where the application is stored|
+|/content/app/models.org|Original 'models' folder. Files are copied to '/content/app/models' (which is symlinked to '/content/data/models') every time the container boots. Existing files will not be overwritten.|
+|/content/data|Persistent volume mount point|
+|/content/data/models|The folder is symlinked to '/content/app/models'|
+|/content/data/outputs|The folder is symlinked to '/content/app/outputs'|
+
+### Environments
+
+You can change `config.txt` parameters by using environment variables.
+**Environment variables take priority over the values defined in `config.txt`, and the resulting values are saved to `config_modification_tutorial.txt`.**
+
+The following environment variables are specific to the Docker setup. They are used by 'entrypoint.sh' and the application at startup:
+|Environment|Details|
+|-|-|
+|DATADIR|Location of the data directory (default '/content/data')|
+|CMDARGS|Arguments for [launch.py](launch.py), which is called by [entrypoint.sh](entrypoint.sh)|
+|config_path|'config.txt' location|
+|config_example_path|'config_modification_tutorial.txt' location|
+
+You can also use the same JSON key names and values explained in 'config_modification_tutorial.txt' as environment variables.
+See the examples in [docker-compose.yml](docker-compose.yml).
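+
+For example, a `config.txt` key such as `default_performance` could be overridden by adding it to the `environment` section of the `app` service (a sketch; the first two entries are already part of the shipped compose file, only the last line is new):
+```
+    environment:
+      - CMDARGS=--listen
+      - DATADIR=/content/data
+      - default_performance=Quality
+```
+Such a value is read via `os.getenv` in `modules/config.py` and takes precedence over what is stored in `config.txt`.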
+
+## Notes
+
+- Please keep 'path_outputs' under '/content/app'. Otherwise, you may get an error when you open the history log.
+- Docker on Mac/Windows can suffer from slow volume access when you use "bind mount" volumes. Please refer to [this article](https://docs.docker.com/storage/volumes/#use-a-volume-with-docker-compose) on how to use a named volume instead of a "bind mount".
+- The MPS backend (Metal Performance Shaders, Apple Silicon M1/M2/etc.) is not yet supported in Docker, see https://github.com/pytorch/pytorch/issues/81224
+- You can also use `docker compose up -d` to start the container detached and connect to the logs with `docker compose logs -f`. This way you can also close the terminal and keep the container running.
\ No newline at end of file
diff --git a/entrypoint.sh b/entrypoint.sh
new file mode 100755
index 00000000..d0dba09c
--- /dev/null
+++ b/entrypoint.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+ORIGINALDIR=/content/app
+# Use DATADIR from the environment if it is set, otherwise fall back to the default
+[[ x"${DATADIR}" == "x" ]] && DATADIR=/content/data
+
+# Make persistent dir from original dir
+function mklink () {
+ mkdir -p $DATADIR/$1
+ ln -s $DATADIR/$1 $ORIGINALDIR
+}
+
+# Copy old files from import dir
+function import () {
+ (test -d /import/$1 && cd /import/$1 && cp -Rpn . $DATADIR/$1/)
+}
+
+cd $ORIGINALDIR
+
+# models
+mklink models
+# Copy original files
+(cd $ORIGINALDIR/models.org && cp -Rpn . $ORIGINALDIR/models/)
+# Import old files
+import models
+
+# outputs
+mklink outputs
+# Import old files
+import outputs
+
+# Start application
+python launch.py "$@"
diff --git a/extras/expansion.py b/extras/expansion.py
index c1b59b8a..34c1ee8d 100644
--- a/extras/expansion.py
+++ b/extras/expansion.py
@@ -112,6 +112,9 @@ class FooocusExpansion:
max_token_length = 75 * int(math.ceil(float(current_token_length) / 75.0))
max_new_tokens = max_token_length - current_token_length
+ if max_new_tokens == 0:
+ return prompt[:-1]
+
# https://huggingface.co/blog/introducing-csearch
# https://huggingface.co/docs/transformers/generation_strategies
features = self.model.generate(**tokenized_kwargs,
diff --git a/extras/preprocessors.py b/extras/preprocessors.py
index 798fe15d..0aa83109 100644
--- a/extras/preprocessors.py
+++ b/extras/preprocessors.py
@@ -1,27 +1,26 @@
import cv2
import numpy as np
-import modules.advanced_parameters as advanced_parameters
-def centered_canny(x: np.ndarray):
+def centered_canny(x: np.ndarray, canny_low_threshold, canny_high_threshold):
assert isinstance(x, np.ndarray)
assert x.ndim == 2 and x.dtype == np.uint8
- y = cv2.Canny(x, int(advanced_parameters.canny_low_threshold), int(advanced_parameters.canny_high_threshold))
+ y = cv2.Canny(x, int(canny_low_threshold), int(canny_high_threshold))
y = y.astype(np.float32) / 255.0
return y
-def centered_canny_color(x: np.ndarray):
+def centered_canny_color(x: np.ndarray, canny_low_threshold, canny_high_threshold):
assert isinstance(x, np.ndarray)
assert x.ndim == 3 and x.shape[2] == 3
- result = [centered_canny(x[..., i]) for i in range(3)]
+ result = [centered_canny(x[..., i], canny_low_threshold, canny_high_threshold) for i in range(3)]
result = np.stack(result, axis=2)
return result
-def pyramid_canny_color(x: np.ndarray):
+def pyramid_canny_color(x: np.ndarray, canny_low_threshold, canny_high_threshold):
assert isinstance(x, np.ndarray)
assert x.ndim == 3 and x.shape[2] == 3
@@ -31,7 +30,7 @@ def pyramid_canny_color(x: np.ndarray):
for k in [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
Hs, Ws = int(H * k), int(W * k)
small = cv2.resize(x, (Ws, Hs), interpolation=cv2.INTER_AREA)
- edge = centered_canny_color(small)
+ edge = centered_canny_color(small, canny_low_threshold, canny_high_threshold)
if acc_edge is None:
acc_edge = edge
else:
@@ -54,11 +53,11 @@ def norm255(x, low=4, high=96):
return x * 255.0
-def canny_pyramid(x):
+def canny_pyramid(x, canny_low_threshold, canny_high_threshold):
# For some reasons, SAI's Control-lora Canny seems to be trained on canny maps with non-standard resolutions.
# Then we use pyramid to use all resolutions to avoid missing any structure in specific resolutions.
- color_canny = pyramid_canny_color(x)
+ color_canny = pyramid_canny_color(x, canny_low_threshold, canny_high_threshold)
result = np.sum(color_canny, axis=2)
return norm255(result, low=1, high=99).clip(0, 255).astype(np.uint8)
diff --git a/fooocus_version.py b/fooocus_version.py
index 91c2ddda..d4b750f9 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.1.865'
+version = '2.2.0'
diff --git a/language/en.json b/language/en.json
index fd40ca2f..cb5603f9 100644
--- a/language/en.json
+++ b/language/en.json
@@ -48,6 +48,8 @@
"Describing what you do not want to see.": "Describing what you do not want to see.",
"Random": "Random",
"Seed": "Seed",
+ "Disable seed increment": "Disable seed increment",
+ "Disable automatic seed increment when image number is > 1.": "Disable automatic seed increment when image number is > 1.",
"\ud83d\udcda History Log": "\uD83D\uDCDA History Log",
"Image Style": "Image Style",
"Fooocus V2": "Fooocus V2",
@@ -342,6 +344,10 @@
"Forced Overwrite of Denoising Strength of \"Vary\"": "Forced Overwrite of Denoising Strength of \"Vary\"",
"Set as negative number to disable. For developer debugging.": "Set as negative number to disable. For developer debugging.",
"Forced Overwrite of Denoising Strength of \"Upscale\"": "Forced Overwrite of Denoising Strength of \"Upscale\"",
+ "Disable Preview": "Disable Preview",
+ "Disable preview during generation.": "Disable preview during generation.",
+ "Disable Intermediate Results": "Disable Intermediate Results",
+ "Disable intermediate results during generation, only show final gallery.": "Disable intermediate results during generation, only show final gallery.",
"Inpaint Engine": "Inpaint Engine",
"v1": "v1",
"Version of Fooocus inpaint model": "Version of Fooocus inpaint model",
@@ -368,5 +374,12 @@
"* Powered by Fooocus Inpaint Engine (beta)": "* Powered by Fooocus Inpaint Engine (beta)",
"Fooocus Enhance": "Fooocus Enhance",
"Fooocus Cinematic": "Fooocus Cinematic",
- "Fooocus Sharp": "Fooocus Sharp"
+ "Fooocus Sharp": "Fooocus Sharp",
+ "Drag any image generated by Fooocus here": "Drag any image generated by Fooocus here",
+ "Metadata": "Metadata",
+ "Apply Metadata": "Apply Metadata",
+ "Metadata Scheme": "Metadata Scheme",
+ "Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.",
+ "fooocus (json)": "fooocus (json)",
+ "a1111 (plain text)": "a1111 (plain text)"
}
\ No newline at end of file
diff --git a/launch.py b/launch.py
index db174f54..4269f1fc 100644
--- a/launch.py
+++ b/launch.py
@@ -68,7 +68,6 @@ vae_approx_filenames = [
'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors')
]
-
def ini_args():
from args_manager import args
return args
@@ -101,9 +100,9 @@ def download_models():
return
if not args.always_download_new_model:
- if not os.path.exists(os.path.join(config.path_checkpoints, config.default_base_model_name)):
+ if not os.path.exists(os.path.join(config.paths_checkpoints[0], config.default_base_model_name)):
for alternative_model_name in config.previous_default_models:
- if os.path.exists(os.path.join(config.path_checkpoints, alternative_model_name)):
+ if os.path.exists(os.path.join(config.paths_checkpoints[0], alternative_model_name)):
print(f'You do not have [{config.default_base_model_name}] but you have [{alternative_model_name}].')
print(f'Fooocus will use [{alternative_model_name}] to avoid downloading new models, '
f'but you are not using latest models.')
@@ -113,11 +112,11 @@ def download_models():
break
for file_name, url in config.checkpoint_downloads.items():
- load_file_from_url(url=url, model_dir=config.path_checkpoints, file_name=file_name)
+ load_file_from_url(url=url, model_dir=config.paths_checkpoints[0], file_name=file_name)
for file_name, url in config.embeddings_downloads.items():
load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name)
for file_name, url in config.lora_downloads.items():
- load_file_from_url(url=url, model_dir=config.path_loras, file_name=file_name)
+ load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name)
return
diff --git a/ldm_patched/modules/args_parser.py b/ldm_patched/modules/args_parser.py
index e5b84dc1..0c6165a7 100644
--- a/ldm_patched/modules/args_parser.py
+++ b/ldm_patched/modules/args_parser.py
@@ -100,8 +100,7 @@ vram_group.add_argument("--always-high-vram", action="store_true")
vram_group.add_argument("--always-normal-vram", action="store_true")
vram_group.add_argument("--always-low-vram", action="store_true")
vram_group.add_argument("--always-no-vram", action="store_true")
-vram_group.add_argument("--always-cpu", action="store_true")
-
+vram_group.add_argument("--always-cpu", type=int, nargs="?", metavar="CPU_NUM_THREADS", const=-1)
parser.add_argument("--always-offload-from-vram", action="store_true")
parser.add_argument("--pytorch-deterministic", action="store_true")
diff --git a/ldm_patched/modules/model_management.py b/ldm_patched/modules/model_management.py
index 6f88579d..840d79a0 100644
--- a/ldm_patched/modules/model_management.py
+++ b/ldm_patched/modules/model_management.py
@@ -60,6 +60,9 @@ except:
pass
if args.always_cpu:
+ if args.always_cpu > 0:
+ torch.set_num_threads(args.always_cpu)
+ print(f"Running on {torch.get_num_threads()} CPU threads")
cpu_state = CPUState.CPU
def is_intel_xpu():
diff --git a/modules/advanced_parameters.py b/modules/advanced_parameters.py
deleted file mode 100644
index 0caa3eec..00000000
--- a/modules/advanced_parameters.py
+++ /dev/null
@@ -1,33 +0,0 @@
-disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \
- scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \
- overwrite_vary_strength, overwrite_upscale_strength, \
- mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \
- debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \
- refiner_swap_method, \
- freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \
- debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, \
- inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate = [None] * 35
-
-
-def set_all_advanced_parameters(*args):
- global disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \
- scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \
- overwrite_vary_strength, overwrite_upscale_strength, \
- mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \
- debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \
- refiner_swap_method, \
- freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \
- debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, \
- inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate
-
- disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \
- scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \
- overwrite_vary_strength, overwrite_upscale_strength, \
- mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \
- debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \
- refiner_swap_method, \
- freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \
- debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, \
- inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate = args
-
- return
diff --git a/modules/async_worker.py b/modules/async_worker.py
index 40abb7fa..2c029cfb 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -1,11 +1,15 @@
import threading
+from modules.patch import PatchSettings, patch_settings, patch_all
+patch_all()
class AsyncTask:
def __init__(self, args):
self.args = args
self.yields = []
self.results = []
+ self.last_stop = False
+ self.processing = False
async_tasks = []
@@ -14,6 +18,7 @@ async_tasks = []
def worker():
global async_tasks
+ import os
import traceback
import math
import numpy as np
@@ -31,17 +36,22 @@ def worker():
import extras.preprocessors as preprocessors
import modules.inpaint_worker as inpaint_worker
import modules.constants as constants
- import modules.advanced_parameters as advanced_parameters
import extras.ip_adapter as ip_adapter
import extras.face_crop
import fooocus_version
+ import args_manager
- from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion
+ from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion, apply_arrays
from modules.private_logger import log
from extras.expansion import safe_str
from modules.util import remove_empty_str, HWC3, resize_image, \
get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, ordinal_suffix
from modules.upscaler import perform_upscale
+ from modules.flags import Performance
+ from modules.meta_parser import get_metadata_parser, MetadataScheme
+
+ pid = os.getpid()
+ print(f'Started worker with PID {pid}')
try:
async_gradio_app = shared.gradio_root
@@ -69,9 +79,6 @@ def worker():
return
def build_image_wall(async_task):
- if not advanced_parameters.generate_image_grid:
- return
-
results = async_task.results
if len(results) < 2:
@@ -111,10 +118,19 @@ def worker():
async_task.results = async_task.results + [wall]
return
+ def apply_enabled_loras(loras):
+ enabled_loras = []
+ for lora_enabled, lora_model, lora_weight in loras:
+ if lora_enabled:
+ enabled_loras.append([lora_model, lora_weight])
+
+ return enabled_loras
+
@torch.no_grad()
@torch.inference_mode()
def handler(async_task):
execution_start_time = time.perf_counter()
+ async_task.processing = True
args = async_task.args
args.reverse()
@@ -122,16 +138,17 @@ def worker():
prompt = args.pop()
negative_prompt = args.pop()
style_selections = args.pop()
- performance_selection = args.pop()
+ performance_selection = Performance(args.pop())
aspect_ratios_selection = args.pop()
image_number = args.pop()
+ output_format = args.pop()
image_seed = args.pop()
sharpness = args.pop()
guidance_scale = args.pop()
base_model_name = args.pop()
refiner_model_name = args.pop()
refiner_switch = args.pop()
- loras = [[str(args.pop()), float(args.pop())] for _ in range(5)]
+ loras = apply_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop()), ] for _ in range(modules.config.default_max_lora_number)])
input_image_checkbox = args.pop()
current_tab = args.pop()
uov_method = args.pop()
@@ -141,8 +158,48 @@ def worker():
inpaint_additional_prompt = args.pop()
inpaint_mask_image_upload = args.pop()
+ disable_preview = args.pop()
+ disable_intermediate_results = args.pop()
+ disable_seed_increment = args.pop()
+ adm_scaler_positive = args.pop()
+ adm_scaler_negative = args.pop()
+ adm_scaler_end = args.pop()
+ adaptive_cfg = args.pop()
+ sampler_name = args.pop()
+ scheduler_name = args.pop()
+ overwrite_step = args.pop()
+ overwrite_switch = args.pop()
+ overwrite_width = args.pop()
+ overwrite_height = args.pop()
+ overwrite_vary_strength = args.pop()
+ overwrite_upscale_strength = args.pop()
+ mixing_image_prompt_and_vary_upscale = args.pop()
+ mixing_image_prompt_and_inpaint = args.pop()
+ debugging_cn_preprocessor = args.pop()
+ skipping_cn_preprocessor = args.pop()
+ canny_low_threshold = args.pop()
+ canny_high_threshold = args.pop()
+ refiner_swap_method = args.pop()
+ controlnet_softness = args.pop()
+ freeu_enabled = args.pop()
+ freeu_b1 = args.pop()
+ freeu_b2 = args.pop()
+ freeu_s1 = args.pop()
+ freeu_s2 = args.pop()
+ debugging_inpaint_preprocessor = args.pop()
+ inpaint_disable_initial_latent = args.pop()
+ inpaint_engine = args.pop()
+ inpaint_strength = args.pop()
+ inpaint_respective_field = args.pop()
+ inpaint_mask_upload_checkbox = args.pop()
+ invert_mask_checkbox = args.pop()
+ inpaint_erode_or_dilate = args.pop()
+
+ save_metadata_to_images = args.pop() if not args_manager.args.disable_metadata else False
+ metadata_scheme = MetadataScheme(args.pop()) if not args_manager.args.disable_metadata else MetadataScheme.FOOOCUS
+
cn_tasks = {x: [] for x in flags.ip_list}
- for _ in range(4):
+ for _ in range(flags.controlnet_image_count):
cn_img = args.pop()
cn_stop = args.pop()
cn_weight = args.pop()
@@ -167,17 +224,9 @@ def worker():
print(f'Refiner disabled because base model and refiner are same.')
refiner_model_name = 'None'
- assert performance_selection in ['Speed', 'Quality', 'Extreme Speed']
+ steps = performance_selection.steps()
- steps = 30
-
- if performance_selection == 'Speed':
- steps = 30
-
- if performance_selection == 'Quality':
- steps = 60
-
- if performance_selection == 'Extreme Speed':
+ if performance_selection == Performance.EXTREME_SPEED:
print('Enter LCM mode.')
progressbar(async_task, 1, 'Downloading LCM components ...')
loras += [(modules.config.downloading_sdxl_lcm_lora(), 1.0)]
@@ -186,30 +235,32 @@ def worker():
print(f'Refiner disabled in LCM mode.')
refiner_model_name = 'None'
- sampler_name = advanced_parameters.sampler_name = 'lcm'
- scheduler_name = advanced_parameters.scheduler_name = 'lcm'
- modules.patch.sharpness = sharpness = 0.0
- cfg_scale = guidance_scale = 1.0
- modules.patch.adaptive_cfg = advanced_parameters.adaptive_cfg = 1.0
+ sampler_name = 'lcm'
+ scheduler_name = 'lcm'
+ sharpness = 0.0
+ guidance_scale = 1.0
+ adaptive_cfg = 1.0
refiner_switch = 1.0
- modules.patch.positive_adm_scale = advanced_parameters.adm_scaler_positive = 1.0
- modules.patch.negative_adm_scale = advanced_parameters.adm_scaler_negative = 1.0
- modules.patch.adm_scaler_end = advanced_parameters.adm_scaler_end = 0.0
- steps = 8
+ adm_scaler_positive = 1.0
+ adm_scaler_negative = 1.0
+ adm_scaler_end = 0.0
- modules.patch.adaptive_cfg = advanced_parameters.adaptive_cfg
- print(f'[Parameters] Adaptive CFG = {modules.patch.adaptive_cfg}')
-
- modules.patch.sharpness = sharpness
- print(f'[Parameters] Sharpness = {modules.patch.sharpness}')
-
- modules.patch.positive_adm_scale = advanced_parameters.adm_scaler_positive
- modules.patch.negative_adm_scale = advanced_parameters.adm_scaler_negative
- modules.patch.adm_scaler_end = advanced_parameters.adm_scaler_end
+ print(f'[Parameters] Adaptive CFG = {adaptive_cfg}')
+ print(f'[Parameters] Sharpness = {sharpness}')
+ print(f'[Parameters] ControlNet Softness = {controlnet_softness}')
print(f'[Parameters] ADM Scale = '
- f'{modules.patch.positive_adm_scale} : '
- f'{modules.patch.negative_adm_scale} : '
- f'{modules.patch.adm_scaler_end}')
+ f'{adm_scaler_positive} : '
+ f'{adm_scaler_negative} : '
+ f'{adm_scaler_end}')
+
+ patch_settings[pid] = PatchSettings(
+ sharpness,
+ adm_scaler_end,
+ adm_scaler_positive,
+ adm_scaler_negative,
+ controlnet_softness,
+ adaptive_cfg
+ )
cfg_scale = float(guidance_scale)
print(f'[Parameters] CFG = {cfg_scale}')
@@ -222,10 +273,9 @@ def worker():
width, height = int(width), int(height)
skip_prompt_processing = False
- refiner_swap_method = advanced_parameters.refiner_swap_method
inpaint_worker.current_task = None
- inpaint_parameterized = advanced_parameters.inpaint_engine != 'None'
+ inpaint_parameterized = inpaint_engine != 'None'
inpaint_image = None
inpaint_mask = None
inpaint_head_model_path = None
@@ -239,15 +289,12 @@ def worker():
seed = int(image_seed)
print(f'[Parameters] Seed = {seed}')
- sampler_name = advanced_parameters.sampler_name
- scheduler_name = advanced_parameters.scheduler_name
-
goals = []
tasks = []
if input_image_checkbox:
if (current_tab == 'uov' or (
- current_tab == 'ip' and advanced_parameters.mixing_image_prompt_and_vary_upscale)) \
+ current_tab == 'ip' and mixing_image_prompt_and_vary_upscale)) \
and uov_method != flags.disabled and uov_input_image is not None:
uov_input_image = HWC3(uov_input_image)
if 'vary' in uov_method:
@@ -257,26 +304,17 @@ def worker():
if 'fast' in uov_method:
skip_prompt_processing = True
else:
- steps = 18
-
- if performance_selection == 'Speed':
- steps = 18
-
- if performance_selection == 'Quality':
- steps = 36
-
- if performance_selection == 'Extreme Speed':
- steps = 8
+ steps = performance_selection.steps_uov()
progressbar(async_task, 1, 'Downloading upscale models ...')
modules.config.downloading_upscale_model()
if (current_tab == 'inpaint' or (
- current_tab == 'ip' and advanced_parameters.mixing_image_prompt_and_inpaint)) \
+ current_tab == 'ip' and mixing_image_prompt_and_inpaint)) \
and isinstance(inpaint_input_image, dict):
inpaint_image = inpaint_input_image['image']
inpaint_mask = inpaint_input_image['mask'][:, :, 0]
-
- if advanced_parameters.inpaint_mask_upload_checkbox:
+
+ if inpaint_mask_upload_checkbox:
if isinstance(inpaint_mask_image_upload, np.ndarray):
if inpaint_mask_image_upload.ndim == 3:
H, W, C = inpaint_image.shape
@@ -285,10 +323,10 @@ def worker():
inpaint_mask_image_upload = (inpaint_mask_image_upload > 127).astype(np.uint8) * 255
inpaint_mask = np.maximum(inpaint_mask, inpaint_mask_image_upload)
- if int(advanced_parameters.inpaint_erode_or_dilate) != 0:
- inpaint_mask = erode_or_dilate(inpaint_mask, advanced_parameters.inpaint_erode_or_dilate)
+ if int(inpaint_erode_or_dilate) != 0:
+ inpaint_mask = erode_or_dilate(inpaint_mask, inpaint_erode_or_dilate)
- if advanced_parameters.invert_mask_checkbox:
+ if invert_mask_checkbox:
inpaint_mask = 255 - inpaint_mask
inpaint_image = HWC3(inpaint_image)
@@ -299,7 +337,7 @@ def worker():
if inpaint_parameterized:
progressbar(async_task, 1, 'Downloading inpainter ...')
inpaint_head_model_path, inpaint_patch_model_path = modules.config.downloading_inpaint_models(
- advanced_parameters.inpaint_engine)
+ inpaint_engine)
base_model_additional_loras += [(inpaint_patch_model_path, 1.0)]
print(f'[Inpaint] Current inpaint model is {inpaint_patch_model_path}')
if refiner_model_name == 'None':
@@ -315,8 +353,8 @@ def worker():
prompt = inpaint_additional_prompt + '\n' + prompt
goals.append('inpaint')
if current_tab == 'ip' or \
- advanced_parameters.mixing_image_prompt_and_inpaint or \
- advanced_parameters.mixing_image_prompt_and_vary_upscale:
+ mixing_image_prompt_and_vary_upscale or \
+ mixing_image_prompt_and_inpaint:
goals.append('cn')
progressbar(async_task, 1, 'Downloading control models ...')
if len(cn_tasks[flags.cn_canny]) > 0:
@@ -335,19 +373,19 @@ def worker():
ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_path)
ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_face_path)
- if advanced_parameters.overwrite_step > 0:
- steps = advanced_parameters.overwrite_step
+ if overwrite_step > 0:
+ steps = overwrite_step
switch = int(round(steps * refiner_switch))
- if advanced_parameters.overwrite_switch > 0:
- switch = advanced_parameters.overwrite_switch
+ if overwrite_switch > 0:
+ switch = overwrite_switch
- if advanced_parameters.overwrite_width > 0:
- width = advanced_parameters.overwrite_width
+ if overwrite_width > 0:
+ width = overwrite_width
- if advanced_parameters.overwrite_height > 0:
- height = advanced_parameters.overwrite_height
+ if overwrite_height > 0:
+ height = overwrite_height
print(f'[Parameters] Sampler = {sampler_name} - {scheduler_name}')
print(f'[Parameters] Steps = {steps} - {switch}')
@@ -376,11 +414,16 @@ def worker():
progressbar(async_task, 3, 'Processing prompts ...')
tasks = []
+
for i in range(image_number):
- task_seed = (seed + i) % (constants.MAX_SEED + 1) # randint is inclusive, % is not
- task_rng = random.Random(task_seed) # may bind to inpaint noise in the future
+ if disable_seed_increment:
+ task_seed = seed
+ else:
+ task_seed = (seed + i) % (constants.MAX_SEED + 1) # randint is inclusive, % is not
+ task_rng = random.Random(task_seed) # may bind to inpaint noise in the future
task_prompt = apply_wildcards(prompt, task_rng)
+ task_prompt = apply_arrays(task_prompt, i)
task_negative_prompt = apply_wildcards(negative_prompt, task_rng)
task_extra_positive_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_positive_prompts]
task_extra_negative_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_negative_prompts]
@@ -446,8 +489,8 @@ def worker():
denoising_strength = 0.5
if 'strong' in uov_method:
denoising_strength = 0.85
- if advanced_parameters.overwrite_vary_strength > 0:
- denoising_strength = advanced_parameters.overwrite_vary_strength
+ if overwrite_vary_strength > 0:
+ denoising_strength = overwrite_vary_strength
shape_ceil = get_image_shape_ceil(uov_input_image)
if shape_ceil < 1024:
@@ -511,15 +554,15 @@ def worker():
if direct_return:
d = [('Upscale (Fast)', '2x')]
- log(uov_input_image, d)
- yield_result(async_task, uov_input_image, do_not_show_finished_images=True)
+ uov_input_image_path = log(uov_input_image, d, output_format)
+ yield_result(async_task, uov_input_image_path, do_not_show_finished_images=True)
return
tiled = True
denoising_strength = 0.382
- if advanced_parameters.overwrite_upscale_strength > 0:
- denoising_strength = advanced_parameters.overwrite_upscale_strength
+ if overwrite_upscale_strength > 0:
+ denoising_strength = overwrite_upscale_strength
initial_pixels = core.numpy_to_pytorch(uov_input_image)
progressbar(async_task, 13, 'VAE encoding ...')
@@ -563,19 +606,19 @@ def worker():
inpaint_image = np.ascontiguousarray(inpaint_image.copy())
inpaint_mask = np.ascontiguousarray(inpaint_mask.copy())
- advanced_parameters.inpaint_strength = 1.0
- advanced_parameters.inpaint_respective_field = 1.0
+ inpaint_strength = 1.0
+ inpaint_respective_field = 1.0
- denoising_strength = advanced_parameters.inpaint_strength
+ denoising_strength = inpaint_strength
inpaint_worker.current_task = inpaint_worker.InpaintWorker(
image=inpaint_image,
mask=inpaint_mask,
use_fill=denoising_strength > 0.99,
- k=advanced_parameters.inpaint_respective_field
+ k=inpaint_respective_field
)
- if advanced_parameters.debugging_inpaint_preprocessor:
+ if debugging_inpaint_preprocessor:
yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing(),
do_not_show_finished_images=True)
return
@@ -621,7 +664,7 @@ def worker():
model=pipeline.final_unet
)
- if not advanced_parameters.inpaint_disable_initial_latent:
+ if not inpaint_disable_initial_latent:
initial_latent = {'samples': latent_fill}
B, C, H, W = latent_fill.shape
@@ -634,24 +677,24 @@ def worker():
cn_img, cn_stop, cn_weight = task
cn_img = resize_image(HWC3(cn_img), width=width, height=height)
- if not advanced_parameters.skipping_cn_preprocessor:
- cn_img = preprocessors.canny_pyramid(cn_img)
+ if not skipping_cn_preprocessor:
+ cn_img = preprocessors.canny_pyramid(cn_img, canny_low_threshold, canny_high_threshold)
cn_img = HWC3(cn_img)
task[0] = core.numpy_to_pytorch(cn_img)
- if advanced_parameters.debugging_cn_preprocessor:
+ if debugging_cn_preprocessor:
yield_result(async_task, cn_img, do_not_show_finished_images=True)
return
for task in cn_tasks[flags.cn_cpds]:
cn_img, cn_stop, cn_weight = task
cn_img = resize_image(HWC3(cn_img), width=width, height=height)
- if not advanced_parameters.skipping_cn_preprocessor:
+ if not skipping_cn_preprocessor:
cn_img = preprocessors.cpds(cn_img)
cn_img = HWC3(cn_img)
task[0] = core.numpy_to_pytorch(cn_img)
- if advanced_parameters.debugging_cn_preprocessor:
+ if debugging_cn_preprocessor:
yield_result(async_task, cn_img, do_not_show_finished_images=True)
return
for task in cn_tasks[flags.cn_ip]:
@@ -662,21 +705,21 @@ def worker():
cn_img = resize_image(cn_img, width=224, height=224, resize_mode=0)
task[0] = ip_adapter.preprocess(cn_img, ip_adapter_path=ip_adapter_path)
- if advanced_parameters.debugging_cn_preprocessor:
+ if debugging_cn_preprocessor:
yield_result(async_task, cn_img, do_not_show_finished_images=True)
return
for task in cn_tasks[flags.cn_ip_face]:
cn_img, cn_stop, cn_weight = task
cn_img = HWC3(cn_img)
- if not advanced_parameters.skipping_cn_preprocessor:
+ if not skipping_cn_preprocessor:
cn_img = extras.face_crop.crop_image(cn_img)
# https://github.com/tencent-ailab/IP-Adapter/blob/d580c50a291566bbf9fc7ac0f760506607297e6d/README.md?plain=1#L75
cn_img = resize_image(cn_img, width=224, height=224, resize_mode=0)
task[0] = ip_adapter.preprocess(cn_img, ip_adapter_path=ip_adapter_face_path)
- if advanced_parameters.debugging_cn_preprocessor:
+ if debugging_cn_preprocessor:
yield_result(async_task, cn_img, do_not_show_finished_images=True)
return
@@ -685,14 +728,14 @@ def worker():
if len(all_ip_tasks) > 0:
pipeline.final_unet = ip_adapter.patch_model(pipeline.final_unet, all_ip_tasks)
- if advanced_parameters.freeu_enabled:
+ if freeu_enabled:
print(f'FreeU is enabled!')
pipeline.final_unet = core.apply_freeu(
pipeline.final_unet,
- advanced_parameters.freeu_b1,
- advanced_parameters.freeu_b2,
- advanced_parameters.freeu_s1,
- advanced_parameters.freeu_s2
+ freeu_b1,
+ freeu_b2,
+ freeu_s1,
+ freeu_s2
)
all_steps = steps * image_number
@@ -738,6 +781,8 @@ def worker():
execution_start_time = time.perf_counter()
try:
+ if async_task.last_stop is not False:
+ ldm_patched.model_management.interrupt_current_processing()
positive_cond, negative_cond = task['c'], task['uc']
if 'cn' in goals:
@@ -765,7 +810,8 @@ def worker():
denoise=denoising_strength,
tiled=tiled,
cfg_scale=cfg_scale,
- refiner_swap_method=refiner_swap_method
+ refiner_swap_method=refiner_swap_method,
+ disable_preview=disable_preview
)
del task['c'], task['uc'], positive_cond, negative_cond # Save memory
@@ -773,37 +819,58 @@ def worker():
if inpaint_worker.current_task is not None:
imgs = [inpaint_worker.current_task.post_process(x) for x in imgs]
+ img_paths = []
for x in imgs:
- d = [
- ('Prompt', task['log_positive_prompt']),
- ('Negative Prompt', task['log_negative_prompt']),
- ('Fooocus V2 Expansion', task['expansion']),
- ('Styles', str(raw_style_selections)),
- ('Performance', performance_selection),
- ('Resolution', str((width, height))),
- ('Sharpness', sharpness),
- ('Guidance Scale', guidance_scale),
- ('ADM Guidance', str((
- modules.patch.positive_adm_scale,
- modules.patch.negative_adm_scale,
- modules.patch.adm_scaler_end))),
- ('Base Model', base_model_name),
- ('Refiner Model', refiner_model_name),
- ('Refiner Switch', refiner_switch),
- ('Sampler', sampler_name),
- ('Scheduler', scheduler_name),
- ('Seed', task['task_seed']),
- ]
+ d = [('Prompt', 'prompt', task['log_positive_prompt']),
+ ('Negative Prompt', 'negative_prompt', task['log_negative_prompt']),
+ ('Fooocus V2 Expansion', 'prompt_expansion', task['expansion']),
+ ('Styles', 'styles', str(raw_style_selections)),
+ ('Performance', 'performance', performance_selection.value),
+ ('Resolution', 'resolution', str((width, height))),
+ ('Guidance Scale', 'guidance_scale', guidance_scale),
+ ('Sharpness', 'sharpness', sharpness),
+ ('ADM Guidance', 'adm_guidance', str((
+ modules.patch.patch_settings[pid].positive_adm_scale,
+ modules.patch.patch_settings[pid].negative_adm_scale,
+ modules.patch.patch_settings[pid].adm_scaler_end))),
+ ('Base Model', 'base_model', base_model_name),
+ ('Refiner Model', 'refiner_model', refiner_model_name),
+ ('Refiner Switch', 'refiner_switch', refiner_switch)]
+
+ if refiner_model_name != 'None':
+ if overwrite_switch > 0:
+ d.append(('Overwrite Switch', 'overwrite_switch', overwrite_switch))
+ if refiner_swap_method != flags.refiner_swap_method:
+ d.append(('Refiner Swap Method', 'refiner_swap_method', refiner_swap_method))
+ if modules.patch.patch_settings[pid].adaptive_cfg != modules.config.default_cfg_tsnr:
+ d.append(('CFG Mimicking from TSNR', 'adaptive_cfg', modules.patch.patch_settings[pid].adaptive_cfg))
+
+ d.append(('Sampler', 'sampler', sampler_name))
+ d.append(('Scheduler', 'scheduler', scheduler_name))
+ d.append(('Seed', 'seed', task['task_seed']))
+
+ if freeu_enabled:
+ d.append(('FreeU', 'freeu', str((freeu_b1, freeu_b2, freeu_s1, freeu_s2))))
+
+ metadata_parser = None
+ if save_metadata_to_images:
+ metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
+ metadata_parser.set_data(task['log_positive_prompt'], task['positive'],
+ task['log_negative_prompt'], task['negative'],
+ steps, base_model_name, refiner_model_name, loras)
+
for li, (n, w) in enumerate(loras):
if n != 'None':
- d.append((f'LoRA {li + 1}', f'{n} : {w}'))
- d.append(('Version', 'v' + fooocus_version.version))
- log(x, d)
+ d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}'))
- yield_result(async_task, imgs, do_not_show_finished_images=len(tasks) == 1)
+ d.append(('Version', 'version', 'Fooocus v' + fooocus_version.version))
+ img_paths.append(log(x, d, metadata_parser, output_format))
+
+ yield_result(async_task, img_paths, do_not_show_finished_images=len(tasks) == 1 or disable_intermediate_results)
except ldm_patched.modules.model_management.InterruptProcessingException as e:
- if shared.last_stop == 'skip':
+ if async_task.last_stop == 'skip':
print('User skipped')
+ async_task.last_stop = False
continue
else:
print('User stopped')
@@ -811,21 +878,27 @@ def worker():
execution_time = time.perf_counter() - execution_start_time
print(f'Generating and saving time: {execution_time:.2f} seconds')
-
+ async_task.processing = False
return
while True:
time.sleep(0.01)
if len(async_tasks) > 0:
task = async_tasks.pop(0)
+ generate_image_grid = task.args.pop(0)
+
try:
handler(task)
- build_image_wall(task)
+ if generate_image_grid:
+ build_image_wall(task)
task.yields.append(['finish', task.results])
pipeline.prepare_text_encoder(async_call=True)
except:
traceback.print_exc()
task.yields.append(['finish', task.results])
+ finally:
+ if pid in modules.patch.patch_settings:
+ del modules.patch.patch_settings[pid]
pass
diff --git a/modules/config.py b/modules/config.py
index 1f4e82eb..09c8fd7c 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -7,11 +7,19 @@ import modules.flags
import modules.sdxl_styles
from modules.model_loader import load_file_from_url
-from modules.util import get_files_from_folder
+from modules.util import get_files_from_folder, makedirs_with_log
+from modules.flags import Performance, MetadataScheme
+def get_config_path(key, default_value):
+ env = os.getenv(key)
+ if env is not None and isinstance(env, str):
+ print(f"Environment: {key} = {env}")
+ return env
+ else:
+ return os.path.abspath(default_value)
-config_path = os.path.abspath("./config.txt")
-config_example_path = os.path.abspath("config_modification_tutorial.txt")
+config_path = get_config_path('config_path', "./config.txt")
+config_example_path = get_config_path('config_example_path', "config_modification_tutorial.txt")
config_dict = {}
always_save_keys = []
visited_keys = []
@@ -107,14 +115,14 @@ def get_path_output() -> str:
Checking output path argument and overriding default path.
"""
global config_dict
- path_output = get_dir_or_set_default('path_outputs', '../outputs/')
+ path_output = get_dir_or_set_default('path_outputs', '../outputs/', make_directory=True)
if args_manager.args.output_path:
print(f'[CONFIG] Overriding config value path_outputs with {args_manager.args.output_path}')
config_dict['path_outputs'] = path_output = args_manager.args.output_path
return path_output
-def get_dir_or_set_default(key, default_value):
+def get_dir_or_set_default(key, default_value, as_array=False, make_directory=False):
global config_dict, visited_keys, always_save_keys
if key not in visited_keys:
@@ -123,20 +131,44 @@ def get_dir_or_set_default(key, default_value):
if key not in always_save_keys:
always_save_keys.append(key)
- v = config_dict.get(key, None)
- if isinstance(v, str) and os.path.exists(v) and os.path.isdir(v):
- return v
+ v = os.getenv(key)
+ if v is not None:
+ print(f"Environment: {key} = {v}")
+ config_dict[key] = v
+ else:
+ v = config_dict.get(key, None)
+
+ if isinstance(v, str):
+ if make_directory:
+ makedirs_with_log(v)
+ if os.path.exists(v) and os.path.isdir(v):
+ return v if not as_array else [v]
+ elif isinstance(v, list):
+ if make_directory:
+ for d in v:
+ makedirs_with_log(d)
+ if all([os.path.exists(d) and os.path.isdir(d) for d in v]):
+ return v
+
+ if v is not None:
+ print(f'Failed to load config key: {json.dumps({key:v})} is invalid or does not exist; will use {json.dumps({key:default_value})} instead.')
+ if isinstance(default_value, list):
+ dp = []
+ for path in default_value:
+ abs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), path))
+ dp.append(abs_path)
+ os.makedirs(abs_path, exist_ok=True)
else:
- if v is not None:
- print(f'Failed to load config key: {json.dumps({key:v})} is invalid or does not exist; will use {json.dumps({key:default_value})} instead.')
dp = os.path.abspath(os.path.join(os.path.dirname(__file__), default_value))
os.makedirs(dp, exist_ok=True)
- config_dict[key] = dp
- return dp
+ if as_array:
+ dp = [dp]
+ config_dict[key] = dp
+ return dp
-path_checkpoints = get_dir_or_set_default('path_checkpoints', '../models/checkpoints/')
-path_loras = get_dir_or_set_default('path_loras', '../models/loras/')
+paths_checkpoints = get_dir_or_set_default('path_checkpoints', ['../models/checkpoints/'], True)
+paths_loras = get_dir_or_set_default('path_loras', ['../models/loras/'], True)
path_embeddings = get_dir_or_set_default('path_embeddings', '../models/embeddings/')
path_vae_approx = get_dir_or_set_default('path_vae_approx', '../models/vae_approx/')
path_upscale_models = get_dir_or_set_default('path_upscale_models', '../models/upscale_models/')
@@ -146,13 +178,17 @@ path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vi
path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion')
path_outputs = get_path_output()
-
def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False):
global config_dict, visited_keys
if key not in visited_keys:
visited_keys.append(key)
+ v = os.getenv(key)
+ if v is not None:
+ print(f"Environment: {key} = {v}")
+ config_dict[key] = v
+
if key not in config_dict:
config_dict[key] = default_value
return default_value
@@ -190,6 +226,16 @@ default_refiner_switch = get_config_item_or_set_default(
default_value=0.8,
validator=lambda x: isinstance(x, numbers.Number) and 0 <= x <= 1
)
+default_loras_min_weight = get_config_item_or_set_default(
+ key='default_loras_min_weight',
+ default_value=-2,
+ validator=lambda x: isinstance(x, numbers.Number) and -10 <= x <= 10
+)
+default_loras_max_weight = get_config_item_or_set_default(
+ key='default_loras_max_weight',
+ default_value=2,
+ validator=lambda x: isinstance(x, numbers.Number) and -10 <= x <= 10
+)
default_loras = get_config_item_or_set_default(
key='default_loras',
default_value=[
@@ -216,6 +262,11 @@ default_loras = get_config_item_or_set_default(
],
validator=lambda x: isinstance(x, list) and all(len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number) for y in x)
)
+default_max_lora_number = get_config_item_or_set_default(
+ key='default_max_lora_number',
+ default_value=len(default_loras),
+ validator=lambda x: isinstance(x, int) and x >= 1
+)
default_cfg_scale = get_config_item_or_set_default(
key='default_cfg_scale',
default_value=7.0,
@@ -259,8 +310,8 @@ default_prompt = get_config_item_or_set_default(
)
default_performance = get_config_item_or_set_default(
key='default_performance',
- default_value='Speed',
- validator=lambda x: x in modules.flags.performance_selections
+ default_value=Performance.SPEED.value,
+ validator=lambda x: x in Performance.list()
)
default_advanced_checkbox = get_config_item_or_set_default(
key='default_advanced_checkbox',
@@ -272,6 +323,11 @@ default_max_image_number = get_config_item_or_set_default(
default_value=32,
validator=lambda x: isinstance(x, int) and x >= 1
)
+default_output_format = get_config_item_or_set_default(
+ key='default_output_format',
+ default_value='png',
+ validator=lambda x: x in modules.flags.output_formats
+)
default_image_number = get_config_item_or_set_default(
key='default_image_number',
default_value=2,
@@ -335,16 +391,34 @@ example_inpaint_prompts = get_config_item_or_set_default(
],
validator=lambda x: isinstance(x, list) and all(isinstance(v, str) for v in x)
)
+default_save_metadata_to_images = get_config_item_or_set_default(
+ key='default_save_metadata_to_images',
+ default_value=False,
+ validator=lambda x: isinstance(x, bool)
+)
+default_metadata_scheme = get_config_item_or_set_default(
+ key='default_metadata_scheme',
+ default_value=MetadataScheme.FOOOCUS.value,
+ validator=lambda x: x in [y[1] for y in modules.flags.metadata_scheme if y[1] == x]
+)
+metadata_created_by = get_config_item_or_set_default(
+ key='metadata_created_by',
+ default_value='',
+ validator=lambda x: isinstance(x, str)
+)
example_inpaint_prompts = [[x] for x in example_inpaint_prompts]
-config_dict["default_loras"] = default_loras = default_loras[:5] + [['None', 1.0] for _ in range(5 - len(default_loras))]
+config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [['None', 1.0] for _ in range(default_max_lora_number - len(default_loras))]
possible_preset_keys = [
"default_model",
"default_refiner",
"default_refiner_switch",
+ "default_loras_min_weight",
+ "default_loras_max_weight",
"default_loras",
+ "default_max_lora_number",
"default_cfg_scale",
"default_sample_sharpness",
"default_sampler",
@@ -354,6 +428,7 @@ possible_preset_keys = [
"default_prompt_negative",
"default_styles",
"default_aspect_ratio",
+ "default_save_metadata_to_images",
"checkpoint_downloads",
"embeddings_downloads",
"lora_downloads",
@@ -397,21 +472,23 @@ with open(config_example_path, "w", encoding="utf-8") as json_file:
'and there is no "," before the last "}". \n\n\n')
json.dump({k: config_dict[k] for k in visited_keys}, json_file, indent=4)
-
-os.makedirs(path_outputs, exist_ok=True)
-
model_filenames = []
lora_filenames = []
+sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors'
-def get_model_filenames(folder_path, name_filter=None):
- return get_files_from_folder(folder_path, ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch'], name_filter)
+def get_model_filenames(folder_paths, name_filter=None):
+ extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch']
+ files = []
+ for folder in folder_paths:
+ files += get_files_from_folder(folder, extensions, name_filter)
+ return files
def update_all_model_names():
global model_filenames, lora_filenames
- model_filenames = get_model_filenames(path_checkpoints)
- lora_filenames = get_model_filenames(path_loras)
+ model_filenames = get_model_filenames(paths_checkpoints)
+ lora_filenames = get_model_filenames(paths_loras)
return
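get_model_filenames now takes a list of folders rather than a single path, so checkpoints and LoRAs can be collected from several roots in one call. A minimal usage sketch (the paths below are made-up examples, not shipped defaults; name_filter is forwarded unchanged to get_files_from_folder):

    extra_checkpoints = get_model_filenames(
        ['/models/checkpoints', '/mnt/shared/sdxl-checkpoints'],
        name_filter='xl'
    )
    print(extra_checkpoints)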
@@ -456,10 +533,10 @@ def downloading_inpaint_models(v):
def downloading_sdxl_lcm_lora():
load_file_from_url(
url='https://huggingface.co/lllyasviel/misc/resolve/main/sdxl_lcm_lora.safetensors',
- model_dir=path_loras,
- file_name='sdxl_lcm_lora.safetensors'
+ model_dir=paths_loras[0],
+ file_name=sdxl_lcm_lora
)
- return 'sdxl_lcm_lora.safetensors'
+ return sdxl_lcm_lora
def downloading_controlnet_canny():
diff --git a/modules/core.py b/modules/core.py
index 989b8e32..bfc44966 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -1,8 +1,3 @@
-from modules.patch import patch_all
-
-patch_all()
-
-
import os
import einops
import torch
@@ -16,7 +11,6 @@ import ldm_patched.modules.controlnet
import modules.sample_hijack
import ldm_patched.modules.samplers
import ldm_patched.modules.latent_formats
-import modules.advanced_parameters
from ldm_patched.modules.sd import load_checkpoint_guess_config
from ldm_patched.contrib.external import VAEDecode, EmptyLatentImage, VAEEncode, VAEEncodeTiled, VAEDecodeTiled, \
@@ -24,6 +18,7 @@ from ldm_patched.contrib.external import VAEDecode, EmptyLatentImage, VAEEncode,
from ldm_patched.contrib.external_freelunch import FreeU_V2
from ldm_patched.modules.sample import prepare_mask
from modules.lora import match_lora
+from modules.util import get_file_from_folder_list
from ldm_patched.modules.lora import model_lora_keys_unet, model_lora_keys_clip
from modules.config import path_embeddings
from ldm_patched.contrib.external_model_advanced import ModelSamplingDiscrete
@@ -85,7 +80,7 @@ class StableDiffusionModel:
if os.path.exists(name):
lora_filename = name
else:
- lora_filename = os.path.join(modules.config.path_loras, name)
+ lora_filename = get_file_from_folder_list(name, modules.config.paths_loras)
if not os.path.exists(lora_filename):
print(f'Lora file not found: {lora_filename}')
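get_file_from_folder_list is imported from modules.util, but its body is not part of this diff. Judging by how it is used here and by the paths_loras[0] fallback in config.py, a plausible implementation could look like this sketch (the real helper in modules/util.py may differ):

    import os

    def get_file_from_folder_list(name, folders):
        # return the first folder that actually contains the file ...
        for folder in folders:
            filename = os.path.abspath(os.path.realpath(os.path.join(folder, name)))
            if os.path.isfile(filename):
                return filename
        # ... otherwise keep the old single-folder behaviour, anchored on the first entry
        return os.path.abspath(os.path.realpath(os.path.join(folders[0], name)))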
@@ -268,7 +263,7 @@ def get_previewer(model):
def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sampler_name='dpmpp_2m_sde_gpu',
scheduler='karras', denoise=1.0, disable_noise=False, start_step=None, last_step=None,
force_full_denoise=False, callback_function=None, refiner=None, refiner_switch=-1,
- previewer_start=None, previewer_end=None, sigmas=None, noise_mean=None):
+ previewer_start=None, previewer_end=None, sigmas=None, noise_mean=None, disable_preview=False):
if sigmas is not None:
sigmas = sigmas.clone().to(ldm_patched.modules.model_management.get_torch_device())
@@ -299,7 +294,7 @@ def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sa
def callback(step, x0, x, total_steps):
ldm_patched.modules.model_management.throw_exception_if_processing_interrupted()
y = None
- if previewer is not None and not modules.advanced_parameters.disable_preview:
+ if previewer is not None and not disable_preview:
y = previewer(x0, previewer_start + step, previewer_end)
if callback_function is not None:
callback_function(previewer_start + step, x0, x, previewer_end, y)
diff --git a/modules/default_pipeline.py b/modules/default_pipeline.py
index 6001d97f..f8edfae1 100644
--- a/modules/default_pipeline.py
+++ b/modules/default_pipeline.py
@@ -11,6 +11,7 @@ from extras.expansion import FooocusExpansion
from ldm_patched.modules.model_base import SDXL, SDXLRefiner
from modules.sample_hijack import clip_separate
+from modules.util import get_file_from_folder_list
model_base = core.StableDiffusionModel()
@@ -60,7 +61,7 @@ def assert_model_integrity():
def refresh_base_model(name):
global model_base
- filename = os.path.abspath(os.path.realpath(os.path.join(modules.config.path_checkpoints, name)))
+ filename = get_file_from_folder_list(name, modules.config.paths_checkpoints)
if model_base.filename == filename:
return
@@ -76,7 +77,7 @@ def refresh_base_model(name):
def refresh_refiner_model(name):
global model_refiner
- filename = os.path.abspath(os.path.realpath(os.path.join(modules.config.path_checkpoints, name)))
+ filename = get_file_from_folder_list(name, modules.config.paths_checkpoints)
if model_refiner.filename == filename:
return
@@ -315,7 +316,7 @@ def get_candidate_vae(steps, switch, denoise=1.0, refiner_swap_method='joint'):
@torch.no_grad()
@torch.inference_mode()
-def process_diffusion(positive_cond, negative_cond, steps, switch, width, height, image_seed, callback, sampler_name, scheduler_name, latent=None, denoise=1.0, tiled=False, cfg_scale=7.0, refiner_swap_method='joint'):
+def process_diffusion(positive_cond, negative_cond, steps, switch, width, height, image_seed, callback, sampler_name, scheduler_name, latent=None, denoise=1.0, tiled=False, cfg_scale=7.0, refiner_swap_method='joint', disable_preview=False):
target_unet, target_vae, target_refiner_unet, target_refiner_vae, target_clip \
= final_unet, final_vae, final_refiner_unet, final_refiner_vae, final_clip
@@ -374,6 +375,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
refiner_switch=switch,
previewer_start=0,
previewer_end=steps,
+ disable_preview=disable_preview
)
decoded_latent = core.decode_vae(vae=target_vae, latent_image=sampled_latent, tiled=tiled)
@@ -392,6 +394,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
scheduler=scheduler_name,
previewer_start=0,
previewer_end=steps,
+ disable_preview=disable_preview
)
print('Refiner swapped by changing ksampler. Noise preserved.')
@@ -414,6 +417,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
scheduler=scheduler_name,
previewer_start=switch,
previewer_end=steps,
+ disable_preview=disable_preview
)
target_model = target_refiner_vae
@@ -422,7 +426,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
decoded_latent = core.decode_vae(vae=target_model, latent_image=sampled_latent, tiled=tiled)
if refiner_swap_method == 'vae':
- modules.patch.eps_record = 'vae'
+ modules.patch.patch_settings[os.getpid()].eps_record = 'vae'
if modules.inpaint_worker.current_task is not None:
modules.inpaint_worker.current_task.unswap()
@@ -440,7 +444,8 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
sampler_name=sampler_name,
scheduler=scheduler_name,
previewer_start=0,
- previewer_end=steps
+ previewer_end=steps,
+ disable_preview=disable_preview
)
print('Fooocus VAE-based swap.')
@@ -459,7 +464,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
denoise=denoise)[switch:] * k_sigmas
len_sigmas = len(sigmas) - 1
- noise_mean = torch.mean(modules.patch.eps_record, dim=1, keepdim=True)
+ noise_mean = torch.mean(modules.patch.patch_settings[os.getpid()].eps_record, dim=1, keepdim=True)
if modules.inpaint_worker.current_task is not None:
modules.inpaint_worker.current_task.swap()
@@ -479,7 +484,8 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
previewer_start=switch,
previewer_end=steps,
sigmas=sigmas,
- noise_mean=noise_mean
+ noise_mean=noise_mean,
+ disable_preview=disable_preview
)
target_model = target_refiner_vae
@@ -488,5 +494,5 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
decoded_latent = core.decode_vae(vae=target_model, latent_image=sampled_latent, tiled=tiled)
images = core.pytorch_to_numpy(decoded_latent)
- modules.patch.eps_record = None
+ modules.patch.patch_settings[os.getpid()].eps_record = None
return images
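eps_record used to be a module-level global in modules.patch; it is now looked up per process via patch_settings[os.getpid()]. The container itself lives in modules/patch.py and is not shown in this diff, but the access pattern implies roughly the following shape (a rough sketch only; the real PatchSettings carries more fields):

    import os

    class PatchSettings:
        def __init__(self):
            self.eps_record = None  # filled during sampling when the 'vae' refiner swap needs it

    patch_settings = {}  # pid -> PatchSettings
    patch_settings[os.getpid()] = PatchSettings()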
diff --git a/modules/flags.py b/modules/flags.py
index 27f2d716..6f12bc8f 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -1,3 +1,5 @@
+from enum import IntEnum, Enum
+
disabled = 'Disabled'
enabled = 'Enabled'
subtle_variation = 'Vary (Subtle)'
@@ -10,16 +12,49 @@ uov_list = [
disabled, subtle_variation, strong_variation, upscale_15, upscale_2, upscale_fast
]
-KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "heunpp2","dpm_2", "dpm_2_ancestral",
- "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
- "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"]
+CIVITAI_NO_KARRAS = ["euler", "euler_ancestral", "heun", "dpm_fast", "dpm_adaptive", "ddim", "uni_pc"]
+
+# fooocus: a1111 (Civitai)
+KSAMPLER = {
+ "euler": "Euler",
+ "euler_ancestral": "Euler a",
+ "heun": "Heun",
+ "heunpp2": "",
+ "dpm_2": "DPM2",
+ "dpm_2_ancestral": "DPM2 a",
+ "lms": "LMS",
+ "dpm_fast": "DPM fast",
+ "dpm_adaptive": "DPM adaptive",
+ "dpmpp_2s_ancestral": "DPM++ 2S a",
+ "dpmpp_sde": "DPM++ SDE",
+ "dpmpp_sde_gpu": "DPM++ SDE",
+ "dpmpp_2m": "DPM++ 2M",
+ "dpmpp_2m_sde": "DPM++ 2M SDE",
+ "dpmpp_2m_sde_gpu": "DPM++ 2M SDE",
+ "dpmpp_3m_sde": "",
+ "dpmpp_3m_sde_gpu": "",
+ "ddpm": "",
+ "lcm": "LCM"
+}
+
+SAMPLER_EXTRA = {
+ "ddim": "DDIM",
+ "uni_pc": "UniPC",
+ "uni_pc_bh2": ""
+}
+
+SAMPLERS = KSAMPLER | SAMPLER_EXTRA
+
+KSAMPLER_NAMES = list(KSAMPLER.keys())
SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo"]
-SAMPLER_NAMES = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"]
+SAMPLER_NAMES = KSAMPLER_NAMES + list(SAMPLER_EXTRA.keys())
sampler_list = SAMPLER_NAMES
scheduler_list = SCHEDULER_NAMES
+refiner_swap_method = 'joint'
+
cn_ip = "ImagePrompt"
cn_ip_face = "FaceSwap"
cn_canny = "PyraCanny"
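KSAMPLER and SAMPLER_EXTRA map Fooocus sampler ids to their A1111 (Civitai) display names, with an empty string where no direct equivalent appears to exist; note that KSAMPLER | SAMPLER_EXTRA uses the dict union operator from Python 3.9+. A small sketch of how the table can be used when exporting A1111-style metadata:

    # translate a Fooocus sampler id, falling back to the raw id when there is no mapping
    def a1111_sampler_name(sampler: str) -> str:
        return SAMPLERS.get(sampler) or sampler

    assert a1111_sampler_name('dpmpp_2m_sde_gpu') == 'DPM++ 2M SDE'
    assert a1111_sampler_name('heunpp2') == 'heunpp2'  # empty mapping -> keep the Fooocus id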
@@ -32,9 +67,9 @@ default_parameters = {
cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0)
} # stop, weight
-inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6']
-performance_selections = ['Speed', 'Quality', 'Extreme Speed']
+output_formats = ['png', 'jpg', 'webp']
+inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6']
inpaint_option_default = 'Inpaint or Outpaint (default)'
inpaint_option_detail = 'Improve Detail (face, hand, eyes, etc.)'
inpaint_option_modify = 'Modify Content (add objects, change background, etc.)'
@@ -42,3 +77,49 @@ inpaint_options = [inpaint_option_default, inpaint_option_detail, inpaint_option
desc_type_photo = 'Photograph'
desc_type_anime = 'Art/Anime'
+
+
+class MetadataScheme(Enum):
+ FOOOCUS = 'fooocus'
+ A1111 = 'a1111'
+
+
+metadata_scheme = [
+ (f'{MetadataScheme.FOOOCUS.value} (json)', MetadataScheme.FOOOCUS.value),
+ (f'{MetadataScheme.A1111.value} (plain text)', MetadataScheme.A1111.value),
+]
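Each metadata_scheme entry pairs a display label with the stored value, so either side can be resolved from the other, e.g.:

    # look up the display label for a stored scheme value
    label = next(label for label, value in metadata_scheme if value == MetadataScheme.A1111.value)
    assert label == 'a1111 (plain text)'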
+
+lora_count = 5
+
+controlnet_image_count = 4
+
+
+class Steps(IntEnum):
+ QUALITY = 60
+ SPEED = 30
+ EXTREME_SPEED = 8
+
+
+class StepsUOV(IntEnum):
+ QUALITY = 36
+ SPEED = 18
+ EXTREME_SPEED = 8
+
+
+class Performance(Enum):
+ QUALITY = 'Quality'
+ SPEED = 'Speed'
+ EXTREME_SPEED = 'Extreme Speed'
+
+ @classmethod
+ def list(cls) -> list:
+ return list(map(lambda c: c.value, cls))
+
+    def steps(self) -> int | None:
+        return Steps[self.name].value if self.name in Steps.__members__ else None
+
+    def steps_uov(self) -> int | None:
+        return StepsUOV[self.name].value if self.name in StepsUOV.__members__ else None
+
+
+performance_selections = Performance.list()
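Performance replaces the old performance_selections strings and carries its own step counts; the behaviour follows directly from the enums above:

    assert Performance('Extreme Speed') is Performance.EXTREME_SPEED
    assert Performance.SPEED.steps() == 30
    assert Performance.QUALITY.steps_uov() == 36
    assert performance_selections == ['Quality', 'Speed', 'Extreme Speed']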
diff --git a/modules/html.py b/modules/html.py
index 3ec6f2d6..47a1483a 100644
--- a/modules/html.py
+++ b/modules/html.py
@@ -112,6 +112,30 @@ progress::after {
margin-left: -5px !important;
}
+.lora_enable {
+ flex-grow: 1 !important;
+}
+
+.lora_enable label {
+ height: 100%;
+}
+
+.lora_enable label input {
+ margin: auto;
+}
+
+.lora_enable label span {
+ display: none;
+}
+
+.lora_model {
+ flex-grow: 5 !important;
+}
+
+.lora_weight {
+ flex-grow: 5 !important;
+}
+
'''
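The new .lora_enable / .lora_model / .lora_weight rules size the columns of a LoRA row (the enable checkbox grows least, model and weight grow equally). A hedged sketch of the kind of Gradio row they could be attached to; the component names, labels and ranges here are assumptions, not taken from webui.py:

    import gradio as gr

    with gr.Blocks() as demo:
        with gr.Row():
            lora_enabled = gr.Checkbox(value=True, show_label=False, elem_classes=['lora_enable'])
            lora_model = gr.Dropdown(label='LoRA', choices=['None'], value='None', elem_classes=['lora_model'])
            lora_weight = gr.Slider(label='Weight', minimum=-2, maximum=2, step=0.01, value=1.0, elem_classes=['lora_weight'])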
progress_html = '''