diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..d1eab807
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,54 @@
+__pycache__
+*.ckpt
+*.safetensors
+*.pth
+*.pt
+*.bin
+*.patch
+*.backup
+*.corrupted
+*.partial
+*.onnx
+sorted_styles.json
+/input
+/cache
+/language/default.json
+/test_imgs
+config.txt
+config_modification_tutorial.txt
+user_path_config.txt
+user_path_config-deprecated.txt
+/modules/*.png
+/repositories
+/fooocus_env
+/venv
+/tmp
+/ui-config.json
+/outputs
+/config.json
+/log
+/webui.settings.bat
+/embeddings
+/styles.csv
+/params.txt
+/styles.csv.bak
+/webui-user.bat
+/webui-user.sh
+/interrogate
+/user.css
+/.idea
+/notification.ogg
+/notification.mp3
+/SwinIR
+/textual_inversion
+.vscode
+/extensions
+/test/stdout.txt
+/test/stderr.txt
+/cache.json*
+/config_states/
+/node_modules
+/package-lock.json
+/.coverage*
+/auth.json
+.DS_Store
\ No newline at end of file
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000..ce213ceb
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,3 @@
+# Ensure that shell scripts always use lf line endings, e.g. entrypoint.sh for docker
+* text=auto
+*.sh text eol=lf
\ No newline at end of file
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 585eb87a..f9876685 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1 +1 @@
-* @lllyasviel
+* @mashb1t
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 331426a3..00000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-name: Bug report
-about: Describe a problem
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Read Troubleshoot**
-
-[x] I admit that I have read the [Troubleshoot](https://github.com/lllyasviel/Fooocus/blob/main/troubleshoot.md) before making this issue.
-
-**Describe the problem**
-A clear and concise description of what the bug is.
-
-**Full Console Log**
-Paste **full** console log here. You will make our job easier if you give a **full** log.
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 00000000..5b9cded6
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,107 @@
+name: Bug Report
+description: You think something is broken in Fooocus
+title: "[Bug]: "
+labels: ["bug", "triage"]
+
+body:
+ - type: markdown
+ attributes:
+ value: |
+ > The title of the bug report should be short and descriptive.
+ > Use relevant keywords for searchability.
+ > Do not leave it blank, but also do not put an entire error log in it.
+ - type: checkboxes
+ attributes:
+ label: Checklist
+ description: |
+ Please perform basic debugging to see if your configuration is the cause of the issue.
+        Basic debug procedure:
+         1. Update Fooocus - sometimes things just need to be updated
+         2. Backup and remove your config.txt - check if the issue is caused by bad configuration
+         3. Try a fresh installation of Fooocus in a different directory - see if a clean installation solves the issue
+        Before making an issue report, please check that the issue hasn't been reported recently.
+ options:
+ - label: The issue has not been resolved by following the [troubleshooting guide](https://github.com/lllyasviel/Fooocus/blob/main/troubleshoot.md)
+ - label: The issue exists on a clean installation of Fooocus
+ - label: The issue exists in the current version of Fooocus
+ - label: The issue has not been reported before recently
+ - label: The issue has been reported before but has not been fixed yet
+ - type: markdown
+ attributes:
+ value: |
+ > Please fill this form with as much information as possible. Don't forget to add information about "What browsers" and provide screenshots if possible
+ - type: textarea
+ id: what-did
+ attributes:
+ label: What happened?
+ description: Tell us what happened in a very clear and simple way
+ placeholder: |
+        Image generation is not working as intended.
+ validations:
+ required: true
+ - type: textarea
+ id: steps
+ attributes:
+ label: Steps to reproduce the problem
+ description: Please provide us with precise step by step instructions on how to reproduce the bug
+ placeholder: |
+ 1. Go to ...
+ 2. Press ...
+ 3. ...
+ validations:
+ required: true
+ - type: textarea
+ id: what-should
+ attributes:
+ label: What should have happened?
+ description: Tell us what you think the normal behavior should be
+ placeholder: |
+ Fooocus should ...
+ validations:
+ required: true
+ - type: dropdown
+ id: browsers
+ attributes:
+ label: What browsers do you use to access Fooocus?
+ multiple: true
+ options:
+ - Mozilla Firefox
+ - Google Chrome
+ - Brave
+ - Apple Safari
+ - Microsoft Edge
+ - Android
+ - iOS
+ - Other
+ - type: dropdown
+ id: hosting
+ attributes:
+ label: Where are you running Fooocus?
+ multiple: false
+ options:
+ - Locally
+ - Locally with virtualization (e.g. Docker)
+ - Cloud (Google Colab)
+ - Cloud (other)
+ - type: input
+ id: operating-system
+ attributes:
+ label: What operating system are you using?
+ placeholder: |
+ Windows 10
+ - type: textarea
+ id: logs
+ attributes:
+ label: Console logs
+      description: Please provide **full** cmd/terminal logs from the moment you started the UI to the end of it, after the bug occurred. If it's very long, provide a link to pastebin or similar service.
+ render: Shell
+ validations:
+ required: true
+ - type: textarea
+ id: misc
+ attributes:
+ label: Additional information
+ description: |
+ Please provide us with any relevant additional info or context.
+ Examples:
+ I have updated my GPU driver recently.
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 00000000..7bbf022a
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: false
+contact_links:
+ - name: Ask a question
+ url: https://github.com/lllyasviel/Fooocus/discussions/new?category=q-a
+ about: Ask the community for help
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index 8101bc36..00000000
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the idea you'd like**
-A clear and concise description of what you want to happen.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 00000000..90e594e4
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,40 @@
+name: Feature request
+description: Suggest an idea for this project
+title: "[Feature Request]: "
+labels: ["enhancement", "triage"]
+
+body:
+ - type: checkboxes
+ attributes:
+ label: Is there an existing issue for this?
+ description: Please search to see if an issue already exists for the feature you want, and that it's not implemented in a recent build/commit.
+ options:
+ - label: I have searched the existing issues and checked the recent builds/commits
+ required: true
+ - type: markdown
+ attributes:
+ value: |
+        *Please fill this form with as much information as possible, and provide screenshots and/or illustrations of the feature where applicable*
+ - type: textarea
+ id: feature
+ attributes:
+ label: What would your feature do?
+ description: Tell us about your feature in a very clear and simple way, and what problem it would solve
+ validations:
+ required: true
+ - type: textarea
+ id: workflow
+ attributes:
+ label: Proposed workflow
+ description: Please provide us with step by step information on how you'd like the feature to be accessed and used
+ value: |
+ 1. Go to ....
+ 2. Press ....
+ 3. ...
+ validations:
+ required: true
+ - type: textarea
+ id: misc
+ attributes:
+ label: Additional information
+ description: Add any other context or screenshots about the feature request here.
\ No newline at end of file
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..adee0ed1
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,6 @@
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "monthly"
\ No newline at end of file
diff --git a/.github/workflows/build_container.yml b/.github/workflows/build_container.yml
new file mode 100644
index 00000000..eb70cda3
--- /dev/null
+++ b/.github/workflows/build_container.yml
@@ -0,0 +1,47 @@
+name: Docker image build
+
+on:
+ push:
+ branches:
+ - main
+ tags:
+ - v*
+
+jobs:
+ build-and-push-image:
+ runs-on: ubuntu-latest
+
+ permissions:
+ contents: read
+ packages: write
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Log in to the Container registry
+ uses: docker/login-action@v3
+ with:
+ registry: ghcr.io
+ username: ${{ github.repository_owner }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Extract metadata (tags, labels) for Docker
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
+ tags: |
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=semver,pattern={{major}}
+ type=edge,branch=main
+
+ - name: Build and push Docker image
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: ./Dockerfile
+ push: true
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 05ce1df8..85914986 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,6 +20,7 @@ user_path_config.txt
user_path_config-deprecated.txt
/modules/*.png
/repositories
+/fooocus_env
/venv
/tmp
/ui-config.json
@@ -50,3 +51,4 @@ user_path_config-deprecated.txt
/package-lock.json
/.coverage*
/auth.json
+.DS_Store
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..820ae94a
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,29 @@
+FROM nvidia/cuda:12.4.1-base-ubuntu22.04
+ENV DEBIAN_FRONTEND noninteractive
+ENV CMDARGS --listen
+
+RUN apt-get update -y && \
+ apt-get install -y curl libgl1 libglib2.0-0 python3-pip python-is-python3 git && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+COPY requirements_docker.txt requirements_versions.txt /tmp/
+RUN pip install --no-cache-dir -r /tmp/requirements_docker.txt -r /tmp/requirements_versions.txt && \
+ rm -f /tmp/requirements_docker.txt /tmp/requirements_versions.txt
+RUN pip install --no-cache-dir xformers==0.0.23 --no-dependencies
+RUN curl -fsL -o /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2 https://cdn-media.huggingface.co/frpc-gradio-0.2/frpc_linux_amd64 && \
+ chmod +x /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2
+
+RUN adduser --disabled-password --gecos '' user && \
+ mkdir -p /content/app /content/data
+
+COPY entrypoint.sh /content/
+RUN chown -R user:user /content
+
+WORKDIR /content
+USER user
+
+COPY --chown=user:user . /content/app
+RUN mv /content/app/models /content/app/models.org
+
+CMD [ "sh", "-c", "/content/entrypoint.sh ${CMDARGS}" ]
diff --git a/args_manager.py b/args_manager.py
index d063a5e1..5a2b37c9 100644
--- a/args_manager.py
+++ b/args_manager.py
@@ -1,8 +1,10 @@
import ldm_patched.modules.args_parser as args_parser
-
args_parser.parser.add_argument("--share", action='store_true', help="Set whether to share on Gradio.")
+
args_parser.parser.add_argument("--preset", type=str, default=None, help="Apply specified UI preset.")
+args_parser.parser.add_argument("--disable-preset-selection", action='store_true',
+ help="Disables preset selection in Gradio.")
args_parser.parser.add_argument("--language", type=str, default='default',
help="Translate UI using json files in [language] folder. "
@@ -18,7 +20,19 @@ args_parser.parser.add_argument("--disable-image-log", action='store_true',
help="Prevent writing images and logs to hard drive.")
args_parser.parser.add_argument("--disable-analytics", action='store_true',
- help="Disables analytics for Gradio", default=False)
+ help="Disables analytics for Gradio.")
+
+args_parser.parser.add_argument("--disable-metadata", action='store_true',
+ help="Disables saving metadata to images.")
+
+args_parser.parser.add_argument("--disable-preset-download", action='store_true',
+                                help="Disables downloading models for presets.", default=False)
+
+args_parser.parser.add_argument("--enable-describe-uov-image", action='store_true',
+                                help="Enables automatic description of uov images when prompt is empty.", default=False)
+
+args_parser.parser.add_argument("--always-download-new-model", action='store_true',
+                                help="Always download newer models.", default=False)
args_parser.parser.set_defaults(
disable_cuda_malloc=True,
@@ -35,4 +49,7 @@ if args_parser.args.disable_analytics:
import os
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
+if args_parser.args.disable_in_browser:
+ args_parser.args.in_browser = False
+
args = args_parser.args
diff --git a/css/style.css b/css/style.css
index b87b20a7..6ed0f628 100644
--- a/css/style.css
+++ b/css/style.css
@@ -1,5 +1,150 @@
/* based on https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/v1.6.0/style.css */
+.loader-container {
+ display: flex; /* Use flex to align items horizontally */
+ align-items: center; /* Center items vertically within the container */
+ white-space: nowrap; /* Prevent line breaks within the container */
+}
+
+.loader {
+ border: 8px solid #f3f3f3; /* Light grey */
+ border-top: 8px solid #3498db; /* Blue */
+ border-radius: 50%;
+ width: 30px;
+ height: 30px;
+ animation: spin 2s linear infinite;
+}
+
+@keyframes spin {
+ 0% { transform: rotate(0deg); }
+ 100% { transform: rotate(360deg); }
+}
+
+/* Style the progress bar */
+progress {
+ appearance: none; /* Remove default styling */
+ height: 20px; /* Set the height of the progress bar */
+ border-radius: 5px; /* Round the corners of the progress bar */
+ background-color: #f3f3f3; /* Light grey background */
+ width: 100%;
+ vertical-align: middle !important;
+}
+
+/* Style the progress bar container */
+.progress-container {
+ margin-left: 20px;
+ margin-right: 20px;
+ flex-grow: 1; /* Allow the progress container to take up remaining space */
+}
+
+/* Set the color of the progress bar fill */
+progress::-webkit-progress-value {
+ background-color: #3498db; /* Blue color for the fill */
+}
+
+progress::-moz-progress-bar {
+ background-color: #3498db; /* Blue color for the fill in Firefox */
+}
+
+/* Style the text on the progress bar */
+progress::after {
+ content: attr(value '%'); /* Display the progress value followed by '%' */
+ position: absolute;
+ top: 50%;
+ left: 50%;
+ transform: translate(-50%, -50%);
+ color: white; /* Set text color */
+ font-size: 14px; /* Set font size */
+}
+
+/* Style other texts */
+.loader-container > span {
+ margin-left: 5px; /* Add spacing between the progress bar and the text */
+}
+
+.progress-bar > .generating {
+ display: none !important;
+}
+
+.progress-bar{
+ height: 30px !important;
+}
+
+.progress-bar span {
+ text-align: right;
+ width: 215px;
+}
+div:has(> #positive_prompt) {
+ border: none;
+}
+
+#positive_prompt {
+ padding: 1px;
+ background: var(--background-fill-primary);
+}
+
+.type_row {
+ height: 84px !important;
+}
+
+.type_row_half {
+ height: 34px !important;
+}
+
+.refresh_button {
+ border: none !important;
+ background: none !important;
+ font-size: none !important;
+ box-shadow: none !important;
+}
+
+.advanced_check_row {
+ width: 250px !important;
+}
+
+.min_check {
+ min-width: min(1px, 100%) !important;
+}
+
+.resizable_area {
+ resize: vertical;
+ overflow: auto !important;
+}
+
+.performance_selection label {
+ width: 140px !important;
+}
+
+.aspect_ratios label {
+ flex: calc(50% - 5px) !important;
+}
+
+.aspect_ratios label span {
+ white-space: nowrap !important;
+}
+
+.aspect_ratios label input {
+ margin-left: -5px !important;
+}
+
+.lora_enable label {
+ height: 100%;
+}
+
+.lora_enable label input {
+ margin: auto;
+}
+
+.lora_enable label span {
+ display: none;
+}
+
+@-moz-document url-prefix() {
+ .lora_weight input[type=number] {
+ width: 80px;
+ }
+}
+
#context-menu{
z-index:9999;
position:absolute;
@@ -196,3 +341,78 @@
pointer-events: none;
display: none;
}
+
+#stylePreviewOverlay {
+ opacity: 0;
+ pointer-events: none;
+ width: 128px;
+ height: 128px;
+ position: fixed;
+ top: 0px;
+ left: 0px;
+ border: solid 1px lightgrey;
+ transform: translate(-140px, 20px);
+ background-size: cover;
+ background-position: center;
+ background-color: rgba(0, 0, 0, 0.3);
+ border-radius: 5px;
+ z-index: 100;
+ transition: transform 0.1s ease, opacity 0.3s ease;
+}
+
+#stylePreviewOverlay.lower-half {
+ transform: translate(-140px, -140px);
+}
+
+/* scrollable box for style selections */
+.contain .tabs {
+ height: 100%;
+}
+
+.contain .tabs .tabitem.style_selections_tab {
+ height: 100%;
+}
+
+.contain .tabs .tabitem.style_selections_tab > div:first-child {
+ height: 100%;
+}
+
+.contain .tabs .tabitem.style_selections_tab .style_selections {
+ min-height: 200px;
+ height: 100%;
+}
+
+.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] {
+ position: absolute; /* remove this to disable scrolling within the checkbox-group */
+ overflow: auto;
+ padding-right: 2px;
+ max-height: 100%;
+}
+
+.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] label {
+ /* max-width: calc(35% - 15px) !important; */ /* add this to enable 3 columns layout */
+ flex: calc(50% - 5px) !important;
+}
+
+.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] label span {
+ /* white-space:nowrap; */ /* add this to disable text wrapping (better choice for 3 columns layout) */
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
+
+/* styles preview tooltip */
+.preview-tooltip {
+ background-color: #fff8;
+ font-family: monospace;
+ text-align: center;
+ border-radius: 5px 5px 0px 0px;
+ display: none; /* remove this to enable tooltip in preview image */
+}
+
+#inpaint_canvas .canvas-tooltip-info {
+ top: 2px;
+}
+
+#inpaint_brush_color input[type=color]{
+ background: none;
+}
\ No newline at end of file
diff --git a/development.md b/development.md
new file mode 100644
index 00000000..bbb3def9
--- /dev/null
+++ b/development.md
@@ -0,0 +1,11 @@
+## Running unit tests
+
+Native python:
+```
+python -m unittest discover tests/
+```
+
+Embedded python (Windows zip file installation method):
+```
+..\python_embeded\python.exe -m unittest
+```
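+
+To run a single test module, pass its dotted module path (the module name below is a placeholder; substitute an actual file from the `tests` folder):
+```
+python -m unittest tests.test_example
+```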
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 00000000..f724964d
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,36 @@
+volumes:
+ fooocus-data:
+
+services:
+ app:
+ build: .
+ image: ghcr.io/lllyasviel/fooocus
+ ports:
+ - "7865:7865"
+ environment:
+ - CMDARGS=--listen # Arguments for launch.py.
+ - DATADIR=/content/data # Directory which stores models, outputs dir
+ - config_path=/content/data/config.txt
+ - config_example_path=/content/data/config_modification_tutorial.txt
+ - path_checkpoints=/content/data/models/checkpoints/
+ - path_loras=/content/data/models/loras/
+ - path_embeddings=/content/data/models/embeddings/
+ - path_vae_approx=/content/data/models/vae_approx/
+ - path_upscale_models=/content/data/models/upscale_models/
+ - path_inpaint=/content/data/models/inpaint/
+ - path_controlnet=/content/data/models/controlnet/
+ - path_clip_vision=/content/data/models/clip_vision/
+ - path_fooocus_expansion=/content/data/models/prompt_expansion/fooocus_expansion/
+      - path_outputs=/content/app/outputs/ # Warning: If it is not located under '/content/app', you can't see the history log!
+ volumes:
+ - fooocus-data:/content/data
+ #- ./models:/import/models # Once you import files, you don't need to mount again.
+ #- ./outputs:/import/outputs # Once you import files, you don't need to mount again.
+ tty: true
+ deploy:
+ resources:
+ reservations:
+ devices:
+ - driver: nvidia
+ device_ids: ['0']
+ capabilities: [compute, utility]
diff --git a/docker.md b/docker.md
new file mode 100644
index 00000000..cd75d9f5
--- /dev/null
+++ b/docker.md
@@ -0,0 +1,131 @@
+# Fooocus on Docker
+
+The Docker image is based on NVIDIA CUDA 12.4 and PyTorch 2.1; see [Dockerfile](Dockerfile) and [requirements_docker.txt](requirements_docker.txt) for details.
+
+## Requirements
+
+- A computer with specs good enough to run Fooocus, and proprietary Nvidia drivers
+- Docker, Docker Compose, or Podman
+
+## Quick start
+
+**More information in the [notes](#notes).**
+
+### Running with Docker Compose
+
+1. Clone this repository.
+2. Run the Docker container with `docker compose up`.
+
+### Running with Docker
+
+```sh
+docker run -p 7865:7865 -v fooocus-data:/content/data -it \
+--gpus all \
+-e CMDARGS=--listen \
+-e DATADIR=/content/data \
+-e config_path=/content/data/config.txt \
+-e config_example_path=/content/data/config_modification_tutorial.txt \
+-e path_checkpoints=/content/data/models/checkpoints/ \
+-e path_loras=/content/data/models/loras/ \
+-e path_embeddings=/content/data/models/embeddings/ \
+-e path_vae_approx=/content/data/models/vae_approx/ \
+-e path_upscale_models=/content/data/models/upscale_models/ \
+-e path_inpaint=/content/data/models/inpaint/ \
+-e path_controlnet=/content/data/models/controlnet/ \
+-e path_clip_vision=/content/data/models/clip_vision/ \
+-e path_fooocus_expansion=/content/data/models/prompt_expansion/fooocus_expansion/ \
+-e path_outputs=/content/app/outputs/ \
+ghcr.io/lllyasviel/fooocus
+```
+### Running with Podman
+
+```sh
+podman run -p 7865:7865 -v fooocus-data:/content/data -it \
+--security-opt=no-new-privileges --cap-drop=ALL --security-opt label=type:nvidia_container_t --device=nvidia.com/gpu=all \
+-e CMDARGS=--listen \
+-e DATADIR=/content/data \
+-e config_path=/content/data/config.txt \
+-e config_example_path=/content/data/config_modification_tutorial.txt \
+-e path_checkpoints=/content/data/models/checkpoints/ \
+-e path_loras=/content/data/models/loras/ \
+-e path_embeddings=/content/data/models/embeddings/ \
+-e path_vae_approx=/content/data/models/vae_approx/ \
+-e path_upscale_models=/content/data/models/upscale_models/ \
+-e path_inpaint=/content/data/models/inpaint/ \
+-e path_controlnet=/content/data/models/controlnet/ \
+-e path_clip_vision=/content/data/models/clip_vision/ \
+-e path_fooocus_expansion=/content/data/models/prompt_expansion/fooocus_expansion/ \
+-e path_outputs=/content/app/outputs/ \
+ghcr.io/lllyasviel/fooocus
+```
+
+When you see the message `Use the app with http://0.0.0.0:7865/` in the console, you can access the URL in your browser.
+
+Your models and outputs are stored in the `fooocus-data` volume, which, depending on OS, is stored in `/var/lib/docker/volumes/` (or `~/.local/share/containers/storage/volumes/` when using `podman`).
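+
+To find the exact location of the volume on your system, you can inspect it (a standard Docker/Podman command, not specific to Fooocus):
+```sh
+docker volume inspect fooocus-data
+```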
+
+## Building the container locally
+
+Clone the repository first, and open a terminal in the folder.
+
+Build with `docker`:
+```sh
+docker build . -t fooocus
+```
+
+Build with `podman`:
+```sh
+podman build . -t fooocus
+```
+
+## Details
+
+### Update the container manually (`docker compose`)
+
+When you use `docker compose up` continuously, the container is not updated to the latest version of Fooocus automatically.
+Run `git pull` before executing `docker compose build --no-cache` to build an image with the latest Fooocus version.
+You can then start it with `docker compose up`.
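+
+Put together, a manual update looks like this:
+```sh
+git pull
+docker compose build --no-cache
+docker compose up
+```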
+
+### Import models, outputs
+
+If you want to import files from models or the outputs folder, you can add the following bind mounts in the [docker-compose.yml](docker-compose.yml) or your preferred method of running the container:
+```
+#- ./models:/import/models # Once you import files, you don't need to mount again.
+#- ./outputs:/import/outputs # Once you import files, you don't need to mount again.
+```
+After running the container, your files will be copied into `/content/data/models` and `/content/data/outputs`.
+Since `/content/data` is a persistent volume, your files persist even when you re-run the container without the above mounts.
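+
+If you run the container with plain `docker run` instead of Compose, the equivalent bind mounts would look like this (a sketch; adjust the host paths to your setup):
+```sh
+docker run ... -v "$(pwd)/models:/import/models" -v "$(pwd)/outputs:/import/outputs" ghcr.io/lllyasviel/fooocus
+```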
+
+
+### Paths inside the container
+
+|Path|Details|
+|-|-|
+|/content/app|The folder where the application is stored|
+|/content/app/models.org|Original 'models' folder. Files are copied to '/content/app/models', which is symlinked to '/content/data/models', every time the container boots. (Existing files are not overwritten.)|
+|/content/data|Persistent volume mount point|
+|/content/data/models|The folder is symlinked to '/content/app/models'|
+|/content/data/outputs|The folder is symlinked to '/content/app/outputs'|
+
+### Environments
+
+You can change `config.txt` parameters by using environment variables.
+**Environment variables take priority over the values defined in `config.txt` and will be saved to `config_modification_tutorial.txt`.**
+
+The Docker-specific environment variables listed below are used by 'entrypoint.sh'.
+
+|Environment|Details|
+|-|-|
+|DATADIR|'/content/data' location.|
+|CMDARGS|Arguments for [entry_with_update.py](entry_with_update.py) which is called by [entrypoint.sh](entrypoint.sh)|
+|config_path|'config.txt' location|
+|config_example_path|'config_modification_tutorial.txt' location|
+|HF_MIRROR|Hugging Face mirror site domain|
+
+You can also use the same JSON key names and values explained in 'config_modification_tutorial.txt' as environment variables.
+See the examples in [docker-compose.yml](docker-compose.yml).
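+
+For example, to override the default checkpoint (assuming the `default_model` key from your `config.txt`; the file name below is a placeholder), add the variable to the `environment:` list in `docker-compose.yml` or pass it on the command line:
+```sh
+docker run ... -e default_model=someModel.safetensors ghcr.io/lllyasviel/fooocus
+```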
+
+## Notes
+
+- Please keep 'path_outputs' under '/content/app'. Otherwise, you may get an error when you open the history log.
+- Docker on Mac/Windows still suffers from slow volume access when you use "bind mount" volumes. Please refer to [this article](https://docs.docker.com/storage/volumes/#use-a-volume-with-docker-compose) and avoid "bind mount" volumes where possible.
+- The MPS backend (Metal Performance Shaders, Apple Silicon M1/M2/etc.) is not yet supported in Docker, see https://github.com/pytorch/pytorch/issues/81224
+- You can also use `docker compose up -d` to start the container detached and connect to the logs with `docker compose logs -f`. This way you can also close the terminal and keep the container running.
\ No newline at end of file
diff --git a/entrypoint.sh b/entrypoint.sh
new file mode 100755
index 00000000..d0dba09c
--- /dev/null
+++ b/entrypoint.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+ORIGINALDIR=/content/app
+# Use predefined DATADIR if it is defined, otherwise default to /content/data
+[[ x"${DATADIR}" == "x" ]] && DATADIR=/content/data
+
+# Make persistent dir from original dir
+function mklink () {
+ mkdir -p $DATADIR/$1
+ ln -s $DATADIR/$1 $ORIGINALDIR
+}
+
+# Copy old files from import dir
+function import () {
+ (test -d /import/$1 && cd /import/$1 && cp -Rpn . $DATADIR/$1/)
+}
+
+cd $ORIGINALDIR
+
+# models
+mklink models
+# Copy original files
+(cd $ORIGINALDIR/models.org && cp -Rpn . $ORIGINALDIR/models/)
+# Import old files
+import models
+
+# outputs
+mklink outputs
+# Import old files
+import outputs
+
+# Start application
+python launch.py $*
diff --git a/extras/censor.py b/extras/censor.py
new file mode 100644
index 00000000..45617fd8
--- /dev/null
+++ b/extras/censor.py
@@ -0,0 +1,60 @@
+import os
+
+import numpy as np
+import torch
+from transformers import CLIPConfig, CLIPImageProcessor
+
+import ldm_patched.modules.model_management as model_management
+import modules.config
+from extras.safety_checker.models.safety_checker import StableDiffusionSafetyChecker
+from ldm_patched.modules.model_patcher import ModelPatcher
+
+safety_checker_repo_root = os.path.join(os.path.dirname(__file__), 'safety_checker')
+config_path = os.path.join(safety_checker_repo_root, "configs", "config.json")
+preprocessor_config_path = os.path.join(safety_checker_repo_root, "configs", "preprocessor_config.json")
+
+
+class Censor:
+ def __init__(self):
+ self.safety_checker_model: ModelPatcher | None = None
+ self.clip_image_processor: CLIPImageProcessor | None = None
+ self.load_device = torch.device('cpu')
+ self.offload_device = torch.device('cpu')
+
+ def init(self):
+ if self.safety_checker_model is None and self.clip_image_processor is None:
+ safety_checker_model = modules.config.downloading_safety_checker_model()
+ self.clip_image_processor = CLIPImageProcessor.from_json_file(preprocessor_config_path)
+ clip_config = CLIPConfig.from_json_file(config_path)
+ model = StableDiffusionSafetyChecker.from_pretrained(safety_checker_model, config=clip_config)
+ model.eval()
+
+ self.load_device = model_management.text_encoder_device()
+ self.offload_device = model_management.text_encoder_offload_device()
+
+ model.to(self.offload_device)
+
+ self.safety_checker_model = ModelPatcher(model, load_device=self.load_device, offload_device=self.offload_device)
+
+ def censor(self, images: list | np.ndarray) -> list | np.ndarray:
+ self.init()
+ model_management.load_model_gpu(self.safety_checker_model)
+
+ single = False
+ if not isinstance(images, list) or isinstance(images, np.ndarray):
+ images = [images]
+ single = True
+
+ safety_checker_input = self.clip_image_processor(images, return_tensors="pt")
+ safety_checker_input.to(device=self.load_device)
+ checked_images, has_nsfw_concept = self.safety_checker_model.model(images=images,
+ clip_input=safety_checker_input.pixel_values)
+ checked_images = [image.astype(np.uint8) for image in checked_images]
+
+ if single:
+ checked_images = checked_images[0]
+
+ return checked_images
+
+
+default_censor = Censor().censor
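+
+# A minimal usage sketch (illustration only, not part of the module): `default_censor`
+# accepts a single HxWx3 uint8 numpy array or a list of them, lazily loads the safety
+# checker on first use, and returns the input with NSFW images replaced by black images.
+#
+#   from extras.censor import default_censor
+#   checked_image = default_censor(image)            # np.ndarray in -> np.ndarray out
+#   checked_images = default_censor([img_a, img_b])  # list in -> list out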
diff --git a/extras/expansion.py b/extras/expansion.py
index c1b59b8a..34c1ee8d 100644
--- a/extras/expansion.py
+++ b/extras/expansion.py
@@ -112,6 +112,9 @@ class FooocusExpansion:
max_token_length = 75 * int(math.ceil(float(current_token_length) / 75.0))
max_new_tokens = max_token_length - current_token_length
+ if max_new_tokens == 0:
+ return prompt[:-1]
+
# https://huggingface.co/blog/introducing-csearch
# https://huggingface.co/docs/transformers/generation_strategies
features = self.model.generate(**tokenized_kwargs,
diff --git a/extras/ip_adapter.py b/extras/ip_adapter.py
index cb1d366f..22527d24 100644
--- a/extras/ip_adapter.py
+++ b/extras/ip_adapter.py
@@ -2,12 +2,13 @@ import torch
import ldm_patched.modules.clip_vision
import safetensors.torch as sf
import ldm_patched.modules.model_management as model_management
-import contextlib
import ldm_patched.ldm.modules.attention as attention
from extras.resampler import Resampler
from ldm_patched.modules.model_patcher import ModelPatcher
from modules.core import numpy_to_pytorch
+from modules.ops import use_patched_ops
+from ldm_patched.modules.ops import manual_cast
SD_V12_CHANNELS = [320] * 4 + [640] * 4 + [1280] * 4 + [1280] * 6 + [640] * 6 + [320] * 6 + [1280] * 2
@@ -116,14 +117,16 @@ def load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_path):
clip_extra_context_tokens = ip_state_dict["image_proj"]["proj.weight"].shape[0] // cross_attention_dim
clip_embeddings_dim = None
- ip_adapter = IPAdapterModel(
- ip_state_dict,
- plus=plus,
- cross_attention_dim=cross_attention_dim,
- clip_embeddings_dim=clip_embeddings_dim,
- clip_extra_context_tokens=clip_extra_context_tokens,
- sdxl_plus=sdxl_plus
- )
+ with use_patched_ops(manual_cast):
+ ip_adapter = IPAdapterModel(
+ ip_state_dict,
+ plus=plus,
+ cross_attention_dim=cross_attention_dim,
+ clip_embeddings_dim=clip_embeddings_dim,
+ clip_extra_context_tokens=clip_extra_context_tokens,
+ sdxl_plus=sdxl_plus
+ )
+
ip_adapter.sdxl = sdxl
ip_adapter.load_device = load_device
ip_adapter.offload_device = offload_device
diff --git a/extras/preprocessors.py b/extras/preprocessors.py
index 798fe15d..0aa83109 100644
--- a/extras/preprocessors.py
+++ b/extras/preprocessors.py
@@ -1,27 +1,26 @@
import cv2
import numpy as np
-import modules.advanced_parameters as advanced_parameters
-def centered_canny(x: np.ndarray):
+def centered_canny(x: np.ndarray, canny_low_threshold, canny_high_threshold):
assert isinstance(x, np.ndarray)
assert x.ndim == 2 and x.dtype == np.uint8
- y = cv2.Canny(x, int(advanced_parameters.canny_low_threshold), int(advanced_parameters.canny_high_threshold))
+ y = cv2.Canny(x, int(canny_low_threshold), int(canny_high_threshold))
y = y.astype(np.float32) / 255.0
return y
-def centered_canny_color(x: np.ndarray):
+def centered_canny_color(x: np.ndarray, canny_low_threshold, canny_high_threshold):
assert isinstance(x, np.ndarray)
assert x.ndim == 3 and x.shape[2] == 3
- result = [centered_canny(x[..., i]) for i in range(3)]
+ result = [centered_canny(x[..., i], canny_low_threshold, canny_high_threshold) for i in range(3)]
result = np.stack(result, axis=2)
return result
-def pyramid_canny_color(x: np.ndarray):
+def pyramid_canny_color(x: np.ndarray, canny_low_threshold, canny_high_threshold):
assert isinstance(x, np.ndarray)
assert x.ndim == 3 and x.shape[2] == 3
@@ -31,7 +30,7 @@ def pyramid_canny_color(x: np.ndarray):
for k in [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
Hs, Ws = int(H * k), int(W * k)
small = cv2.resize(x, (Ws, Hs), interpolation=cv2.INTER_AREA)
- edge = centered_canny_color(small)
+ edge = centered_canny_color(small, canny_low_threshold, canny_high_threshold)
if acc_edge is None:
acc_edge = edge
else:
@@ -54,11 +53,11 @@ def norm255(x, low=4, high=96):
return x * 255.0
-def canny_pyramid(x):
+def canny_pyramid(x, canny_low_threshold, canny_high_threshold):
# For some reasons, SAI's Control-lora Canny seems to be trained on canny maps with non-standard resolutions.
# Then we use pyramid to use all resolutions to avoid missing any structure in specific resolutions.
- color_canny = pyramid_canny_color(x)
+ color_canny = pyramid_canny_color(x, canny_low_threshold, canny_high_threshold)
result = np.sum(color_canny, axis=2)
return norm255(result, low=1, high=99).clip(0, 255).astype(np.uint8)
diff --git a/extras/resampler.py b/extras/resampler.py
index 4521c8c3..539f309d 100644
--- a/extras/resampler.py
+++ b/extras/resampler.py
@@ -108,8 +108,7 @@ class Resampler(nn.Module):
)
def forward(self, x):
-
- latents = self.latents.repeat(x.size(0), 1, 1)
+ latents = self.latents.repeat(x.size(0), 1, 1).to(x)
x = self.proj_in(x)
@@ -118,4 +117,4 @@ class Resampler(nn.Module):
latents = ff(latents) + latents
latents = self.proj_out(latents)
- return self.norm_out(latents)
\ No newline at end of file
+ return self.norm_out(latents)
diff --git a/extras/safety_checker/configs/config.json b/extras/safety_checker/configs/config.json
new file mode 100644
index 00000000..aa454d22
--- /dev/null
+++ b/extras/safety_checker/configs/config.json
@@ -0,0 +1,171 @@
+{
+ "_name_or_path": "clip-vit-large-patch14/",
+ "architectures": [
+ "SafetyChecker"
+ ],
+ "initializer_factor": 1.0,
+ "logit_scale_init_value": 2.6592,
+ "model_type": "clip",
+ "projection_dim": 768,
+ "text_config": {
+ "_name_or_path": "",
+ "add_cross_attention": false,
+ "architectures": null,
+ "attention_dropout": 0.0,
+ "bad_words_ids": null,
+ "bos_token_id": 0,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "dropout": 0.0,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": 2,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_act": "quick_gelu",
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "layer_norm_eps": 1e-05,
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "max_position_embeddings": 77,
+ "min_length": 0,
+ "model_type": "clip_text_model",
+ "no_repeat_ngram_size": 0,
+ "num_attention_heads": 12,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_hidden_layers": 12,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": 1,
+ "prefix": null,
+ "problem_type": null,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "sep_token_id": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": null,
+ "torchscript": false,
+ "transformers_version": "4.21.0.dev0",
+ "typical_p": 1.0,
+ "use_bfloat16": false,
+ "vocab_size": 49408
+ },
+ "text_config_dict": {
+ "hidden_size": 768,
+ "intermediate_size": 3072,
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12
+ },
+ "torch_dtype": "float32",
+ "transformers_version": null,
+ "vision_config": {
+ "_name_or_path": "",
+ "add_cross_attention": false,
+ "architectures": null,
+ "attention_dropout": 0.0,
+ "bad_words_ids": null,
+ "bos_token_id": null,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "dropout": 0.0,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": null,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_act": "quick_gelu",
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "image_size": 224,
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "layer_norm_eps": 1e-05,
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "min_length": 0,
+ "model_type": "clip_vision_model",
+ "no_repeat_ngram_size": 0,
+ "num_attention_heads": 16,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_hidden_layers": 24,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": null,
+ "patch_size": 14,
+ "prefix": null,
+ "problem_type": null,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "sep_token_id": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": null,
+ "torchscript": false,
+ "transformers_version": "4.21.0.dev0",
+ "typical_p": 1.0,
+ "use_bfloat16": false
+ },
+ "vision_config_dict": {
+ "hidden_size": 1024,
+ "intermediate_size": 4096,
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "patch_size": 14
+ }
+}
diff --git a/extras/safety_checker/configs/preprocessor_config.json b/extras/safety_checker/configs/preprocessor_config.json
new file mode 100644
index 00000000..5294955f
--- /dev/null
+++ b/extras/safety_checker/configs/preprocessor_config.json
@@ -0,0 +1,20 @@
+{
+ "crop_size": 224,
+ "do_center_crop": true,
+ "do_convert_rgb": true,
+ "do_normalize": true,
+ "do_resize": true,
+ "feature_extractor_type": "CLIPFeatureExtractor",
+ "image_mean": [
+ 0.48145466,
+ 0.4578275,
+ 0.40821073
+ ],
+ "image_std": [
+ 0.26862954,
+ 0.26130258,
+ 0.27577711
+ ],
+ "resample": 3,
+ "size": 224
+}
diff --git a/extras/safety_checker/models/safety_checker.py b/extras/safety_checker/models/safety_checker.py
new file mode 100644
index 00000000..ea38bf03
--- /dev/null
+++ b/extras/safety_checker/models/safety_checker.py
@@ -0,0 +1,126 @@
+# from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py
+
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import torch
+import torch.nn as nn
+from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+
+def cosine_distance(image_embeds, text_embeds):
+ normalized_image_embeds = nn.functional.normalize(image_embeds)
+ normalized_text_embeds = nn.functional.normalize(text_embeds)
+ return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
+
+
+class StableDiffusionSafetyChecker(PreTrainedModel):
+ config_class = CLIPConfig
+ main_input_name = "clip_input"
+
+ _no_split_modules = ["CLIPEncoderLayer"]
+
+ def __init__(self, config: CLIPConfig):
+ super().__init__(config)
+
+ self.vision_model = CLIPVisionModel(config.vision_config)
+ self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
+
+ self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
+ self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
+
+ self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
+ self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
+
+ @torch.no_grad()
+ def forward(self, clip_input, images):
+ pooled_output = self.vision_model(clip_input)[1] # pooled_output
+ image_embeds = self.visual_projection(pooled_output)
+
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+ special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
+ cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
+
+ result = []
+ batch_size = image_embeds.shape[0]
+ for i in range(batch_size):
+ result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
+
+            # increase this value to create a stronger `nsfw` filter
+ # at the cost of increasing the possibility of filtering benign images
+ adjustment = 0.0
+
+ for concept_idx in range(len(special_cos_dist[0])):
+ concept_cos = special_cos_dist[i][concept_idx]
+ concept_threshold = self.special_care_embeds_weights[concept_idx].item()
+ result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
+ if result_img["special_scores"][concept_idx] > 0:
+ result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
+ adjustment = 0.01
+
+ for concept_idx in range(len(cos_dist[0])):
+ concept_cos = cos_dist[i][concept_idx]
+ concept_threshold = self.concept_embeds_weights[concept_idx].item()
+ result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
+ if result_img["concept_scores"][concept_idx] > 0:
+ result_img["bad_concepts"].append(concept_idx)
+
+ result.append(result_img)
+
+ has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
+
+ for idx, has_nsfw_concept in enumerate(has_nsfw_concepts):
+ if has_nsfw_concept:
+ if torch.is_tensor(images) or torch.is_tensor(images[0]):
+ images[idx] = torch.zeros_like(images[idx]) # black image
+ else:
+ images[idx] = np.zeros(images[idx].shape) # black image
+
+ if any(has_nsfw_concepts):
+ logger.warning(
+ "Potential NSFW content was detected in one or more images. A black image will be returned instead."
+ " Try again with a different prompt and/or seed."
+ )
+
+ return images, has_nsfw_concepts
+
+ @torch.no_grad()
+ def forward_onnx(self, clip_input: torch.Tensor, images: torch.Tensor):
+ pooled_output = self.vision_model(clip_input)[1] # pooled_output
+ image_embeds = self.visual_projection(pooled_output)
+
+ special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
+ cos_dist = cosine_distance(image_embeds, self.concept_embeds)
+
+ # increase this value to create a stronger `nsfw` filter
+ # at the cost of increasing the possibility of filtering benign images
+ adjustment = 0.0
+
+ special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
+ # special_scores = special_scores.round(decimals=3)
+ special_care = torch.any(special_scores > 0, dim=1)
+ special_adjustment = special_care * 0.01
+ special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
+
+ concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
+ # concept_scores = concept_scores.round(decimals=3)
+ has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
+
+ images[has_nsfw_concepts] = 0.0 # black image
+
+ return images, has_nsfw_concepts
diff --git a/extras/vae_interpose.py b/extras/vae_interpose.py
index 72fb09a4..d407ca83 100644
--- a/extras/vae_interpose.py
+++ b/extras/vae_interpose.py
@@ -1,69 +1,85 @@
# https://github.com/city96/SD-Latent-Interposer/blob/main/interposer.py
import os
-import torch
-import safetensors.torch as sf
-import torch.nn as nn
-import ldm_patched.modules.model_management
+import safetensors.torch as sf
+import torch
+import torch.nn as nn
+
+import ldm_patched.modules.model_management
from ldm_patched.modules.model_patcher import ModelPatcher
from modules.config import path_vae_approx
-class Block(nn.Module):
- def __init__(self, size):
+class ResBlock(nn.Module):
+ """Block with residuals"""
+
+ def __init__(self, ch):
super().__init__()
self.join = nn.ReLU()
+ self.norm = nn.BatchNorm2d(ch)
self.long = nn.Sequential(
- nn.Conv2d(size, size, kernel_size=3, stride=1, padding=1),
- nn.LeakyReLU(0.1),
- nn.Conv2d(size, size, kernel_size=3, stride=1, padding=1),
- nn.LeakyReLU(0.1),
- nn.Conv2d(size, size, kernel_size=3, stride=1, padding=1),
+ nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1),
+ nn.SiLU(),
+ nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1),
+ nn.SiLU(),
+ nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1),
+ nn.Dropout(0.1)
)
def forward(self, x):
- y = self.long(x)
- z = self.join(y + x)
- return z
+ x = self.norm(x)
+ return self.join(self.long(x) + x)
-class Interposer(nn.Module):
- def __init__(self):
+class ExtractBlock(nn.Module):
+ """Increase no. of channels by [out/in]"""
+
+ def __init__(self, ch_in, ch_out):
super().__init__()
- self.chan = 4
- self.hid = 128
-
- self.head_join = nn.ReLU()
- self.head_short = nn.Conv2d(self.chan, self.hid, kernel_size=3, stride=1, padding=1)
- self.head_long = nn.Sequential(
- nn.Conv2d(self.chan, self.hid, kernel_size=3, stride=1, padding=1),
- nn.LeakyReLU(0.1),
- nn.Conv2d(self.hid, self.hid, kernel_size=3, stride=1, padding=1),
- nn.LeakyReLU(0.1),
- nn.Conv2d(self.hid, self.hid, kernel_size=3, stride=1, padding=1),
- )
- self.core = nn.Sequential(
- Block(self.hid),
- Block(self.hid),
- Block(self.hid),
- )
- self.tail = nn.Sequential(
- nn.ReLU(),
- nn.Conv2d(self.hid, self.chan, kernel_size=3, stride=1, padding=1)
+ self.join = nn.ReLU()
+ self.short = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1)
+ self.long = nn.Sequential(
+ nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1),
+ nn.SiLU(),
+ nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1),
+ nn.SiLU(),
+ nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1),
+ nn.Dropout(0.1)
)
def forward(self, x):
- y = self.head_join(
- self.head_long(x) +
- self.head_short(x)
+ return self.join(self.long(x) + self.short(x))
+
+
+class InterposerModel(nn.Module):
+ """Main neural network"""
+
+ def __init__(self, ch_in=4, ch_out=4, ch_mid=64, scale=1.0, blocks=12):
+ super().__init__()
+ self.ch_in = ch_in
+ self.ch_out = ch_out
+ self.ch_mid = ch_mid
+ self.blocks = blocks
+ self.scale = scale
+
+ self.head = ExtractBlock(self.ch_in, self.ch_mid)
+ self.core = nn.Sequential(
+ nn.Upsample(scale_factor=self.scale, mode="nearest"),
+ *[ResBlock(self.ch_mid) for _ in range(blocks)],
+ nn.BatchNorm2d(self.ch_mid),
+ nn.SiLU(),
)
+ self.tail = nn.Conv2d(self.ch_mid, self.ch_out, kernel_size=3, stride=1, padding=1)
+
+ def forward(self, x):
+ y = self.head(x)
z = self.core(y)
return self.tail(z)
vae_approx_model = None
-vae_approx_filename = os.path.join(path_vae_approx, 'xl-to-v1_interposer-v3.1.safetensors')
+vae_approx_filename = os.path.join(path_vae_approx, 'xl-to-v1_interposer-v4.0.safetensors')
def parse(x):
@@ -72,7 +88,7 @@ def parse(x):
x_origin = x.clone()
if vae_approx_model is None:
- model = Interposer()
+ model = InterposerModel()
model.eval()
sd = sf.load_file(vae_approx_filename)
model.load_state_dict(sd)
diff --git a/fooocus_colab.ipynb b/fooocus_colab.ipynb
index 205dac55..7fa98879 100644
--- a/fooocus_colab.ipynb
+++ b/fooocus_colab.ipynb
@@ -12,7 +12,7 @@
"%cd /content\n",
"!git clone https://github.com/lllyasviel/Fooocus.git\n",
"%cd /content/Fooocus\n",
- "!python entry_with_update.py --share\n"
+ "!python entry_with_update.py --share --always-high-vram\n"
]
}
],
diff --git a/fooocus_version.py b/fooocus_version.py
index 2511cfc7..84d1586b 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.1.855'
+version = '2.4.3'
diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js
index 2f32af1b..7494674d 100644
--- a/javascript/contextMenus.js
+++ b/javascript/contextMenus.js
@@ -154,12 +154,8 @@ let cancelGenerateForever = function() {
let generateOnRepeatForButtons = function() {
generateOnRepeat('#generate_button', '#stop_button');
};
-
appendContextMenuOption('#generate_button', 'Generate forever', generateOnRepeatForButtons);
-// appendContextMenuOption('#stop_button', 'Generate forever', generateOnRepeatForButtons);
-// appendContextMenuOption('#stop_button', 'Cancel generate forever', cancelGenerateForever);
-// appendContextMenuOption('#generate_button', 'Cancel generate forever', cancelGenerateForever);
})();
//End example Context Menu Items
diff --git a/javascript/localization.js b/javascript/localization.js
index 8fda68e2..02e4cfba 100644
--- a/javascript/localization.js
+++ b/javascript/localization.js
@@ -45,6 +45,9 @@ function processTextNode(node) {
var tl = getTranslation(text);
if (tl !== undefined) {
node.textContent = tl;
+ if (text && node.parentElement) {
+ node.parentElement.setAttribute("data-original-text", text);
+ }
}
}
@@ -77,6 +80,15 @@ function refresh_style_localization() {
processNode(document.querySelector('.style_selections'));
}
+function refresh_aspect_ratios_label(value) {
+    const label = document.querySelector('#aspect_ratios_accordion div span');
+    let translation = getTranslation("Aspect Ratios");
+    if (typeof translation == "undefined") {
+        translation = "Aspect Ratios";
+    }
+    label.textContent = translation + " " + htmlDecode(value);
+}
+
function localizeWholePage() {
processNode(gradioApp());
diff --git a/javascript/script.js b/javascript/script.js
index 593d8c36..21dd483d 100644
--- a/javascript/script.js
+++ b/javascript/script.js
@@ -119,6 +119,44 @@ document.addEventListener("DOMContentLoaded", function() {
}
});
mutationObserver.observe(gradioApp(), {childList: true, subtree: true});
+ initStylePreviewOverlay();
+});
+
+var onAppend = function(elem, f) {
+ var observer = new MutationObserver(function(mutations) {
+ mutations.forEach(function(m) {
+ if (m.addedNodes.length) {
+ f(m.addedNodes);
+ }
+ });
+ });
+ observer.observe(elem, {childList: true});
+}
+
+function addObserverIfDesiredNodeAvailable(querySelector, callback) {
+ var elem = document.querySelector(querySelector);
+ if (!elem) {
+ window.setTimeout(() => addObserverIfDesiredNodeAvailable(querySelector, callback), 1000);
+ return;
+ }
+
+ onAppend(elem, callback);
+}
+
+/**
+ * Show reset button on toast "Connection errored out."
+ */
+addObserverIfDesiredNodeAvailable(".toast-wrap", function(added) {
+ added.forEach(function(element) {
+ if (element.innerText.includes("Connection errored out.")) {
+ window.setTimeout(function() {
+ document.getElementById("reset_button").classList.remove("hidden");
+ document.getElementById("generate_button").classList.add("hidden");
+ document.getElementById("skip_button").classList.add("hidden");
+ document.getElementById("stop_button").classList.add("hidden");
+ });
+ }
+ });
});
/**
@@ -145,6 +183,46 @@ document.addEventListener('keydown', function(e) {
}
});
+function initStylePreviewOverlay() {
+ let overlayVisible = false;
+ const samplesPath = document.querySelector("meta[name='samples-path']").getAttribute("content")
+ const overlay = document.createElement('div');
+ const tooltip = document.createElement('div');
+ tooltip.className = 'preview-tooltip';
+ overlay.appendChild(tooltip);
+ overlay.id = 'stylePreviewOverlay';
+ document.body.appendChild(overlay);
+ document.addEventListener('mouseover', function (e) {
+ const label = e.target.closest('.style_selections label');
+ if (!label) return;
+ label.removeEventListener("mouseout", onMouseLeave);
+ label.addEventListener("mouseout", onMouseLeave);
+ overlayVisible = true;
+ overlay.style.opacity = "1";
+ const originalText = label.querySelector("span").getAttribute("data-original-text");
+ const name = originalText || label.querySelector("span").textContent;
+ overlay.style.backgroundImage = `url("${samplesPath.replace(
+ "fooocus_v2",
+ name.toLowerCase().replaceAll(" ", "_")
+ ).replaceAll("\\", "\\\\")}")`;
+
+ tooltip.textContent = name;
+
+ function onMouseLeave() {
+ overlayVisible = false;
+ overlay.style.opacity = "0";
+ overlay.style.backgroundImage = "";
+ label.removeEventListener("mouseout", onMouseLeave);
+ }
+ });
+ document.addEventListener('mousemove', function (e) {
+ if (!overlayVisible) return;
+ overlay.style.left = `${e.clientX}px`;
+ overlay.style.top = `${e.clientY}px`;
+ overlay.className = e.clientY > window.innerHeight / 2 ? "lower-half" : "upper-half";
+ });
+}
+
/**
* checks that a UI element is not in another hidden element or tab content
*/
@@ -178,3 +256,8 @@ function set_theme(theme) {
window.location.replace(gradioURL + '?__theme=' + theme);
}
}
+
+function htmlDecode(input) {
+ var doc = new DOMParser().parseFromString(input, "text/html");
+ return doc.documentElement.textContent;
+}
\ No newline at end of file
diff --git a/language/en.json b/language/en.json
index fd40ca2f..5819f4ee 100644
--- a/language/en.json
+++ b/language/en.json
@@ -4,12 +4,20 @@
"Generate": "Generate",
"Skip": "Skip",
"Stop": "Stop",
+ "Reconnect": "Reconnect",
"Input Image": "Input Image",
"Advanced": "Advanced",
"Upscale or Variation": "Upscale or Variation",
"Image Prompt": "Image Prompt",
- "Inpaint or Outpaint (beta)": "Inpaint or Outpaint (beta)",
- "Drag above image to here": "Drag above image to here",
+ "Inpaint or Outpaint": "Inpaint or Outpaint",
+ "Outpaint Direction": "Outpaint Direction",
+ "Method": "Method",
+ "Describe": "Describe",
+ "Content Type": "Content Type",
+ "Photograph": "Photograph",
+ "Art/Anime": "Art/Anime",
+ "Describe this Image into Prompt": "Describe this Image into Prompt",
+ "Image Size and Recommended Size": "Image Size and Recommended Size",
"Upscale or Variation:": "Upscale or Variation:",
"Disabled": "Disabled",
"Vary (Subtle)": "Vary (Subtle)",
@@ -38,9 +46,12 @@
"* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)": "* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)",
"Setting": "Setting",
"Style": "Style",
+ "Preset": "Preset",
"Performance": "Performance",
"Speed": "Speed",
"Quality": "Quality",
+ "Extreme Speed": "Extreme Speed",
+ "Lightning": "Lightning",
"Aspect Ratios": "Aspect Ratios",
"width \u00d7 height": "width \u00d7 height",
"Image Number": "Image Number",
@@ -48,9 +59,15 @@
"Describing what you do not want to see.": "Describing what you do not want to see.",
"Random": "Random",
"Seed": "Seed",
+ "Disable seed increment": "Disable seed increment",
+ "Disable automatic seed increment when image number is > 1.": "Disable automatic seed increment when image number is > 1.",
+ "Read wildcards in order": "Read wildcards in order",
+ "Black Out NSFW": "Black Out NSFW",
+ "Use black image if NSFW is detected.": "Use black image if NSFW is detected.",
"\ud83d\udcda History Log": "\uD83D\uDCDA History Log",
"Image Style": "Image Style",
"Fooocus V2": "Fooocus V2",
+ "Random Style": "Random Style",
"Default (Slightly Cinematic)": "Default (Slightly Cinematic)",
"Fooocus Masterpiece": "Fooocus Masterpiece",
"Fooocus Photograph": "Fooocus Photograph",
@@ -303,6 +320,8 @@
"vae": "vae",
"CFG Mimicking from TSNR": "CFG Mimicking from TSNR",
"Enabling Fooocus's implementation of CFG mimicking for TSNR (effective when real CFG > mimicked CFG).": "Enabling Fooocus's implementation of CFG mimicking for TSNR (effective when real CFG > mimicked CFG).",
+ "CLIP Skip": "CLIP Skip",
+ "Bypass CLIP layers to avoid overfitting (use 1 to not skip any layers, 2 is recommended).": "Bypass CLIP layers to avoid overfitting (use 1 to not skip any layers, 2 is recommended).",
"Sampler": "Sampler",
"dpmpp_2m_sde_gpu": "dpmpp_2m_sde_gpu",
"Only effective in non-inpaint mode.": "Only effective in non-inpaint mode.",
@@ -333,6 +352,8 @@
"sgm_uniform": "sgm_uniform",
"simple": "simple",
"ddim_uniform": "ddim_uniform",
+ "VAE": "VAE",
+ "Default (model)": "Default (model)",
"Forced Overwrite of Sampling Step": "Forced Overwrite of Sampling Step",
"Set as -1 to disable. For developer debugging.": "Set as -1 to disable. For developer debugging.",
"Forced Overwrite of Refiner Switch Step": "Forced Overwrite of Refiner Switch Step",
@@ -342,6 +363,10 @@
"Forced Overwrite of Denoising Strength of \"Vary\"": "Forced Overwrite of Denoising Strength of \"Vary\"",
"Set as negative number to disable. For developer debugging.": "Set as negative number to disable. For developer debugging.",
"Forced Overwrite of Denoising Strength of \"Upscale\"": "Forced Overwrite of Denoising Strength of \"Upscale\"",
+ "Disable Preview": "Disable Preview",
+ "Disable preview during generation.": "Disable preview during generation.",
+ "Disable Intermediate Results": "Disable Intermediate Results",
+ "Disable intermediate results during generation, only show final gallery.": "Disable intermediate results during generation, only show final gallery.",
"Inpaint Engine": "Inpaint Engine",
"v1": "v1",
"Version of Fooocus inpaint model": "Version of Fooocus inpaint model",
@@ -361,12 +386,19 @@
"B2": "B2",
"S1": "S1",
"S2": "S2",
- "Extreme Speed": "Extreme Speed",
"\uD83D\uDD0E Type here to search styles ...": "\uD83D\uDD0E Type here to search styles ...",
"Type prompt here.": "Type prompt here.",
"Outpaint Expansion Direction:": "Outpaint Expansion Direction:",
"* Powered by Fooocus Inpaint Engine (beta)": "* Powered by Fooocus Inpaint Engine (beta)",
"Fooocus Enhance": "Fooocus Enhance",
"Fooocus Cinematic": "Fooocus Cinematic",
- "Fooocus Sharp": "Fooocus Sharp"
+ "Fooocus Sharp": "Fooocus Sharp",
+ "For images created by Fooocus": "For images created by Fooocus",
+ "Metadata": "Metadata",
+ "Apply Metadata": "Apply Metadata",
+ "Metadata Scheme": "Metadata Scheme",
+ "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.",
+ "fooocus (json)": "fooocus (json)",
+ "a1111 (plain text)": "a1111 (plain text)",
+ "Unsupported image type in input": "Unsupported image type in input"
}
\ No newline at end of file
diff --git a/launch.py b/launch.py
index e98045f6..5d40cc5b 100644
--- a/launch.py
+++ b/launch.py
@@ -1,6 +1,6 @@
import os
-import sys
import ssl
+import sys
print('[System ARGV] ' + str(sys.argv))
@@ -10,20 +10,17 @@ os.chdir(root)
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
os.environ["PYTORCH_MPS_HIGH_WATERMARK_RATIO"] = "0.0"
-os.environ["GRADIO_SERVER_PORT"] = "7865"
+if "GRADIO_SERVER_PORT" not in os.environ:
+ os.environ["GRADIO_SERVER_PORT"] = "7865"
ssl._create_default_https_context = ssl._create_unverified_context
-
import platform
import fooocus_version
from build_launcher import build_launcher
-from modules.launch_util import is_installed, run, python, run_pip, requirements_met
+from modules.launch_util import is_installed, run, python, run_pip, requirements_met, delete_folder_content
from modules.model_loader import load_file_from_url
-from modules.config import path_checkpoints, path_loras, path_vae_approx, path_fooocus_expansion, \
- checkpoint_downloads, path_embeddings, embeddings_downloads, lora_downloads
-
REINSTALL_ALL = False
TRY_INSTALL_XFORMERS = False
@@ -43,7 +40,7 @@ def prepare_environment():
if TRY_INSTALL_XFORMERS:
if REINSTALL_ALL or not is_installed("xformers"):
- xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
+ xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.23')
if platform.system() == "Windows":
if platform.python_version().startswith("3.10"):
run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True)
@@ -65,30 +62,11 @@ def prepare_environment():
vae_approx_filenames = [
('xlvaeapp.pth', 'https://huggingface.co/lllyasviel/misc/resolve/main/xlvaeapp.pth'),
('vaeapp_sd15.pth', 'https://huggingface.co/lllyasviel/misc/resolve/main/vaeapp_sd15.pt'),
- ('xl-to-v1_interposer-v3.1.safetensors',
- 'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors')
+ ('xl-to-v1_interposer-v4.0.safetensors',
+ 'https://huggingface.co/mashb1t/misc/resolve/main/xl-to-v1_interposer-v4.0.safetensors')
]
-def download_models():
- for file_name, url in checkpoint_downloads.items():
- load_file_from_url(url=url, model_dir=path_checkpoints, file_name=file_name)
- for file_name, url in embeddings_downloads.items():
- load_file_from_url(url=url, model_dir=path_embeddings, file_name=file_name)
- for file_name, url in lora_downloads.items():
- load_file_from_url(url=url, model_dir=path_loras, file_name=file_name)
- for file_name, url in vae_approx_filenames:
- load_file_from_url(url=url, model_dir=path_vae_approx, file_name=file_name)
-
- load_file_from_url(
- url='https://huggingface.co/lllyasviel/misc/resolve/main/fooocus_expansion.bin',
- model_dir=path_fooocus_expansion,
- file_name='pytorch_model.bin'
- )
-
- return
-
-
def ini_args():
from args_manager import args
return args
@@ -98,12 +76,65 @@ prepare_environment()
build_launcher()
args = ini_args()
-
if args.gpu_device_id is not None:
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id)
print("Set device to:", args.gpu_device_id)
+if args.hf_mirror is not None:
+ os.environ['HF_MIRROR'] = str(args.hf_mirror)
+ print("Set hf_mirror to:", args.hf_mirror)
-download_models()
+from modules import config
+
+os.environ['GRADIO_TEMP_DIR'] = config.temp_path
+
+if config.temp_path_cleanup_on_launch:
+ print(f'[Cleanup] Attempting to delete content of temp dir {config.temp_path}')
+ result = delete_folder_content(config.temp_path, '[Cleanup] ')
+ if result:
+ print("[Cleanup] Cleanup successful")
+ else:
+ print(f"[Cleanup] Failed to delete content of temp dir.")
+
+
+def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads):
+ for file_name, url in vae_approx_filenames:
+ load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name)
+
+ load_file_from_url(
+ url='https://huggingface.co/lllyasviel/misc/resolve/main/fooocus_expansion.bin',
+ model_dir=config.path_fooocus_expansion,
+ file_name='pytorch_model.bin'
+ )
+
+ if args.disable_preset_download:
+ print('Skipped model download.')
+ return default_model, checkpoint_downloads
+
+ if not args.always_download_new_model:
+ if not os.path.exists(os.path.join(config.paths_checkpoints[0], default_model)):
+ for alternative_model_name in previous_default_models:
+ if os.path.exists(os.path.join(config.paths_checkpoints[0], alternative_model_name)):
+ print(f'You do not have [{default_model}] but you have [{alternative_model_name}].')
+ print(f'Fooocus will use [{alternative_model_name}] to avoid downloading new models, '
+ f'but you are not using the latest models.')
+ print('Use --always-download-new-model to avoid fallback and always get new models.')
+ checkpoint_downloads = {}
+ default_model = alternative_model_name
+ break
+
+ for file_name, url in checkpoint_downloads.items():
+ load_file_from_url(url=url, model_dir=config.paths_checkpoints[0], file_name=file_name)
+ for file_name, url in embeddings_downloads.items():
+ load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name)
+ for file_name, url in lora_downloads.items():
+ load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name)
+
+ return default_model, checkpoint_downloads
+
+
+config.default_base_model_name, config.checkpoint_downloads = download_models(
+ config.default_base_model_name, config.previous_default_models, config.checkpoint_downloads,
+ config.embeddings_downloads, config.lora_downloads)
from webui import *
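Note on the launch.py rework above: download_models() now falls back to a previously shipped default checkpoint when the configured default is missing, unless --always-download-new-model is set, in which case the download list is kept as-is. A standalone sketch of that decision (directory layout and model names are placeholders):

    import os

    def resolve_default_model(default_model, previous_defaults, checkpoint_dir, always_download=False):
        # Keep the configured default if it already exists or fresh downloads are forced.
        if always_download or os.path.exists(os.path.join(checkpoint_dir, default_model)):
            return default_model, True          # (model to use, keep download list)
        # Otherwise reuse the first older default found on disk and skip checkpoint downloads.
        for older in previous_defaults:
            if os.path.exists(os.path.join(checkpoint_dir, older)):
                return older, False
        return default_model, True

    model, keep_downloads = resolve_default_model(
        "new_default.safetensors", ["old_default.safetensors"], "./models/checkpoints")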
diff --git a/ldm_patched/contrib/external.py b/ldm_patched/contrib/external.py
index 7f95f084..927cd3f3 100644
--- a/ldm_patched/contrib/external.py
+++ b/ldm_patched/contrib/external.py
@@ -11,7 +11,7 @@ import math
import time
import random
-from PIL import Image, ImageOps
+from PIL import Image, ImageOps, ImageSequence
from PIL.PngImagePlugin import PngInfo
import numpy as np
import safetensors.torch
@@ -361,6 +361,62 @@ class VAEEncodeForInpaint:
return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, )
+
+class InpaintModelConditioning:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"positive": ("CONDITIONING", ),
+ "negative": ("CONDITIONING", ),
+ "vae": ("VAE", ),
+ "pixels": ("IMAGE", ),
+ "mask": ("MASK", ),
+ }}
+
+ RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT")
+ RETURN_NAMES = ("positive", "negative", "latent")
+ FUNCTION = "encode"
+
+ CATEGORY = "conditioning/inpaint"
+
+ def encode(self, positive, negative, pixels, vae, mask):
+ x = (pixels.shape[1] // 8) * 8
+ y = (pixels.shape[2] // 8) * 8
+ mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")
+
+ orig_pixels = pixels
+ pixels = orig_pixels.clone()
+ if pixels.shape[1] != x or pixels.shape[2] != y:
+ x_offset = (pixels.shape[1] % 8) // 2
+ y_offset = (pixels.shape[2] % 8) // 2
+ pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
+ mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset]
+
+ m = (1.0 - mask.round()).squeeze(1)
+ for i in range(3):
+ pixels[:,:,:,i] -= 0.5
+ pixels[:,:,:,i] *= m
+ pixels[:,:,:,i] += 0.5
+ concat_latent = vae.encode(pixels)
+ orig_latent = vae.encode(orig_pixels)
+
+ out_latent = {}
+
+ out_latent["samples"] = orig_latent
+ out_latent["noise_mask"] = mask
+
+ out = []
+ for conditioning in [positive, negative]:
+ c = []
+ for t in conditioning:
+ d = t[1].copy()
+ d["concat_latent_image"] = concat_latent
+ d["concat_mask"] = mask
+ n = [t[0], d]
+ c.append(n)
+ out.append(c)
+ return (out[0], out[1], out_latent)
+
+
class SaveLatent:
def __init__(self):
self.output_dir = ldm_patched.utils.path_utils.get_output_directory()
@@ -1412,17 +1468,32 @@ class LoadImage:
FUNCTION = "load_image"
def load_image(self, image):
image_path = ldm_patched.utils.path_utils.get_annotated_filepath(image)
- i = Image.open(image_path)
- i = ImageOps.exif_transpose(i)
- image = i.convert("RGB")
- image = np.array(image).astype(np.float32) / 255.0
- image = torch.from_numpy(image)[None,]
- if 'A' in i.getbands():
- mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
- mask = 1. - torch.from_numpy(mask)
+ img = Image.open(image_path)
+ output_images = []
+ output_masks = []
+ for i in ImageSequence.Iterator(img):
+ i = ImageOps.exif_transpose(i)
+ if i.mode == 'I':
+ i = i.point(lambda i: i * (1 / 255))
+ image = i.convert("RGB")
+ image = np.array(image).astype(np.float32) / 255.0
+ image = torch.from_numpy(image)[None,]
+ if 'A' in i.getbands():
+ mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
+ mask = 1. - torch.from_numpy(mask)
+ else:
+ mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
+ output_images.append(image)
+ output_masks.append(mask.unsqueeze(0))
+
+ if len(output_images) > 1:
+ output_image = torch.cat(output_images, dim=0)
+ output_mask = torch.cat(output_masks, dim=0)
else:
- mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
- return (image, mask.unsqueeze(0))
+ output_image = output_images[0]
+ output_mask = output_masks[0]
+
+ return (output_image, output_mask)
@classmethod
def IS_CHANGED(s, image):
@@ -1459,6 +1530,8 @@ class LoadImageMask:
i = Image.open(image_path)
i = ImageOps.exif_transpose(i)
if i.getbands() != ("R", "G", "B", "A"):
+ if i.mode == 'I':
+ i = i.point(lambda i: i * (1 / 255))
i = i.convert("RGBA")
mask = None
c = channel[0].upper()
@@ -1480,13 +1553,10 @@ class LoadImageMask:
return m.digest().hex()
@classmethod
- def VALIDATE_INPUTS(s, image, channel):
+ def VALIDATE_INPUTS(s, image):
if not ldm_patched.utils.path_utils.exists_annotated_filepath(image):
return "Invalid image file: {}".format(image)
- if channel not in s._color_channels:
- return "Invalid color channel: {}".format(channel)
-
return True
class ImageScale:
@@ -1616,10 +1686,11 @@ class ImagePadForOutpaint:
def expand_image(self, image, left, top, right, bottom, feathering):
d1, d2, d3, d4 = image.size()
- new_image = torch.zeros(
+ new_image = torch.ones(
(d1, d2 + top + bottom, d3 + left + right, d4),
dtype=torch.float32,
- )
+ ) * 0.5
+
new_image[:, top:top + d2, left:left + d3, :] = image
mask = torch.ones(
@@ -1711,6 +1782,7 @@ NODE_CLASS_MAPPINGS = {
"unCLIPCheckpointLoader": unCLIPCheckpointLoader,
"GLIGENLoader": GLIGENLoader,
"GLIGENTextBoxApply": GLIGENTextBoxApply,
+ "InpaintModelConditioning": InpaintModelConditioning,
"CheckpointLoader": CheckpointLoader,
"DiffusersLoader": DiffusersLoader,
@@ -1871,6 +1943,9 @@ def init_custom_nodes():
"nodes_video_model.py",
"nodes_sag.py",
"nodes_perpneg.py",
+ "nodes_stable3d.py",
+ "nodes_sdupscale.py",
+ "nodes_photomaker.py",
]
for node_file in extras_files:
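Note on the InpaintModelConditioning node added above: pixels under the inpaint mask are re-centred around 0.5 and multiplied by the inverted mask, so the region to be repainted collapses to neutral gray before VAE encoding and the concat latent carries no content there. A small NumPy sketch of that per-channel operation (shapes reduced for illustration):

    import numpy as np

    pixels = np.random.rand(1, 8, 8, 3).astype(np.float32)   # NHWC image in [0, 1]
    mask = np.zeros((1, 1, 8, 8), dtype=np.float32)
    mask[..., 2:6, 2:6] = 1.0                                 # region to inpaint

    m = 1.0 - np.round(mask).squeeze(1)                       # 1 where pixels are kept
    for c in range(3):
        pixels[..., c] = (pixels[..., c] - 0.5) * m + 0.5     # masked area becomes 0.5

    assert np.allclose(pixels[0, 3, 3], 0.5)                  # inside the mask: neutral gray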
diff --git a/ldm_patched/contrib/external_align_your_steps.py b/ldm_patched/contrib/external_align_your_steps.py
new file mode 100644
index 00000000..624bbce2
--- /dev/null
+++ b/ldm_patched/contrib/external_align_your_steps.py
@@ -0,0 +1,55 @@
+# https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py
+
+#from: https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html
+import numpy as np
+import torch
+
+def loglinear_interp(t_steps, num_steps):
+ """
+ Performs log-linear interpolation of a given array of decreasing numbers.
+ """
+ xs = np.linspace(0, 1, len(t_steps))
+ ys = np.log(t_steps[::-1])
+
+ new_xs = np.linspace(0, 1, num_steps)
+ new_ys = np.interp(new_xs, xs, ys)
+
+ interped_ys = np.exp(new_ys)[::-1].copy()
+ return interped_ys
+
+NOISE_LEVELS = {"SD1": [14.6146412293, 6.4745760956, 3.8636745985, 2.6946151520, 1.8841921177, 1.3943805092, 0.9642583904, 0.6523686016, 0.3977456272, 0.1515232662, 0.0291671582],
+ "SDXL":[14.6146412293, 6.3184485287, 3.7681790315, 2.1811480769, 1.3405244945, 0.8620721141, 0.5550693289, 0.3798540708, 0.2332364134, 0.1114188177, 0.0291671582],
+ "SVD": [700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002]}
+
+class AlignYourStepsScheduler:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"model_type": (["SD1", "SDXL", "SVD"], ),
+ "steps": ("INT", {"default": 10, "min": 10, "max": 10000}),
+ "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+ }
+ }
+ RETURN_TYPES = ("SIGMAS",)
+ CATEGORY = "sampling/custom_sampling/schedulers"
+
+ FUNCTION = "get_sigmas"
+
+ def get_sigmas(self, model_type, steps, denoise):
+ total_steps = steps
+ if denoise < 1.0:
+ if denoise <= 0.0:
+ return (torch.FloatTensor([]),)
+ total_steps = round(steps * denoise)
+
+ sigmas = NOISE_LEVELS[model_type][:]
+ if (steps + 1) != len(sigmas):
+ sigmas = loglinear_interp(sigmas, steps + 1)
+
+ sigmas = sigmas[-(total_steps + 1):]
+ sigmas[-1] = 0
+ return (torch.FloatTensor(sigmas), )
+
+NODE_CLASS_MAPPINGS = {
+ "AlignYourStepsScheduler": AlignYourStepsScheduler,
+}
\ No newline at end of file
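The AlignYourStepsScheduler above resamples the 11 published noise levels to an arbitrary step count by interpolating log(sigma) on a uniform grid; the endpoints of the table are preserved exactly. A standalone check of that behaviour (the helper is restated so the example runs on its own; the SDXL sigmas are rounded to 3 decimals):

    import numpy as np

    def loglinear_interp(t_steps, num_steps):
        # Interpolate a decreasing sigma list in log-space on a uniform 0..1 grid.
        xs = np.linspace(0, 1, len(t_steps))
        ys = np.log(np.asarray(t_steps)[::-1])
        new_ys = np.interp(np.linspace(0, 1, num_steps), xs, ys)
        return np.exp(new_ys)[::-1].copy()

    sdxl = [14.615, 6.318, 3.768, 2.181, 1.341, 0.862, 0.555, 0.380, 0.233, 0.111, 0.029]
    sigmas = loglinear_interp(sdxl, 21)                # 20 sampling steps need 21 sigma values
    print(round(sigmas[0], 3), round(sigmas[-1], 3))   # 14.615 0.029 - endpoints unchanged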
diff --git a/ldm_patched/contrib/external_canny.py b/ldm_patched/contrib/external_canny.py
index 42c22210..7347ba1e 100644
--- a/ldm_patched/contrib/external_canny.py
+++ b/ldm_patched/contrib/external_canny.py
@@ -78,7 +78,7 @@ def spatial_gradient(input, normalized: bool = True):
Return:
the derivatives of the input feature map. with shape :math:`(B, C, 2, H, W)`.
.. note::
- See a working example `here `__.
Examples:
>>> input = torch.rand(1, 3, 4, 4)
@@ -120,7 +120,7 @@ def rgb_to_grayscale(image, rgb_weights = None):
grayscale version of the image with shape :math:`(*,1,H,W)`.
.. note::
- See a working example `here `__.
Example:
@@ -176,7 +176,7 @@ def canny(
- the canny edge magnitudes map, shape of :math:`(B,1,H,W)`.
- the canny edge detection filtered by thresholds and hysteresis, shape of :math:`(B,1,H,W)`.
.. note::
- See a working example `here `__.
Example:
>>> input = torch.rand(5, 3, 4, 4)
diff --git a/ldm_patched/contrib/external_custom_sampler.py b/ldm_patched/contrib/external_custom_sampler.py
index 9413a58f..60d5e3bd 100644
--- a/ldm_patched/contrib/external_custom_sampler.py
+++ b/ldm_patched/contrib/external_custom_sampler.py
@@ -15,6 +15,7 @@ class BasicScheduler:
{"model": ("MODEL",),
"scheduler": (ldm_patched.modules.samplers.SCHEDULER_NAMES, ),
"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+ "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
}
}
RETURN_TYPES = ("SIGMAS",)
@@ -22,8 +23,14 @@ class BasicScheduler:
FUNCTION = "get_sigmas"
- def get_sigmas(self, model, scheduler, steps):
- sigmas = ldm_patched.modules.samplers.calculate_sigmas_scheduler(model.model, scheduler, steps).cpu()
+ def get_sigmas(self, model, scheduler, steps, denoise):
+ total_steps = steps
+ if denoise < 1.0:
+ total_steps = int(steps/denoise)
+
+ ldm_patched.modules.model_management.load_models_gpu([model])
+ sigmas = ldm_patched.modules.samplers.calculate_sigmas_scheduler(model.model, scheduler, total_steps).cpu()
+ sigmas = sigmas[-(steps + 1):]
return (sigmas, )
@@ -89,6 +96,7 @@ class SDTurboScheduler:
return {"required":
{"model": ("MODEL",),
"steps": ("INT", {"default": 1, "min": 1, "max": 10}),
+ "denoise": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
}
}
RETURN_TYPES = ("SIGMAS",)
@@ -96,9 +104,10 @@ class SDTurboScheduler:
FUNCTION = "get_sigmas"
- def get_sigmas(self, model, steps):
- timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[:steps]
- sigmas = model.model.model_sampling.sigma(timesteps)
+ def get_sigmas(self, model, steps, denoise):
+ start_step = 10 - int(10 * denoise)
+ timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[start_step:start_step + steps]
+ sigmas = model.model_sampling.sigma(timesteps)
sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])
return (sigmas, )
@@ -220,6 +229,25 @@ class SamplerDPMPP_SDE:
sampler = ldm_patched.modules.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "r": r})
return (sampler, )
+
+class SamplerTCD:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "eta": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
+ }
+ }
+ RETURN_TYPES = ("SAMPLER",)
+ CATEGORY = "sampling/custom_sampling/samplers"
+
+ FUNCTION = "get_sampler"
+
+ def get_sampler(self, eta=0.3):
+ sampler = ldm_patched.modules.samplers.ksampler("tcd", {"eta": eta})
+ return (sampler, )
+
+
class SamplerCustom:
@classmethod
def INPUT_TYPES(s):
@@ -282,6 +310,7 @@ NODE_CLASS_MAPPINGS = {
"KSamplerSelect": KSamplerSelect,
"SamplerDPMPP_2M_SDE": SamplerDPMPP_2M_SDE,
"SamplerDPMPP_SDE": SamplerDPMPP_SDE,
+ "SamplerTCD": SamplerTCD,
"SplitSigmas": SplitSigmas,
"FlipSigmas": FlipSigmas,
}
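With the new denoise input, BasicScheduler builds a longer full schedule (int(steps / denoise) steps) and keeps only its last steps + 1 sigmas, so partial denoising starts from a mid-schedule sigma; SDTurboScheduler instead shifts its timestep window by 10 - int(10 * denoise). A rough sketch of the BasicScheduler slicing on a toy linear schedule (the real sigmas come from calculate_sigmas_scheduler):

    import numpy as np

    def tail_sigmas(steps, denoise, full_schedule_fn):
        # denoise < 1: compute int(steps / denoise) steps, keep the last steps + 1 sigmas.
        total_steps = steps if denoise >= 1.0 else int(steps / denoise)
        sigmas = full_schedule_fn(total_steps)
        return sigmas[-(steps + 1):]

    toy = lambda n: np.linspace(10.0, 0.0, n + 1)   # stand-in for the model's scheduler
    print(tail_sigmas(10, 1.0, toy))                # full 11-value schedule, 10.0 .. 0.0
    print(tail_sigmas(10, 0.5, toy))                # last 11 of 21 values, starts at 5.0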
diff --git a/ldm_patched/contrib/external_freelunch.py b/ldm_patched/contrib/external_freelunch.py
index f8dd5a44..59ec5bab 100644
--- a/ldm_patched/contrib/external_freelunch.py
+++ b/ldm_patched/contrib/external_freelunch.py
@@ -36,7 +36,7 @@ class FreeU:
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
- CATEGORY = "_for_testing"
+ CATEGORY = "model_patches"
def patch(self, model, b1, b2, s1, s2):
model_channels = model.model.model_config.unet_config["model_channels"]
@@ -75,7 +75,7 @@ class FreeU_V2:
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
- CATEGORY = "_for_testing"
+ CATEGORY = "model_patches"
def patch(self, model, b1, b2, s1, s2):
model_channels = model.model.model_config.unet_config["model_channels"]
diff --git a/ldm_patched/contrib/external_hypertile.py b/ldm_patched/contrib/external_hypertile.py
index 45f7c3ea..5cf7d9d6 100644
--- a/ldm_patched/contrib/external_hypertile.py
+++ b/ldm_patched/contrib/external_hypertile.py
@@ -34,29 +34,29 @@ class HyperTile:
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
- CATEGORY = "_for_testing"
+ CATEGORY = "model_patches"
def patch(self, model, tile_size, swap_size, max_depth, scale_depth):
model_channels = model.model.model_config.unet_config["model_channels"]
- apply_to = set()
- temp = model_channels
- for x in range(max_depth + 1):
- apply_to.add(temp)
- temp *= 2
-
latent_tile_size = max(32, tile_size) // 8
self.temp = None
def hypertile_in(q, k, v, extra_options):
- if q.shape[-1] in apply_to:
+ model_chans = q.shape[-2]
+ orig_shape = extra_options['original_shape']
+ apply_to = []
+ for i in range(max_depth + 1):
+ apply_to.append((orig_shape[-2] / (2 ** i)) * (orig_shape[-1] / (2 ** i)))
+
+ if model_chans in apply_to:
shape = extra_options["original_shape"]
aspect_ratio = shape[-1] / shape[-2]
hw = q.size(1)
h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))
- factor = 2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1
+ factor = (2 ** apply_to.index(model_chans)) if scale_depth else 1
nh = random_divisor(h, latent_tile_size * factor, swap_size)
nw = random_divisor(w, latent_tile_size * factor, swap_size)
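The reworked HyperTile patch decides whether to tile by comparing the attention token count (q.shape[-2]) against the token counts expected at each UNet depth, derived from the latent's original shape instead of from channel widths. A small sketch of that lookup table, assuming a 128x128 latent (1024x1024 pixels) and factor-2 downsampling per depth:

    latent_h, latent_w = 128, 128      # original_shape[-2:]
    max_depth = 3

    # Token count at depth i: (H / 2**i) * (W / 2**i), matching q.shape[-2] at that depth.
    apply_to = [(latent_h / 2 ** i) * (latent_w / 2 ** i) for i in range(max_depth + 1)]
    print(apply_to)                    # [16384.0, 4096.0, 1024.0, 256.0]

    model_chans = 4096                 # e.g. a 64x64 self-attention block
    if model_chans in apply_to:
        factor = 2 ** apply_to.index(model_chans)   # scale_depth=True: bigger tiles deeper
        print("tile factor:", factor)               # 2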
diff --git a/ldm_patched/contrib/external_latent.py b/ldm_patched/contrib/external_latent.py
index c6f874e1..6d753d0f 100644
--- a/ldm_patched/contrib/external_latent.py
+++ b/ldm_patched/contrib/external_latent.py
@@ -124,10 +124,34 @@ class LatentBatch:
samples_out["batch_index"] = samples1.get("batch_index", [x for x in range(0, s1.shape[0])]) + samples2.get("batch_index", [x for x in range(0, s2.shape[0])])
return (samples_out,)
+class LatentBatchSeedBehavior:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "samples": ("LATENT",),
+ "seed_behavior": (["random", "fixed"],),}}
+
+ RETURN_TYPES = ("LATENT",)
+ FUNCTION = "op"
+
+ CATEGORY = "latent/advanced"
+
+ def op(self, samples, seed_behavior):
+ samples_out = samples.copy()
+ latent = samples["samples"]
+ if seed_behavior == "random":
+ if 'batch_index' in samples_out:
+ samples_out.pop('batch_index')
+ elif seed_behavior == "fixed":
+ batch_number = samples_out.get("batch_index", [0])[0]
+ samples_out["batch_index"] = [batch_number] * latent.shape[0]
+
+ return (samples_out,)
+
NODE_CLASS_MAPPINGS = {
"LatentAdd": LatentAdd,
"LatentSubtract": LatentSubtract,
"LatentMultiply": LatentMultiply,
"LatentInterpolate": LatentInterpolate,
"LatentBatch": LatentBatch,
+ "LatentBatchSeedBehavior": LatentBatchSeedBehavior,
}
diff --git a/ldm_patched/contrib/external_mask.py b/ldm_patched/contrib/external_mask.py
index ab1da4c6..a86a7fe6 100644
--- a/ldm_patched/contrib/external_mask.py
+++ b/ldm_patched/contrib/external_mask.py
@@ -8,6 +8,7 @@ import ldm_patched.modules.utils
from ldm_patched.contrib.external import MAX_RESOLUTION
def composite(destination, source, x, y, mask = None, multiplier = 8, resize_source = False):
+ source = source.to(destination.device)
if resize_source:
source = torch.nn.functional.interpolate(source, size=(destination.shape[2], destination.shape[3]), mode="bilinear")
@@ -22,7 +23,7 @@ def composite(destination, source, x, y, mask = None, multiplier = 8, resize_sou
if mask is None:
mask = torch.ones_like(source)
else:
- mask = mask.clone()
+ mask = mask.to(destination.device, copy=True)
mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(source.shape[2], source.shape[3]), mode="bilinear")
mask = ldm_patched.modules.utils.repeat_to_batch_size(mask, source.shape[0])
diff --git a/ldm_patched/contrib/external_model_advanced.py b/ldm_patched/contrib/external_model_advanced.py
index 03a2f045..b9f0ebdc 100644
--- a/ldm_patched/contrib/external_model_advanced.py
+++ b/ldm_patched/contrib/external_model_advanced.py
@@ -70,7 +70,7 @@ class ModelSamplingDiscrete:
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
- "sampling": (["eps", "v_prediction", "lcm"],),
+                              "sampling": (["eps", "v_prediction", "lcm", "tcd"],),
"zsnr": ("BOOLEAN", {"default": False}),
}}
@@ -90,6 +90,9 @@ class ModelSamplingDiscrete:
elif sampling == "lcm":
sampling_type = LCM
sampling_base = ModelSamplingDiscreteDistilled
+ elif sampling == "tcd":
+ sampling_type = ldm_patched.modules.model_sampling.EPS
+ sampling_base = ModelSamplingDiscreteDistilled
class ModelSamplingAdvanced(sampling_base, sampling_type):
pass
@@ -105,7 +108,7 @@ class ModelSamplingContinuousEDM:
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
- "sampling": (["v_prediction", "eps"],),
+ "sampling": (["v_prediction", "edm_playground_v2.5", "eps"],),
"sigma_max": ("FLOAT", {"default": 120.0, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
"sigma_min": ("FLOAT", {"default": 0.002, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
}}
@@ -118,17 +121,25 @@ class ModelSamplingContinuousEDM:
def patch(self, model, sampling, sigma_max, sigma_min):
m = model.clone()
+ latent_format = None
+ sigma_data = 1.0
if sampling == "eps":
sampling_type = ldm_patched.modules.model_sampling.EPS
elif sampling == "v_prediction":
sampling_type = ldm_patched.modules.model_sampling.V_PREDICTION
+ elif sampling == "edm_playground_v2.5":
+ sampling_type = ldm_patched.modules.model_sampling.EDM
+ sigma_data = 0.5
+ latent_format = ldm_patched.modules.latent_formats.SDXL_Playground_2_5()
class ModelSamplingAdvanced(ldm_patched.modules.model_sampling.ModelSamplingContinuousEDM, sampling_type):
pass
model_sampling = ModelSamplingAdvanced(model.model.model_config)
- model_sampling.set_sigma_range(sigma_min, sigma_max)
+ model_sampling.set_parameters(sigma_min, sigma_max, sigma_data)
m.add_object_patch("model_sampling", model_sampling)
+ if latent_format is not None:
+ m.add_object_patch("latent_format", latent_format)
return (m, )
class RescaleCFG:
diff --git a/ldm_patched/contrib/external_model_merging.py b/ldm_patched/contrib/external_model_merging.py
index c0cf9afd..ae8145d4 100644
--- a/ldm_patched/contrib/external_model_merging.py
+++ b/ldm_patched/contrib/external_model_merging.py
@@ -121,6 +121,48 @@ class ModelMergeBlocks:
m.add_patches({k: kp[k]}, 1.0 - ratio, ratio)
return (m, )
+def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefix=None, output_dir=None, prompt=None, extra_pnginfo=None):
+ full_output_folder, filename, counter, subfolder, filename_prefix = ldm_patched.utils.path_utils.get_save_image_path(filename_prefix, output_dir)
+ prompt_info = ""
+ if prompt is not None:
+ prompt_info = json.dumps(prompt)
+
+ metadata = {}
+
+ enable_modelspec = True
+ if isinstance(model.model, ldm_patched.modules.model_base.SDXL):
+ metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-base"
+ elif isinstance(model.model, ldm_patched.modules.model_base.SDXLRefiner):
+ metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-refiner"
+ else:
+ enable_modelspec = False
+
+ if enable_modelspec:
+ metadata["modelspec.sai_model_spec"] = "1.0.0"
+ metadata["modelspec.implementation"] = "sgm"
+ metadata["modelspec.title"] = "{} {}".format(filename, counter)
+
+ #TODO:
+ # "stable-diffusion-v1", "stable-diffusion-v1-inpainting", "stable-diffusion-v2-512",
+ # "stable-diffusion-v2-768-v", "stable-diffusion-v2-unclip-l", "stable-diffusion-v2-unclip-h",
+ # "v2-inpainting"
+
+ if model.model.model_type == ldm_patched.modules.model_base.ModelType.EPS:
+ metadata["modelspec.predict_key"] = "epsilon"
+ elif model.model.model_type == ldm_patched.modules.model_base.ModelType.V_PREDICTION:
+ metadata["modelspec.predict_key"] = "v"
+
+ if not args.disable_server_info:
+ metadata["prompt"] = prompt_info
+ if extra_pnginfo is not None:
+ for x in extra_pnginfo:
+ metadata[x] = json.dumps(extra_pnginfo[x])
+
+ output_checkpoint = f"{filename}_{counter:05}_.safetensors"
+ output_checkpoint = os.path.join(full_output_folder, output_checkpoint)
+
+ ldm_patched.modules.sd.save_checkpoint(output_checkpoint, model, clip, vae, clip_vision, metadata=metadata)
+
class CheckpointSave:
def __init__(self):
self.output_dir = ldm_patched.utils.path_utils.get_output_directory()
@@ -139,46 +181,7 @@ class CheckpointSave:
CATEGORY = "advanced/model_merging"
def save(self, model, clip, vae, filename_prefix, prompt=None, extra_pnginfo=None):
- full_output_folder, filename, counter, subfolder, filename_prefix = ldm_patched.utils.path_utils.get_save_image_path(filename_prefix, self.output_dir)
- prompt_info = ""
- if prompt is not None:
- prompt_info = json.dumps(prompt)
-
- metadata = {}
-
- enable_modelspec = True
- if isinstance(model.model, ldm_patched.modules.model_base.SDXL):
- metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-base"
- elif isinstance(model.model, ldm_patched.modules.model_base.SDXLRefiner):
- metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-refiner"
- else:
- enable_modelspec = False
-
- if enable_modelspec:
- metadata["modelspec.sai_model_spec"] = "1.0.0"
- metadata["modelspec.implementation"] = "sgm"
- metadata["modelspec.title"] = "{} {}".format(filename, counter)
-
- #TODO:
- # "stable-diffusion-v1", "stable-diffusion-v1-inpainting", "stable-diffusion-v2-512",
- # "stable-diffusion-v2-768-v", "stable-diffusion-v2-unclip-l", "stable-diffusion-v2-unclip-h",
- # "v2-inpainting"
-
- if model.model.model_type == ldm_patched.modules.model_base.ModelType.EPS:
- metadata["modelspec.predict_key"] = "epsilon"
- elif model.model.model_type == ldm_patched.modules.model_base.ModelType.V_PREDICTION:
- metadata["modelspec.predict_key"] = "v"
-
- if not args.disable_server_info:
- metadata["prompt"] = prompt_info
- if extra_pnginfo is not None:
- for x in extra_pnginfo:
- metadata[x] = json.dumps(extra_pnginfo[x])
-
- output_checkpoint = f"{filename}_{counter:05}_.safetensors"
- output_checkpoint = os.path.join(full_output_folder, output_checkpoint)
-
- ldm_patched.modules.sd.save_checkpoint(output_checkpoint, model, clip, vae, metadata=metadata)
+ save_checkpoint(model, clip=clip, vae=vae, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo)
return {}
class CLIPSave:
diff --git a/ldm_patched/contrib/external_photomaker.py b/ldm_patched/contrib/external_photomaker.py
new file mode 100644
index 00000000..cc7f6710
--- /dev/null
+++ b/ldm_patched/contrib/external_photomaker.py
@@ -0,0 +1,189 @@
+# https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py
+
+import torch
+import torch.nn as nn
+import ldm_patched.utils.path_utils
+import ldm_patched.modules.clip_model
+import ldm_patched.modules.clip_vision
+import ldm_patched.modules.ops
+
+# code for model from: https://github.com/TencentARC/PhotoMaker/blob/main/photomaker/model.py under Apache License Version 2.0
+VISION_CONFIG_DICT = {
+ "hidden_size": 1024,
+ "image_size": 224,
+ "intermediate_size": 4096,
+ "num_attention_heads": 16,
+ "num_channels": 3,
+ "num_hidden_layers": 24,
+ "patch_size": 14,
+ "projection_dim": 768,
+ "hidden_act": "quick_gelu",
+}
+
+class MLP(nn.Module):
+ def __init__(self, in_dim, out_dim, hidden_dim, use_residual=True, operations=ldm_patched.modules.ops):
+ super().__init__()
+ if use_residual:
+ assert in_dim == out_dim
+ self.layernorm = operations.LayerNorm(in_dim)
+ self.fc1 = operations.Linear(in_dim, hidden_dim)
+ self.fc2 = operations.Linear(hidden_dim, out_dim)
+ self.use_residual = use_residual
+ self.act_fn = nn.GELU()
+
+ def forward(self, x):
+ residual = x
+ x = self.layernorm(x)
+ x = self.fc1(x)
+ x = self.act_fn(x)
+ x = self.fc2(x)
+ if self.use_residual:
+ x = x + residual
+ return x
+
+
+class FuseModule(nn.Module):
+ def __init__(self, embed_dim, operations):
+ super().__init__()
+ self.mlp1 = MLP(embed_dim * 2, embed_dim, embed_dim, use_residual=False, operations=operations)
+ self.mlp2 = MLP(embed_dim, embed_dim, embed_dim, use_residual=True, operations=operations)
+ self.layer_norm = operations.LayerNorm(embed_dim)
+
+ def fuse_fn(self, prompt_embeds, id_embeds):
+ stacked_id_embeds = torch.cat([prompt_embeds, id_embeds], dim=-1)
+ stacked_id_embeds = self.mlp1(stacked_id_embeds) + prompt_embeds
+ stacked_id_embeds = self.mlp2(stacked_id_embeds)
+ stacked_id_embeds = self.layer_norm(stacked_id_embeds)
+ return stacked_id_embeds
+
+ def forward(
+ self,
+ prompt_embeds,
+ id_embeds,
+ class_tokens_mask,
+ ) -> torch.Tensor:
+ # id_embeds shape: [b, max_num_inputs, 1, 2048]
+ id_embeds = id_embeds.to(prompt_embeds.dtype)
+ num_inputs = class_tokens_mask.sum().unsqueeze(0) # TODO: check for training case
+ batch_size, max_num_inputs = id_embeds.shape[:2]
+ # seq_length: 77
+ seq_length = prompt_embeds.shape[1]
+ # flat_id_embeds shape: [b*max_num_inputs, 1, 2048]
+ flat_id_embeds = id_embeds.view(
+ -1, id_embeds.shape[-2], id_embeds.shape[-1]
+ )
+ # valid_id_mask [b*max_num_inputs]
+ valid_id_mask = (
+ torch.arange(max_num_inputs, device=flat_id_embeds.device)[None, :]
+ < num_inputs[:, None]
+ )
+ valid_id_embeds = flat_id_embeds[valid_id_mask.flatten()]
+
+ prompt_embeds = prompt_embeds.view(-1, prompt_embeds.shape[-1])
+ class_tokens_mask = class_tokens_mask.view(-1)
+ valid_id_embeds = valid_id_embeds.view(-1, valid_id_embeds.shape[-1])
+ # slice out the image token embeddings
+ image_token_embeds = prompt_embeds[class_tokens_mask]
+ stacked_id_embeds = self.fuse_fn(image_token_embeds, valid_id_embeds)
+ assert class_tokens_mask.sum() == stacked_id_embeds.shape[0], f"{class_tokens_mask.sum()} != {stacked_id_embeds.shape[0]}"
+ prompt_embeds.masked_scatter_(class_tokens_mask[:, None], stacked_id_embeds.to(prompt_embeds.dtype))
+ updated_prompt_embeds = prompt_embeds.view(batch_size, seq_length, -1)
+ return updated_prompt_embeds
+
+class PhotoMakerIDEncoder(ldm_patched.modules.clip_model.CLIPVisionModelProjection):
+ def __init__(self):
+ self.load_device = ldm_patched.modules.model_management.text_encoder_device()
+ offload_device = ldm_patched.modules.model_management.text_encoder_offload_device()
+ dtype = ldm_patched.modules.model_management.text_encoder_dtype(self.load_device)
+
+ super().__init__(VISION_CONFIG_DICT, dtype, offload_device, ldm_patched.modules.ops.manual_cast)
+ self.visual_projection_2 = ldm_patched.modules.ops.manual_cast.Linear(1024, 1280, bias=False)
+ self.fuse_module = FuseModule(2048, ldm_patched.modules.ops.manual_cast)
+
+ def forward(self, id_pixel_values, prompt_embeds, class_tokens_mask):
+ b, num_inputs, c, h, w = id_pixel_values.shape
+ id_pixel_values = id_pixel_values.view(b * num_inputs, c, h, w)
+
+ shared_id_embeds = self.vision_model(id_pixel_values)[2]
+ id_embeds = self.visual_projection(shared_id_embeds)
+ id_embeds_2 = self.visual_projection_2(shared_id_embeds)
+
+ id_embeds = id_embeds.view(b, num_inputs, 1, -1)
+ id_embeds_2 = id_embeds_2.view(b, num_inputs, 1, -1)
+
+ id_embeds = torch.cat((id_embeds, id_embeds_2), dim=-1)
+ updated_prompt_embeds = self.fuse_module(prompt_embeds, id_embeds, class_tokens_mask)
+
+ return updated_prompt_embeds
+
+
+class PhotoMakerLoader:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "photomaker_model_name": (ldm_patched.utils.path_utils.get_filename_list("photomaker"), )}}
+
+ RETURN_TYPES = ("PHOTOMAKER",)
+ FUNCTION = "load_photomaker_model"
+
+ CATEGORY = "_for_testing/photomaker"
+
+ def load_photomaker_model(self, photomaker_model_name):
+ photomaker_model_path = ldm_patched.utils.path_utils.get_full_path("photomaker", photomaker_model_name)
+ photomaker_model = PhotoMakerIDEncoder()
+ data = ldm_patched.modules.utils.load_torch_file(photomaker_model_path, safe_load=True)
+ if "id_encoder" in data:
+ data = data["id_encoder"]
+ photomaker_model.load_state_dict(data)
+ return (photomaker_model,)
+
+
+class PhotoMakerEncode:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "photomaker": ("PHOTOMAKER",),
+ "image": ("IMAGE",),
+ "clip": ("CLIP", ),
+ "text": ("STRING", {"multiline": True, "default": "photograph of photomaker"}),
+ }}
+
+ RETURN_TYPES = ("CONDITIONING",)
+ FUNCTION = "apply_photomaker"
+
+ CATEGORY = "_for_testing/photomaker"
+
+ def apply_photomaker(self, photomaker, image, clip, text):
+ special_token = "photomaker"
+ pixel_values = ldm_patched.modules.clip_vision.clip_preprocess(image.to(photomaker.load_device)).float()
+ try:
+ index = text.split(" ").index(special_token) + 1
+ except ValueError:
+ index = -1
+ tokens = clip.tokenize(text, return_word_ids=True)
+ out_tokens = {}
+ for k in tokens:
+ out_tokens[k] = []
+ for t in tokens[k]:
+ f = list(filter(lambda x: x[2] != index, t))
+ while len(f) < len(t):
+ f.append(t[-1])
+ out_tokens[k].append(f)
+
+ cond, pooled = clip.encode_from_tokens(out_tokens, return_pooled=True)
+
+ if index > 0:
+ token_index = index - 1
+ num_id_images = 1
+ class_tokens_mask = [True if token_index <= i < token_index+num_id_images else False for i in range(77)]
+ out = photomaker(id_pixel_values=pixel_values.unsqueeze(0), prompt_embeds=cond.to(photomaker.load_device),
+ class_tokens_mask=torch.tensor(class_tokens_mask, dtype=torch.bool, device=photomaker.load_device).unsqueeze(0))
+ else:
+ out = cond
+
+ return ([[out, {"pooled_output": pooled}]], )
+
+
+NODE_CLASS_MAPPINGS = {
+ "PhotoMakerLoader": PhotoMakerLoader,
+ "PhotoMakerEncode": PhotoMakerEncode,
+}
+
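PhotoMakerEncode locates the word that follows the "photomaker" trigger, filters its tokens out of the prompt, and builds a 77-entry boolean mask marking where the fused ID embedding is scattered back into the prompt embedding. A toy illustration of the index bookkeeping only (no CLIP tokenizer involved; real word ids come from clip.tokenize):

    text = "photograph of photomaker woman in a park"
    special_token = "photomaker"

    words = text.split(" ")
    try:
        index = words.index(special_token) + 1   # word id of the class word ("woman")
    except ValueError:
        index = -1

    if index > 0:
        token_index = index - 1
        num_id_images = 1
        # 77-token mask: True only at the class-word position(s) to be replaced.
        class_tokens_mask = [token_index <= i < token_index + num_id_images for i in range(77)]
        print(index, class_tokens_mask[:5])      # 3 [False, False, True, False, False]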
diff --git a/ldm_patched/contrib/external_post_processing.py b/ldm_patched/contrib/external_post_processing.py
index 432c53fb..93cb1212 100644
--- a/ldm_patched/contrib/external_post_processing.py
+++ b/ldm_patched/contrib/external_post_processing.py
@@ -35,6 +35,7 @@ class Blend:
CATEGORY = "image/postprocessing"
def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str):
+ image2 = image2.to(image1.device)
if image1.shape != image2.shape:
image2 = image2.permute(0, 3, 1, 2)
image2 = ldm_patched.modules.utils.common_upscale(image2, image1.shape[2], image1.shape[1], upscale_method='bicubic', crop='center')
diff --git a/ldm_patched/contrib/external_rebatch.py b/ldm_patched/contrib/external_rebatch.py
index 607c7feb..c24cc8c3 100644
--- a/ldm_patched/contrib/external_rebatch.py
+++ b/ldm_patched/contrib/external_rebatch.py
@@ -101,10 +101,40 @@ class LatentRebatch:
return (output_list,)
+class ImageRebatch:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "images": ("IMAGE",),
+ "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
+ }}
+ RETURN_TYPES = ("IMAGE",)
+ INPUT_IS_LIST = True
+ OUTPUT_IS_LIST = (True, )
+
+ FUNCTION = "rebatch"
+
+ CATEGORY = "image/batch"
+
+ def rebatch(self, images, batch_size):
+ batch_size = batch_size[0]
+
+ output_list = []
+ all_images = []
+ for img in images:
+ for i in range(img.shape[0]):
+ all_images.append(img[i:i+1])
+
+ for i in range(0, len(all_images), batch_size):
+ output_list.append(torch.cat(all_images[i:i+batch_size], dim=0))
+
+ return (output_list,)
+
NODE_CLASS_MAPPINGS = {
"RebatchLatents": LatentRebatch,
+ "RebatchImages": ImageRebatch,
}
NODE_DISPLAY_NAME_MAPPINGS = {
"RebatchLatents": "Rebatch Latents",
-}
\ No newline at end of file
+ "RebatchImages": "Rebatch Images",
+}
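ImageRebatch flattens every incoming batch into single frames and regroups them into batches of the requested size, so the last batch may be smaller. The same chunking pattern in plain PyTorch (tensor sizes are illustrative):

    import torch

    def rebatch(images, batch_size):
        # Split all inputs into single frames, then concat them back in fixed-size chunks.
        singles = [img[i:i + 1] for img in images for i in range(img.shape[0])]
        return [torch.cat(singles[i:i + batch_size], dim=0)
                for i in range(0, len(singles), batch_size)]

    batches = rebatch([torch.zeros(3, 64, 64, 3), torch.zeros(2, 64, 64, 3)], batch_size=4)
    print([b.shape[0] for b in batches])   # [4, 1]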
diff --git a/ldm_patched/contrib/external_sag.py b/ldm_patched/contrib/external_sag.py
index 06ca67fa..804d5611 100644
--- a/ldm_patched/contrib/external_sag.py
+++ b/ldm_patched/contrib/external_sag.py
@@ -60,7 +60,7 @@ def create_blur_map(x0, attn, sigma=3.0, threshold=1.0):
attn = attn.reshape(b, -1, hw1, hw2)
# Global Average Pool
mask = attn.mean(1, keepdim=False).sum(1, keepdim=False) > threshold
- ratio = math.ceil(math.sqrt(lh * lw / hw1))
+ ratio = 2**(math.ceil(math.sqrt(lh * lw / hw1)) - 1).bit_length()
mid_shape = [math.ceil(lh / ratio), math.ceil(lw / ratio)]
# Reshape
@@ -145,6 +145,8 @@ class SelfAttentionGuidance:
sigma = args["sigma"]
model_options = args["model_options"]
x = args["input"]
+ if min(cfg_result.shape[2:]) <= 4: #skip when too small to add padding
+ return cfg_result
# create the adversarially blurred image
degraded = create_blur_map(uncond_pred, uncond_attn, sag_sigma, sag_threshold)
@@ -153,7 +155,7 @@ class SelfAttentionGuidance:
(sag, _) = ldm_patched.modules.samplers.calc_cond_uncond_batch(model, uncond, None, degraded_noised, sigma, model_options)
return cfg_result + (degraded - sag) * sag_scale
- m.set_model_sampler_post_cfg_function(post_cfg_function)
+ m.set_model_sampler_post_cfg_function(post_cfg_function, disable_cfg1_optimization=True)
# from diffusers:
# unet.mid_block.attentions[0].transformer_blocks[0].attn1.patch
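The blur-map change above rounds the downscale ratio up to the next power of two instead of using the raw ceil, which keeps the pooled attention mask aligned with the UNet's strided resolutions. A quick check of the expression on a few illustrative shapes:

    import math

    def ratios(lh, lw, hw1):
        raw = math.ceil(math.sqrt(lh * lw / hw1))      # old behaviour
        return raw, 2 ** (raw - 1).bit_length()        # new: next power of two >= raw

    print(ratios(128, 128, 4096))   # (2, 2)  already a power of two
    print(ratios(128, 192, 4096))   # (3, 4)  3 gets bumped up to 4
    print(ratios(64, 64, 4096))     # (1, 1)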
diff --git a/ldm_patched/contrib/external_sdupscale.py b/ldm_patched/contrib/external_sdupscale.py
new file mode 100644
index 00000000..68153c47
--- /dev/null
+++ b/ldm_patched/contrib/external_sdupscale.py
@@ -0,0 +1,49 @@
+# https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py
+
+import torch
+import ldm_patched.contrib.external
+import ldm_patched.modules.utils
+
+class SD_4XUpscale_Conditioning:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "images": ("IMAGE",),
+ "positive": ("CONDITIONING",),
+ "negative": ("CONDITIONING",),
+ "scale_ratio": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
+ }}
+ RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
+ RETURN_NAMES = ("positive", "negative", "latent")
+
+ FUNCTION = "encode"
+
+ CATEGORY = "conditioning/upscale_diffusion"
+
+ def encode(self, images, positive, negative, scale_ratio, noise_augmentation):
+ width = max(1, round(images.shape[-2] * scale_ratio))
+ height = max(1, round(images.shape[-3] * scale_ratio))
+
+ pixels = ldm_patched.modules.utils.common_upscale((images.movedim(-1,1) * 2.0) - 1.0, width // 4, height // 4, "bilinear", "center")
+
+ out_cp = []
+ out_cn = []
+
+ for t in positive:
+ n = [t[0], t[1].copy()]
+ n[1]['concat_image'] = pixels
+ n[1]['noise_augmentation'] = noise_augmentation
+ out_cp.append(n)
+
+ for t in negative:
+ n = [t[0], t[1].copy()]
+ n[1]['concat_image'] = pixels
+ n[1]['noise_augmentation'] = noise_augmentation
+ out_cn.append(n)
+
+ latent = torch.zeros([images.shape[0], 4, height // 4, width // 4])
+ return (out_cp, out_cn, {"samples":latent})
+
+NODE_CLASS_MAPPINGS = {
+ "SD_4XUpscale_Conditioning": SD_4XUpscale_Conditioning,
+}
diff --git a/ldm_patched/contrib/external_stable3d.py b/ldm_patched/contrib/external_stable3d.py
new file mode 100644
index 00000000..bae2623f
--- /dev/null
+++ b/ldm_patched/contrib/external_stable3d.py
@@ -0,0 +1,104 @@
+# https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py
+
+import torch
+import ldm_patched.contrib.external
+import ldm_patched.modules.utils
+
+def camera_embeddings(elevation, azimuth):
+ elevation = torch.as_tensor([elevation])
+ azimuth = torch.as_tensor([azimuth])
+ embeddings = torch.stack(
+ [
+ torch.deg2rad(
+ (90 - elevation) - (90)
+ ), # Zero123 polar is 90-elevation
+ torch.sin(torch.deg2rad(azimuth)),
+ torch.cos(torch.deg2rad(azimuth)),
+ torch.deg2rad(
+ 90 - torch.full_like(elevation, 0)
+ ),
+ ], dim=-1).unsqueeze(1)
+
+ return embeddings
+
+
+class StableZero123_Conditioning:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "clip_vision": ("CLIP_VISION",),
+ "init_image": ("IMAGE",),
+ "vae": ("VAE",),
+ "width": ("INT", {"default": 256, "min": 16, "max": ldm_patched.contrib.external.MAX_RESOLUTION, "step": 8}),
+ "height": ("INT", {"default": 256, "min": 16, "max": ldm_patched.contrib.external.MAX_RESOLUTION, "step": 8}),
+ "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
+ "elevation": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}),
+ "azimuth": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}),
+ }}
+ RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
+ RETURN_NAMES = ("positive", "negative", "latent")
+
+ FUNCTION = "encode"
+
+ CATEGORY = "conditioning/3d_models"
+
+ def encode(self, clip_vision, init_image, vae, width, height, batch_size, elevation, azimuth):
+ output = clip_vision.encode_image(init_image)
+ pooled = output.image_embeds.unsqueeze(0)
+ pixels = ldm_patched.modules.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1)
+ encode_pixels = pixels[:,:,:,:3]
+ t = vae.encode(encode_pixels)
+ cam_embeds = camera_embeddings(elevation, azimuth)
+ cond = torch.cat([pooled, cam_embeds.to(pooled.device).repeat((pooled.shape[0], 1, 1))], dim=-1)
+
+ positive = [[cond, {"concat_latent_image": t}]]
+ negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]]
+ latent = torch.zeros([batch_size, 4, height // 8, width // 8])
+ return (positive, negative, {"samples":latent})
+
+class StableZero123_Conditioning_Batched:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "clip_vision": ("CLIP_VISION",),
+ "init_image": ("IMAGE",),
+ "vae": ("VAE",),
+ "width": ("INT", {"default": 256, "min": 16, "max": ldm_patched.contrib.external.MAX_RESOLUTION, "step": 8}),
+ "height": ("INT", {"default": 256, "min": 16, "max": ldm_patched.contrib.external.MAX_RESOLUTION, "step": 8}),
+ "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
+ "elevation": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}),
+ "azimuth": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}),
+ "elevation_batch_increment": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}),
+ "azimuth_batch_increment": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}),
+ }}
+ RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
+ RETURN_NAMES = ("positive", "negative", "latent")
+
+ FUNCTION = "encode"
+
+ CATEGORY = "conditioning/3d_models"
+
+ def encode(self, clip_vision, init_image, vae, width, height, batch_size, elevation, azimuth, elevation_batch_increment, azimuth_batch_increment):
+ output = clip_vision.encode_image(init_image)
+ pooled = output.image_embeds.unsqueeze(0)
+ pixels = ldm_patched.modules.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1)
+ encode_pixels = pixels[:,:,:,:3]
+ t = vae.encode(encode_pixels)
+
+ cam_embeds = []
+ for i in range(batch_size):
+ cam_embeds.append(camera_embeddings(elevation, azimuth))
+ elevation += elevation_batch_increment
+ azimuth += azimuth_batch_increment
+
+ cam_embeds = torch.cat(cam_embeds, dim=0)
+ cond = torch.cat([ldm_patched.modules.utils.repeat_to_batch_size(pooled, batch_size), cam_embeds], dim=-1)
+
+ positive = [[cond, {"concat_latent_image": t}]]
+ negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]]
+ latent = torch.zeros([batch_size, 4, height // 8, width // 8])
+ return (positive, negative, {"samples":latent, "batch_index": [0] * batch_size})
+
+
+NODE_CLASS_MAPPINGS = {
+ "StableZero123_Conditioning": StableZero123_Conditioning,
+ "StableZero123_Conditioning_Batched": StableZero123_Conditioning_Batched,
+}
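The Stable Zero123 conditioning packs the camera pose into four numbers: the polar offset in radians, sin and cos of the azimuth, and a constant pi/2 term, as built by camera_embeddings above. Worked values for elevation 30 deg and azimuth 45 deg:

    import math

    elevation, azimuth = 30.0, 45.0
    embedding = [
        math.radians((90 - elevation) - 90),   # -elevation in radians: -0.5236
        math.sin(math.radians(azimuth)),       # 0.7071
        math.cos(math.radians(azimuth)),       # 0.7071
        math.radians(90),                      # constant ~1.5708
    ]
    print([round(v, 4) for v in embedding])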
diff --git a/ldm_patched/contrib/external_video_model.py b/ldm_patched/contrib/external_video_model.py
index 4504528a..503df0e1 100644
--- a/ldm_patched/contrib/external_video_model.py
+++ b/ldm_patched/contrib/external_video_model.py
@@ -5,6 +5,7 @@ import torch
import ldm_patched.modules.utils
import ldm_patched.modules.sd
import ldm_patched.utils.path_utils
+import ldm_patched.contrib.external_model_merging
class ImageOnlyCheckpointLoader:
@@ -80,10 +81,26 @@ class VideoLinearCFGGuidance:
m.set_model_sampler_cfg_function(linear_cfg)
return (m, )
+class ImageOnlyCheckpointSave(ldm_patched.contrib.external_model_merging.CheckpointSave):
+ CATEGORY = "_for_testing"
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": { "model": ("MODEL",),
+ "clip_vision": ("CLIP_VISION",),
+ "vae": ("VAE",),
+ "filename_prefix": ("STRING", {"default": "checkpoints/ldm_patched"}),},
+ "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},}
+
+ def save(self, model, clip_vision, vae, filename_prefix, prompt=None, extra_pnginfo=None):
+ ldm_patched.contrib.external_model_merging.save_checkpoint(model, clip_vision=clip_vision, vae=vae, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo)
+ return {}
+
NODE_CLASS_MAPPINGS = {
"ImageOnlyCheckpointLoader": ImageOnlyCheckpointLoader,
"SVD_img2vid_Conditioning": SVD_img2vid_Conditioning,
"VideoLinearCFGGuidance": VideoLinearCFGGuidance,
+ "ImageOnlyCheckpointSave": ImageOnlyCheckpointSave,
}
NODE_DISPLAY_NAME_MAPPINGS = {
diff --git a/ldm_patched/k_diffusion/sampling.py b/ldm_patched/k_diffusion/sampling.py
index 761c2e0e..ea5540a4 100644
--- a/ldm_patched/k_diffusion/sampling.py
+++ b/ldm_patched/k_diffusion/sampling.py
@@ -752,7 +752,6 @@ def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, n
return x
-
@torch.no_grad()
def sample_heunpp2(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
# From MIT licensed: https://github.com/Carzit/sd-webui-samplers-scheduler/
@@ -808,3 +807,32 @@ def sample_heunpp2(model, x, sigmas, extra_args=None, callback=None, disable=Non
d_prime = w1 * d + w2 * d_2 + w3 * d_3
x = x + d_prime * dt
return x
+
+
+@torch.no_grad()
+def sample_tcd(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, eta=0.3):
+ extra_args = {} if extra_args is None else extra_args
+ noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
+ s_in = x.new_ones([x.shape[0]])
+
+ model_sampling = model.inner_model.inner_model.model_sampling
+ timesteps_s = torch.floor((1 - eta) * model_sampling.timestep(sigmas)).to(dtype=torch.long).detach().cpu()
+ timesteps_s[-1] = 0
+ alpha_prod_s = model_sampling.alphas_cumprod[timesteps_s]
+ beta_prod_s = 1 - alpha_prod_s
+ for i in trange(len(sigmas) - 1, disable=disable):
+ denoised = model(x, sigmas[i] * s_in, **extra_args) # predicted_original_sample
+ eps = (x - denoised) / sigmas[i]
+ denoised = alpha_prod_s[i + 1].sqrt() * denoised + beta_prod_s[i + 1].sqrt() * eps
+
+ if callback is not None:
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
+
+ x = denoised
+ if eta > 0 and sigmas[i + 1] > 0:
+ noise = noise_sampler(sigmas[i], sigmas[i + 1])
+ x = x / alpha_prod_s[i+1].sqrt() + noise * (sigmas[i+1]**2 + 1 - 1/alpha_prod_s[i+1]).sqrt()
+ else:
+ x *= torch.sqrt(1.0 + sigmas[i + 1] ** 2)
+
+ return x
\ No newline at end of file
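One TCD step above predicts x0, recovers eps at the current sigma, blends the two using the alpha/beta terms of the earlier "student" timestep s = floor((1 - eta) * t), and only re-injects noise when eta > 0 and further steps remain. A compact scalar restatement of a single update (toy values, not a real schedule):

    import math

    def tcd_step(x, sigma, sigma_next, alpha_s_next, eta, x0_pred, noise):
        eps = (x - x0_pred) / sigma            # eps recovered from the x0 prediction
        denoised = math.sqrt(alpha_s_next) * x0_pred + math.sqrt(1 - alpha_s_next) * eps
        if eta > 0 and sigma_next > 0:
            # stochastic branch: rescale and add fresh noise for the next sigma
            return denoised / math.sqrt(alpha_s_next) + noise * math.sqrt(
                sigma_next ** 2 + 1 - 1 / alpha_s_next)
        # deterministic branch: only rescale to the next sigma's magnitude
        return denoised * math.sqrt(1.0 + sigma_next ** 2)

    print(tcd_step(x=1.2, sigma=2.0, sigma_next=1.0, alpha_s_next=0.9, eta=0.3,
                   x0_pred=0.4, noise=0.1))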
diff --git a/ldm_patched/ldm/models/autoencoder.py b/ldm_patched/ldm/models/autoencoder.py
index 14224ad3..c809a0c3 100644
--- a/ldm_patched/ldm/models/autoencoder.py
+++ b/ldm_patched/ldm/models/autoencoder.py
@@ -8,6 +8,7 @@ from ldm_patched.ldm.modules.distributions.distributions import DiagonalGaussian
from ldm_patched.ldm.util import instantiate_from_config
from ldm_patched.ldm.modules.ema import LitEma
+import ldm_patched.modules.ops
class DiagonalGaussianRegularizer(torch.nn.Module):
def __init__(self, sample: bool = True):
@@ -161,12 +162,12 @@ class AutoencodingEngineLegacy(AutoencodingEngine):
},
**kwargs,
)
- self.quant_conv = torch.nn.Conv2d(
+ self.quant_conv = ldm_patched.modules.ops.disable_weight_init.Conv2d(
(1 + ddconfig["double_z"]) * ddconfig["z_channels"],
(1 + ddconfig["double_z"]) * embed_dim,
1,
)
- self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
+ self.post_quant_conv = ldm_patched.modules.ops.disable_weight_init.Conv2d(embed_dim, ddconfig["z_channels"], 1)
self.embed_dim = embed_dim
def get_autoencoder_params(self) -> list:
diff --git a/ldm_patched/ldm/modules/attention.py b/ldm_patched/ldm/modules/attention.py
index 49e502ed..e10a868d 100644
--- a/ldm_patched/ldm/modules/attention.py
+++ b/ldm_patched/ldm/modules/attention.py
@@ -1,12 +1,9 @@
-from inspect import isfunction
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from typing import Optional, Any
-from functools import partial
-
from .diffusionmodules.util import checkpoint, AlphaBlender, timestep_embedding
from .sub_quadratic_attention import efficient_dot_product_attention
@@ -177,6 +174,7 @@ def attention_sub_quad(query, key, value, heads, mask=None):
kv_chunk_size_min=kv_chunk_size_min,
use_checkpoint=False,
upcast_attention=upcast_attention,
+ mask=mask,
)
hidden_states = hidden_states.to(dtype)
@@ -239,6 +237,12 @@ def attention_split(q, k, v, heads, mask=None):
else:
s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale
+ if mask is not None:
+ if len(mask.shape) == 2:
+ s1 += mask[i:end]
+ else:
+ s1 += mask[:, i:end]
+
s2 = s1.softmax(dim=-1).to(v.dtype)
del s1
first_op_done = True
@@ -294,11 +298,14 @@ def attention_xformers(q, k, v, heads, mask=None):
(q, k, v),
)
- # actually compute the attention, what we cannot get enough of
- out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
+ if mask is not None:
+ pad = 8 - q.shape[1] % 8
+ mask_out = torch.empty([q.shape[0], q.shape[1], q.shape[1] + pad], dtype=q.dtype, device=q.device)
+ mask_out[:, :, :mask.shape[-1]] = mask
+ mask = mask_out[:, :, :mask.shape[-1]]
+
+ out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=mask)
- if exists(mask):
- raise NotImplementedError
out = (
out.unsqueeze(0)
.reshape(b, heads, -1, dim_head)
@@ -323,7 +330,6 @@ def attention_pytorch(q, k, v, heads, mask=None):
optimized_attention = attention_basic
-optimized_attention_masked = attention_basic
if model_management.xformers_enabled():
print("Using xformers cross attention")
@@ -339,15 +345,18 @@ else:
print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --attention-split")
optimized_attention = attention_sub_quad
-if model_management.pytorch_attention_enabled():
- optimized_attention_masked = attention_pytorch
+optimized_attention_masked = optimized_attention
-def optimized_attention_for_device(device, mask=False):
- if device == torch.device("cpu"): #TODO
+def optimized_attention_for_device(device, mask=False, small_input=False):
+ if small_input:
if model_management.pytorch_attention_enabled():
- return attention_pytorch
+ return attention_pytorch #TODO: need to confirm but this is probably slightly faster for small inputs in all cases
else:
return attention_basic
+
+ if device == torch.device("cpu"):
+ return attention_sub_quad
+
if mask:
return optimized_attention_masked
diff --git a/ldm_patched/ldm/modules/diffusionmodules/model.py b/ldm_patched/ldm/modules/diffusionmodules/model.py
index 9c898639..1901145c 100644
--- a/ldm_patched/ldm/modules/diffusionmodules/model.py
+++ b/ldm_patched/ldm/modules/diffusionmodules/model.py
@@ -41,7 +41,7 @@ def nonlinearity(x):
def Normalize(in_channels, num_groups=32):
- return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
+ return ops.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
class Upsample(nn.Module):
diff --git a/ldm_patched/ldm/modules/diffusionmodules/openaimodel.py b/ldm_patched/ldm/modules/diffusionmodules/openaimodel.py
index e5784f28..4b695f76 100644
--- a/ldm_patched/ldm/modules/diffusionmodules/openaimodel.py
+++ b/ldm_patched/ldm/modules/diffusionmodules/openaimodel.py
@@ -1,12 +1,9 @@
from abc import abstractmethod
-import math
-import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
-from functools import partial
from .util import (
checkpoint,
@@ -437,9 +434,6 @@ class UNetModel(nn.Module):
operations=ops,
):
super().__init__()
- assert use_spatial_transformer == True, "use_spatial_transformer has to be true"
- if use_spatial_transformer:
- assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
if context_dim is not None:
assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
@@ -456,7 +450,6 @@ class UNetModel(nn.Module):
if num_head_channels == -1:
assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
- self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
@@ -502,7 +495,7 @@ class UNetModel(nn.Module):
if self.num_classes is not None:
if isinstance(self.num_classes, int):
- self.label_emb = nn.Embedding(num_classes, time_embed_dim)
+ self.label_emb = nn.Embedding(num_classes, time_embed_dim, dtype=self.dtype, device=device)
elif self.num_classes == "continuous":
print("setting up linear c_adm embedding layer")
self.label_emb = nn.Linear(1, time_embed_dim)
diff --git a/ldm_patched/ldm/modules/diffusionmodules/upscaling.py b/ldm_patched/ldm/modules/diffusionmodules/upscaling.py
index 59d4d3cc..a38bff57 100644
--- a/ldm_patched/ldm/modules/diffusionmodules/upscaling.py
+++ b/ldm_patched/ldm/modules/diffusionmodules/upscaling.py
@@ -41,10 +41,14 @@ class AbstractLowScaleModel(nn.Module):
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
- def q_sample(self, x_start, t, noise=None):
- noise = default(noise, lambda: torch.randn_like(x_start))
- return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
- extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
+ def q_sample(self, x_start, t, noise=None, seed=None):
+ if noise is None:
+ if seed is None:
+ noise = torch.randn_like(x_start)
+ else:
+ noise = torch.randn(x_start.size(), dtype=x_start.dtype, layout=x_start.layout, generator=torch.manual_seed(seed)).to(x_start.device)
+ return (extract_into_tensor(self.sqrt_alphas_cumprod.to(x_start.device), t, x_start.shape) * x_start +
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod.to(x_start.device), t, x_start.shape) * noise)
def forward(self, x):
return x, None
@@ -69,12 +73,12 @@ class ImageConcatWithNoiseAugmentation(AbstractLowScaleModel):
super().__init__(noise_schedule_config=noise_schedule_config)
self.max_noise_level = max_noise_level
- def forward(self, x, noise_level=None):
+ def forward(self, x, noise_level=None, seed=None):
if noise_level is None:
noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
else:
assert isinstance(noise_level, torch.Tensor)
- z = self.q_sample(x, noise_level)
+ z = self.q_sample(x, noise_level, seed=seed)
return z, noise_level
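
For context, the change above makes the low-scale model's noising reproducible: when a seed is supplied, q_sample draws its noise from an explicitly seeded generator instead of the ambient RNG, so the same seed always yields the same augmentation noise. A minimal standalone sketch of that behaviour follows (the helper name is illustrative only, not part of the patch; the patch itself seeds the default generator via torch.manual_seed, while this sketch uses a dedicated torch.Generator so global RNG state is left untouched):

import torch

def seeded_noise_like(x, seed=None):
    # Without a seed, behave like torch.randn_like (depends on ambient RNG state).
    # With a seed, draw from a freshly seeded generator so the result is
    # reproducible for a given shape/dtype, mirroring the patched q_sample.
    if seed is None:
        return torch.randn_like(x)
    generator = torch.Generator().manual_seed(seed)
    return torch.randn(x.size(), dtype=x.dtype, layout=x.layout,
                       generator=generator).to(x.device)

x = torch.zeros(2, 4, 8, 8)
assert torch.equal(seeded_noise_like(x, seed=42), seeded_noise_like(x, seed=42))
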
diff --git a/ldm_patched/ldm/modules/diffusionmodules/util.py b/ldm_patched/ldm/modules/diffusionmodules/util.py
index ca0f4b99..e261e06a 100644
--- a/ldm_patched/ldm/modules/diffusionmodules/util.py
+++ b/ldm_patched/ldm/modules/diffusionmodules/util.py
@@ -51,9 +51,9 @@ class AlphaBlender(nn.Module):
if self.merge_strategy == "fixed":
# make shape compatible
# alpha = repeat(self.mix_factor, '1 -> b () t () ()', t=t, b=bs)
- alpha = self.mix_factor
+ alpha = self.mix_factor.to(image_only_indicator.device)
elif self.merge_strategy == "learned":
- alpha = torch.sigmoid(self.mix_factor)
+ alpha = torch.sigmoid(self.mix_factor.to(image_only_indicator.device))
# make shape compatible
# alpha = repeat(alpha, '1 -> s () ()', s = t * bs)
elif self.merge_strategy == "learned_with_images":
@@ -61,7 +61,7 @@ class AlphaBlender(nn.Module):
alpha = torch.where(
image_only_indicator.bool(),
torch.ones(1, 1, device=image_only_indicator.device),
- rearrange(torch.sigmoid(self.mix_factor), "... -> ... 1"),
+ rearrange(torch.sigmoid(self.mix_factor.to(image_only_indicator.device)), "... -> ... 1"),
)
alpha = rearrange(alpha, self.rearrange_pattern)
# make shape compatible
diff --git a/ldm_patched/ldm/modules/encoders/noise_aug_modules.py b/ldm_patched/ldm/modules/encoders/noise_aug_modules.py
index b59bf204..a5d86603 100644
--- a/ldm_patched/ldm/modules/encoders/noise_aug_modules.py
+++ b/ldm_patched/ldm/modules/encoders/noise_aug_modules.py
@@ -15,21 +15,21 @@ class CLIPEmbeddingNoiseAugmentation(ImageConcatWithNoiseAugmentation):
def scale(self, x):
# re-normalize to centered mean and unit variance
- x = (x - self.data_mean) * 1. / self.data_std
+ x = (x - self.data_mean.to(x.device)) * 1. / self.data_std.to(x.device)
return x
def unscale(self, x):
# back to original data stats
- x = (x * self.data_std) + self.data_mean
+ x = (x * self.data_std.to(x.device)) + self.data_mean.to(x.device)
return x
- def forward(self, x, noise_level=None):
+ def forward(self, x, noise_level=None, seed=None):
if noise_level is None:
noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
else:
assert isinstance(noise_level, torch.Tensor)
x = self.scale(x)
- z = self.q_sample(x, noise_level)
+ z = self.q_sample(x, noise_level, seed=seed)
z = self.unscale(z)
noise_level = self.time_embed(noise_level)
return z, noise_level
diff --git a/ldm_patched/ldm/modules/sub_quadratic_attention.py b/ldm_patched/ldm/modules/sub_quadratic_attention.py
index cabf1f67..9f4c23c7 100644
--- a/ldm_patched/ldm/modules/sub_quadratic_attention.py
+++ b/ldm_patched/ldm/modules/sub_quadratic_attention.py
@@ -61,6 +61,7 @@ def _summarize_chunk(
value: Tensor,
scale: float,
upcast_attention: bool,
+ mask,
) -> AttnChunk:
if upcast_attention:
with torch.autocast(enabled=False, device_type = 'cuda'):
@@ -84,6 +85,8 @@ def _summarize_chunk(
max_score, _ = torch.max(attn_weights, -1, keepdim=True)
max_score = max_score.detach()
attn_weights -= max_score
+ if mask is not None:
+ attn_weights += mask
torch.exp(attn_weights, out=attn_weights)
exp_weights = attn_weights.to(value.dtype)
exp_values = torch.bmm(exp_weights, value)
@@ -96,11 +99,12 @@ def _query_chunk_attention(
value: Tensor,
summarize_chunk: SummarizeChunk,
kv_chunk_size: int,
+ mask,
) -> Tensor:
batch_x_heads, k_channels_per_head, k_tokens = key_t.shape
_, _, v_channels_per_head = value.shape
- def chunk_scanner(chunk_idx: int) -> AttnChunk:
+ def chunk_scanner(chunk_idx: int, mask) -> AttnChunk:
key_chunk = dynamic_slice(
key_t,
(0, 0, chunk_idx),
@@ -111,10 +115,13 @@ def _query_chunk_attention(
(0, chunk_idx, 0),
(batch_x_heads, kv_chunk_size, v_channels_per_head)
)
- return summarize_chunk(query, key_chunk, value_chunk)
+ if mask is not None:
+ mask = mask[:,:,chunk_idx:chunk_idx + kv_chunk_size]
+
+ return summarize_chunk(query, key_chunk, value_chunk, mask=mask)
chunks: List[AttnChunk] = [
- chunk_scanner(chunk) for chunk in torch.arange(0, k_tokens, kv_chunk_size)
+ chunk_scanner(chunk, mask) for chunk in torch.arange(0, k_tokens, kv_chunk_size)
]
acc_chunk = AttnChunk(*map(torch.stack, zip(*chunks)))
chunk_values, chunk_weights, chunk_max = acc_chunk
@@ -135,6 +142,7 @@ def _get_attention_scores_no_kv_chunking(
value: Tensor,
scale: float,
upcast_attention: bool,
+ mask,
) -> Tensor:
if upcast_attention:
with torch.autocast(enabled=False, device_type = 'cuda'):
@@ -156,6 +164,8 @@ def _get_attention_scores_no_kv_chunking(
beta=0,
)
+ if mask is not None:
+ attn_scores += mask
try:
attn_probs = attn_scores.softmax(dim=-1)
del attn_scores
@@ -183,6 +193,7 @@ def efficient_dot_product_attention(
kv_chunk_size_min: Optional[int] = None,
use_checkpoint=True,
upcast_attention=False,
+ mask = None,
):
"""Computes efficient dot-product attention given query, transposed key, and value.
This is efficient version of attention presented in
@@ -209,13 +220,22 @@ def efficient_dot_product_attention(
if kv_chunk_size_min is not None:
kv_chunk_size = max(kv_chunk_size, kv_chunk_size_min)
+ if mask is not None and len(mask.shape) == 2:
+ mask = mask.unsqueeze(0)
+
def get_query_chunk(chunk_idx: int) -> Tensor:
return dynamic_slice(
query,
(0, chunk_idx, 0),
(batch_x_heads, min(query_chunk_size, q_tokens), q_channels_per_head)
)
-
+
+ def get_mask_chunk(chunk_idx: int) -> Tensor:
+ if mask is None:
+ return None
+ chunk = min(query_chunk_size, q_tokens)
+ return mask[:,chunk_idx:chunk_idx + chunk]
+
summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale, upcast_attention=upcast_attention)
summarize_chunk: SummarizeChunk = partial(checkpoint, summarize_chunk) if use_checkpoint else summarize_chunk
compute_query_chunk_attn: ComputeQueryChunkAttn = partial(
@@ -237,6 +257,7 @@ def efficient_dot_product_attention(
query=query,
key_t=key_t,
value=value,
+ mask=mask,
)
# TODO: maybe we should use torch.empty_like(query) to allocate storage in-advance,
@@ -246,6 +267,7 @@ def efficient_dot_product_attention(
query=get_query_chunk(i * query_chunk_size),
key_t=key_t,
value=value,
+ mask=get_mask_chunk(i * query_chunk_size)
) for i in range(math.ceil(q_tokens / query_chunk_size))
], dim=1)
return res
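
The mask support added above amounts to slicing one full additive attention bias twice: get_mask_chunk takes the rows belonging to the current query chunk, and chunk_scanner then takes the columns belonging to the current key/value chunk, so each partial score matrix is offset by exactly its matching slab of the mask before the scores are exponentiated and accumulated. A rough standalone sketch of that double slicing (shapes and the helper name are assumptions for illustration, not the project's API):

import torch

def iter_mask_chunks(mask, query_chunk_size, kv_chunk_size):
    # mask: [batch * heads, q_tokens, k_tokens] additive attention bias.
    # Yield the sub-mask paired with each (query chunk, kv chunk), matching
    # the two slicing steps in get_mask_chunk and chunk_scanner above.
    q_tokens, k_tokens = mask.shape[-2], mask.shape[-1]
    for q_start in range(0, q_tokens, query_chunk_size):
        q_slab = mask[:, q_start:q_start + query_chunk_size, :]
        for k_start in range(0, k_tokens, kv_chunk_size):
            yield q_slab[:, :, k_start:k_start + kv_chunk_size]

mask = torch.zeros(2, 77, 77)
chunks = list(iter_mask_chunks(mask, query_chunk_size=32, kv_chunk_size=32))
assert len(chunks) == 9  # 3 query chunks x 3 kv chunks cover the whole mask
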
diff --git a/ldm_patched/ldm/modules/temporal_ae.py b/ldm_patched/ldm/modules/temporal_ae.py
index 248d850b..ee851921 100644
--- a/ldm_patched/ldm/modules/temporal_ae.py
+++ b/ldm_patched/ldm/modules/temporal_ae.py
@@ -82,14 +82,14 @@ class VideoResBlock(ResnetBlock):
x = self.time_stack(x, temb)
- alpha = self.get_alpha(bs=b // timesteps)
+ alpha = self.get_alpha(bs=b // timesteps).to(x.device)
x = alpha * x + (1.0 - alpha) * x_mix
x = rearrange(x, "b c t h w -> (b t) c h w")
return x
-class AE3DConv(torch.nn.Conv2d):
+class AE3DConv(ops.Conv2d):
def __init__(self, in_channels, out_channels, video_kernel_size=3, *args, **kwargs):
super().__init__(in_channels, out_channels, *args, **kwargs)
if isinstance(video_kernel_size, Iterable):
@@ -97,7 +97,7 @@ class AE3DConv(torch.nn.Conv2d):
else:
padding = int(video_kernel_size // 2)
- self.time_mix_conv = torch.nn.Conv3d(
+ self.time_mix_conv = ops.Conv3d(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=video_kernel_size,
@@ -167,7 +167,7 @@ class AttnVideoBlock(AttnBlock):
emb = emb[:, None, :]
x_mix = x_mix + emb
- alpha = self.get_alpha()
+ alpha = self.get_alpha().to(x.device)
x_mix = self.time_mix_block(x_mix, timesteps=timesteps)
x = alpha * x + (1.0 - alpha) * x_mix # alpha merge
diff --git a/ldm_patched/licenses-3rd/chainer b/ldm_patched/licenses-3rd/chainer
new file mode 100644
index 00000000..db8ef9d9
--- /dev/null
+++ b/ldm_patched/licenses-3rd/chainer
@@ -0,0 +1,20 @@
+Copyright (c) 2015 Preferred Infrastructure, Inc.
+Copyright (c) 2015 Preferred Networks, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/ldm_patched/licenses-3rd/comfyui b/ldm_patched/licenses-3rd/comfyui
new file mode 100644
index 00000000..e72bfdda
--- /dev/null
+++ b/ldm_patched/licenses-3rd/comfyui
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
\ No newline at end of file
diff --git a/ldm_patched/licenses-3rd/diffusers b/ldm_patched/licenses-3rd/diffusers
new file mode 100644
index 00000000..f49a4e16
--- /dev/null
+++ b/ldm_patched/licenses-3rd/diffusers
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/ldm_patched/licenses-3rd/kdiffusion b/ldm_patched/licenses-3rd/kdiffusion
new file mode 100644
index 00000000..e20684e5
--- /dev/null
+++ b/ldm_patched/licenses-3rd/kdiffusion
@@ -0,0 +1,19 @@
+Copyright (c) 2022 Katherine Crowson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/ldm_patched/licenses-3rd/ldm b/ldm_patched/licenses-3rd/ldm
new file mode 100644
index 00000000..1a1c5058
--- /dev/null
+++ b/ldm_patched/licenses-3rd/ldm
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/ldm_patched/licenses-3rd/taesd b/ldm_patched/licenses-3rd/taesd
new file mode 100644
index 00000000..62e6312e
--- /dev/null
+++ b/ldm_patched/licenses-3rd/taesd
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Ollin Boer Bohan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/ldm_patched/licenses-3rd/transformers b/ldm_patched/licenses-3rd/transformers
new file mode 100644
index 00000000..e44d8f5b
--- /dev/null
+++ b/ldm_patched/licenses-3rd/transformers
@@ -0,0 +1,203 @@
+Copyright 2018- The Hugging Face team. All rights reserved.
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/ldm_patched/modules/args_parser.py b/ldm_patched/modules/args_parser.py
index 7957783e..bf873783 100644
--- a/ldm_patched/modules/args_parser.py
+++ b/ldm_patched/modules/args_parser.py
@@ -37,6 +37,7 @@ parser.add_argument("--listen", type=str, default="127.0.0.1", metavar="IP", nar
parser.add_argument("--port", type=int, default=8188)
parser.add_argument("--disable-header-check", type=str, default=None, metavar="ORIGIN", nargs="?", const="*")
parser.add_argument("--web-upload-size", type=float, default=100)
+parser.add_argument("--hf-mirror", type=str, default=None)
parser.add_argument("--external-working-path", type=str, default=None, metavar="PATH", nargs='+', action='append')
parser.add_argument("--output-path", type=str, default=None)
@@ -66,6 +67,8 @@ fpvae_group.add_argument("--vae-in-fp16", action="store_true")
fpvae_group.add_argument("--vae-in-fp32", action="store_true")
fpvae_group.add_argument("--vae-in-bf16", action="store_true")
+parser.add_argument("--vae-in-cpu", action="store_true")
+
fpte_group = parser.add_mutually_exclusive_group()
fpte_group.add_argument("--clip-in-fp8-e4m3fn", action="store_true")
fpte_group.add_argument("--clip-in-fp8-e5m2", action="store_true")
@@ -98,8 +101,7 @@ vram_group.add_argument("--always-high-vram", action="store_true")
vram_group.add_argument("--always-normal-vram", action="store_true")
vram_group.add_argument("--always-low-vram", action="store_true")
vram_group.add_argument("--always-no-vram", action="store_true")
-vram_group.add_argument("--always-cpu", action="store_true")
-
+vram_group.add_argument("--always-cpu", type=int, nargs="?", metavar="CPU_NUM_THREADS", const=-1)
parser.add_argument("--always-offload-from-vram", action="store_true")
parser.add_argument("--pytorch-deterministic", action="store_true")
@@ -110,6 +112,8 @@ parser.add_argument("--is-windows-embedded-python", action="store_true")
parser.add_argument("--disable-server-info", action="store_true")
+parser.add_argument("--multi-user", action="store_true")
+
if ldm_patched.modules.options.args_parsing:
args = parser.parse_args([])
else:
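
A standalone sketch of how the reworked --always-cpu flag parses, assuming nothing beyond argparse itself (the real definition lives in args_parser.py above): a bare flag maps to the const of -1, an explicit value pins the CPU thread count, and omitting it leaves the default of None.

    import argparse

    parser = argparse.ArgumentParser()
    # mirrors the patched definition: optional integer value, const=-1 when given bare
    parser.add_argument("--always-cpu", type=int, nargs="?", metavar="CPU_NUM_THREADS", const=-1)

    print(parser.parse_args([]).always_cpu)                     # None: flag absent
    print(parser.parse_args(["--always-cpu"]).always_cpu)       # -1: CPU mode, default thread count
    print(parser.parse_args(["--always-cpu", "8"]).always_cpu)  # 8: CPU mode with 8 torch threads

model_management.py (further down in this patch) only calls torch.set_num_threads when the parsed value is positive, so the bare form keeps PyTorch's default thread count.
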
diff --git a/ldm_patched/modules/clip_model.py b/ldm_patched/modules/clip_model.py
index e7f3fb2d..aceca86d 100644
--- a/ldm_patched/modules/clip_model.py
+++ b/ldm_patched/modules/clip_model.py
@@ -57,7 +57,7 @@ class CLIPEncoder(torch.nn.Module):
self.layers = torch.nn.ModuleList([CLIPLayer(embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations) for i in range(num_layers)])
def forward(self, x, mask=None, intermediate_output=None):
- optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None)
+ optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None, small_input=True)
if intermediate_output is not None:
if intermediate_output < 0:
@@ -151,7 +151,7 @@ class CLIPVisionEmbeddings(torch.nn.Module):
def forward(self, pixel_values):
embeds = self.patch_embedding(pixel_values).flatten(2).transpose(1, 2)
- return torch.cat([self.class_embedding.expand(pixel_values.shape[0], 1, -1), embeds], dim=1) + self.position_embedding.weight
+ return torch.cat([self.class_embedding.to(embeds.device).expand(pixel_values.shape[0], 1, -1), embeds], dim=1) + self.position_embedding.weight.to(embeds.device)
class CLIPVision(torch.nn.Module):
diff --git a/ldm_patched/modules/clip_vision.py b/ldm_patched/modules/clip_vision.py
index 9699210d..affdb8b2 100644
--- a/ldm_patched/modules/clip_vision.py
+++ b/ldm_patched/modules/clip_vision.py
@@ -1,7 +1,6 @@
-from .utils import load_torch_file, transformers_convert, common_upscale
+from .utils import load_torch_file, transformers_convert, state_dict_prefix_replace
import os
import torch
-import contextlib
import json
import ldm_patched.modules.ops
@@ -41,9 +40,13 @@ class ClipVisionModel():
self.model.eval()
self.patcher = ldm_patched.modules.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device)
+
def load_sd(self, sd):
return self.model.load_state_dict(sd, strict=False)
+ def get_sd(self):
+ return self.model.state_dict()
+
def encode_image(self, image):
ldm_patched.modules.model_management.load_model_gpu(self.patcher)
pixel_values = clip_preprocess(image.to(self.load_device)).float()
@@ -76,6 +79,9 @@ def convert_to_transformers(sd, prefix):
sd['visual_projection.weight'] = sd.pop("{}proj".format(prefix)).transpose(0, 1)
sd = transformers_convert(sd, prefix, "vision_model.", 48)
+ else:
+ replace_prefix = {prefix: ""}
+ sd = state_dict_prefix_replace(sd, replace_prefix)
return sd
def load_clipvision_from_sd(sd, prefix="", convert_keys=False):
diff --git a/ldm_patched/modules/conds.py b/ldm_patched/modules/conds.py
index a7325680..0ee184bc 100644
--- a/ldm_patched/modules/conds.py
+++ b/ldm_patched/modules/conds.py
@@ -1,11 +1,8 @@
-import enum
import torch
import math
import ldm_patched.modules.utils
-def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9)
- return abs(a*b) // math.gcd(a, b)
class CONDRegular:
def __init__(self, cond):
@@ -42,7 +39,7 @@ class CONDCrossAttn(CONDRegular):
if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen
return False
- mult_min = lcm(s1[1], s2[1])
+ mult_min = math.lcm(s1[1], s2[1])
diff = mult_min // min(s1[1], s2[1])
if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much
return False
@@ -53,7 +50,7 @@ class CONDCrossAttn(CONDRegular):
crossattn_max_len = self.cond.shape[1]
for x in others:
c = x.cond
- crossattn_max_len = lcm(crossattn_max_len, c.shape[1])
+ crossattn_max_len = math.lcm(crossattn_max_len, c.shape[1])
conds.append(c)
out = []
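
The removed local lcm helper is replaced by math.lcm, available from Python 3.9 onward; for the positive sequence lengths CONDCrossAttn passes in, the two agree. A quick check under that assumption (the 77-token multiples are illustrative values only):

    import math

    def old_lcm(a, b):  # the helper deleted above
        return abs(a * b) // math.gcd(a, b)

    for a, b in [(77, 154), (77, 231), (154, 231)]:
        assert old_lcm(a, b) == math.lcm(a, b)
    print("math.lcm matches the removed helper for these lengths")
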
diff --git a/ldm_patched/modules/controlnet.py b/ldm_patched/modules/controlnet.py
index e478e221..7e11497f 100644
--- a/ldm_patched/modules/controlnet.py
+++ b/ldm_patched/modules/controlnet.py
@@ -1,7 +1,6 @@
import torch
import math
import os
-import contextlib
import ldm_patched.modules.utils
import ldm_patched.modules.model_management
import ldm_patched.modules.model_detection
@@ -126,7 +125,10 @@ class ControlBase:
if o[i] is None:
o[i] = prev_val
else:
- o[i] += prev_val
+ if o[i].shape[0] < prev_val.shape[0]:
+ o[i] = prev_val + o[i]
+ else:
+ o[i] += prev_val
return out
class ControlNet(ControlBase):
@@ -283,7 +285,7 @@ class ControlLora(ControlNet):
cm = self.control_model.state_dict()
for k in sd:
- weight = ldm_patched.modules.model_management.resolve_lowvram_weight(sd[k], diffusion_model, k)
+ weight = sd[k]
try:
ldm_patched.modules.utils.set_attr(self.control_model, k, weight)
except:
diff --git a/ldm_patched/modules/diffusers_load.py b/ldm_patched/modules/diffusers_load.py
index 79fbbd55..62edc72b 100644
--- a/ldm_patched/modules/diffusers_load.py
+++ b/ldm_patched/modules/diffusers_load.py
@@ -1,4 +1,3 @@
-import json
import os
import ldm_patched.modules.sd
diff --git a/ldm_patched/modules/gligen.py b/ldm_patched/modules/gligen.py
index 8dbd5fa4..11f1ee93 100644
--- a/ldm_patched/modules/gligen.py
+++ b/ldm_patched/modules/gligen.py
@@ -1,5 +1,5 @@
import torch
-from torch import nn, einsum
+from torch import nn
from ldm_patched.ldm.modules.attention import CrossAttention
from inspect import isfunction
diff --git a/ldm_patched/modules/latent_formats.py b/ldm_patched/modules/latent_formats.py
index c209087e..1606793e 100644
--- a/ldm_patched/modules/latent_formats.py
+++ b/ldm_patched/modules/latent_formats.py
@@ -1,3 +1,4 @@
+import torch
class LatentFormat:
scale_factor = 1.0
@@ -33,3 +34,71 @@ class SDXL(LatentFormat):
[-0.3112, -0.2359, -0.2076]
]
self.taesd_decoder_name = "taesdxl_decoder"
+
+class SDXL_Playground_2_5(LatentFormat):
+ def __init__(self):
+ self.scale_factor = 0.5
+ self.latents_mean = torch.tensor([-1.6574, 1.886, -1.383, 2.5155]).view(1, 4, 1, 1)
+ self.latents_std = torch.tensor([8.4927, 5.9022, 6.5498, 5.2299]).view(1, 4, 1, 1)
+
+ self.latent_rgb_factors = [
+ # R G B
+ [ 0.3920, 0.4054, 0.4549],
+ [-0.2634, -0.0196, 0.0653],
+ [ 0.0568, 0.1687, -0.0755],
+ [-0.3112, -0.2359, -0.2076]
+ ]
+ self.taesd_decoder_name = "taesdxl_decoder"
+
+ def process_in(self, latent):
+ latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+ latents_std = self.latents_std.to(latent.device, latent.dtype)
+ return (latent - latents_mean) * self.scale_factor / latents_std
+
+ def process_out(self, latent):
+ latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+ latents_std = self.latents_std.to(latent.device, latent.dtype)
+ return latent * latents_std / self.scale_factor + latents_mean
+
+
+class SD_X4(LatentFormat):
+ def __init__(self):
+ self.scale_factor = 0.08333
+ self.latent_rgb_factors = [
+ [-0.2340, -0.3863, -0.3257],
+ [ 0.0994, 0.0885, -0.0908],
+ [-0.2833, -0.2349, -0.3741],
+ [ 0.2523, -0.0055, -0.1651]
+ ]
+
+class SC_Prior(LatentFormat):
+ def __init__(self):
+ self.scale_factor = 1.0
+ self.latent_rgb_factors = [
+ [-0.0326, -0.0204, -0.0127],
+ [-0.1592, -0.0427, 0.0216],
+ [ 0.0873, 0.0638, -0.0020],
+ [-0.0602, 0.0442, 0.1304],
+ [ 0.0800, -0.0313, -0.1796],
+ [-0.0810, -0.0638, -0.1581],
+ [ 0.1791, 0.1180, 0.0967],
+ [ 0.0740, 0.1416, 0.0432],
+ [-0.1745, -0.1888, -0.1373],
+ [ 0.2412, 0.1577, 0.0928],
+ [ 0.1908, 0.0998, 0.0682],
+ [ 0.0209, 0.0365, -0.0092],
+ [ 0.0448, -0.0650, -0.1728],
+ [-0.1658, -0.1045, -0.1308],
+ [ 0.0542, 0.1545, 0.1325],
+ [-0.0352, -0.1672, -0.2541]
+ ]
+
+class SC_B(LatentFormat):
+ def __init__(self):
+ self.scale_factor = 1.0 / 0.43
+ self.latent_rgb_factors = [
+ [ 0.1121, 0.2006, 0.1023],
+ [-0.2093, -0.0222, -0.0195],
+ [-0.3087, -0.1535, 0.0366],
+ [ 0.0290, -0.1574, -0.4078]
+ ]
\ No newline at end of file
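
SDXL_Playground_2_5 above swaps the single scale factor for per-channel mean/std normalization, so process_out must exactly invert process_in. A small round-trip check with the constants copied from the class (the test latent itself is arbitrary random data):

    import torch

    latents_mean = torch.tensor([-1.6574, 1.886, -1.383, 2.5155]).view(1, 4, 1, 1)
    latents_std = torch.tensor([8.4927, 5.9022, 6.5498, 5.2299]).view(1, 4, 1, 1)
    scale_factor = 0.5

    latent = torch.randn(1, 4, 8, 8)                                    # arbitrary test latent
    normalized = (latent - latents_mean) * scale_factor / latents_std   # process_in
    restored = normalized * latents_std / scale_factor + latents_mean   # process_out
    print(torch.allclose(restored, latent, atol=1e-5))                  # True: the transforms invert
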
diff --git a/ldm_patched/modules/model_base.py b/ldm_patched/modules/model_base.py
index 1374a669..9c69e98b 100644
--- a/ldm_patched/modules/model_base.py
+++ b/ldm_patched/modules/model_base.py
@@ -1,12 +1,11 @@
import torch
-from ldm_patched.ldm.modules.diffusionmodules.openaimodel import UNetModel
+from ldm_patched.ldm.modules.diffusionmodules.openaimodel import UNetModel, Timestep
from ldm_patched.ldm.modules.encoders.noise_aug_modules import CLIPEmbeddingNoiseAugmentation
-from ldm_patched.ldm.modules.diffusionmodules.openaimodel import Timestep
+from ldm_patched.ldm.modules.diffusionmodules.upscaling import ImageConcatWithNoiseAugmentation
import ldm_patched.modules.model_management
import ldm_patched.modules.conds
import ldm_patched.modules.ops
from enum import Enum
-import contextlib
from . import utils
class ModelType(Enum):
@@ -78,8 +77,9 @@ class BaseModel(torch.nn.Module):
extra_conds = {}
for o in kwargs:
extra = kwargs[o]
- if hasattr(extra, "to"):
- extra = extra.to(dtype)
+ if hasattr(extra, "dtype"):
+ if extra.dtype != torch.int and extra.dtype != torch.long:
+ extra = extra.to(dtype)
extra_conds[o] = extra
model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float()
@@ -99,11 +99,29 @@ class BaseModel(torch.nn.Module):
if self.inpaint_model:
concat_keys = ("mask", "masked_image")
cond_concat = []
- denoise_mask = kwargs.get("denoise_mask", None)
- latent_image = kwargs.get("latent_image", None)
+ denoise_mask = kwargs.get("concat_mask", kwargs.get("denoise_mask", None))
+ concat_latent_image = kwargs.get("concat_latent_image", None)
+ if concat_latent_image is None:
+ concat_latent_image = kwargs.get("latent_image", None)
+ else:
+ concat_latent_image = self.process_latent_in(concat_latent_image)
+
noise = kwargs.get("noise", None)
device = kwargs["device"]
+ if concat_latent_image.shape[1:] != noise.shape[1:]:
+ concat_latent_image = utils.common_upscale(concat_latent_image, noise.shape[-1], noise.shape[-2], "bilinear", "center")
+
+ concat_latent_image = utils.resize_to_batch_size(concat_latent_image, noise.shape[0])
+
+ if len(denoise_mask.shape) == len(noise.shape):
+ denoise_mask = denoise_mask[:,:1]
+
+ denoise_mask = denoise_mask.reshape((-1, 1, denoise_mask.shape[-2], denoise_mask.shape[-1]))
+ if denoise_mask.shape[-2:] != noise.shape[-2:]:
+ denoise_mask = utils.common_upscale(denoise_mask, noise.shape[-1], noise.shape[-2], "bilinear", "center")
+ denoise_mask = utils.resize_to_batch_size(denoise_mask.round(), noise.shape[0])
+
def blank_inpaint_image_like(latent_image):
blank_image = torch.ones_like(latent_image)
# these are the values for "zero" in pixel space translated to latent space
@@ -116,9 +134,9 @@ class BaseModel(torch.nn.Module):
for ck in concat_keys:
if denoise_mask is not None:
if ck == "mask":
- cond_concat.append(denoise_mask[:,:1].to(device))
+ cond_concat.append(denoise_mask.to(device))
elif ck == "masked_image":
- cond_concat.append(latent_image.to(device)) #NOTE: the latent_image should be masked by the mask in pixel space
+ cond_concat.append(concat_latent_image.to(device)) #NOTE: the latent_image should be masked by the mask in pixel space
else:
if ck == "mask":
cond_concat.append(torch.ones_like(noise)[:,:1])
@@ -126,9 +144,15 @@ class BaseModel(torch.nn.Module):
cond_concat.append(blank_inpaint_image_like(noise))
data = torch.cat(cond_concat, dim=1)
out['c_concat'] = ldm_patched.modules.conds.CONDNoiseShape(data)
+
adm = self.encode_adm(**kwargs)
if adm is not None:
out['y'] = ldm_patched.modules.conds.CONDRegular(adm)
+
+ cross_attn = kwargs.get("cross_attn", None)
+ if cross_attn is not None:
+ out['c_crossattn'] = ldm_patched.modules.conds.CONDCrossAttn(cross_attn)
+
return out
def load_model_weights(self, sd, unet_prefix=""):
@@ -154,23 +178,28 @@ class BaseModel(torch.nn.Module):
def process_latent_out(self, latent):
return self.latent_format.process_out(latent)
- def state_dict_for_saving(self, clip_state_dict, vae_state_dict):
- clip_state_dict = self.model_config.process_clip_state_dict_for_saving(clip_state_dict)
- unet_sd = self.diffusion_model.state_dict()
- unet_state_dict = {}
- for k in unet_sd:
- unet_state_dict[k] = ldm_patched.modules.model_management.resolve_lowvram_weight(unet_sd[k], self.diffusion_model, k)
+ def state_dict_for_saving(self, clip_state_dict=None, vae_state_dict=None, clip_vision_state_dict=None):
+ extra_sds = []
+ if clip_state_dict is not None:
+ extra_sds.append(self.model_config.process_clip_state_dict_for_saving(clip_state_dict))
+ if vae_state_dict is not None:
+ extra_sds.append(self.model_config.process_vae_state_dict_for_saving(vae_state_dict))
+ if clip_vision_state_dict is not None:
+ extra_sds.append(self.model_config.process_clip_vision_state_dict_for_saving(clip_vision_state_dict))
+ unet_state_dict = self.diffusion_model.state_dict()
unet_state_dict = self.model_config.process_unet_state_dict_for_saving(unet_state_dict)
- vae_state_dict = self.model_config.process_vae_state_dict_for_saving(vae_state_dict)
+
if self.get_dtype() == torch.float16:
- clip_state_dict = utils.convert_sd_to(clip_state_dict, torch.float16)
- vae_state_dict = utils.convert_sd_to(vae_state_dict, torch.float16)
+ extra_sds = map(lambda sd: utils.convert_sd_to(sd, torch.float16), extra_sds)
if self.model_type == ModelType.V_PREDICTION:
unet_state_dict["v_pred"] = torch.tensor([])
- return {**unet_state_dict, **vae_state_dict, **clip_state_dict}
+ for sd in extra_sds:
+ unet_state_dict.update(sd)
+
+ return unet_state_dict
def set_inpaint(self):
self.inpaint_model = True
@@ -189,7 +218,7 @@ class BaseModel(torch.nn.Module):
return (((area * 0.6) / 0.9) + 1024) * (1024 * 1024)
-def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0):
+def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0, seed=None):
adm_inputs = []
weights = []
noise_aug = []
@@ -198,7 +227,7 @@ def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge
weight = unclip_cond["strength"]
noise_augment = unclip_cond["noise_augmentation"]
noise_level = round((noise_augmentor.max_noise_level - 1) * noise_augment)
- c_adm, noise_level_emb = noise_augmentor(adm_cond.to(device), noise_level=torch.tensor([noise_level], device=device))
+ c_adm, noise_level_emb = noise_augmentor(adm_cond.to(device), noise_level=torch.tensor([noise_level], device=device), seed=seed)
adm_out = torch.cat((c_adm, noise_level_emb), 1) * weight
weights.append(weight)
noise_aug.append(noise_augment)
@@ -224,11 +253,11 @@ class SD21UNCLIP(BaseModel):
if unclip_conditioning is None:
return torch.zeros((1, self.adm_channels))
else:
- return unclip_adm(unclip_conditioning, device, self.noise_augmentor, kwargs.get("unclip_noise_augment_merge", 0.05))
+ return unclip_adm(unclip_conditioning, device, self.noise_augmentor, kwargs.get("unclip_noise_augment_merge", 0.05), kwargs.get("seed", 0) - 10)
def sdxl_pooled(args, noise_augmentor):
if "unclip_conditioning" in args:
- return unclip_adm(args.get("unclip_conditioning", None), args["device"], noise_augmentor)[:,:1280]
+ return unclip_adm(args.get("unclip_conditioning", None), args["device"], noise_augmentor, seed=args.get("seed", 0) - 10)[:,:1280]
else:
return args["pooled_output"]
@@ -322,9 +351,75 @@ class SVD_img2vid(BaseModel):
out['c_concat'] = ldm_patched.modules.conds.CONDNoiseShape(latent_image)
+ cross_attn = kwargs.get("cross_attn", None)
+ if cross_attn is not None:
+ out['c_crossattn'] = ldm_patched.modules.conds.CONDCrossAttn(cross_attn)
+
if "time_conditioning" in kwargs:
out["time_context"] = ldm_patched.modules.conds.CONDCrossAttn(kwargs["time_conditioning"])
out['image_only_indicator'] = ldm_patched.modules.conds.CONDConstant(torch.zeros((1,), device=device))
out['num_video_frames'] = ldm_patched.modules.conds.CONDConstant(noise.shape[0])
return out
+
+class Stable_Zero123(BaseModel):
+ def __init__(self, model_config, model_type=ModelType.EPS, device=None, cc_projection_weight=None, cc_projection_bias=None):
+ super().__init__(model_config, model_type, device=device)
+ self.cc_projection = ldm_patched.modules.ops.manual_cast.Linear(cc_projection_weight.shape[1], cc_projection_weight.shape[0], dtype=self.get_dtype(), device=device)
+ self.cc_projection.weight.copy_(cc_projection_weight)
+ self.cc_projection.bias.copy_(cc_projection_bias)
+
+ def extra_conds(self, **kwargs):
+ out = {}
+
+ latent_image = kwargs.get("concat_latent_image", None)
+ noise = kwargs.get("noise", None)
+
+ if latent_image is None:
+ latent_image = torch.zeros_like(noise)
+
+ if latent_image.shape[1:] != noise.shape[1:]:
+ latent_image = utils.common_upscale(latent_image, noise.shape[-1], noise.shape[-2], "bilinear", "center")
+
+ latent_image = utils.resize_to_batch_size(latent_image, noise.shape[0])
+
+ out['c_concat'] = ldm_patched.modules.conds.CONDNoiseShape(latent_image)
+
+ cross_attn = kwargs.get("cross_attn", None)
+ if cross_attn is not None:
+ if cross_attn.shape[-1] != 768:
+ cross_attn = self.cc_projection(cross_attn)
+ out['c_crossattn'] = ldm_patched.modules.conds.CONDCrossAttn(cross_attn)
+ return out
+
+class SD_X4Upscaler(BaseModel):
+ def __init__(self, model_config, model_type=ModelType.V_PREDICTION, device=None):
+ super().__init__(model_config, model_type, device=device)
+ self.noise_augmentor = ImageConcatWithNoiseAugmentation(noise_schedule_config={"linear_start": 0.0001, "linear_end": 0.02}, max_noise_level=350)
+
+ def extra_conds(self, **kwargs):
+ out = {}
+
+ image = kwargs.get("concat_image", None)
+ noise = kwargs.get("noise", None)
+ noise_augment = kwargs.get("noise_augmentation", 0.0)
+ device = kwargs["device"]
+ seed = kwargs["seed"] - 10
+
+ noise_level = round((self.noise_augmentor.max_noise_level) * noise_augment)
+
+ if image is None:
+ image = torch.zeros_like(noise)[:,:3]
+
+ if image.shape[1:] != noise.shape[1:]:
+ image = utils.common_upscale(image.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center")
+
+ noise_level = torch.tensor([noise_level], device=device)
+ if noise_augment > 0:
+ image, noise_level = self.noise_augmentor(image.to(device), noise_level=noise_level, seed=seed)
+
+ image = utils.resize_to_batch_size(image, noise.shape[0])
+
+ out['c_concat'] = ldm_patched.modules.conds.CONDNoiseShape(image)
+ out['y'] = ldm_patched.modules.conds.CONDRegular(noise_level)
+ return out
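
The reworked inpaint branch in BaseModel.extra_conds resizes the concat latent and denoise mask to the noise's spatial and batch shape before concatenating them as c_concat. The shape logic, written out with plain torch ops as a sketch (the patch itself goes through utils.common_upscale and utils.resize_to_batch_size; the sizes here are made up):

    import torch
    import torch.nn.functional as F

    noise = torch.randn(2, 4, 64, 64)          # sampled noise: batch of 2, 64x64 latents
    latent_image = torch.randn(1, 4, 32, 32)   # conditioning latent with a smaller size and batch
    mask = torch.rand(1, 1, 32, 32)            # single-channel denoise mask

    # match the spatial size of the noise
    latent_image = F.interpolate(latent_image, size=noise.shape[-2:], mode="bilinear")
    mask = F.interpolate(mask, size=noise.shape[-2:], mode="bilinear").round()

    # match the batch size of the noise
    latent_image = latent_image.expand(noise.shape[0], -1, -1, -1)
    mask = mask.expand(noise.shape[0], -1, -1, -1)

    c_concat = torch.cat([mask, latent_image], dim=1)  # ("mask", "masked_image") concat order
    print(c_concat.shape)                              # torch.Size([2, 5, 64, 64])
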
diff --git a/ldm_patched/modules/model_detection.py b/ldm_patched/modules/model_detection.py
index e8fc87ac..126386ca 100644
--- a/ldm_patched/modules/model_detection.py
+++ b/ldm_patched/modules/model_detection.py
@@ -34,7 +34,6 @@ def detect_unet_config(state_dict, key_prefix, dtype):
unet_config = {
"use_checkpoint": False,
"image_size": 32,
- "out_channels": 4,
"use_spatial_transformer": True,
"legacy": False
}
@@ -50,6 +49,12 @@ def detect_unet_config(state_dict, key_prefix, dtype):
model_channels = state_dict['{}input_blocks.0.0.weight'.format(key_prefix)].shape[0]
in_channels = state_dict['{}input_blocks.0.0.weight'.format(key_prefix)].shape[1]
+ out_key = '{}out.2.weight'.format(key_prefix)
+ if out_key in state_dict:
+ out_channels = state_dict[out_key].shape[0]
+ else:
+ out_channels = 4
+
num_res_blocks = []
channel_mult = []
attention_resolutions = []
@@ -122,6 +127,7 @@ def detect_unet_config(state_dict, key_prefix, dtype):
transformer_depth_middle = -1
unet_config["in_channels"] = in_channels
+ unet_config["out_channels"] = out_channels
unet_config["model_channels"] = model_channels
unet_config["num_res_blocks"] = num_res_blocks
unet_config["transformer_depth"] = transformer_depth
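
With out_channels no longer hard-coded to 4, detection reads it from the output head's weight when that key exists; a minimal illustration with a made-up state dict and prefix:

    import torch

    key_prefix = "model.diffusion_model."  # illustrative; detection uses whatever prefix it is handed
    state_dict = {key_prefix + "out.2.weight": torch.zeros(8, 320, 3, 3)}  # e.g. an 8-channel head

    out_key = "{}out.2.weight".format(key_prefix)
    out_channels = state_dict[out_key].shape[0] if out_key in state_dict else 4
    print(out_channels)  # 8 rather than the previously hard-coded 4
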
diff --git a/ldm_patched/modules/model_management.py b/ldm_patched/modules/model_management.py
index 31cf95da..840d79a0 100644
--- a/ldm_patched/modules/model_management.py
+++ b/ldm_patched/modules/model_management.py
@@ -60,6 +60,9 @@ except:
pass
if args.always_cpu:
+ if args.always_cpu > 0:
+ torch.set_num_threads(args.always_cpu)
+ print(f"Running on {torch.get_num_threads()} CPU threads")
cpu_state = CPUState.CPU
def is_intel_xpu():
@@ -175,7 +178,7 @@ try:
if int(torch_version[0]) >= 2:
if ENABLE_PYTORCH_ATTENTION == False and args.attention_split == False and args.attention_quad == False:
ENABLE_PYTORCH_ATTENTION = True
- if torch.cuda.is_bf16_supported():
+ if torch.cuda.is_bf16_supported() and torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8:
VAE_DTYPE = torch.bfloat16
if is_intel_xpu():
if args.attention_split == False and args.attention_quad == False:
@@ -186,6 +189,9 @@ except:
if is_intel_xpu():
VAE_DTYPE = torch.bfloat16
+if args.vae_in_cpu:
+ VAE_DTYPE = torch.float32
+
if args.vae_in_fp16:
VAE_DTYPE = torch.float16
elif args.vae_in_bf16:
@@ -218,15 +224,8 @@ if args.all_in_fp16:
FORCE_FP16 = True
if lowvram_available:
- try:
- import accelerate
- if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
- vram_state = set_vram_to
- except Exception as e:
- import traceback
- print(traceback.format_exc())
- print("ERROR: LOW VRAM MODE NEEDS accelerate.")
- lowvram_available = False
+ if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
+ vram_state = set_vram_to
if cpu_state != CPUState.GPU:
@@ -266,6 +265,14 @@ print("VAE dtype:", VAE_DTYPE)
current_loaded_models = []
+def module_size(module):
+ module_mem = 0
+ sd = module.state_dict()
+ for k in sd:
+ t = sd[k]
+ module_mem += t.nelement() * t.element_size()
+ return module_mem
+
class LoadedModel:
def __init__(self, model):
self.model = model
@@ -298,8 +305,20 @@ class LoadedModel:
if lowvram_model_memory > 0:
print("loading in lowvram mode", lowvram_model_memory/(1024 * 1024))
- device_map = accelerate.infer_auto_device_map(self.real_model, max_memory={0: "{}MiB".format(lowvram_model_memory // (1024 * 1024)), "cpu": "16GiB"})
- accelerate.dispatch_model(self.real_model, device_map=device_map, main_device=self.device)
+ mem_counter = 0
+ for m in self.real_model.modules():
+ if hasattr(m, "ldm_patched_cast_weights"):
+ m.prev_ldm_patched_cast_weights = m.ldm_patched_cast_weights
+ m.ldm_patched_cast_weights = True
+ module_mem = module_size(m)
+ if mem_counter + module_mem < lowvram_model_memory:
+ m.to(self.device)
+ mem_counter += module_mem
+ elif hasattr(m, "weight"): #only modules with ldm_patched_cast_weights can be set to lowvram mode
+ m.to(self.device)
+ mem_counter += module_size(m)
+ print("lowvram: loaded module regularly", m)
+
self.model_accelerated = True
if is_intel_xpu() and not args.disable_ipex_hijack:
@@ -309,7 +328,11 @@ class LoadedModel:
def model_unload(self):
if self.model_accelerated:
- accelerate.hooks.remove_hook_from_submodules(self.real_model)
+ for m in self.real_model.modules():
+ if hasattr(m, "prev_ldm_patched_cast_weights"):
+ m.ldm_patched_cast_weights = m.prev_ldm_patched_cast_weights
+ del m.prev_ldm_patched_cast_weights
+
self.model_accelerated = False
self.model.unpatch_model(self.model.offload_device)
@@ -402,14 +425,14 @@ def load_models_gpu(models, memory_required=0):
if lowvram_available and (vram_set_state == VRAMState.LOW_VRAM or vram_set_state == VRAMState.NORMAL_VRAM):
model_size = loaded_model.model_memory_required(torch_dev)
current_free_mem = get_free_memory(torch_dev)
- lowvram_model_memory = int(max(256 * (1024 * 1024), (current_free_mem - 1024 * (1024 * 1024)) / 1.3 ))
+ lowvram_model_memory = int(max(64 * (1024 * 1024), (current_free_mem - 1024 * (1024 * 1024)) / 1.3 ))
if model_size > (current_free_mem - inference_memory): #only switch to lowvram if really necessary
vram_set_state = VRAMState.LOW_VRAM
else:
lowvram_model_memory = 0
if vram_set_state == VRAMState.NO_VRAM:
- lowvram_model_memory = 256 * 1024 * 1024
+ lowvram_model_memory = 64 * 1024 * 1024
cur_loaded_model = loaded_model.model_load(lowvram_model_memory)
current_loaded_models.insert(0, loaded_model)
@@ -538,6 +561,8 @@ def intermediate_device():
return torch.device("cpu")
def vae_device():
+ if args.vae_in_cpu:
+ return torch.device("cpu")
return get_torch_device()
def vae_offload_device():
@@ -566,6 +591,11 @@ def supports_dtype(device, dtype): #TODO
return True
return False
+def device_supports_non_blocking(device):
+ if is_device_mps(device):
+ return False #pytorch bug? mps doesn't support non blocking
+ return True
+
def cast_to_device(tensor, device, dtype, copy=False):
device_supports_cast = False
if tensor.dtype == torch.float32 or tensor.dtype == torch.float16:
@@ -576,9 +606,7 @@ def cast_to_device(tensor, device, dtype, copy=False):
elif is_intel_xpu():
device_supports_cast = True
- non_blocking = True
- if is_device_mps(device):
- non_blocking = False #pytorch bug? mps doesn't support non blocking
+ non_blocking = device_supports_non_blocking(device)
if device_supports_cast:
if copy:
@@ -742,11 +770,11 @@ def soft_empty_cache(force=False):
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
-def resolve_lowvram_weight(weight, model, key):
- if weight.device == torch.device("meta"): #lowvram NOTE: this depends on the inner working of the accelerate library so it might break.
- key_split = key.split('.') # I have no idea why they don't just leave the weight there instead of using the meta device.
- op = ldm_patched.modules.utils.get_attr(model, '.'.join(key_split[:-1]))
- weight = op._hf_hook.weights_map[key_split[-1]]
+def unload_all_models():
+ free_memory(1e30, get_torch_device())
+
+
+def resolve_lowvram_weight(weight, model, key): #TODO: remove
return weight
#TODO: might be cleaner to put this somewhere else
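
The accelerate-based lowvram path is replaced by a manual pass over the model's modules, budgeted with the new module_size helper. That helper is just a byte count over the state_dict, which can be reproduced standalone:

    import torch

    def module_size(module):
        # same accounting as above: element count times element size for every tensor
        return sum(t.nelement() * t.element_size() for t in module.state_dict().values())

    layer = torch.nn.Linear(1024, 1024)   # fp32: (1024*1024 + 1024) parameters, 4 bytes each
    print(module_size(layer))             # 4198400
    print(module_size(layer.half()))      # 2099200 once the same layer is cast to fp16

Modules that fit within the lowvram budget are moved to the device eagerly; the remaining cast-capable modules are left behind and have their weights cast per forward call via ldm_patched_cast_weights (see the ops.py changes below).
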
diff --git a/ldm_patched/modules/model_patcher.py b/ldm_patched/modules/model_patcher.py
index ae795ca9..dd816e52 100644
--- a/ldm_patched/modules/model_patcher.py
+++ b/ldm_patched/modules/model_patcher.py
@@ -28,13 +28,9 @@ class ModelPatcher:
if self.size > 0:
return self.size
model_sd = self.model.state_dict()
- size = 0
- for k in model_sd:
- t = model_sd[k]
- size += t.nelement() * t.element_size()
- self.size = size
+ self.size = ldm_patched.modules.model_management.module_size(self.model)
self.model_keys = set(model_sd.keys())
- return size
+ return self.size
def clone(self):
n = ModelPatcher(self.model, self.load_device, self.offload_device, self.size, self.current_device, weight_inplace_update=self.weight_inplace_update)
@@ -55,14 +51,18 @@ class ModelPatcher:
def memory_required(self, input_shape):
return self.model.memory_required(input_shape=input_shape)
- def set_model_sampler_cfg_function(self, sampler_cfg_function):
+ def set_model_sampler_cfg_function(self, sampler_cfg_function, disable_cfg1_optimization=False):
if len(inspect.signature(sampler_cfg_function).parameters) == 3:
self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way
else:
self.model_options["sampler_cfg_function"] = sampler_cfg_function
+ if disable_cfg1_optimization:
+ self.model_options["disable_cfg1_optimization"] = True
- def set_model_sampler_post_cfg_function(self, post_cfg_function):
+ def set_model_sampler_post_cfg_function(self, post_cfg_function, disable_cfg1_optimization=False):
self.model_options["sampler_post_cfg_function"] = self.model_options.get("sampler_post_cfg_function", []) + [post_cfg_function]
+ if disable_cfg1_optimization:
+ self.model_options["disable_cfg1_optimization"] = True
def set_model_unet_function_wrapper(self, unet_wrapper_function):
self.model_options["model_function_wrapper"] = unet_wrapper_function
@@ -174,40 +174,41 @@ class ModelPatcher:
sd.pop(k)
return sd
- def patch_model(self, device_to=None):
+ def patch_model(self, device_to=None, patch_weights=True):
for k in self.object_patches:
old = getattr(self.model, k)
if k not in self.object_patches_backup:
self.object_patches_backup[k] = old
setattr(self.model, k, self.object_patches[k])
- model_sd = self.model_state_dict()
- for key in self.patches:
- if key not in model_sd:
- print("could not patch. key doesn't exist in model:", key)
- continue
+ if patch_weights:
+ model_sd = self.model_state_dict()
+ for key in self.patches:
+ if key not in model_sd:
+ print("could not patch. key doesn't exist in model:", key)
+ continue
- weight = model_sd[key]
+ weight = model_sd[key]
- inplace_update = self.weight_inplace_update
+ inplace_update = self.weight_inplace_update
- if key not in self.backup:
- self.backup[key] = weight.to(device=self.offload_device, copy=inplace_update)
+ if key not in self.backup:
+ self.backup[key] = weight.to(device=self.offload_device, copy=inplace_update)
+
+ if device_to is not None:
+ temp_weight = ldm_patched.modules.model_management.cast_to_device(weight, device_to, torch.float32, copy=True)
+ else:
+ temp_weight = weight.to(torch.float32, copy=True)
+ out_weight = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype)
+ if inplace_update:
+ ldm_patched.modules.utils.copy_to_param(self.model, key, out_weight)
+ else:
+ ldm_patched.modules.utils.set_attr(self.model, key, out_weight)
+ del temp_weight
if device_to is not None:
- temp_weight = ldm_patched.modules.model_management.cast_to_device(weight, device_to, torch.float32, copy=True)
- else:
- temp_weight = weight.to(torch.float32, copy=True)
- out_weight = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype)
- if inplace_update:
- ldm_patched.modules.utils.copy_to_param(self.model, key, out_weight)
- else:
- ldm_patched.modules.utils.set_attr(self.model, key, out_weight)
- del temp_weight
-
- if device_to is not None:
- self.model.to(device_to)
- self.current_device = device_to
+ self.model.to(device_to)
+ self.current_device = device_to
return self.model
diff --git a/ldm_patched/modules/model_sampling.py b/ldm_patched/modules/model_sampling.py
index f39e275d..8971b4e6 100644
--- a/ldm_patched/modules/model_sampling.py
+++ b/ldm_patched/modules/model_sampling.py
@@ -1,7 +1,7 @@
import torch
-import numpy as np
from ldm_patched.ldm.modules.diffusionmodules.util import make_beta_schedule
import math
+import numpy as np
class EPS:
def calculate_input(self, sigma, noise):
@@ -12,12 +12,28 @@ class EPS:
sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
return model_input - model_output * sigma
+ def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
+ if max_denoise:
+ noise = noise * torch.sqrt(1.0 + sigma ** 2.0)
+ else:
+ noise = noise * sigma
+
+ noise += latent_image
+ return noise
+
+ def inverse_noise_scaling(self, sigma, latent):
+ return latent
class V_PREDICTION(EPS):
def calculate_denoised(self, sigma, model_output, model_input):
sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
+class EDM(V_PREDICTION):
+ def calculate_denoised(self, sigma, model_output, model_input):
+ sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
+ return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) + model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
+
class ModelSamplingDiscrete(torch.nn.Module):
def __init__(self, model_config=None):
@@ -42,8 +58,7 @@ class ModelSamplingDiscrete(torch.nn.Module):
else:
betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
alphas = 1. - betas
- alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32)
- # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
+ alphas_cumprod = torch.cumprod(alphas, dim=0)
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
@@ -55,11 +70,16 @@ class ModelSamplingDiscrete(torch.nn.Module):
# self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32))
sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
+ alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32)
self.set_sigmas(sigmas)
+ self.set_alphas_cumprod(alphas_cumprod.float())
def set_sigmas(self, sigmas):
- self.register_buffer('sigmas', sigmas)
- self.register_buffer('log_sigmas', sigmas.log())
+ self.register_buffer('sigmas', sigmas.float())
+ self.register_buffer('log_sigmas', sigmas.log().float())
+
+ def set_alphas_cumprod(self, alphas_cumprod):
+ self.register_buffer("alphas_cumprod", alphas_cumprod.float())
@property
def sigma_min(self):
@@ -94,8 +114,6 @@ class ModelSamplingDiscrete(torch.nn.Module):
class ModelSamplingContinuousEDM(torch.nn.Module):
def __init__(self, model_config=None):
super().__init__()
- self.sigma_data = 1.0
-
if model_config is not None:
sampling_settings = model_config.sampling_settings
else:
@@ -103,9 +121,11 @@ class ModelSamplingContinuousEDM(torch.nn.Module):
sigma_min = sampling_settings.get("sigma_min", 0.002)
sigma_max = sampling_settings.get("sigma_max", 120.0)
- self.set_sigma_range(sigma_min, sigma_max)
+ sigma_data = sampling_settings.get("sigma_data", 1.0)
+ self.set_parameters(sigma_min, sigma_max, sigma_data)
- def set_sigma_range(self, sigma_min, sigma_max):
+ def set_parameters(self, sigma_min, sigma_max, sigma_data):
+ self.sigma_data = sigma_data
sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), 1000).exp()
self.register_buffer('sigmas', sigmas) #for compatibility with some schedulers
@@ -134,3 +154,56 @@ class ModelSamplingContinuousEDM(torch.nn.Module):
log_sigma_min = math.log(self.sigma_min)
return math.exp((math.log(self.sigma_max) - log_sigma_min) * percent + log_sigma_min)
+
+class StableCascadeSampling(ModelSamplingDiscrete):
+ def __init__(self, model_config=None):
+ super().__init__()
+
+ if model_config is not None:
+ sampling_settings = model_config.sampling_settings
+ else:
+ sampling_settings = {}
+
+ self.set_parameters(sampling_settings.get("shift", 1.0))
+
+ def set_parameters(self, shift=1.0, cosine_s=8e-3):
+ self.shift = shift
+ self.cosine_s = torch.tensor(cosine_s)
+ self._init_alpha_cumprod = torch.cos(self.cosine_s / (1 + self.cosine_s) * torch.pi * 0.5) ** 2
+
+ #This part is just for compatibility with some schedulers in the codebase
+ self.num_timesteps = 10000
+ sigmas = torch.empty((self.num_timesteps), dtype=torch.float32)
+ for x in range(self.num_timesteps):
+ t = (x + 1) / self.num_timesteps
+ sigmas[x] = self.sigma(t)
+
+ self.set_sigmas(sigmas)
+
+ def sigma(self, timestep):
+ alpha_cumprod = (torch.cos((timestep + self.cosine_s) / (1 + self.cosine_s) * torch.pi * 0.5) ** 2 / self._init_alpha_cumprod)
+
+ if self.shift != 1.0:
+ var = alpha_cumprod
+ logSNR = (var/(1-var)).log()
+ logSNR += 2 * torch.log(1.0 / torch.tensor(self.shift))
+ alpha_cumprod = logSNR.sigmoid()
+
+ alpha_cumprod = alpha_cumprod.clamp(0.0001, 0.9999)
+ return ((1 - alpha_cumprod) / alpha_cumprod) ** 0.5
+
+ def timestep(self, sigma):
+ var = 1 / ((sigma * sigma) + 1)
+ var = var.clamp(0, 1.0)
+ s, min_var = self.cosine_s.to(var.device), self._init_alpha_cumprod.to(var.device)
+ t = (((var * min_var) ** 0.5).acos() / (torch.pi * 0.5)) * (1 + s) - s
+ return t
+
+ def percent_to_sigma(self, percent):
+ if percent <= 0.0:
+ return 999999999.9
+ if percent >= 1.0:
+ return 0.0
+
+ percent = 1.0 - percent
+ return self.sigma(torch.tensor(percent))
\ No newline at end of file
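
StableCascadeSampling derives sigma from a squared-cosine alpha-bar schedule, sigma(t) = sqrt((1 - alpha_bar(t)) / alpha_bar(t)); the formula can be evaluated standalone for a few timesteps (shift = 1.0 here, so the logSNR shift branch is skipped):

    import torch

    cosine_s = torch.tensor(8e-3)
    init_alpha_cumprod = torch.cos(cosine_s / (1 + cosine_s) * torch.pi * 0.5) ** 2

    def sigma(t):
        alpha_cumprod = torch.cos((t + cosine_s) / (1 + cosine_s) * torch.pi * 0.5) ** 2 / init_alpha_cumprod
        alpha_cumprod = alpha_cumprod.clamp(0.0001, 0.9999)
        return ((1 - alpha_cumprod) / alpha_cumprod) ** 0.5

    for t in (0.1, 0.5, 0.9):
        print(t, float(sigma(torch.tensor(t))))  # sigma grows with t: later timesteps are noisier
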
diff --git a/ldm_patched/modules/ops.py b/ldm_patched/modules/ops.py
index 08c63384..2d7fa377 100644
--- a/ldm_patched/modules/ops.py
+++ b/ldm_patched/modules/ops.py
@@ -1,27 +1,92 @@
import torch
-from contextlib import contextmanager
+import ldm_patched.modules.model_management
+
+def cast_bias_weight(s, input):
+ bias = None
+ non_blocking = ldm_patched.modules.model_management.device_supports_non_blocking(input.device)
+ if s.bias is not None:
+ bias = s.bias.to(device=input.device, dtype=input.dtype, non_blocking=non_blocking)
+ weight = s.weight.to(device=input.device, dtype=input.dtype, non_blocking=non_blocking)
+ return weight, bias
+
class disable_weight_init:
class Linear(torch.nn.Linear):
+ ldm_patched_cast_weights = False
def reset_parameters(self):
return None
+ def forward_ldm_patched_cast_weights(self, input):
+ weight, bias = cast_bias_weight(self, input)
+ return torch.nn.functional.linear(input, weight, bias)
+
+ def forward(self, *args, **kwargs):
+ if self.ldm_patched_cast_weights:
+ return self.forward_ldm_patched_cast_weights(*args, **kwargs)
+ else:
+ return super().forward(*args, **kwargs)
+
class Conv2d(torch.nn.Conv2d):
+ ldm_patched_cast_weights = False
def reset_parameters(self):
return None
+ def forward_ldm_patched_cast_weights(self, input):
+ weight, bias = cast_bias_weight(self, input)
+ return self._conv_forward(input, weight, bias)
+
+ def forward(self, *args, **kwargs):
+ if self.ldm_patched_cast_weights:
+ return self.forward_ldm_patched_cast_weights(*args, **kwargs)
+ else:
+ return super().forward(*args, **kwargs)
+
class Conv3d(torch.nn.Conv3d):
+ ldm_patched_cast_weights = False
def reset_parameters(self):
return None
+ def forward_ldm_patched_cast_weights(self, input):
+ weight, bias = cast_bias_weight(self, input)
+ return self._conv_forward(input, weight, bias)
+
+ def forward(self, *args, **kwargs):
+ if self.ldm_patched_cast_weights:
+ return self.forward_ldm_patched_cast_weights(*args, **kwargs)
+ else:
+ return super().forward(*args, **kwargs)
+
class GroupNorm(torch.nn.GroupNorm):
+ ldm_patched_cast_weights = False
def reset_parameters(self):
return None
+ def forward_ldm_patched_cast_weights(self, input):
+ weight, bias = cast_bias_weight(self, input)
+ return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps)
+
+ def forward(self, *args, **kwargs):
+ if self.ldm_patched_cast_weights:
+ return self.forward_ldm_patched_cast_weights(*args, **kwargs)
+ else:
+ return super().forward(*args, **kwargs)
+
+
class LayerNorm(torch.nn.LayerNorm):
+ ldm_patched_cast_weights = False
def reset_parameters(self):
return None
+ def forward_ldm_patched_cast_weights(self, input):
+ weight, bias = cast_bias_weight(self, input)
+ return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps)
+
+ def forward(self, *args, **kwargs):
+ if self.ldm_patched_cast_weights:
+ return self.forward_ldm_patched_cast_weights(*args, **kwargs)
+ else:
+ return super().forward(*args, **kwargs)
+
@classmethod
def conv_nd(s, dims, *args, **kwargs):
if dims == 2:
@@ -31,35 +96,19 @@ class disable_weight_init:
else:
raise ValueError(f"unsupported dimensions: {dims}")
-def cast_bias_weight(s, input):
- bias = None
- if s.bias is not None:
- bias = s.bias.to(device=input.device, dtype=input.dtype)
- weight = s.weight.to(device=input.device, dtype=input.dtype)
- return weight, bias
class manual_cast(disable_weight_init):
class Linear(disable_weight_init.Linear):
- def forward(self, input):
- weight, bias = cast_bias_weight(self, input)
- return torch.nn.functional.linear(input, weight, bias)
+ ldm_patched_cast_weights = True
class Conv2d(disable_weight_init.Conv2d):
- def forward(self, input):
- weight, bias = cast_bias_weight(self, input)
- return self._conv_forward(input, weight, bias)
+ ldm_patched_cast_weights = True
class Conv3d(disable_weight_init.Conv3d):
- def forward(self, input):
- weight, bias = cast_bias_weight(self, input)
- return self._conv_forward(input, weight, bias)
+ ldm_patched_cast_weights = True
class GroupNorm(disable_weight_init.GroupNorm):
- def forward(self, input):
- weight, bias = cast_bias_weight(self, input)
- return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps)
+ ldm_patched_cast_weights = True
class LayerNorm(disable_weight_init.LayerNorm):
- def forward(self, input):
- weight, bias = cast_bias_weight(self, input)
- return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps)
+ ldm_patched_cast_weights = True
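
The ops.py refactor keeps a single set of layer classes and gates the on-the-fly weight casting behind the ldm_patched_cast_weights flag, so manual_cast only has to flip that flag and lowvram mode can toggle it at runtime. A minimal standalone version of the same pattern (class and attribute names here are illustrative, not the module's API):

    import torch

    class CastLinear(torch.nn.Linear):
        cast_weights = False  # stand-in for ldm_patched_cast_weights

        def forward(self, x):
            if self.cast_weights:
                w = self.weight.to(device=x.device, dtype=x.dtype)
                b = self.bias.to(device=x.device, dtype=x.dtype) if self.bias is not None else None
                return torch.nn.functional.linear(x, w, b)
            return super().forward(x)

    layer = CastLinear(4, 4)                     # parameters are created and stored in fp32
    layer.cast_weights = True                    # what manual_cast (or lowvram mode) turns on
    x = torch.randn(1, 4, dtype=torch.bfloat16)
    print(layer(x).dtype)                        # torch.bfloat16: weights cast per call
    print(layer.weight.dtype)                    # torch.float32: stored weights are untouched
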
diff --git a/ldm_patched/modules/sample.py b/ldm_patched/modules/sample.py
index 7a7e3092..0f483950 100644
--- a/ldm_patched/modules/sample.py
+++ b/ldm_patched/modules/sample.py
@@ -28,7 +28,6 @@ def prepare_noise(latent_image, seed, noise_inds=None):
def prepare_mask(noise_mask, shape, device):
"""ensures noise mask is of proper dimensions"""
noise_mask = torch.nn.functional.interpolate(noise_mask.reshape((-1, 1, noise_mask.shape[-2], noise_mask.shape[-1])), size=(shape[2], shape[3]), mode="bilinear")
- noise_mask = noise_mask.round()
noise_mask = torch.cat([noise_mask] * shape[1], dim=1)
noise_mask = ldm_patched.modules.utils.repeat_to_batch_size(noise_mask, shape[0])
noise_mask = noise_mask.to(device)
@@ -47,7 +46,8 @@ def convert_cond(cond):
temp = c[1].copy()
model_conds = temp.get("model_conds", {})
if c[0] is not None:
- model_conds["c_crossattn"] = ldm_patched.modules.conds.CONDCrossAttn(c[0])
+ model_conds["c_crossattn"] = ldm_patched.modules.conds.CONDCrossAttn(c[0]) #TODO: remove
+ temp["cross_attn"] = c[0]
temp["model_conds"] = model_conds
out.append(temp)
return out
diff --git a/ldm_patched/modules/samplers.py b/ldm_patched/modules/samplers.py
index bfcb3f56..9ed1fcd2 100644
--- a/ldm_patched/modules/samplers.py
+++ b/ldm_patched/modules/samplers.py
@@ -1,13 +1,9 @@
from ldm_patched.k_diffusion import sampling as k_diffusion_sampling
from ldm_patched.unipc import uni_pc
import torch
-import enum
import collections
from ldm_patched.modules import model_management
import math
-from ldm_patched.modules import model_base
-import ldm_patched.modules.utils
-import ldm_patched.modules.conds
def get_area_and_mult(conds, x_in, timestep_in):
area = (x_in.shape[2], x_in.shape[3], 0, 0)
@@ -244,7 +240,7 @@ def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, model_options):
#The main sampling function shared by all the samplers
#Returns denoised
def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None):
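+    # With cfg == 1 the unconditional branch is skipped, unless "disable_cfg1_optimization" is set in model_options.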
- if math.isclose(cond_scale, 1.0):
+ if math.isclose(cond_scale, 1.0) and model_options.get("disable_cfg1_optimization", False) == False:
uncond_ = None
else:
uncond_ = uncond
@@ -527,7 +523,7 @@ class UNIPCBH2(Sampler):
KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "heunpp2","dpm_2", "dpm_2_ancestral",
"lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
- "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"]
+ "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "tcd", "edm_playground_v2.5"]
class KSAMPLER(Sampler):
def __init__(self, sampler_function, extra_options={}, inpaint_options={}):
@@ -599,6 +595,13 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model
calculate_start_end_timesteps(model, negative)
calculate_start_end_timesteps(model, positive)
+ if latent_image is not None:
+ latent_image = model.process_latent_in(latent_image)
+
+ if hasattr(model, 'extra_conds'):
+ positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask, seed=seed)
+ negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask, seed=seed)
+
#make sure each cond area has an opposite one with the same area
for c in positive:
create_cond_with_same_area_if_none(negative, c)
@@ -610,13 +613,6 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model
apply_empty_x_to_equal_area(list(filter(lambda c: c.get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x])
apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x])
- if latent_image is not None:
- latent_image = model.process_latent_in(latent_image)
-
- if hasattr(model, 'extra_conds'):
- positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask)
- negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask)
-
extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": model_options, "seed":seed}
samples = sampler.sample(model_wrap, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar)
@@ -639,7 +635,7 @@ def calculate_sigmas_scheduler(model, scheduler_name, steps):
elif scheduler_name == "sgm_uniform":
sigmas = normal_scheduler(model, steps, sgm=True)
else:
- print("error invalid scheduler", self.scheduler)
+ print("error invalid scheduler", scheduler_name)
return sigmas
def sampler_object(name):
diff --git a/ldm_patched/modules/sd.py b/ldm_patched/modules/sd.py
index 3caa92da..282f2559 100644
--- a/ldm_patched/modules/sd.py
+++ b/ldm_patched/modules/sd.py
@@ -1,9 +1,6 @@
import torch
-import contextlib
-import math
from ldm_patched.modules import model_management
-from ldm_patched.ldm.util import instantiate_from_config
from ldm_patched.ldm.models.autoencoder import AutoencoderKL, AutoencodingEngine
import yaml
@@ -157,6 +154,8 @@ class VAE:
self.memory_used_encode = lambda shape, dtype: (1767 * shape[2] * shape[3]) * model_management.dtype_size(dtype) #These are for AutoencoderKL and need tweaking (should be lower)
self.memory_used_decode = lambda shape, dtype: (2178 * shape[2] * shape[3] * 64) * model_management.dtype_size(dtype)
+ self.downscale_ratio = 8
+ self.latent_channels = 4
if config is None:
if "decoder.mid.block_1.mix_factor" in sd:
@@ -172,6 +171,11 @@ class VAE:
else:
#default SD1.x/SD2.x VAE parameters
ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
+
+ if 'encoder.down.2.downsample.conv.weight' not in sd: #Stable diffusion x4 upscaler VAE
+ ddconfig['ch_mult'] = [1, 2, 4]
+ self.downscale_ratio = 4
+
self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=4)
else:
self.first_stage_model = AutoencoderKL(**(config['params']))
@@ -204,9 +208,9 @@ class VAE:
decode_fn = lambda a: (self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)) + 1.0).float()
output = torch.clamp((
- (ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar) +
- ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar) +
- ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar))
+ (ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar) +
+ ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar) +
+ ldm_patched.modules.utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar))
/ 3.0) / 2.0, min=0.0, max=1.0)
return output
@@ -217,9 +221,9 @@ class VAE:
pbar = ldm_patched.modules.utils.ProgressBar(steps)
encode_fn = lambda a: self.first_stage_model.encode((2. * a - 1.).to(self.vae_dtype).to(self.device)).float()
- samples = ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar)
- samples += ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar)
- samples += ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar)
+ samples = ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar)
+ samples += ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar)
+ samples += ldm_patched.modules.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar)
samples /= 3.0
return samples
@@ -231,7 +235,7 @@ class VAE:
batch_number = int(free_memory / memory_used)
batch_number = max(1, batch_number)
- pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device=self.output_device)
+ pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * self.downscale_ratio), round(samples_in.shape[3] * self.downscale_ratio)), device=self.output_device)
for x in range(0, samples_in.shape[0], batch_number):
samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device)
pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples).to(self.output_device).float() + 1.0) / 2.0, min=0.0, max=1.0)
@@ -255,7 +259,7 @@ class VAE:
free_memory = model_management.get_free_memory(self.device)
batch_number = int(free_memory / memory_used)
batch_number = max(1, batch_number)
- samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device=self.output_device)
+ samples = torch.empty((pixel_samples.shape[0], self.latent_channels, round(pixel_samples.shape[2] // self.downscale_ratio), round(pixel_samples.shape[3] // self.downscale_ratio)), device=self.output_device)
for x in range(0, pixel_samples.shape[0], batch_number):
pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.vae_dtype).to(self.device)
samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).to(self.output_device).float()
@@ -423,12 +427,13 @@ def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_cl
return (ldm_patched.modules.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae)
-def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True):
+def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True, vae_filename_param=None):
sd = ldm_patched.modules.utils.load_torch_file(ckpt_path)
sd_keys = sd.keys()
clip = None
clipvision = None
vae = None
+ vae_filename = None
model = None
model_patcher = None
clip_target = None
@@ -458,8 +463,12 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
model.load_model_weights(sd, "model.diffusion_model.")
if output_vae:
- vae_sd = ldm_patched.modules.utils.state_dict_prefix_replace(sd, {"first_stage_model.": ""}, filter_keys=True)
- vae_sd = model_config.process_vae_state_dict(vae_sd)
+ if vae_filename_param is None:
+ vae_sd = ldm_patched.modules.utils.state_dict_prefix_replace(sd, {"first_stage_model.": ""}, filter_keys=True)
+ vae_sd = model_config.process_vae_state_dict(vae_sd)
+ else:
+ vae_sd = ldm_patched.modules.utils.load_torch_file(vae_filename_param)
+ vae_filename = vae_filename_param
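+            # A caller-supplied VAE file takes precedence over the VAE baked into the checkpoint;
+            # its filename is passed back to the caller together with the other outputs.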
vae = VAE(sd=vae_sd)
if output_clip:
@@ -481,7 +490,7 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
print("loaded straight to GPU")
model_management.load_model_gpu(model_patcher)
- return (model_patcher, clip, vae, clipvision)
+ return model_patcher, clip, vae, vae_filename, clipvision
def load_unet_state_dict(sd): #load unet in diffusers format
@@ -527,7 +536,14 @@ def load_unet(unet_path):
raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path))
return model
-def save_checkpoint(output_path, model, clip, vae, metadata=None):
- model_management.load_models_gpu([model, clip.load_model()])
- sd = model.model.state_dict_for_saving(clip.get_sd(), vae.get_sd())
+def save_checkpoint(output_path, model, clip=None, vae=None, clip_vision=None, metadata=None):
+ clip_sd = None
+ load_models = [model]
+ if clip is not None:
+ load_models.append(clip.load_model())
+ clip_sd = clip.get_sd()
+
+ model_management.load_models_gpu(load_models)
+ clip_vision_sd = clip_vision.get_sd() if clip_vision is not None else None
+ sd = model.model.state_dict_for_saving(clip_sd, vae.get_sd(), clip_vision_sd)
ldm_patched.modules.utils.save_torch_file(sd, output_path, metadata=metadata)
diff --git a/ldm_patched/modules/sd1_clip.py b/ldm_patched/modules/sd1_clip.py
index 736d6167..3727fb48 100644
--- a/ldm_patched/modules/sd1_clip.py
+++ b/ldm_patched/modules/sd1_clip.py
@@ -6,7 +6,6 @@ import torch
import traceback
import zipfile
from . import model_management
-import contextlib
import ldm_patched.modules.clip_model
import json
diff --git a/ldm_patched/modules/supported_models.py b/ldm_patched/modules/supported_models.py
index 2f2dee87..1d442d4d 100644
--- a/ldm_patched/modules/supported_models.py
+++ b/ldm_patched/modules/supported_models.py
@@ -252,5 +252,59 @@ class SVD_img2vid(supported_models_base.BASE):
def clip_target(self):
return None
-models = [SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B, Segmind_Vega]
+class Stable_Zero123(supported_models_base.BASE):
+ unet_config = {
+ "context_dim": 768,
+ "model_channels": 320,
+ "use_linear_in_transformer": False,
+ "adm_in_channels": None,
+ "use_temporal_attention": False,
+ "in_channels": 8,
+ }
+
+ unet_extra_config = {
+ "num_heads": 8,
+ "num_head_channels": -1,
+ }
+
+ clip_vision_prefix = "cond_stage_model.model.visual."
+
+ latent_format = latent_formats.SD15
+
+ def get_model(self, state_dict, prefix="", device=None):
+ out = model_base.Stable_Zero123(self, device=device, cc_projection_weight=state_dict["cc_projection.weight"], cc_projection_bias=state_dict["cc_projection.bias"])
+ return out
+
+ def clip_target(self):
+ return None
+
+class SD_X4Upscaler(SD20):
+ unet_config = {
+ "context_dim": 1024,
+ "model_channels": 256,
+ 'in_channels': 7,
+ "use_linear_in_transformer": True,
+ "adm_in_channels": None,
+ "use_temporal_attention": False,
+ }
+
+ unet_extra_config = {
+ "disable_self_attentions": [True, True, True, False],
+ "num_classes": 1000,
+ "num_heads": 8,
+ "num_head_channels": -1,
+ }
+
+ latent_format = latent_formats.SD_X4
+
+ sampling_settings = {
+ "linear_start": 0.0001,
+ "linear_end": 0.02,
+ }
+
+ def get_model(self, state_dict, prefix="", device=None):
+ out = model_base.SD_X4Upscaler(self, device=device)
+ return out
+
+models = [Stable_Zero123, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B, Segmind_Vega, SD_X4Upscaler]
models += [SVD_img2vid]
diff --git a/ldm_patched/modules/supported_models_base.py b/ldm_patched/modules/supported_models_base.py
index 49087d23..5baf4bca 100644
--- a/ldm_patched/modules/supported_models_base.py
+++ b/ldm_patched/modules/supported_models_base.py
@@ -65,6 +65,12 @@ class BASE:
replace_prefix = {"": "cond_stage_model."}
return utils.state_dict_prefix_replace(state_dict, replace_prefix)
+ def process_clip_vision_state_dict_for_saving(self, state_dict):
+ replace_prefix = {}
+ if self.clip_vision_prefix is not None:
+ replace_prefix[""] = self.clip_vision_prefix
+ return utils.state_dict_prefix_replace(state_dict, replace_prefix)
+
def process_unet_state_dict_for_saving(self, state_dict):
replace_prefix = {"": "model.diffusion_model."}
return utils.state_dict_prefix_replace(state_dict, replace_prefix)
diff --git a/ldm_patched/pfn/architecture/HAT.py b/ldm_patched/pfn/architecture/HAT.py
index 66947421..7e12ad0f 100644
--- a/ldm_patched/pfn/architecture/HAT.py
+++ b/ldm_patched/pfn/architecture/HAT.py
@@ -14,7 +14,7 @@ from .timm.weight_init import trunc_normal_
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
- From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
+ From: https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py
"""
if drop_prob == 0.0 or not training:
return x
@@ -30,7 +30,7 @@ def drop_path(x, drop_prob: float = 0.0, training: bool = False):
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
- From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
+ From: https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py
"""
def __init__(self, drop_prob=None):
diff --git a/ldm_patched/pfn/architecture/RRDB.py b/ldm_patched/pfn/architecture/RRDB.py
index b50db7c2..8d318b90 100644
--- a/ldm_patched/pfn/architecture/RRDB.py
+++ b/ldm_patched/pfn/architecture/RRDB.py
@@ -13,7 +13,7 @@ import torch.nn.functional as F
from . import block as B
-# Borrowed from https://github.com/rlaphoenix/VSGAN/blob/master/vsgan/archs/ESRGAN.py
+# Borrowed from https://github.com/rlaphoenix/VSGAN/blob/master/vsgan/archs/esrgan.py
# Which enhanced stuff that was already here
class RRDBNet(nn.Module):
def __init__(
diff --git a/ldm_patched/pfn/architecture/face/codeformer.py b/ldm_patched/pfn/architecture/face/codeformer.py
index 06614007..a0e2e985 100644
--- a/ldm_patched/pfn/architecture/face/codeformer.py
+++ b/ldm_patched/pfn/architecture/face/codeformer.py
@@ -2,7 +2,7 @@
Modified from https://github.com/sczhou/CodeFormer
VQGAN code, adapted from the original created by the Unleashing Transformers authors:
https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py
-This verison of the arch specifically was gathered from an old version of GFPGAN. If this is a problem, please contact me.
+This version of the arch specifically was gathered from an old version of GFPGAN. If this is a problem, please contact me.
"""
import math
from typing import Optional
diff --git a/ldm_patched/taesd/taesd.py b/ldm_patched/taesd/taesd.py
index ac88e594..0b4b885f 100644
--- a/ldm_patched/taesd/taesd.py
+++ b/ldm_patched/taesd/taesd.py
@@ -7,9 +7,10 @@ import torch
import torch.nn as nn
import ldm_patched.modules.utils
+import ldm_patched.modules.ops
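+# Build TAESD convolutions through ops.disable_weight_init so reset_parameters() is a no-op at construction time.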
def conv(n_in, n_out, **kwargs):
- return nn.Conv2d(n_in, n_out, 3, padding=1, **kwargs)
+ return ldm_patched.modules.ops.disable_weight_init.Conv2d(n_in, n_out, 3, padding=1, **kwargs)
class Clamp(nn.Module):
def forward(self, x):
@@ -19,7 +20,7 @@ class Block(nn.Module):
def __init__(self, n_in, n_out):
super().__init__()
self.conv = nn.Sequential(conv(n_in, n_out), nn.ReLU(), conv(n_out, n_out), nn.ReLU(), conv(n_out, n_out))
- self.skip = nn.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity()
+ self.skip = ldm_patched.modules.ops.disable_weight_init.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity()
self.fuse = nn.ReLU()
def forward(self, x):
return self.fuse(self.conv(x) + self.skip(x))
diff --git a/ldm_patched/utils/path_utils.py b/ldm_patched/utils/path_utils.py
index 34cd52c9..6cae149b 100644
--- a/ldm_patched/utils/path_utils.py
+++ b/ldm_patched/utils/path_utils.py
@@ -29,11 +29,14 @@ folder_names_and_paths["custom_nodes"] = ([os.path.join(base_path, "custom_nodes
folder_names_and_paths["hypernetworks"] = ([os.path.join(models_dir, "hypernetworks")], supported_pt_extensions)
+folder_names_and_paths["photomaker"] = ([os.path.join(models_dir, "photomaker")], supported_pt_extensions)
+
folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers")], {""})
output_directory = os.path.join(os.getcwd(), "output")
temp_directory = os.path.join(os.getcwd(), "temp")
input_directory = os.path.join(os.getcwd(), "input")
+user_directory = os.path.join(os.getcwd(), "user")
filename_list_cache = {}
@@ -137,15 +140,27 @@ def recursive_search(directory, excluded_dir_names=None):
excluded_dir_names = []
result = []
- dirs = {directory: os.path.getmtime(directory)}
+ dirs = {}
+
+ # Attempt to add the initial directory to dirs with error handling
+ try:
+ dirs[directory] = os.path.getmtime(directory)
+ except FileNotFoundError:
+ print(f"Warning: Unable to access {directory}. Skipping this path.")
+
for dirpath, subdirs, filenames in os.walk(directory, followlinks=True, topdown=True):
subdirs[:] = [d for d in subdirs if d not in excluded_dir_names]
for file_name in filenames:
relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory)
result.append(relative_path)
+
for d in subdirs:
path = os.path.join(dirpath, d)
- dirs[path] = os.path.getmtime(path)
+ try:
+ dirs[path] = os.path.getmtime(path)
+ except FileNotFoundError:
+ print(f"Warning: Unable to access {path}. Skipping this path.")
+ continue
return result, dirs
def filter_files_extensions(files, extensions):
@@ -184,8 +199,7 @@ def cached_filename_list_(folder_name):
if folder_name not in filename_list_cache:
return None
out = filename_list_cache[folder_name]
- if time.perf_counter() < (out[2] + 0.5):
- return out
+
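+    # The 0.5 s time-window shortcut is gone; cache validity is now decided solely by the per-folder mtimes checked below.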
for x in out[1]:
time_modified = out[1][x]
folder = x
diff --git a/models/safety_checker/put_safety_checker_models_here b/models/safety_checker/put_safety_checker_models_here
new file mode 100644
index 00000000..e69de29b
diff --git a/modules/__init__.py b/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/modules/advanced_parameters.py b/modules/advanced_parameters.py
deleted file mode 100644
index ea04db6c..00000000
--- a/modules/advanced_parameters.py
+++ /dev/null
@@ -1,30 +0,0 @@
-disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \
- scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \
- overwrite_vary_strength, overwrite_upscale_strength, \
- mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \
- debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \
- refiner_swap_method, \
- freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \
- debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field = [None] * 32
-
-
-def set_all_advanced_parameters(*args):
- global disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \
- scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \
- overwrite_vary_strength, overwrite_upscale_strength, \
- mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \
- debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \
- refiner_swap_method, \
- freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \
- debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field
-
- disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \
- scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \
- overwrite_vary_strength, overwrite_upscale_strength, \
- mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \
- debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \
- refiner_swap_method, \
- freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \
- debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field = args
-
- return
diff --git a/modules/async_worker.py b/modules/async_worker.py
index fab2508e..76e10f92 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -1,4 +1,8 @@
import threading
+import re
+from modules.patch import PatchSettings, patch_settings, patch_all
+
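+# patch_all() is applied once at import time; per-task values are stored in patch_settings keyed by the worker's PID
+# (see the PatchSettings usage further down in handler()).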
+patch_all()
class AsyncTask:
@@ -6,6 +10,8 @@ class AsyncTask:
self.args = args
self.yields = []
self.results = []
+ self.last_stop = False
+ self.processing = False
async_tasks = []
@@ -14,9 +20,11 @@ async_tasks = []
def worker():
global async_tasks
+ import os
import traceback
import math
import numpy as np
+ import cv2
import torch
import time
import shared
@@ -31,17 +39,24 @@ def worker():
import extras.preprocessors as preprocessors
import modules.inpaint_worker as inpaint_worker
import modules.constants as constants
- import modules.advanced_parameters as advanced_parameters
import extras.ip_adapter as ip_adapter
import extras.face_crop
import fooocus_version
+ import args_manager
- from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion
+ from extras.censor import default_censor
+ from modules.sdxl_styles import apply_style, get_random_style, fooocus_expansion, apply_arrays, random_style_name
from modules.private_logger import log
from extras.expansion import safe_str
- from modules.util import remove_empty_str, HWC3, resize_image, \
- get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image
+ from modules.util import (remove_empty_str, HWC3, resize_image, get_image_shape_ceil, set_image_shape_ceil,
+ get_shape_ceil, resample_image, erode_or_dilate, get_enabled_loras,
+ parse_lora_references_from_prompt, apply_wildcards)
from modules.upscaler import perform_upscale
+ from modules.flags import Performance
+ from modules.meta_parser import get_metadata_parser, MetadataScheme
+
+ pid = os.getpid()
+ print(f'Started worker with PID {pid}')
try:
async_gradio_app = shared.gradio_root
@@ -56,10 +71,15 @@ def worker():
print(f'[Fooocus] {text}')
async_task.yields.append(['preview', (number, text, None)])
- def yield_result(async_task, imgs, do_not_show_finished_images=False):
+ def yield_result(async_task, imgs, black_out_nsfw, censor=True, do_not_show_finished_images=False,
+ progressbar_index=flags.preparation_step_count):
if not isinstance(imgs, list):
imgs = [imgs]
+ if censor and (modules.config.default_black_out_nsfw or black_out_nsfw):
+ progressbar(async_task, progressbar_index, 'Checking for NSFW content ...')
+ imgs = default_censor(imgs)
+
async_task.results = async_task.results + imgs
if do_not_show_finished_images:
@@ -69,19 +89,20 @@ def worker():
return
def build_image_wall(async_task):
- if not advanced_parameters.generate_image_grid:
+ results = []
+
+ if len(async_task.results) < 2:
return
- results = async_task.results
-
- if len(results) < 2:
- return
-
- for img in results:
+ for img in async_task.results:
+ if isinstance(img, str) and os.path.exists(img):
+ img = cv2.imread(img)
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if not isinstance(img, np.ndarray):
return
if img.ndim != 3:
return
+ results.append(img)
H, W, C = results[0].shape
@@ -115,6 +136,7 @@ def worker():
@torch.inference_mode()
def handler(async_task):
execution_start_time = time.perf_counter()
+ async_task.processing = True
args = async_task.args
args.reverse()
@@ -122,16 +144,19 @@ def worker():
prompt = args.pop()
negative_prompt = args.pop()
style_selections = args.pop()
- performance_selection = args.pop()
+ performance_selection = Performance(args.pop())
aspect_ratios_selection = args.pop()
image_number = args.pop()
+ output_format = args.pop()
image_seed = args.pop()
+ read_wildcards_in_order = args.pop()
sharpness = args.pop()
guidance_scale = args.pop()
base_model_name = args.pop()
refiner_model_name = args.pop()
refiner_switch = args.pop()
- loras = [[str(args.pop()), float(args.pop())] for _ in range(5)]
+ loras = get_enabled_loras([(bool(args.pop()), str(args.pop()), float(args.pop())) for _ in
+ range(modules.config.default_max_lora_number)])
input_image_checkbox = args.pop()
current_tab = args.pop()
uov_method = args.pop()
@@ -139,9 +164,54 @@ def worker():
outpaint_selections = args.pop()
inpaint_input_image = args.pop()
inpaint_additional_prompt = args.pop()
+ inpaint_mask_image_upload = args.pop()
+
+ disable_preview = args.pop()
+ disable_intermediate_results = args.pop()
+ disable_seed_increment = args.pop()
+ black_out_nsfw = args.pop()
+ adm_scaler_positive = args.pop()
+ adm_scaler_negative = args.pop()
+ adm_scaler_end = args.pop()
+ adaptive_cfg = args.pop()
+ clip_skip = args.pop()
+ sampler_name = args.pop()
+ scheduler_name = args.pop()
+ vae_name = args.pop()
+ overwrite_step = args.pop()
+ overwrite_switch = args.pop()
+ overwrite_width = args.pop()
+ overwrite_height = args.pop()
+ overwrite_vary_strength = args.pop()
+ overwrite_upscale_strength = args.pop()
+ mixing_image_prompt_and_vary_upscale = args.pop()
+ mixing_image_prompt_and_inpaint = args.pop()
+ debugging_cn_preprocessor = args.pop()
+ skipping_cn_preprocessor = args.pop()
+ canny_low_threshold = args.pop()
+ canny_high_threshold = args.pop()
+ refiner_swap_method = args.pop()
+ controlnet_softness = args.pop()
+ freeu_enabled = args.pop()
+ freeu_b1 = args.pop()
+ freeu_b2 = args.pop()
+ freeu_s1 = args.pop()
+ freeu_s2 = args.pop()
+ debugging_inpaint_preprocessor = args.pop()
+ inpaint_disable_initial_latent = args.pop()
+ inpaint_engine = args.pop()
+ inpaint_strength = args.pop()
+ inpaint_respective_field = args.pop()
+ inpaint_mask_upload_checkbox = args.pop()
+ invert_mask_checkbox = args.pop()
+ inpaint_erode_or_dilate = args.pop()
+
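+        # When metadata is disabled via args these values are not read from the argument list and defaults are used instead.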
+ save_metadata_to_images = args.pop() if not args_manager.args.disable_metadata else False
+ metadata_scheme = MetadataScheme(
+ args.pop()) if not args_manager.args.disable_metadata else MetadataScheme.FOOOCUS
cn_tasks = {x: [] for x in flags.ip_list}
- for _ in range(4):
+ for _ in range(flags.controlnet_image_count):
cn_img = args.pop()
cn_stop = args.pop()
cn_weight = args.pop()
@@ -166,49 +236,84 @@ def worker():
print(f'Refiner disabled because base model and refiner are same.')
refiner_model_name = 'None'
- assert performance_selection in ['Speed', 'Quality', 'Extreme Speed']
+ steps = performance_selection.steps()
- steps = 30
+ performance_loras = []
- if performance_selection == 'Speed':
- steps = 30
-
- if performance_selection == 'Quality':
- steps = 60
-
- if performance_selection == 'Extreme Speed':
+ if performance_selection == Performance.EXTREME_SPEED:
print('Enter LCM mode.')
progressbar(async_task, 1, 'Downloading LCM components ...')
- loras += [(modules.config.downloading_sdxl_lcm_lora(), 1.0)]
+ performance_loras += [(modules.config.downloading_sdxl_lcm_lora(), 1.0)]
if refiner_model_name != 'None':
print(f'Refiner disabled in LCM mode.')
refiner_model_name = 'None'
- sampler_name = advanced_parameters.sampler_name = 'lcm'
- scheduler_name = advanced_parameters.scheduler_name = 'lcm'
- modules.patch.sharpness = sharpness = 0.0
- cfg_scale = guidance_scale = 1.0
- modules.patch.adaptive_cfg = advanced_parameters.adaptive_cfg = 1.0
+ sampler_name = 'lcm'
+ scheduler_name = 'lcm'
+ sharpness = 0.0
+ guidance_scale = 1.0
+ adaptive_cfg = 1.0
refiner_switch = 1.0
- modules.patch.positive_adm_scale = advanced_parameters.adm_scaler_positive = 1.0
- modules.patch.negative_adm_scale = advanced_parameters.adm_scaler_negative = 1.0
- modules.patch.adm_scaler_end = advanced_parameters.adm_scaler_end = 0.0
- steps = 8
+ adm_scaler_positive = 1.0
+ adm_scaler_negative = 1.0
+ adm_scaler_end = 0.0
- modules.patch.adaptive_cfg = advanced_parameters.adaptive_cfg
- print(f'[Parameters] Adaptive CFG = {modules.patch.adaptive_cfg}')
+ elif performance_selection == Performance.LIGHTNING:
+ print('Enter Lightning mode.')
+ progressbar(async_task, 1, 'Downloading Lightning components ...')
+ performance_loras += [(modules.config.downloading_sdxl_lightning_lora(), 1.0)]
- modules.patch.sharpness = sharpness
- print(f'[Parameters] Sharpness = {modules.patch.sharpness}')
+ if refiner_model_name != 'None':
+ print(f'Refiner disabled in Lightning mode.')
- modules.patch.positive_adm_scale = advanced_parameters.adm_scaler_positive
- modules.patch.negative_adm_scale = advanced_parameters.adm_scaler_negative
- modules.patch.adm_scaler_end = advanced_parameters.adm_scaler_end
+ refiner_model_name = 'None'
+ sampler_name = 'euler'
+ scheduler_name = 'sgm_uniform'
+ sharpness = 0.0
+ guidance_scale = 1.0
+ adaptive_cfg = 1.0
+ refiner_switch = 1.0
+ adm_scaler_positive = 1.0
+ adm_scaler_negative = 1.0
+ adm_scaler_end = 0.0
+
+ elif performance_selection == Performance.HYPER_SD:
+ print('Enter Hyper-SD mode.')
+ progressbar(async_task, 1, 'Downloading Hyper-SD components ...')
+ performance_loras += [(modules.config.downloading_sdxl_hyper_sd_lora(), 0.8)]
+
+ if refiner_model_name != 'None':
+ print(f'Refiner disabled in Hyper-SD mode.')
+
+ refiner_model_name = 'None'
+ sampler_name = 'dpmpp_sde_gpu'
+ scheduler_name = 'karras'
+ sharpness = 0.0
+ guidance_scale = 1.0
+ adaptive_cfg = 1.0
+ refiner_switch = 1.0
+ adm_scaler_positive = 1.0
+ adm_scaler_negative = 1.0
+ adm_scaler_end = 0.0
+
+ print(f'[Parameters] Adaptive CFG = {adaptive_cfg}')
+ print(f'[Parameters] CLIP Skip = {clip_skip}')
+ print(f'[Parameters] Sharpness = {sharpness}')
+ print(f'[Parameters] ControlNet Softness = {controlnet_softness}')
print(f'[Parameters] ADM Scale = '
- f'{modules.patch.positive_adm_scale} : '
- f'{modules.patch.negative_adm_scale} : '
- f'{modules.patch.adm_scaler_end}')
+ f'{adm_scaler_positive} : '
+ f'{adm_scaler_negative} : '
+ f'{adm_scaler_end}')
+
+ patch_settings[pid] = PatchSettings(
+ sharpness,
+ adm_scaler_end,
+ adm_scaler_positive,
+ adm_scaler_negative,
+ controlnet_softness,
+ adaptive_cfg
+ )
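+        # Per-process patch settings replace the old module-level globals on modules.patch.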
cfg_scale = float(guidance_scale)
print(f'[Parameters] CFG = {cfg_scale}')
@@ -221,10 +326,9 @@ def worker():
width, height = int(width), int(height)
skip_prompt_processing = False
- refiner_swap_method = advanced_parameters.refiner_swap_method
inpaint_worker.current_task = None
- inpaint_parameterized = advanced_parameters.inpaint_engine != 'None'
+ inpaint_parameterized = inpaint_engine != 'None'
inpaint_image = None
inpaint_mask = None
inpaint_head_model_path = None
@@ -238,15 +342,12 @@ def worker():
seed = int(image_seed)
print(f'[Parameters] Seed = {seed}')
- sampler_name = advanced_parameters.sampler_name
- scheduler_name = advanced_parameters.scheduler_name
-
goals = []
tasks = []
if input_image_checkbox:
if (current_tab == 'uov' or (
- current_tab == 'ip' and advanced_parameters.mixing_image_prompt_and_vary_upscale)) \
+ current_tab == 'ip' and mixing_image_prompt_and_vary_upscale)) \
and uov_method != flags.disabled and uov_input_image is not None:
uov_input_image = HWC3(uov_input_image)
if 'vary' in uov_method:
@@ -256,24 +357,31 @@ def worker():
if 'fast' in uov_method:
skip_prompt_processing = True
else:
- steps = 18
-
- if performance_selection == 'Speed':
- steps = 18
-
- if performance_selection == 'Quality':
- steps = 36
-
- if performance_selection == 'Extreme Speed':
- steps = 8
+ steps = performance_selection.steps_uov()
progressbar(async_task, 1, 'Downloading upscale models ...')
modules.config.downloading_upscale_model()
if (current_tab == 'inpaint' or (
- current_tab == 'ip' and advanced_parameters.mixing_image_prompt_and_inpaint)) \
+ current_tab == 'ip' and mixing_image_prompt_and_inpaint)) \
and isinstance(inpaint_input_image, dict):
inpaint_image = inpaint_input_image['image']
inpaint_mask = inpaint_input_image['mask'][:, :, 0]
+
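+                    # Optionally merge an uploaded mask: resample it to the inpaint image size,
+                    # binarize at 127 and combine it with the drawn mask.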
+ if inpaint_mask_upload_checkbox:
+ if isinstance(inpaint_mask_image_upload, np.ndarray):
+ if inpaint_mask_image_upload.ndim == 3:
+ H, W, C = inpaint_image.shape
+ inpaint_mask_image_upload = resample_image(inpaint_mask_image_upload, width=W, height=H)
+ inpaint_mask_image_upload = np.mean(inpaint_mask_image_upload, axis=2)
+ inpaint_mask_image_upload = (inpaint_mask_image_upload > 127).astype(np.uint8) * 255
+ inpaint_mask = np.maximum(inpaint_mask, inpaint_mask_image_upload)
+
+ if int(inpaint_erode_or_dilate) != 0:
+ inpaint_mask = erode_or_dilate(inpaint_mask, inpaint_erode_or_dilate)
+
+ if invert_mask_checkbox:
+ inpaint_mask = 255 - inpaint_mask
+
inpaint_image = HWC3(inpaint_image)
if isinstance(inpaint_image, np.ndarray) and isinstance(inpaint_mask, np.ndarray) \
and (np.any(inpaint_mask > 127) or len(outpaint_selections) > 0):
@@ -282,12 +390,12 @@ def worker():
if inpaint_parameterized:
progressbar(async_task, 1, 'Downloading inpainter ...')
inpaint_head_model_path, inpaint_patch_model_path = modules.config.downloading_inpaint_models(
- advanced_parameters.inpaint_engine)
+ inpaint_engine)
base_model_additional_loras += [(inpaint_patch_model_path, 1.0)]
print(f'[Inpaint] Current inpaint model is {inpaint_patch_model_path}')
if refiner_model_name == 'None':
use_synthetic_refiner = True
- refiner_switch = 0.5
+ refiner_switch = 0.8
else:
inpaint_head_model_path, inpaint_patch_model_path = None, None
print(f'[Inpaint] Parameterized inpaint is disabled.')
@@ -298,8 +406,8 @@ def worker():
prompt = inpaint_additional_prompt + '\n' + prompt
goals.append('inpaint')
if current_tab == 'ip' or \
- advanced_parameters.mixing_image_prompt_and_inpaint or \
- advanced_parameters.mixing_image_prompt_and_vary_upscale:
+ mixing_image_prompt_and_vary_upscale or \
+ mixing_image_prompt_and_inpaint:
goals.append('cn')
progressbar(async_task, 1, 'Downloading control models ...')
if len(cn_tasks[flags.cn_canny]) > 0:
@@ -318,19 +426,19 @@ def worker():
ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_path)
ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_face_path)
+ if overwrite_step > 0:
+ steps = overwrite_step
+
switch = int(round(steps * refiner_switch))
- if advanced_parameters.overwrite_step > 0:
- steps = advanced_parameters.overwrite_step
+ if overwrite_switch > 0:
+ switch = overwrite_switch
- if advanced_parameters.overwrite_switch > 0:
- switch = advanced_parameters.overwrite_switch
+ if overwrite_width > 0:
+ width = overwrite_width
- if advanced_parameters.overwrite_width > 0:
- width = advanced_parameters.overwrite_width
-
- if advanced_parameters.overwrite_height > 0:
- height = advanced_parameters.overwrite_height
+ if overwrite_height > 0:
+ height = overwrite_height
print(f'[Parameters] Sampler = {sampler_name} - {scheduler_name}')
print(f'[Parameters] Steps = {steps} - {switch}')
@@ -352,27 +460,45 @@ def worker():
extra_positive_prompts = prompts[1:] if len(prompts) > 1 else []
extra_negative_prompts = negative_prompts[1:] if len(negative_prompts) > 1 else []
- progressbar(async_task, 3, 'Loading models ...')
+ progressbar(async_task, 2, 'Loading models ...')
+
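+            # LoRA references found in the prompt are merged with the UI LoRA slots;
+            # the performance LoRAs (LCM / Lightning / Hyper-SD) are appended afterwards.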
+ lora_filenames = modules.util.remove_performance_lora(modules.config.lora_filenames, performance_selection)
+ loras, prompt = parse_lora_references_from_prompt(prompt, loras, modules.config.default_max_lora_number, lora_filenames=lora_filenames)
+ loras += performance_loras
+
pipeline.refresh_everything(refiner_model_name=refiner_model_name, base_model_name=base_model_name,
loras=loras, base_model_additional_loras=base_model_additional_loras,
- use_synthetic_refiner=use_synthetic_refiner)
+ use_synthetic_refiner=use_synthetic_refiner, vae_name=vae_name)
+
+ pipeline.set_clip_skip(clip_skip)
progressbar(async_task, 3, 'Processing prompts ...')
tasks = []
- for i in range(image_number):
- task_seed = (seed + i) % (constants.MAX_SEED + 1) # randint is inclusive, % is not
- task_rng = random.Random(task_seed) # may bind to inpaint noise in the future
- task_prompt = apply_wildcards(prompt, task_rng)
- task_negative_prompt = apply_wildcards(negative_prompt, task_rng)
- task_extra_positive_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_positive_prompts]
- task_extra_negative_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_negative_prompts]
+ for i in range(image_number):
+ if disable_seed_increment:
+ task_seed = seed % (constants.MAX_SEED + 1)
+ else:
+ task_seed = (seed + i) % (constants.MAX_SEED + 1) # randint is inclusive, % is not
+
+ task_rng = random.Random(task_seed) # may bind to inpaint noise in the future
+ task_prompt = apply_wildcards(prompt, task_rng, i, read_wildcards_in_order)
+ task_prompt = apply_arrays(task_prompt, i)
+ task_negative_prompt = apply_wildcards(negative_prompt, task_rng, i, read_wildcards_in_order)
+ task_extra_positive_prompts = [apply_wildcards(pmt, task_rng, i, read_wildcards_in_order) for pmt in
+ extra_positive_prompts]
+ task_extra_negative_prompts = [apply_wildcards(pmt, task_rng, i, read_wildcards_in_order) for pmt in
+ extra_negative_prompts]
positive_basic_workloads = []
negative_basic_workloads = []
+ task_styles = style_selections.copy()
if use_style:
- for s in style_selections:
+ for i, s in enumerate(task_styles):
+ if s == random_style_name:
+ s = get_random_style(task_rng)
+ task_styles[i] = s
p, n = apply_style(s, positive=task_prompt)
positive_basic_workloads = positive_basic_workloads + p
negative_basic_workloads = negative_basic_workloads + n
@@ -400,37 +526,38 @@ def worker():
negative_top_k=len(negative_basic_workloads),
log_positive_prompt='\n'.join([task_prompt] + task_extra_positive_prompts),
log_negative_prompt='\n'.join([task_negative_prompt] + task_extra_negative_prompts),
+ styles=task_styles
))
if use_expansion:
for i, t in enumerate(tasks):
- progressbar(async_task, 5, f'Preparing Fooocus text #{i + 1} ...')
+ progressbar(async_task, 4, f'Preparing Fooocus text #{i + 1} ...')
expansion = pipeline.final_expansion(t['task_prompt'], t['task_seed'])
print(f'[Prompt Expansion] {expansion}')
t['expansion'] = expansion
t['positive'] = copy.deepcopy(t['positive']) + [expansion] # Deep copy.
for i, t in enumerate(tasks):
- progressbar(async_task, 7, f'Encoding positive #{i + 1} ...')
+ progressbar(async_task, 5, f'Encoding positive #{i + 1} ...')
t['c'] = pipeline.clip_encode(texts=t['positive'], pool_top_k=t['positive_top_k'])
for i, t in enumerate(tasks):
if abs(float(cfg_scale) - 1.0) < 1e-4:
t['uc'] = pipeline.clone_cond(t['c'])
else:
- progressbar(async_task, 10, f'Encoding negative #{i + 1} ...')
+ progressbar(async_task, 6, f'Encoding negative #{i + 1} ...')
t['uc'] = pipeline.clip_encode(texts=t['negative'], pool_top_k=t['negative_top_k'])
if len(goals) > 0:
- progressbar(async_task, 13, 'Image processing ...')
+ progressbar(async_task, 7, 'Image processing ...')
if 'vary' in goals:
if 'subtle' in uov_method:
denoising_strength = 0.5
if 'strong' in uov_method:
denoising_strength = 0.85
- if advanced_parameters.overwrite_vary_strength > 0:
- denoising_strength = advanced_parameters.overwrite_vary_strength
+ if overwrite_vary_strength > 0:
+ denoising_strength = overwrite_vary_strength
shape_ceil = get_image_shape_ceil(uov_input_image)
if shape_ceil < 1024:
@@ -443,7 +570,7 @@ def worker():
uov_input_image = set_image_shape_ceil(uov_input_image, shape_ceil)
initial_pixels = core.numpy_to_pytorch(uov_input_image)
- progressbar(async_task, 13, 'VAE encoding ...')
+ progressbar(async_task, 8, 'VAE encoding ...')
candidate_vae, _ = pipeline.get_candidate_vae(
steps=steps,
@@ -460,7 +587,7 @@ def worker():
if 'upscale' in goals:
H, W, C = uov_input_image.shape
- progressbar(async_task, 13, f'Upscaling image from {str((H, W))} ...')
+ progressbar(async_task, 9, f'Upscaling image from {str((H, W))} ...')
uov_input_image = perform_upscale(uov_input_image)
print(f'Image upscaled.')
@@ -493,19 +620,23 @@ def worker():
direct_return = False
if direct_return:
- d = [('Upscale (Fast)', '2x')]
- log(uov_input_image, d)
- yield_result(async_task, uov_input_image, do_not_show_finished_images=True)
+ d = [('Upscale (Fast)', 'upscale_fast', '2x')]
+ if modules.config.default_black_out_nsfw or black_out_nsfw:
+ progressbar(async_task, 100, 'Checking for NSFW content ...')
+ uov_input_image = default_censor(uov_input_image)
+ progressbar(async_task, 100, 'Saving image to system ...')
+ uov_input_image_path = log(uov_input_image, d, output_format=output_format)
+ yield_result(async_task, uov_input_image_path, black_out_nsfw, False, do_not_show_finished_images=True)
return
tiled = True
denoising_strength = 0.382
- if advanced_parameters.overwrite_upscale_strength > 0:
- denoising_strength = advanced_parameters.overwrite_upscale_strength
+ if overwrite_upscale_strength > 0:
+ denoising_strength = overwrite_upscale_strength
initial_pixels = core.numpy_to_pytorch(uov_input_image)
- progressbar(async_task, 13, 'VAE encoding ...')
+ progressbar(async_task, 10, 'VAE encoding ...')
candidate_vae, _ = pipeline.get_candidate_vae(
steps=steps,
@@ -536,34 +667,34 @@ def worker():
H, W, C = inpaint_image.shape
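+                # Horizontal outpainting now pads by 30% of the image width (the removed lines padded by 30% of the height).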
if 'left' in outpaint_selections:
- inpaint_image = np.pad(inpaint_image, [[0, 0], [int(H * 0.3), 0], [0, 0]], mode='edge')
- inpaint_mask = np.pad(inpaint_mask, [[0, 0], [int(H * 0.3), 0]], mode='constant',
+ inpaint_image = np.pad(inpaint_image, [[0, 0], [int(W * 0.3), 0], [0, 0]], mode='edge')
+ inpaint_mask = np.pad(inpaint_mask, [[0, 0], [int(W * 0.3), 0]], mode='constant',
constant_values=255)
if 'right' in outpaint_selections:
- inpaint_image = np.pad(inpaint_image, [[0, 0], [0, int(H * 0.3)], [0, 0]], mode='edge')
- inpaint_mask = np.pad(inpaint_mask, [[0, 0], [0, int(H * 0.3)]], mode='constant',
+ inpaint_image = np.pad(inpaint_image, [[0, 0], [0, int(W * 0.3)], [0, 0]], mode='edge')
+ inpaint_mask = np.pad(inpaint_mask, [[0, 0], [0, int(W * 0.3)]], mode='constant',
constant_values=255)
inpaint_image = np.ascontiguousarray(inpaint_image.copy())
inpaint_mask = np.ascontiguousarray(inpaint_mask.copy())
- advanced_parameters.inpaint_strength = 1.0
- advanced_parameters.inpaint_respective_field = 1.0
+ inpaint_strength = 1.0
+ inpaint_respective_field = 1.0
- denoising_strength = advanced_parameters.inpaint_strength
+ denoising_strength = inpaint_strength
inpaint_worker.current_task = inpaint_worker.InpaintWorker(
image=inpaint_image,
mask=inpaint_mask,
use_fill=denoising_strength > 0.99,
- k=advanced_parameters.inpaint_respective_field
+ k=inpaint_respective_field
)
- if advanced_parameters.debugging_inpaint_preprocessor:
- yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing(),
+ if debugging_inpaint_preprocessor:
+ yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing(), black_out_nsfw,
do_not_show_finished_images=True)
return
- progressbar(async_task, 13, 'VAE Inpaint encoding ...')
+ progressbar(async_task, 11, 'VAE Inpaint encoding ...')
inpaint_pixel_fill = core.numpy_to_pytorch(inpaint_worker.current_task.interested_fill)
inpaint_pixel_image = core.numpy_to_pytorch(inpaint_worker.current_task.interested_image)
@@ -583,7 +714,7 @@ def worker():
latent_swap = None
if candidate_vae_swap is not None:
- progressbar(async_task, 13, 'VAE SD15 encoding ...')
+ progressbar(async_task, 12, 'VAE SD15 encoding ...')
latent_swap = core.encode_vae(
vae=candidate_vae_swap,
pixels=inpaint_pixel_fill)['samples']
@@ -604,7 +735,7 @@ def worker():
model=pipeline.final_unet
)
- if not advanced_parameters.inpaint_disable_initial_latent:
+ if not inpaint_disable_initial_latent:
initial_latent = {'samples': latent_fill}
B, C, H, W = latent_fill.shape
@@ -617,25 +748,25 @@ def worker():
cn_img, cn_stop, cn_weight = task
cn_img = resize_image(HWC3(cn_img), width=width, height=height)
- if not advanced_parameters.skipping_cn_preprocessor:
- cn_img = preprocessors.canny_pyramid(cn_img)
+ if not skipping_cn_preprocessor:
+ cn_img = preprocessors.canny_pyramid(cn_img, canny_low_threshold, canny_high_threshold)
cn_img = HWC3(cn_img)
task[0] = core.numpy_to_pytorch(cn_img)
- if advanced_parameters.debugging_cn_preprocessor:
- yield_result(async_task, cn_img, do_not_show_finished_images=True)
+ if debugging_cn_preprocessor:
+ yield_result(async_task, cn_img, black_out_nsfw, do_not_show_finished_images=True)
return
for task in cn_tasks[flags.cn_cpds]:
cn_img, cn_stop, cn_weight = task
cn_img = resize_image(HWC3(cn_img), width=width, height=height)
- if not advanced_parameters.skipping_cn_preprocessor:
+ if not skipping_cn_preprocessor:
cn_img = preprocessors.cpds(cn_img)
cn_img = HWC3(cn_img)
task[0] = core.numpy_to_pytorch(cn_img)
- if advanced_parameters.debugging_cn_preprocessor:
- yield_result(async_task, cn_img, do_not_show_finished_images=True)
+ if debugging_cn_preprocessor:
+ yield_result(async_task, cn_img, black_out_nsfw, do_not_show_finished_images=True)
return
for task in cn_tasks[flags.cn_ip]:
cn_img, cn_stop, cn_weight = task
@@ -645,22 +776,22 @@ def worker():
cn_img = resize_image(cn_img, width=224, height=224, resize_mode=0)
task[0] = ip_adapter.preprocess(cn_img, ip_adapter_path=ip_adapter_path)
- if advanced_parameters.debugging_cn_preprocessor:
- yield_result(async_task, cn_img, do_not_show_finished_images=True)
+ if debugging_cn_preprocessor:
+ yield_result(async_task, cn_img, black_out_nsfw, do_not_show_finished_images=True)
return
for task in cn_tasks[flags.cn_ip_face]:
cn_img, cn_stop, cn_weight = task
cn_img = HWC3(cn_img)
- if not advanced_parameters.skipping_cn_preprocessor:
+ if not skipping_cn_preprocessor:
cn_img = extras.face_crop.crop_image(cn_img)
# https://github.com/tencent-ailab/IP-Adapter/blob/d580c50a291566bbf9fc7ac0f760506607297e6d/README.md?plain=1#L75
cn_img = resize_image(cn_img, width=224, height=224, resize_mode=0)
task[0] = ip_adapter.preprocess(cn_img, ip_adapter_path=ip_adapter_face_path)
- if advanced_parameters.debugging_cn_preprocessor:
- yield_result(async_task, cn_img, do_not_show_finished_images=True)
+ if debugging_cn_preprocessor:
+ yield_result(async_task, cn_img, black_out_nsfw, do_not_show_finished_images=True)
return
all_ip_tasks = cn_tasks[flags.cn_ip] + cn_tasks[flags.cn_ip_face]
@@ -668,14 +799,14 @@ def worker():
if len(all_ip_tasks) > 0:
pipeline.final_unet = ip_adapter.patch_model(pipeline.final_unet, all_ip_tasks)
- if advanced_parameters.freeu_enabled:
+ if freeu_enabled:
print(f'FreeU is enabled!')
pipeline.final_unet = core.apply_freeu(
pipeline.final_unet,
- advanced_parameters.freeu_b1,
- advanced_parameters.freeu_b2,
- advanced_parameters.freeu_s1,
- advanced_parameters.freeu_s2
+ freeu_b1,
+ freeu_b2,
+ freeu_s1,
+ freeu_s2
)
all_steps = steps * image_number
@@ -695,33 +826,53 @@ def worker():
final_sampler_name = sampler_name
final_scheduler_name = scheduler_name
- if scheduler_name == 'lcm':
+ if scheduler_name in ['lcm', 'tcd']:
final_scheduler_name = 'sgm_uniform'
- if pipeline.final_unet is not None:
- pipeline.final_unet = core.opModelSamplingDiscrete.patch(
- pipeline.final_unet,
- sampling='lcm',
- zsnr=False)[0]
- if pipeline.final_refiner_unet is not None:
- pipeline.final_refiner_unet = core.opModelSamplingDiscrete.patch(
- pipeline.final_refiner_unet,
- sampling='lcm',
- zsnr=False)[0]
- print('Using lcm scheduler.')
- async_task.yields.append(['preview', (13, 'Moving model to GPU ...', None)])
+ def patch_discrete(unet):
+ return core.opModelSamplingDiscrete.patch(
+                    unet,
+ sampling=scheduler_name,
+ zsnr=False)[0]
+
+ if pipeline.final_unet is not None:
+ pipeline.final_unet = patch_discrete(pipeline.final_unet)
+ if pipeline.final_refiner_unet is not None:
+ pipeline.final_refiner_unet = patch_discrete(pipeline.final_refiner_unet)
+ print(f'Using {scheduler_name} scheduler.')
+ elif scheduler_name == 'edm_playground_v2.5':
+ final_scheduler_name = 'karras'
+
+ def patch_edm(unet):
+ return core.opModelSamplingContinuousEDM.patch(
+ unet,
+ sampling=scheduler_name,
+ sigma_max=120.0,
+ sigma_min=0.002)[0]
+
+ if pipeline.final_unet is not None:
+ pipeline.final_unet = patch_edm(pipeline.final_unet)
+ if pipeline.final_refiner_unet is not None:
+ pipeline.final_refiner_unet = patch_edm(pipeline.final_refiner_unet)
+
+ print(f'Using {scheduler_name} scheduler.')
+
+ async_task.yields.append(['preview', (flags.preparation_step_count, 'Moving model to GPU ...', None)])
def callback(step, x0, x, total_steps, y):
done_steps = current_task_id * steps + step
async_task.yields.append(['preview', (
- int(15.0 + 85.0 * float(done_steps) / float(all_steps)),
- f'Step {step}/{total_steps} in the {current_task_id + 1}-th Sampling',
- y)])
+ int(flags.preparation_step_count + (100 - flags.preparation_step_count) * float(done_steps) / float(all_steps)),
+ f'Sampling step {step + 1}/{total_steps}, image {current_task_id + 1}/{image_number} ...', y)])
for current_task_id, task in enumerate(tasks):
+ current_progress = int(flags.preparation_step_count + (100 - flags.preparation_step_count) * float(current_task_id * steps) / float(all_steps))
+ progressbar(async_task, current_progress, f'Preparing task {current_task_id + 1}/{image_number} ...')
execution_start_time = time.perf_counter()
try:
+ if async_task.last_stop is not False:
+ ldm_patched.modules.model_management.interrupt_current_processing()
positive_cond, negative_cond = task['c'], task['uc']
if 'cn' in goals:
@@ -749,7 +900,8 @@ def worker():
denoise=denoising_strength,
tiled=tiled,
cfg_scale=cfg_scale,
- refiner_swap_method=refiner_swap_method
+ refiner_swap_method=refiner_swap_method,
+ disable_preview=disable_preview
)
del task['c'], task['uc'], positive_cond, negative_cond # Save memory
@@ -757,37 +909,75 @@ def worker():
if inpaint_worker.current_task is not None:
imgs = [inpaint_worker.current_task.post_process(x) for x in imgs]
+ img_paths = []
+ current_progress = int(flags.preparation_step_count + (100 - flags.preparation_step_count) * float((current_task_id + 1) * steps) / float(all_steps))
+ if modules.config.default_black_out_nsfw or black_out_nsfw:
+ progressbar(async_task, current_progress, 'Checking for NSFW content ...')
+ imgs = default_censor(imgs)
+
+ progressbar(async_task, current_progress, f'Saving image {current_task_id + 1}/{image_number} to system ...')
for x in imgs:
- d = [
- ('Prompt', task['log_positive_prompt']),
- ('Negative Prompt', task['log_negative_prompt']),
- ('Fooocus V2 Expansion', task['expansion']),
- ('Styles', str(raw_style_selections)),
- ('Performance', performance_selection),
- ('Resolution', str((width, height))),
- ('Sharpness', sharpness),
- ('Guidance Scale', guidance_scale),
- ('ADM Guidance', str((
- modules.patch.positive_adm_scale,
- modules.patch.negative_adm_scale,
- modules.patch.adm_scaler_end))),
- ('Base Model', base_model_name),
- ('Refiner Model', refiner_model_name),
- ('Refiner Switch', refiner_switch),
- ('Sampler', sampler_name),
- ('Scheduler', scheduler_name),
- ('Seed', task['task_seed']),
- ]
+ d = [('Prompt', 'prompt', task['log_positive_prompt']),
+ ('Negative Prompt', 'negative_prompt', task['log_negative_prompt']),
+ ('Fooocus V2 Expansion', 'prompt_expansion', task['expansion']),
+ ('Styles', 'styles',
+ str(task['styles'] if not use_expansion else [fooocus_expansion] + task['styles'])),
+ ('Performance', 'performance', performance_selection.value)]
+
+ if performance_selection.steps() != steps:
+ d.append(('Steps', 'steps', steps))
+
+ d += [('Resolution', 'resolution', str((width, height))),
+ ('Guidance Scale', 'guidance_scale', guidance_scale),
+ ('Sharpness', 'sharpness', sharpness),
+ ('ADM Guidance', 'adm_guidance', str((
+ modules.patch.patch_settings[pid].positive_adm_scale,
+ modules.patch.patch_settings[pid].negative_adm_scale,
+ modules.patch.patch_settings[pid].adm_scaler_end))),
+ ('Base Model', 'base_model', base_model_name),
+ ('Refiner Model', 'refiner_model', refiner_model_name),
+ ('Refiner Switch', 'refiner_switch', refiner_switch)]
+
+ if refiner_model_name != 'None':
+ if overwrite_switch > 0:
+ d.append(('Overwrite Switch', 'overwrite_switch', overwrite_switch))
+ if refiner_swap_method != flags.refiner_swap_method:
+ d.append(('Refiner Swap Method', 'refiner_swap_method', refiner_swap_method))
+ if modules.patch.patch_settings[pid].adaptive_cfg != modules.config.default_cfg_tsnr:
+ d.append(
+ ('CFG Mimicking from TSNR', 'adaptive_cfg', modules.patch.patch_settings[pid].adaptive_cfg))
+
+ if clip_skip > 1:
+ d.append(('CLIP Skip', 'clip_skip', clip_skip))
+ d.append(('Sampler', 'sampler', sampler_name))
+ d.append(('Scheduler', 'scheduler', scheduler_name))
+ d.append(('VAE', 'vae', vae_name))
+ d.append(('Seed', 'seed', str(task['task_seed'])))
+
+ if freeu_enabled:
+ d.append(('FreeU', 'freeu', str((freeu_b1, freeu_b2, freeu_s1, freeu_s2))))
+
for li, (n, w) in enumerate(loras):
if n != 'None':
- d.append((f'LoRA {li + 1}', f'{n} : {w}'))
- d.append(('Version', 'v' + fooocus_version.version))
- log(x, d)
+ d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}'))
- yield_result(async_task, imgs, do_not_show_finished_images=len(tasks) == 1)
+ metadata_parser = None
+ if save_metadata_to_images:
+ metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
+ metadata_parser.set_data(task['log_positive_prompt'], task['positive'],
+ task['log_negative_prompt'], task['negative'],
+ steps, base_model_name, refiner_model_name, loras, vae_name)
+ d.append(('Metadata Scheme', 'metadata_scheme',
+ metadata_scheme.value if save_metadata_to_images else save_metadata_to_images))
+ d.append(('Version', 'version', 'Fooocus v' + fooocus_version.version))
+ img_paths.append(log(x, d, metadata_parser, output_format, task))
+
+ yield_result(async_task, img_paths, black_out_nsfw, False,
+ do_not_show_finished_images=len(tasks) == 1 or disable_intermediate_results)
except ldm_patched.modules.model_management.InterruptProcessingException as e:
- if shared.last_stop == 'skip':
+ if async_task.last_stop == 'skip':
print('User skipped')
+ async_task.last_stop = False
continue
else:
print('User stopped')
@@ -795,21 +985,27 @@ def worker():
execution_time = time.perf_counter() - execution_start_time
print(f'Generating and saving time: {execution_time:.2f} seconds')
-
+ async_task.processing = False
return
while True:
time.sleep(0.01)
if len(async_tasks) > 0:
task = async_tasks.pop(0)
+ generate_image_grid = task.args.pop(0)
+
try:
handler(task)
- build_image_wall(task)
+ if generate_image_grid:
+ build_image_wall(task)
task.yields.append(['finish', task.results])
pipeline.prepare_text_encoder(async_call=True)
except:
traceback.print_exc()
task.yields.append(['finish', task.results])
+ finally:
+ if pid in modules.patch.patch_settings:
+ del modules.patch.patch_settings[pid]
pass
diff --git a/modules/config.py b/modules/config.py
index 9cc61788..a6767c37 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -2,24 +2,43 @@ import os
import json
import math
import numbers
+
import args_manager
+import tempfile
import modules.flags
import modules.sdxl_styles
from modules.model_loader import load_file_from_url
-from modules.util import get_files_from_folder
+from modules.extra_utils import makedirs_with_log, get_files_from_folder, try_eval_env_var
+from modules.flags import OutputFormat, Performance, MetadataScheme
-config_path = os.path.abspath("./config.txt")
-config_example_path = os.path.abspath("config_modification_tutorial.txt")
+def get_config_path(key, default_value):
+ env = os.getenv(key)
+ if env is not None and isinstance(env, str):
+ print(f"Environment: {key} = {env}")
+ return env
+ else:
+ return os.path.abspath(default_value)
+
+wildcards_max_bfs_depth = 64
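+# the two config paths below can be overridden by environment variables of the same name (resolved by get_config_path above)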
+config_path = get_config_path('config_path', "./config.txt")
+config_example_path = get_config_path('config_example_path', "config_modification_tutorial.txt")
config_dict = {}
always_save_keys = []
visited_keys = []
+try:
+ with open(os.path.abspath(f'./presets/default.json'), "r", encoding="utf-8") as json_file:
+ config_dict.update(json.load(json_file))
+except Exception as e:
+ print(f'Load default preset failed.')
+ print(e)
+
try:
if os.path.exists(config_path):
with open(config_path, "r", encoding="utf-8") as json_file:
- config_dict = json.load(json_file)
+ config_dict.update(json.load(json_file))
always_save_keys = list(config_dict.keys())
except Exception as e:
print(f'Failed to load config file "{config_path}" . The reason is: {str(e)}')
@@ -79,23 +98,50 @@ def try_load_deprecated_user_path_config():
try_load_deprecated_user_path_config()
+
+def get_presets():
+ preset_folder = 'presets'
+ presets = ['initial']
+ if not os.path.exists(preset_folder):
+ print('No presets found.')
+ return presets
+
+ return presets + [f[:f.index('.json')] for f in os.listdir(preset_folder) if f.endswith('.json')]
+
+
+def try_get_preset_content(preset):
+ if isinstance(preset, str):
+ preset_path = os.path.abspath(f'./presets/{preset}.json')
+ try:
+ if os.path.exists(preset_path):
+ with open(preset_path, "r", encoding="utf-8") as json_file:
+ json_content = json.load(json_file)
+ print(f'Loaded preset: {preset_path}')
+ return json_content
+ else:
+ raise FileNotFoundError
+ except Exception as e:
+ print(f'Load preset [{preset_path}] failed')
+ print(e)
+ return {}
+
+available_presets = get_presets()
preset = args_manager.args.preset
+config_dict.update(try_get_preset_content(preset))
-if isinstance(preset, str):
- preset_path = os.path.abspath(f'./presets/{preset}.json')
- try:
- if os.path.exists(preset_path):
- with open(preset_path, "r", encoding="utf-8") as json_file:
- config_dict.update(json.load(json_file))
- print(f'Loaded preset: {preset_path}')
- else:
- raise FileNotFoundError
- except Exception as e:
- print(f'Load preset [{preset_path}] failed')
- print(e)
+def get_path_output() -> str:
+ """
+    Check the output path argument and override the default output path if one is given.
+ """
+ global config_dict
+ path_output = get_dir_or_set_default('path_outputs', '../outputs/', make_directory=True)
+ if args_manager.args.output_path:
+ print(f'Overriding config value path_outputs with {args_manager.args.output_path}')
+ config_dict['path_outputs'] = path_output = args_manager.args.output_path
+ return path_output
-def get_dir_or_set_default(key, default_value):
+def get_dir_or_set_default(key, default_value, as_array=False, make_directory=False):
global config_dict, visited_keys, always_save_keys
if key not in visited_keys:
@@ -104,36 +150,69 @@ def get_dir_or_set_default(key, default_value):
if key not in always_save_keys:
always_save_keys.append(key)
- v = config_dict.get(key, None)
- if isinstance(v, str) and os.path.exists(v) and os.path.isdir(v):
- return v
+ v = os.getenv(key)
+ if v is not None:
+ print(f"Environment: {key} = {v}")
+ config_dict[key] = v
+ else:
+ v = config_dict.get(key, None)
+
+ if isinstance(v, str):
+ if make_directory:
+ makedirs_with_log(v)
+ if os.path.exists(v) and os.path.isdir(v):
+ return v if not as_array else [v]
+ elif isinstance(v, list):
+ if make_directory:
+ for d in v:
+ makedirs_with_log(d)
+ if all([os.path.exists(d) and os.path.isdir(d) for d in v]):
+ return v
+
+ if v is not None:
+ print(f'Failed to load config key: {json.dumps({key:v})} is invalid or does not exist; will use {json.dumps({key:default_value})} instead.')
+ if isinstance(default_value, list):
+ dp = []
+ for path in default_value:
+ abs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), path))
+ dp.append(abs_path)
+ os.makedirs(abs_path, exist_ok=True)
else:
- if v is not None:
- print(f'Failed to load config key: {json.dumps({key:v})} is invalid or does not exist; will use {json.dumps({key:default_value})} instead.')
dp = os.path.abspath(os.path.join(os.path.dirname(__file__), default_value))
os.makedirs(dp, exist_ok=True)
- config_dict[key] = dp
- return dp
+ if as_array:
+ dp = [dp]
+ config_dict[key] = dp
+ return dp
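+
+# checkpoints and LoRAs below may be configured as a single directory or a list of directories (requested with as_array=True)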
-path_checkpoints = get_dir_or_set_default('path_checkpoints', '../models/checkpoints/')
-path_loras = get_dir_or_set_default('path_loras', '../models/loras/')
+paths_checkpoints = get_dir_or_set_default('path_checkpoints', ['../models/checkpoints/'], True)
+paths_loras = get_dir_or_set_default('path_loras', ['../models/loras/'], True)
path_embeddings = get_dir_or_set_default('path_embeddings', '../models/embeddings/')
path_vae_approx = get_dir_or_set_default('path_vae_approx', '../models/vae_approx/')
+path_vae = get_dir_or_set_default('path_vae', '../models/vae/')
path_upscale_models = get_dir_or_set_default('path_upscale_models', '../models/upscale_models/')
path_inpaint = get_dir_or_set_default('path_inpaint', '../models/inpaint/')
path_controlnet = get_dir_or_set_default('path_controlnet', '../models/controlnet/')
path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vision/')
path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion')
-path_outputs = get_dir_or_set_default('path_outputs', '../outputs/')
+path_wildcards = get_dir_or_set_default('path_wildcards', '../wildcards/')
+path_safety_checker = get_dir_or_set_default('path_safety_checker', '../models/safety_checker/')
+path_outputs = get_path_output()
-def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False):
+def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False, expected_type=None):
global config_dict, visited_keys
if key not in visited_keys:
visited_keys.append(key)
+ v = os.getenv(key)
+ if v is not None:
+ v = try_eval_env_var(v, expected_type)
+ print(f"Environment: {key} = {v}")
+ config_dict[key] = v
+
if key not in config_dict:
config_dict[key] = default_value
return default_value
@@ -151,66 +230,145 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_
return default_value
-default_base_model_name = get_config_item_or_set_default(
- key='default_model',
- default_value='juggernautXL_version6Rundiffusion.safetensors',
- validator=lambda x: isinstance(x, str)
+def init_temp_path(path: str | None, default_path: str) -> str:
+ if args_manager.args.temp_path:
+ path = args_manager.args.temp_path
+
+ if path != '' and path != default_path:
+ try:
+ if not os.path.isabs(path):
+ path = os.path.abspath(path)
+ os.makedirs(path, exist_ok=True)
+ print(f'Using temp path {path}')
+ return path
+ except Exception as e:
+ print(f'Could not create temp path {path}. Reason: {e}')
+ print(f'Using default temp path {default_path} instead.')
+
+ os.makedirs(default_path, exist_ok=True)
+ return default_path
+
+
+default_temp_path = os.path.join(tempfile.gettempdir(), 'fooocus')
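+# e.g. /tmp/fooocus on Linux; a different location can be set via the temp_path config key or the temp_path launch argument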
+temp_path = init_temp_path(get_config_item_or_set_default(
+ key='temp_path',
+ default_value=default_temp_path,
+ validator=lambda x: isinstance(x, str),
+ expected_type=str
+), default_temp_path)
+temp_path_cleanup_on_launch = get_config_item_or_set_default(
+ key='temp_path_cleanup_on_launch',
+ default_value=True,
+ validator=lambda x: isinstance(x, bool),
+ expected_type=bool
)
-default_refiner_model_name = get_config_item_or_set_default(
+default_base_model_name = default_model = get_config_item_or_set_default(
+ key='default_model',
+ default_value='model.safetensors',
+ validator=lambda x: isinstance(x, str),
+ expected_type=str
+)
+previous_default_models = get_config_item_or_set_default(
+ key='previous_default_models',
+ default_value=[],
+ validator=lambda x: isinstance(x, list) and all(isinstance(k, str) for k in x),
+ expected_type=list
+)
+default_refiner_model_name = default_refiner = get_config_item_or_set_default(
key='default_refiner',
default_value='None',
- validator=lambda x: isinstance(x, str)
+ validator=lambda x: isinstance(x, str),
+ expected_type=str
)
default_refiner_switch = get_config_item_or_set_default(
key='default_refiner_switch',
- default_value=0.5,
- validator=lambda x: isinstance(x, numbers.Number) and 0 <= x <= 1
+ default_value=0.8,
+ validator=lambda x: isinstance(x, numbers.Number) and 0 <= x <= 1,
+ expected_type=numbers.Number
+)
+default_loras_min_weight = get_config_item_or_set_default(
+ key='default_loras_min_weight',
+ default_value=-2,
+ validator=lambda x: isinstance(x, numbers.Number) and -10 <= x <= 10,
+ expected_type=numbers.Number
+)
+default_loras_max_weight = get_config_item_or_set_default(
+ key='default_loras_max_weight',
+ default_value=2,
+ validator=lambda x: isinstance(x, numbers.Number) and -10 <= x <= 10,
+ expected_type=numbers.Number
)
default_loras = get_config_item_or_set_default(
key='default_loras',
default_value=[
[
- "sd_xl_offset_example-lora_1.0.safetensors",
- 0.1
- ],
- [
+ True,
"None",
1.0
],
[
+ True,
"None",
1.0
],
[
+ True,
"None",
1.0
],
[
+ True,
+ "None",
+ 1.0
+ ],
+ [
+ True,
"None",
1.0
]
],
- validator=lambda x: isinstance(x, list) and all(len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number) for y in x)
+ validator=lambda x: isinstance(x, list) and all(
+ len(y) == 3 and isinstance(y[0], bool) and isinstance(y[1], str) and isinstance(y[2], numbers.Number)
+ or len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number)
+ for y in x),
+ expected_type=list
+)
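+# normalize each entry to an (enabled, name, weight) triple; legacy two-element entries are treated as enabled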
+default_loras = [(y[0], y[1], y[2]) if len(y) == 3 else (True, y[0], y[1]) for y in default_loras]
+default_max_lora_number = get_config_item_or_set_default(
+ key='default_max_lora_number',
+ default_value=len(default_loras) if isinstance(default_loras, list) and len(default_loras) > 0 else 5,
+ validator=lambda x: isinstance(x, int) and x >= 1,
+ expected_type=int
)
default_cfg_scale = get_config_item_or_set_default(
key='default_cfg_scale',
- default_value=4.0,
- validator=lambda x: isinstance(x, numbers.Number)
+ default_value=7.0,
+ validator=lambda x: isinstance(x, numbers.Number),
+ expected_type=numbers.Number
)
default_sample_sharpness = get_config_item_or_set_default(
key='default_sample_sharpness',
default_value=2.0,
- validator=lambda x: isinstance(x, numbers.Number)
+ validator=lambda x: isinstance(x, numbers.Number),
+ expected_type=numbers.Number
)
default_sampler = get_config_item_or_set_default(
key='default_sampler',
default_value='dpmpp_2m_sde_gpu',
- validator=lambda x: x in modules.flags.sampler_list
+ validator=lambda x: x in modules.flags.sampler_list,
+ expected_type=str
)
default_scheduler = get_config_item_or_set_default(
key='default_scheduler',
default_value='karras',
- validator=lambda x: x in modules.flags.scheduler_list
+ validator=lambda x: x in modules.flags.scheduler_list,
+ expected_type=str
+)
+default_vae = get_config_item_or_set_default(
+ key='default_vae',
+ default_value=modules.flags.default_vae,
+ validator=lambda x: isinstance(x, str),
+ expected_type=str
)
default_styles = get_config_item_or_set_default(
key='default_styles',
@@ -219,121 +377,178 @@ default_styles = get_config_item_or_set_default(
"Fooocus Enhance",
"Fooocus Sharp"
],
- validator=lambda x: isinstance(x, list) and all(y in modules.sdxl_styles.legal_style_names for y in x)
+ validator=lambda x: isinstance(x, list) and all(y in modules.sdxl_styles.legal_style_names for y in x),
+ expected_type=list
)
default_prompt_negative = get_config_item_or_set_default(
key='default_prompt_negative',
default_value='',
validator=lambda x: isinstance(x, str),
- disable_empty_as_none=True
+ disable_empty_as_none=True,
+ expected_type=str
)
default_prompt = get_config_item_or_set_default(
key='default_prompt',
default_value='',
validator=lambda x: isinstance(x, str),
- disable_empty_as_none=True
+ disable_empty_as_none=True,
+ expected_type=str
)
default_performance = get_config_item_or_set_default(
key='default_performance',
- default_value='Speed',
- validator=lambda x: x in modules.flags.performance_selections
+ default_value=Performance.SPEED.value,
+ validator=lambda x: x in Performance.list(),
+ expected_type=str
)
default_advanced_checkbox = get_config_item_or_set_default(
key='default_advanced_checkbox',
default_value=False,
- validator=lambda x: isinstance(x, bool)
+ validator=lambda x: isinstance(x, bool),
+ expected_type=bool
+)
+default_max_image_number = get_config_item_or_set_default(
+ key='default_max_image_number',
+ default_value=32,
+ validator=lambda x: isinstance(x, int) and x >= 1,
+ expected_type=int
+)
+default_output_format = get_config_item_or_set_default(
+ key='default_output_format',
+ default_value='png',
+ validator=lambda x: x in OutputFormat.list(),
+ expected_type=str
)
default_image_number = get_config_item_or_set_default(
key='default_image_number',
default_value=2,
- validator=lambda x: isinstance(x, int) and 1 <= x <= 32
+ validator=lambda x: isinstance(x, int) and 1 <= x <= default_max_image_number,
+ expected_type=int
)
checkpoint_downloads = get_config_item_or_set_default(
key='checkpoint_downloads',
- default_value={
- "juggernautXL_version6Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors"
- },
- validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items())
+ default_value={},
+ validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()),
+ expected_type=dict
)
lora_downloads = get_config_item_or_set_default(
key='lora_downloads',
- default_value={
- "sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors"
- },
- validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items())
+ default_value={},
+ validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()),
+ expected_type=dict
)
embeddings_downloads = get_config_item_or_set_default(
key='embeddings_downloads',
default_value={},
- validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items())
+ validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()),
+ expected_type=dict
)
available_aspect_ratios = get_config_item_or_set_default(
key='available_aspect_ratios',
- default_value=[
- '704*1408', '704*1344', '768*1344', '768*1280', '832*1216', '832*1152',
- '896*1152', '896*1088', '960*1088', '960*1024', '1024*1024', '1024*960',
- '1088*960', '1088*896', '1152*896', '1152*832', '1216*832', '1280*768',
- '1344*768', '1344*704', '1408*704', '1472*704', '1536*640', '1600*640',
- '1664*576', '1728*576'
- ],
- validator=lambda x: isinstance(x, list) and all('*' in v for v in x) and len(x) > 1
+ default_value=modules.flags.sdxl_aspect_ratios,
+ validator=lambda x: isinstance(x, list) and all('*' in v for v in x) and len(x) > 1,
+ expected_type=list
)
default_aspect_ratio = get_config_item_or_set_default(
key='default_aspect_ratio',
default_value='1152*896' if '1152*896' in available_aspect_ratios else available_aspect_ratios[0],
- validator=lambda x: x in available_aspect_ratios
+ validator=lambda x: x in available_aspect_ratios,
+ expected_type=str
)
default_inpaint_engine_version = get_config_item_or_set_default(
key='default_inpaint_engine_version',
default_value='v2.6',
- validator=lambda x: x in modules.flags.inpaint_engine_versions
+ validator=lambda x: x in modules.flags.inpaint_engine_versions,
+ expected_type=str
)
default_cfg_tsnr = get_config_item_or_set_default(
key='default_cfg_tsnr',
default_value=7.0,
- validator=lambda x: isinstance(x, numbers.Number)
+ validator=lambda x: isinstance(x, numbers.Number),
+ expected_type=numbers.Number
+)
+default_clip_skip = get_config_item_or_set_default(
+ key='default_clip_skip',
+ default_value=2,
+ validator=lambda x: isinstance(x, int) and 1 <= x <= modules.flags.clip_skip_max,
+ expected_type=int
)
default_overwrite_step = get_config_item_or_set_default(
key='default_overwrite_step',
default_value=-1,
- validator=lambda x: isinstance(x, int)
+ validator=lambda x: isinstance(x, int),
+ expected_type=int
)
default_overwrite_switch = get_config_item_or_set_default(
key='default_overwrite_switch',
default_value=-1,
- validator=lambda x: isinstance(x, int)
+ validator=lambda x: isinstance(x, int),
+ expected_type=int
)
example_inpaint_prompts = get_config_item_or_set_default(
key='example_inpaint_prompts',
default_value=[
'highly detailed face', 'detailed girl face', 'detailed man face', 'detailed hand', 'beautiful eyes'
],
- validator=lambda x: isinstance(x, list) and all(isinstance(v, str) for v in x)
+ validator=lambda x: isinstance(x, list) and all(isinstance(v, str) for v in x),
+ expected_type=list
+)
+default_black_out_nsfw = get_config_item_or_set_default(
+ key='default_black_out_nsfw',
+ default_value=False,
+ validator=lambda x: isinstance(x, bool),
+ expected_type=bool
+)
+default_save_metadata_to_images = get_config_item_or_set_default(
+ key='default_save_metadata_to_images',
+ default_value=False,
+ validator=lambda x: isinstance(x, bool),
+ expected_type=bool
+)
+default_metadata_scheme = get_config_item_or_set_default(
+ key='default_metadata_scheme',
+ default_value=MetadataScheme.FOOOCUS.value,
+ validator=lambda x: x in [y[1] for y in modules.flags.metadata_scheme if y[1] == x],
+ expected_type=str
+)
+metadata_created_by = get_config_item_or_set_default(
+ key='metadata_created_by',
+ default_value='',
+ validator=lambda x: isinstance(x, str),
+ expected_type=str
)
example_inpaint_prompts = [[x] for x in example_inpaint_prompts]
-config_dict["default_loras"] = default_loras = default_loras[:5] + [['None', 1.0] for _ in range(5 - len(default_loras))]
-
-possible_preset_keys = [
- "default_model",
- "default_refiner",
- "default_refiner_switch",
- "default_loras",
- "default_cfg_scale",
- "default_sample_sharpness",
- "default_sampler",
- "default_scheduler",
- "default_performance",
- "default_prompt",
- "default_prompt_negative",
- "default_styles",
- "default_aspect_ratio",
- "checkpoint_downloads",
- "embeddings_downloads",
- "lora_downloads",
-]
+config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [[True, 'None', 1.0] for _ in range(default_max_lora_number - len(default_loras))]
+# mapping config to meta parameter
+possible_preset_keys = {
+ "default_model": "base_model",
+ "default_refiner": "refiner_model",
+ "default_refiner_switch": "refiner_switch",
+ "previous_default_models": "previous_default_models",
+ "default_loras_min_weight": "default_loras_min_weight",
+ "default_loras_max_weight": "default_loras_max_weight",
+ "default_loras": "",
+ "default_cfg_scale": "guidance_scale",
+ "default_sample_sharpness": "sharpness",
+ "default_cfg_tsnr": "adaptive_cfg",
+ "default_clip_skip": "clip_skip",
+ "default_sampler": "sampler",
+ "default_scheduler": "scheduler",
+ "default_overwrite_step": "steps",
+ "default_performance": "performance",
+ "default_image_number": "image_number",
+ "default_prompt": "prompt",
+ "default_prompt_negative": "negative_prompt",
+ "default_styles": "styles",
+ "default_aspect_ratio": "resolution",
+ "default_save_metadata_to_images": "default_save_metadata_to_images",
+ "checkpoint_downloads": "checkpoint_downloads",
+ "embeddings_downloads": "embeddings_downloads",
+ "lora_downloads": "lora_downloads",
+ "default_vae": "vae"
+}
REWRITE_PRESET = False
@@ -353,7 +568,7 @@ def add_ratio(x):
default_aspect_ratio = add_ratio(default_aspect_ratio)
-available_aspect_ratios = [add_ratio(x) for x in available_aspect_ratios]
+available_aspect_ratios_labels = [add_ratio(x) for x in available_aspect_ratios]
# Only write config in the first launch.
@@ -372,21 +587,32 @@ with open(config_example_path, "w", encoding="utf-8") as json_file:
'and there is no "," before the last "}". \n\n\n')
json.dump({k: config_dict[k] for k in visited_keys}, json_file, indent=4)
-
-os.makedirs(path_outputs, exist_ok=True)
-
model_filenames = []
lora_filenames = []
+vae_filenames = []
+wildcard_filenames = []
-def get_model_filenames(folder_path, name_filter=None):
- return get_files_from_folder(folder_path, ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch'], name_filter)
+def get_model_filenames(folder_paths, extensions=None, name_filter=None):
+ if extensions is None:
+ extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch']
+ files = []
+
+ if not isinstance(folder_paths, list):
+ folder_paths = [folder_paths]
+ for folder in folder_paths:
+ files += get_files_from_folder(folder, extensions, name_filter)
+
+ return files
-def update_all_model_names():
- global model_filenames, lora_filenames
- model_filenames = get_model_filenames(path_checkpoints)
- lora_filenames = get_model_filenames(path_loras)
+def update_files():
+ global model_filenames, lora_filenames, vae_filenames, wildcard_filenames, available_presets
+ model_filenames = get_model_filenames(paths_checkpoints)
+ lora_filenames = get_model_filenames(paths_loras)
+ vae_filenames = get_model_filenames(path_vae)
+ wildcard_filenames = get_files_from_folder(path_wildcards, ['.txt'])
+ available_presets = get_presets()
return
@@ -431,10 +657,28 @@ def downloading_inpaint_models(v):
def downloading_sdxl_lcm_lora():
load_file_from_url(
url='https://huggingface.co/lllyasviel/misc/resolve/main/sdxl_lcm_lora.safetensors',
- model_dir=path_loras,
- file_name='sdxl_lcm_lora.safetensors'
+ model_dir=paths_loras[0],
+ file_name=modules.flags.PerformanceLoRA.EXTREME_SPEED.value
)
- return 'sdxl_lcm_lora.safetensors'
+ return modules.flags.PerformanceLoRA.EXTREME_SPEED.value
+
+
+def downloading_sdxl_lightning_lora():
+ load_file_from_url(
+ url='https://huggingface.co/mashb1t/misc/resolve/main/sdxl_lightning_4step_lora.safetensors',
+ model_dir=paths_loras[0],
+ file_name=modules.flags.PerformanceLoRA.LIGHTNING.value
+ )
+ return modules.flags.PerformanceLoRA.LIGHTNING.value
+
+
+def downloading_sdxl_hyper_sd_lora():
+ load_file_from_url(
+ url='https://huggingface.co/mashb1t/misc/resolve/main/sdxl_hyper_sd_4step_lora.safetensors',
+ model_dir=paths_loras[0],
+ file_name=modules.flags.PerformanceLoRA.HYPER_SD.value
+ )
+ return modules.flags.PerformanceLoRA.HYPER_SD.value
def downloading_controlnet_canny():
@@ -501,5 +745,13 @@ def downloading_upscale_model():
)
return os.path.join(path_upscale_models, 'fooocus_upscaler_s409985e5.bin')
+def downloading_safety_checker_model():
+ load_file_from_url(
+ url='https://huggingface.co/mashb1t/misc/resolve/main/stable-diffusion-safety-checker.bin',
+ model_dir=path_safety_checker,
+ file_name='stable-diffusion-safety-checker.bin'
+ )
+ return os.path.join(path_safety_checker, 'stable-diffusion-safety-checker.bin')
-update_all_model_names()
+
+update_files()
diff --git a/modules/core.py b/modules/core.py
index 989b8e32..78c89759 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -1,8 +1,3 @@
-from modules.patch import patch_all
-
-patch_all()
-
-
import os
import einops
import torch
@@ -16,7 +11,6 @@ import ldm_patched.modules.controlnet
import modules.sample_hijack
import ldm_patched.modules.samplers
import ldm_patched.modules.latent_formats
-import modules.advanced_parameters
from ldm_patched.modules.sd import load_checkpoint_guess_config
from ldm_patched.contrib.external import VAEDecode, EmptyLatentImage, VAEEncode, VAEEncodeTiled, VAEDecodeTiled, \
@@ -24,10 +18,10 @@ from ldm_patched.contrib.external import VAEDecode, EmptyLatentImage, VAEEncode,
from ldm_patched.contrib.external_freelunch import FreeU_V2
from ldm_patched.modules.sample import prepare_mask
from modules.lora import match_lora
+from modules.util import get_file_from_folder_list
from ldm_patched.modules.lora import model_lora_keys_unet, model_lora_keys_clip
from modules.config import path_embeddings
-from ldm_patched.contrib.external_model_advanced import ModelSamplingDiscrete
-
+from ldm_patched.contrib.external_model_advanced import ModelSamplingDiscrete, ModelSamplingContinuousEDM
opEmptyLatentImage = EmptyLatentImage()
opVAEDecode = VAEDecode()
@@ -37,15 +31,17 @@ opVAEEncodeTiled = VAEEncodeTiled()
opControlNetApplyAdvanced = ControlNetApplyAdvanced()
opFreeU = FreeU_V2()
opModelSamplingDiscrete = ModelSamplingDiscrete()
+opModelSamplingContinuousEDM = ModelSamplingContinuousEDM()
class StableDiffusionModel:
- def __init__(self, unet=None, vae=None, clip=None, clip_vision=None, filename=None):
+ def __init__(self, unet=None, vae=None, clip=None, clip_vision=None, filename=None, vae_filename=None):
self.unet = unet
self.vae = vae
self.clip = clip
self.clip_vision = clip_vision
self.filename = filename
+ self.vae_filename = vae_filename
self.unet_with_lora = unet
self.clip_with_lora = clip
self.visited_loras = ''
@@ -78,14 +74,14 @@ class StableDiffusionModel:
loras_to_load = []
- for name, weight in loras:
- if name == 'None':
+ for filename, weight in loras:
+ if filename == 'None':
continue
- if os.path.exists(name):
- lora_filename = name
+ if os.path.exists(filename):
+ lora_filename = filename
else:
- lora_filename = os.path.join(modules.config.path_loras, name)
+ lora_filename = get_file_from_folder_list(filename, modules.config.paths_loras)
if not os.path.exists(lora_filename):
print(f'Lora file not found: {lora_filename}')
@@ -147,9 +143,10 @@ def apply_controlnet(positive, negative, control_net, image, strength, start_per
@torch.no_grad()
@torch.inference_mode()
-def load_model(ckpt_filename):
- unet, clip, vae, clip_vision = load_checkpoint_guess_config(ckpt_filename, embedding_directory=path_embeddings)
- return StableDiffusionModel(unet=unet, clip=clip, vae=vae, clip_vision=clip_vision, filename=ckpt_filename)
+def load_model(ckpt_filename, vae_filename=None):
+ unet, clip, vae, vae_filename, clip_vision = load_checkpoint_guess_config(ckpt_filename, embedding_directory=path_embeddings,
+ vae_filename_param=vae_filename)
+ return StableDiffusionModel(unet=unet, clip=clip, vae=vae, clip_vision=clip_vision, filename=ckpt_filename, vae_filename=vae_filename)
@torch.no_grad()
@@ -268,7 +265,7 @@ def get_previewer(model):
def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sampler_name='dpmpp_2m_sde_gpu',
scheduler='karras', denoise=1.0, disable_noise=False, start_step=None, last_step=None,
force_full_denoise=False, callback_function=None, refiner=None, refiner_switch=-1,
- previewer_start=None, previewer_end=None, sigmas=None, noise_mean=None):
+ previewer_start=None, previewer_end=None, sigmas=None, noise_mean=None, disable_preview=False):
if sigmas is not None:
sigmas = sigmas.clone().to(ldm_patched.modules.model_management.get_torch_device())
@@ -299,7 +296,7 @@ def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sa
def callback(step, x0, x, total_steps):
ldm_patched.modules.model_management.throw_exception_if_processing_interrupted()
y = None
- if previewer is not None and not modules.advanced_parameters.disable_preview:
+ if previewer is not None and not disable_preview:
y = previewer(x0, previewer_start + step, previewer_end)
if callback_function is not None:
callback_function(previewer_start + step, x0, x, previewer_end, y)
diff --git a/modules/default_pipeline.py b/modules/default_pipeline.py
index 6001d97f..494644d6 100644
--- a/modules/default_pipeline.py
+++ b/modules/default_pipeline.py
@@ -3,6 +3,7 @@ import os
import torch
import modules.patch
import modules.config
+import modules.flags
import ldm_patched.modules.model_management
import ldm_patched.modules.latent_formats
import modules.inpaint_worker
@@ -11,6 +12,7 @@ from extras.expansion import FooocusExpansion
from ldm_patched.modules.model_base import SDXL, SDXLRefiner
from modules.sample_hijack import clip_separate
+from modules.util import get_file_from_folder_list, get_enabled_loras
model_base = core.StableDiffusionModel()
@@ -57,17 +59,21 @@ def assert_model_integrity():
@torch.no_grad()
@torch.inference_mode()
-def refresh_base_model(name):
+def refresh_base_model(name, vae_name=None):
global model_base
- filename = os.path.abspath(os.path.realpath(os.path.join(modules.config.path_checkpoints, name)))
+ filename = get_file_from_folder_list(name, modules.config.paths_checkpoints)
- if model_base.filename == filename:
+ vae_filename = None
+ if vae_name is not None and vae_name != modules.flags.default_vae:
+ vae_filename = get_file_from_folder_list(vae_name, modules.config.path_vae)
+
+ if model_base.filename == filename and model_base.vae_filename == vae_filename:
return
- model_base = core.StableDiffusionModel()
- model_base = core.load_model(filename)
+ model_base = core.load_model(filename, vae_filename)
print(f'Base model loaded: {model_base.filename}')
+ print(f'VAE loaded: {model_base.vae_filename}')
return
@@ -76,7 +82,7 @@ def refresh_base_model(name):
def refresh_refiner_model(name):
global model_refiner
- filename = os.path.abspath(os.path.realpath(os.path.join(modules.config.path_checkpoints, name)))
+ filename = get_file_from_folder_list(name, modules.config.paths_checkpoints)
if model_refiner.filename == filename:
return
@@ -195,6 +201,17 @@ def clip_encode(texts, pool_top_k=1):
return [[torch.cat(cond_list, dim=1), {"pooled_output": pooled_acc}]]
+@torch.no_grad()
+@torch.inference_mode()
+def set_clip_skip(clip_skip: int):
+ global final_clip
+
+ if final_clip is None:
+ return
+
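+    # CLIP skip N is applied as clip_layer(-N): the text encoder output is taken N layers from the end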
+ final_clip.clip_layer(-abs(clip_skip))
+ return
+
@torch.no_grad()
@torch.inference_mode()
def clear_all_caches():
@@ -215,7 +232,7 @@ def prepare_text_encoder(async_call=True):
@torch.no_grad()
@torch.inference_mode()
def refresh_everything(refiner_model_name, base_model_name, loras,
- base_model_additional_loras=None, use_synthetic_refiner=False):
+ base_model_additional_loras=None, use_synthetic_refiner=False, vae_name=None):
global final_unet, final_clip, final_vae, final_refiner_unet, final_refiner_vae, final_expansion
final_unet = None
@@ -226,11 +243,11 @@ def refresh_everything(refiner_model_name, base_model_name, loras,
if use_synthetic_refiner and refiner_model_name == 'None':
print('Synthetic Refiner Activated')
- refresh_base_model(base_model_name)
+ refresh_base_model(base_model_name, vae_name)
synthesize_refiner_model()
else:
refresh_refiner_model(refiner_model_name)
- refresh_base_model(base_model_name)
+ refresh_base_model(base_model_name, vae_name)
refresh_loras(loras, base_model_additional_loras=base_model_additional_loras)
assert_model_integrity()
@@ -253,7 +270,8 @@ def refresh_everything(refiner_model_name, base_model_name, loras,
refresh_everything(
refiner_model_name=modules.config.default_refiner_model_name,
base_model_name=modules.config.default_base_model_name,
- loras=modules.config.default_loras
+ loras=get_enabled_loras(modules.config.default_loras),
+ vae_name=modules.config.default_vae,
)
@@ -315,7 +333,7 @@ def get_candidate_vae(steps, switch, denoise=1.0, refiner_swap_method='joint'):
@torch.no_grad()
@torch.inference_mode()
-def process_diffusion(positive_cond, negative_cond, steps, switch, width, height, image_seed, callback, sampler_name, scheduler_name, latent=None, denoise=1.0, tiled=False, cfg_scale=7.0, refiner_swap_method='joint'):
+def process_diffusion(positive_cond, negative_cond, steps, switch, width, height, image_seed, callback, sampler_name, scheduler_name, latent=None, denoise=1.0, tiled=False, cfg_scale=7.0, refiner_swap_method='joint', disable_preview=False):
target_unet, target_vae, target_refiner_unet, target_refiner_vae, target_clip \
= final_unet, final_vae, final_refiner_unet, final_refiner_vae, final_clip
@@ -374,6 +392,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
refiner_switch=switch,
previewer_start=0,
previewer_end=steps,
+ disable_preview=disable_preview
)
decoded_latent = core.decode_vae(vae=target_vae, latent_image=sampled_latent, tiled=tiled)
@@ -392,6 +411,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
scheduler=scheduler_name,
previewer_start=0,
previewer_end=steps,
+ disable_preview=disable_preview
)
print('Refiner swapped by changing ksampler. Noise preserved.')
@@ -414,6 +434,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
scheduler=scheduler_name,
previewer_start=switch,
previewer_end=steps,
+ disable_preview=disable_preview
)
target_model = target_refiner_vae
@@ -422,7 +443,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
decoded_latent = core.decode_vae(vae=target_model, latent_image=sampled_latent, tiled=tiled)
if refiner_swap_method == 'vae':
- modules.patch.eps_record = 'vae'
+ modules.patch.patch_settings[os.getpid()].eps_record = 'vae'
if modules.inpaint_worker.current_task is not None:
modules.inpaint_worker.current_task.unswap()
@@ -440,7 +461,8 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
sampler_name=sampler_name,
scheduler=scheduler_name,
previewer_start=0,
- previewer_end=steps
+ previewer_end=steps,
+ disable_preview=disable_preview
)
print('Fooocus VAE-based swap.')
@@ -459,7 +481,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
denoise=denoise)[switch:] * k_sigmas
len_sigmas = len(sigmas) - 1
- noise_mean = torch.mean(modules.patch.eps_record, dim=1, keepdim=True)
+ noise_mean = torch.mean(modules.patch.patch_settings[os.getpid()].eps_record, dim=1, keepdim=True)
if modules.inpaint_worker.current_task is not None:
modules.inpaint_worker.current_task.swap()
@@ -479,7 +501,8 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
previewer_start=switch,
previewer_end=steps,
sigmas=sigmas,
- noise_mean=noise_mean
+ noise_mean=noise_mean,
+ disable_preview=disable_preview
)
target_model = target_refiner_vae
@@ -488,5 +511,5 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height
decoded_latent = core.decode_vae(vae=target_model, latent_image=sampled_latent, tiled=tiled)
images = core.pytorch_to_numpy(decoded_latent)
- modules.patch.eps_record = None
+ modules.patch.patch_settings[os.getpid()].eps_record = None
return images
diff --git a/modules/extra_utils.py b/modules/extra_utils.py
new file mode 100644
index 00000000..c2dfa810
--- /dev/null
+++ b/modules/extra_utils.py
@@ -0,0 +1,41 @@
+import os
+from ast import literal_eval
+
+
+def makedirs_with_log(path):
+ try:
+ os.makedirs(path, exist_ok=True)
+ except OSError as error:
+ print(f'Directory {path} could not be created, reason: {error}')
+
+
+def get_files_from_folder(folder_path, extensions=None, name_filter=None):
+ if not os.path.isdir(folder_path):
+ raise ValueError("Folder path is not a valid directory.")
+
+ filenames = []
+
+ for root, _, files in os.walk(folder_path, topdown=False):
+ relative_path = os.path.relpath(root, folder_path)
+ if relative_path == ".":
+ relative_path = ""
+ for filename in sorted(files, key=lambda s: s.casefold()):
+ _, file_extension = os.path.splitext(filename)
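+            # `_` holds the file name without its extension; name_filter (when given) is matched against it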
+ if (extensions is None or file_extension.lower() in extensions) and (name_filter is None or name_filter in _):
+ path = os.path.join(relative_path, filename)
+ filenames.append(path)
+
+ return filenames
+
+
+def try_eval_env_var(value: str, expected_type=None):
+ try:
+ value_eval = value
+ if expected_type is bool:
+ value_eval = value.title()
+ value_eval = literal_eval(value_eval)
+ if expected_type is not None and not isinstance(value_eval, expected_type):
+ return value
+ return value_eval
+ except:
+ return value
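+
+# Illustrative behavior: try_eval_env_var('True', bool) -> True, try_eval_env_var('2', int) -> 2;
+# a value that cannot be parsed as the expected type is returned unchanged as a string.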
diff --git a/modules/flags.py b/modules/flags.py
index 27f2d716..29ac4615 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -1,3 +1,5 @@
+from enum import IntEnum, Enum
+
disabled = 'Disabled'
enabled = 'Enabled'
subtle_variation = 'Vary (Subtle)'
@@ -10,16 +12,54 @@ uov_list = [
disabled, subtle_variation, strong_variation, upscale_15, upscale_2, upscale_fast
]
-KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "heunpp2","dpm_2", "dpm_2_ancestral",
- "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
- "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"]
+CIVITAI_NO_KARRAS = ["euler", "euler_ancestral", "heun", "dpm_fast", "dpm_adaptive", "ddim", "uni_pc"]
-SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo"]
-SAMPLER_NAMES = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"]
+# mapping: Fooocus sampler name -> A1111 / Civitai display name
+KSAMPLER = {
+ "euler": "Euler",
+ "euler_ancestral": "Euler a",
+ "heun": "Heun",
+ "heunpp2": "",
+ "dpm_2": "DPM2",
+ "dpm_2_ancestral": "DPM2 a",
+ "lms": "LMS",
+ "dpm_fast": "DPM fast",
+ "dpm_adaptive": "DPM adaptive",
+ "dpmpp_2s_ancestral": "DPM++ 2S a",
+ "dpmpp_sde": "DPM++ SDE",
+ "dpmpp_sde_gpu": "DPM++ SDE",
+ "dpmpp_2m": "DPM++ 2M",
+ "dpmpp_2m_sde": "DPM++ 2M SDE",
+ "dpmpp_2m_sde_gpu": "DPM++ 2M SDE",
+ "dpmpp_3m_sde": "",
+ "dpmpp_3m_sde_gpu": "",
+ "ddpm": "",
+ "lcm": "LCM",
+ "tcd": "TCD"
+}
+
+SAMPLER_EXTRA = {
+ "ddim": "DDIM",
+ "uni_pc": "UniPC",
+ "uni_pc_bh2": ""
+}
+
+SAMPLERS = KSAMPLER | SAMPLER_EXTRA
+
+KSAMPLER_NAMES = list(KSAMPLER.keys())
+
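+            # drop this worker's per-pid patch settings so stale values do not carry over to the next task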
+SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo", "align_your_steps", "tcd", "edm_playground_v2.5"]
+SAMPLER_NAMES = KSAMPLER_NAMES + list(SAMPLER_EXTRA.keys())
sampler_list = SAMPLER_NAMES
scheduler_list = SCHEDULER_NAMES
+clip_skip_max = 12
+
+default_vae = 'Default (model)'
+
+refiner_swap_method = 'joint'
+
cn_ip = "ImagePrompt"
cn_ip_face = "FaceSwap"
cn_canny = "PyraCanny"
@@ -32,9 +72,9 @@ default_parameters = {
cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0)
} # stop, weight
-inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6']
-performance_selections = ['Speed', 'Quality', 'Extreme Speed']
+output_formats = ['png', 'jpeg', 'webp']
+inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6']
inpaint_option_default = 'Inpaint or Outpaint (default)'
inpaint_option_detail = 'Improve Detail (face, hand, eyes, etc.)'
inpaint_option_modify = 'Modify Content (add objects, change background, etc.)'
@@ -42,3 +82,98 @@ inpaint_options = [inpaint_option_default, inpaint_option_detail, inpaint_option
desc_type_photo = 'Photograph'
desc_type_anime = 'Art/Anime'
+
+sdxl_aspect_ratios = [
+ '704*1408', '704*1344', '768*1344', '768*1280', '832*1216', '832*1152',
+ '896*1152', '896*1088', '960*1088', '960*1024', '1024*1024', '1024*960',
+ '1088*960', '1088*896', '1152*896', '1152*832', '1216*832', '1280*768',
+ '1344*768', '1344*704', '1408*704', '1472*704', '1536*640', '1600*640',
+ '1664*576', '1728*576'
+]
+
+
+class MetadataScheme(Enum):
+ FOOOCUS = 'fooocus'
+ A1111 = 'a1111'
+
+
+metadata_scheme = [
+ (f'{MetadataScheme.FOOOCUS.value} (json)', MetadataScheme.FOOOCUS.value),
+ (f'{MetadataScheme.A1111.value} (plain text)', MetadataScheme.A1111.value),
+]
+
+controlnet_image_count = 4
+preparation_step_count = 13
+
+
+class OutputFormat(Enum):
+ PNG = 'png'
+ JPEG = 'jpeg'
+ WEBP = 'webp'
+
+ @classmethod
+ def list(cls) -> list:
+ return list(map(lambda c: c.value, cls))
+
+
+class PerformanceLoRA(Enum):
+ QUALITY = None
+ SPEED = None
+ EXTREME_SPEED = 'sdxl_lcm_lora.safetensors'
+ LIGHTNING = 'sdxl_lightning_4step_lora.safetensors'
+ HYPER_SD = 'sdxl_hyper_sd_4step_lora.safetensors'
+
+
+class Steps(IntEnum):
+ QUALITY = 60
+ SPEED = 30
+ EXTREME_SPEED = 8
+ LIGHTNING = 4
+ HYPER_SD = 4
+
+ @classmethod
+ def keys(cls) -> list:
+ return list(map(lambda c: c, Steps.__members__))
+
+
+class StepsUOV(IntEnum):
+ QUALITY = 36
+ SPEED = 18
+ EXTREME_SPEED = 8
+ LIGHTNING = 4
+ HYPER_SD = 4
+
+
+class Performance(Enum):
+ QUALITY = 'Quality'
+ SPEED = 'Speed'
+ EXTREME_SPEED = 'Extreme Speed'
+ LIGHTNING = 'Lightning'
+ HYPER_SD = 'Hyper-SD'
+
+ @classmethod
+ def list(cls) -> list:
+ return list(map(lambda c: c.value, cls))
+
+ @classmethod
+ def values(cls) -> list:
+ return list(map(lambda c: c.value, cls))
+
+ @classmethod
+ def by_steps(cls, steps: int | str):
+ return cls[Steps(int(steps)).name]
+
+ @classmethod
+ def has_restricted_features(cls, x) -> bool:
+ if isinstance(x, Performance):
+ x = x.value
+ return x in [cls.EXTREME_SPEED.value, cls.LIGHTNING.value, cls.HYPER_SD.value]
+
+ def steps(self) -> int | None:
+ return Steps[self.name].value if self.name in Steps.__members__ else None
+
+ def steps_uov(self) -> int | None:
+ return StepsUOV[self.name].value if self.name in StepsUOV.__members__ else None
+
+ def lora_filename(self) -> str | None:
+ return PerformanceLoRA[self.name].value if self.name in PerformanceLoRA.__members__ else None
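+
+# Illustrative behavior of the helpers above: Performance.SPEED.steps() == 30,
+# Performance.EXTREME_SPEED.lora_filename() == 'sdxl_lcm_lora.safetensors',
+# and Performance.by_steps(4) resolves to LIGHTNING because HYPER_SD aliases the same step count.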
diff --git a/modules/gradio_hijack.py b/modules/gradio_hijack.py
index 181429ec..35df81c0 100644
--- a/modules/gradio_hijack.py
+++ b/modules/gradio_hijack.py
@@ -17,7 +17,7 @@ from gradio_client.documentation import document, set_documentation_group
from gradio_client.serializing import ImgSerializable
from PIL import Image as _Image # using _ to minimize namespace pollution
-from gradio import processing_utils, utils
+from gradio import processing_utils, utils, Error
from gradio.components.base import IOComponent, _Keywords, Block
from gradio.deprecation import warn_style_method_deprecation
from gradio.events import (
@@ -275,7 +275,10 @@ class Image(
x, mask = x["image"], x["mask"]
assert isinstance(x, str)
- im = processing_utils.decode_base64_to_image(x)
+ try:
+ im = processing_utils.decode_base64_to_image(x)
+ except PIL.UnidentifiedImageError:
+ raise Error("Unsupported image type in input")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
im = im.convert(self.image_mode)
diff --git a/modules/html.py b/modules/html.py
index 3ec6f2d6..25771cb9 100644
--- a/modules/html.py
+++ b/modules/html.py
@@ -1,118 +1,3 @@
-css = '''
-.loader-container {
- display: flex; /* Use flex to align items horizontally */
- align-items: center; /* Center items vertically within the container */
- white-space: nowrap; /* Prevent line breaks within the container */
-}
-
-.loader {
- border: 8px solid #f3f3f3; /* Light grey */
- border-top: 8px solid #3498db; /* Blue */
- border-radius: 50%;
- width: 30px;
- height: 30px;
- animation: spin 2s linear infinite;
-}
-
-@keyframes spin {
- 0% { transform: rotate(0deg); }
- 100% { transform: rotate(360deg); }
-}
-
-/* Style the progress bar */
-progress {
- appearance: none; /* Remove default styling */
- height: 20px; /* Set the height of the progress bar */
- border-radius: 5px; /* Round the corners of the progress bar */
- background-color: #f3f3f3; /* Light grey background */
- width: 100%;
-}
-
-/* Style the progress bar container */
-.progress-container {
- margin-left: 20px;
- margin-right: 20px;
- flex-grow: 1; /* Allow the progress container to take up remaining space */
-}
-
-/* Set the color of the progress bar fill */
-progress::-webkit-progress-value {
- background-color: #3498db; /* Blue color for the fill */
-}
-
-progress::-moz-progress-bar {
- background-color: #3498db; /* Blue color for the fill in Firefox */
-}
-
-/* Style the text on the progress bar */
-progress::after {
- content: attr(value '%'); /* Display the progress value followed by '%' */
- position: absolute;
- top: 50%;
- left: 50%;
- transform: translate(-50%, -50%);
- color: white; /* Set text color */
- font-size: 14px; /* Set font size */
-}
-
-/* Style other texts */
-.loader-container > span {
- margin-left: 5px; /* Add spacing between the progress bar and the text */
-}
-
-.progress-bar > .generating {
- display: none !important;
-}
-
-.progress-bar{
- height: 30px !important;
-}
-
-.type_row{
- height: 80px !important;
-}
-
-.type_row_half{
- height: 32px !important;
-}
-
-.scroll-hide{
- resize: none !important;
-}
-
-.refresh_button{
- border: none !important;
- background: none !important;
- font-size: none !important;
- box-shadow: none !important;
-}
-
-.advanced_check_row{
- width: 250px !important;
-}
-
-.min_check{
- min-width: min(1px, 100%) !important;
-}
-
-.resizable_area {
- resize: vertical;
- overflow: auto !important;
-}
-
-.aspect_ratios label {
- width: 140px !important;
-}
-
-.aspect_ratios label span {
- white-space: nowrap !important;
-}
-
-.aspect_ratios label input {
- margin-left: -5px !important;
-}
-
-'''
progress_html = '''
diff --git a/modules/inpaint_worker.py b/modules/inpaint_worker.py
index 88ec39d6..43a7ae23 100644
--- a/modules/inpaint_worker.py
+++ b/modules/inpaint_worker.py
@@ -4,6 +4,7 @@ import numpy as np
from PIL import Image, ImageFilter
from modules.util import resample_image, set_image_shape_ceil, get_image_shape_ceil
from modules.upscaler import perform_upscale
+import cv2
inpaint_head_model = None
@@ -28,19 +29,25 @@ def box_blur(x, k):
return np.array(x)
-def max33(x):
- x = Image.fromarray(x)
- x = x.filter(ImageFilter.MaxFilter(3))
- return np.array(x)
+def max_filter_opencv(x, ksize=3):
+    # OpenCV maximum filter (morphological dilation); the input is expected to be int16
+ return cv2.dilate(x, np.ones((ksize, ksize), dtype=np.int16))
def morphological_open(x):
- x_int32 = np.zeros_like(x).astype(np.int32)
- x_int32[x > 127] = 256
- for _ in range(32):
- maxed = max33(x_int32) - 8
- x_int32 = np.maximum(maxed, x_int32)
- return x_int32.clip(0, 255).astype(np.uint8)
+ # Convert array to int16 type via threshold operation
+ x_int16 = np.zeros_like(x, dtype=np.int16)
+ x_int16[x > 127] = 256
+
+ for i in range(32):
+ # Use int16 type to avoid overflow
+ maxed = max_filter_opencv(x_int16, ksize=3) - 8
+ x_int16 = np.maximum(maxed, x_int16)
+
+ # Clip negative values to 0 and convert back to uint8 type
+ x_uint8 = np.clip(x_int16, 0, 255).astype(np.uint8)
+ return x_uint8
def up255(x, t=0):
diff --git a/modules/launch_util.py b/modules/launch_util.py
index 00fff8ae..370dc048 100644
--- a/modules/launch_util.py
+++ b/modules/launch_util.py
@@ -1,16 +1,19 @@
import os
import importlib
import importlib.util
+import shutil
import subprocess
import sys
import re
import logging
-
+import importlib.metadata
+import packaging.version
+from packaging.requirements import Requirement
logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh...
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
-re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")
+re_requirement = re.compile(r"\s*([-\w]+)\s*(?:==\s*([-+.\w]+))?\s*")
python = sys.executable
default_command_live = (os.environ.get('LAUNCH_LIVE_OUTPUT') == "1")
@@ -73,35 +76,42 @@ def run_pip(command, desc=None, live=default_command_live):
def requirements_met(requirements_file):
- """
- Does a simple parse of a requirements.txt file to determine if all rerqirements in it
- are already installed. Returns True if so, False if not installed or parsing fails.
- """
-
- import importlib.metadata
- import packaging.version
-
with open(requirements_file, "r", encoding="utf8") as file:
for line in file:
- if line.strip() == "":
+ line = line.strip()
+ if line == "" or line.startswith('#'):
continue
- m = re.match(re_requirement, line)
- if m is None:
- return False
-
- package = m.group(1).strip()
- version_required = (m.group(2) or "").strip()
-
- if version_required == "":
- continue
+ requirement = Requirement(line)
+ package = requirement.name
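+            # packaging's Requirement also parses specifiers such as "torch>=2.0,<2.2"; the installed version is checked against requirement.specifier below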
try:
version_installed = importlib.metadata.version(package)
- except Exception:
- return False
+ installed_version = packaging.version.parse(version_installed)
- if packaging.version.parse(version_required) != packaging.version.parse(version_installed):
+ # Check if the installed version satisfies the requirement
+ if installed_version not in requirement.specifier:
+ print(f"Version mismatch for {package}: Installed version {version_installed} does not meet requirement {requirement}")
+ return False
+ except Exception as e:
+ print(f"Error checking version for {package}: {e}")
return False
return True
+
+
+def delete_folder_content(folder, prefix=None):
+ result = True
+
+ for filename in os.listdir(folder):
+ file_path = os.path.join(folder, filename)
+ try:
+ if os.path.isfile(file_path) or os.path.islink(file_path):
+ os.unlink(file_path)
+ elif os.path.isdir(file_path):
+ shutil.rmtree(file_path)
+ except Exception as e:
+ print(f'{prefix}Failed to delete {file_path}. Reason: {e}')
+ result = False
+
+ return result
\ No newline at end of file
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 78d73978..ff930cc0 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -1,78 +1,168 @@
import json
+import re
+from abc import ABC, abstractmethod
+from pathlib import Path
+
import gradio as gr
+from PIL import Image
+
+import fooocus_version
import modules.config
+import modules.sdxl_styles
+from modules.flags import MetadataScheme, Performance, Steps
+from modules.flags import SAMPLERS, CIVITAI_NO_KARRAS
+from modules.util import quote, unquote, extract_styles_from_prompt, is_json, get_file_from_folder_list, sha256
+
+re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)'
+re_param = re.compile(re_param_code)
+re_imagesize = re.compile(r"^(\d+)x(\d+)$")
+
+hash_cache = {}
-def load_parameter_button_click(raw_prompt_txt):
- loaded_parameter_dict = json.loads(raw_prompt_txt)
+def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
+ loaded_parameter_dict = raw_metadata
+ if isinstance(raw_metadata, str):
+ loaded_parameter_dict = json.loads(raw_metadata)
assert isinstance(loaded_parameter_dict, dict)
- results = [True, 1]
+ results = [len(loaded_parameter_dict) > 0]
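+    # each helper below appends either a concrete value or gr.update(); the caller maps the list positionally onto the UI outputs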
+ get_image_number('image_number', 'Image Number', loaded_parameter_dict, results)
+ get_str('prompt', 'Prompt', loaded_parameter_dict, results)
+ get_str('negative_prompt', 'Negative Prompt', loaded_parameter_dict, results)
+ get_list('styles', 'Styles', loaded_parameter_dict, results)
+ performance = get_str('performance', 'Performance', loaded_parameter_dict, results)
+ get_steps('steps', 'Steps', loaded_parameter_dict, results)
+ get_number('overwrite_switch', 'Overwrite Switch', loaded_parameter_dict, results)
+ get_resolution('resolution', 'Resolution', loaded_parameter_dict, results)
+ get_number('guidance_scale', 'Guidance Scale', loaded_parameter_dict, results)
+ get_number('sharpness', 'Sharpness', loaded_parameter_dict, results)
+ get_adm_guidance('adm_guidance', 'ADM Guidance', loaded_parameter_dict, results)
+ get_str('refiner_swap_method', 'Refiner Swap Method', loaded_parameter_dict, results)
+ get_number('adaptive_cfg', 'CFG Mimicking from TSNR', loaded_parameter_dict, results)
+ get_number('clip_skip', 'CLIP Skip', loaded_parameter_dict, results, cast_type=int)
+ get_str('base_model', 'Base Model', loaded_parameter_dict, results)
+ get_str('refiner_model', 'Refiner Model', loaded_parameter_dict, results)
+ get_number('refiner_switch', 'Refiner Switch', loaded_parameter_dict, results)
+ get_str('sampler', 'Sampler', loaded_parameter_dict, results)
+ get_str('scheduler', 'Scheduler', loaded_parameter_dict, results)
+ get_str('vae', 'VAE', loaded_parameter_dict, results)
+ get_seed('seed', 'Seed', loaded_parameter_dict, results)
+
+ if is_generating:
+ results.append(gr.update())
+ else:
+ results.append(gr.update(visible=True))
+
+ results.append(gr.update(visible=False))
+
+ get_freeu('freeu', 'FreeU', loaded_parameter_dict, results)
+
+    # prevent performance LoRAs from being added twice: once via the performance setting and once as a regular LoRA
+ performance_filename = None
+ if performance is not None and performance in Performance.values():
+ performance = Performance(performance)
+ performance_filename = performance.lora_filename()
+
+ for i in range(modules.config.default_max_lora_number):
+ get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results, performance_filename)
+
+ return results
+
+
+def get_str(key: str, fallback: str | None, source_dict: dict, results: list, default=None) -> str | None:
try:
- h = loaded_parameter_dict.get('Prompt', None)
+ h = source_dict.get(key, source_dict.get(fallback, default))
assert isinstance(h, str)
results.append(h)
+ return h
except:
results.append(gr.update())
+ return None
- try:
- h = loaded_parameter_dict.get('Negative Prompt', None)
- assert isinstance(h, str)
- results.append(h)
- except:
- results.append(gr.update())
+def get_list(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
try:
- h = loaded_parameter_dict.get('Styles', None)
+ h = source_dict.get(key, source_dict.get(fallback, default))
h = eval(h)
assert isinstance(h, list)
results.append(h)
except:
results.append(gr.update())
+
+def get_number(key: str, fallback: str | None, source_dict: dict, results: list, default=None, cast_type=float):
try:
- h = loaded_parameter_dict.get('Performance', None)
- assert isinstance(h, str)
+ h = source_dict.get(key, source_dict.get(fallback, default))
+ assert h is not None
+ h = cast_type(h)
results.append(h)
except:
results.append(gr.update())
+
+def get_image_number(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
try:
- h = loaded_parameter_dict.get('Resolution', None)
+ h = source_dict.get(key, source_dict.get(fallback, default))
+ assert h is not None
+ h = int(h)
+ h = min(h, modules.config.default_max_image_number)
+ results.append(h)
+ except:
+ results.append(1)
+
+
+def get_steps(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
+ try:
+ h = source_dict.get(key, source_dict.get(fallback, default))
+ assert h is not None
+ h = int(h)
+        # keep an explicit step count only when it differs from the default steps of the stored performance
+ performance_name = source_dict.get('performance', '').replace(' ', '_').replace('-', '_').casefold()
+ performance_candidates = [key for key in Steps.keys() if key.casefold() == performance_name and Steps[key] == h]
+ if len(performance_candidates) == 0:
+ results.append(h)
+ return
+ results.append(-1)
+ except:
+ results.append(-1)
+
+
+def get_resolution(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
+ try:
+ h = source_dict.get(key, source_dict.get(fallback, default))
width, height = eval(h)
formatted = modules.config.add_ratio(f'{width}*{height}')
- if formatted in modules.config.available_aspect_ratios:
+ if formatted in modules.config.available_aspect_ratios_labels:
results.append(formatted)
results.append(-1)
results.append(-1)
else:
results.append(gr.update())
- results.append(width)
- results.append(height)
+ results.append(int(width))
+ results.append(int(height))
except:
results.append(gr.update())
results.append(gr.update())
results.append(gr.update())
+
+def get_seed(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
try:
- h = loaded_parameter_dict.get('Sharpness', None)
+ h = source_dict.get(key, source_dict.get(fallback, default))
assert h is not None
- h = float(h)
+ h = int(h)
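+        # the leading False output is meant to untick the random-seed toggle so the parsed seed is applied (assumed UI wiring)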
+ results.append(False)
results.append(h)
except:
results.append(gr.update())
-
- try:
- h = loaded_parameter_dict.get('Guidance Scale', None)
- assert h is not None
- h = float(h)
- results.append(h)
- except:
results.append(gr.update())
+
+def get_adm_guidance(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
try:
- h = loaded_parameter_dict.get('ADM Guidance', None)
+ h = source_dict.get(key, source_dict.get(fallback, default))
p, n, e = eval(h)
results.append(float(p))
results.append(float(n))
@@ -82,63 +172,458 @@ def load_parameter_button_click(raw_prompt_txt):
results.append(gr.update())
results.append(gr.update())
- try:
- h = loaded_parameter_dict.get('Base Model', None)
- assert isinstance(h, str)
- results.append(h)
- except:
- results.append(gr.update())
+def get_freeu(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
try:
- h = loaded_parameter_dict.get('Refiner Model', None)
- assert isinstance(h, str)
- results.append(h)
+ h = source_dict.get(key, source_dict.get(fallback, default))
+ b1, b2, s1, s2 = eval(h)
+ results.append(True)
+ results.append(float(b1))
+ results.append(float(b2))
+ results.append(float(s1))
+ results.append(float(s2))
except:
- results.append(gr.update())
-
- try:
- h = loaded_parameter_dict.get('Refiner Switch', None)
- assert h is not None
- h = float(h)
- results.append(h)
- except:
- results.append(gr.update())
-
- try:
- h = loaded_parameter_dict.get('Sampler', None)
- assert isinstance(h, str)
- results.append(h)
- except:
- results.append(gr.update())
-
- try:
- h = loaded_parameter_dict.get('Scheduler', None)
- assert isinstance(h, str)
- results.append(h)
- except:
- results.append(gr.update())
-
- try:
- h = loaded_parameter_dict.get('Seed', None)
- assert h is not None
- h = int(h)
results.append(False)
- results.append(h)
+ results.append(gr.update())
+ results.append(gr.update())
+ results.append(gr.update())
+ results.append(gr.update())
+
+
+def get_lora(key: str, fallback: str | None, source_dict: dict, results: list, performance_filename: str | None):
+ try:
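+        # a LoRA entry is either 'name : weight' or 'enabled : name : weight'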
+ split_data = source_dict.get(key, source_dict.get(fallback)).split(' : ')
+ enabled = True
+ name = split_data[0]
+ weight = split_data[1]
+
+ if len(split_data) == 3:
+ enabled = split_data[0] == 'True'
+ name = split_data[1]
+ weight = split_data[2]
+
+ if name == performance_filename:
+ raise Exception
+
+ weight = float(weight)
+ results.append(enabled)
+ results.append(name)
+ results.append(weight)
except:
- results.append(gr.update())
- results.append(gr.update())
+ results.append(True)
+ results.append('None')
+ results.append(1)
- results.append(gr.update(visible=True))
- results.append(gr.update(visible=False))
- for i in range(1, 6):
- try:
- n, w = loaded_parameter_dict.get(f'LoRA {i}').split(' : ')
- w = float(w)
- results.append(n)
- results.append(w)
- except:
- results.append(gr.update())
- results.append(gr.update())
+def get_sha256(filepath):
+ global hash_cache
+ if filepath not in hash_cache:
+ hash_cache[filepath] = sha256(filepath)
- return results
+ return hash_cache[filepath]
+
+
+def parse_meta_from_preset(preset_content):
+ assert isinstance(preset_content, dict)
+ preset_prepared = {}
+ items = preset_content
+
+ for settings_key, meta_key in modules.config.possible_preset_keys.items():
+ if settings_key == "default_loras":
+ loras = getattr(modules.config, settings_key)
+ if settings_key in items:
+ loras = items[settings_key]
+ for index, lora in enumerate(loras[:modules.config.default_max_lora_number]):
+ preset_prepared[f'lora_combined_{index + 1}'] = ' : '.join(map(str, lora))
+ elif settings_key == "default_aspect_ratio":
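+            # preset values use 'width*height'; the config default is a display label ('width×height ...'), hence the two parse paths below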
+ if settings_key in items and items[settings_key] is not None:
+ default_aspect_ratio = items[settings_key]
+ width, height = default_aspect_ratio.split('*')
+ else:
+ default_aspect_ratio = getattr(modules.config, settings_key)
+ width, height = default_aspect_ratio.split('×')
+ height = height[:height.index(" ")]
+ preset_prepared[meta_key] = (width, height)
+ else:
+ preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[
+ settings_key] is not None else getattr(modules.config, settings_key)
+
+ if settings_key == "default_styles" or settings_key == "default_aspect_ratio":
+ preset_prepared[meta_key] = str(preset_prepared[meta_key])
+
+ return preset_prepared
+
+
+class MetadataParser(ABC):
+ def __init__(self):
+ self.raw_prompt: str = ''
+ self.full_prompt: str = ''
+ self.raw_negative_prompt: str = ''
+ self.full_negative_prompt: str = ''
+ self.steps: int = Steps.SPEED.value
+ self.base_model_name: str = ''
+ self.base_model_hash: str = ''
+ self.refiner_model_name: str = ''
+ self.refiner_model_hash: str = ''
+ self.loras: list = []
+ self.vae_name: str = ''
+
+ @abstractmethod
+ def get_scheme(self) -> MetadataScheme:
+ raise NotImplementedError
+
+ @abstractmethod
+ def to_json(self, metadata: dict | str) -> dict:
+ raise NotImplementedError
+
+ @abstractmethod
+ def to_string(self, metadata: dict) -> str:
+ raise NotImplementedError
+
+ def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name,
+ refiner_model_name, loras, vae_name):
+ self.raw_prompt = raw_prompt
+ self.full_prompt = full_prompt
+ self.raw_negative_prompt = raw_negative_prompt
+ self.full_negative_prompt = full_negative_prompt
+ self.steps = steps
+ self.base_model_name = Path(base_model_name).stem
+
+ base_model_path = get_file_from_folder_list(base_model_name, modules.config.paths_checkpoints)
+ self.base_model_hash = get_sha256(base_model_path)
+
+ if refiner_model_name not in ['', 'None']:
+ self.refiner_model_name = Path(refiner_model_name).stem
+ refiner_model_path = get_file_from_folder_list(refiner_model_name, modules.config.paths_checkpoints)
+ self.refiner_model_hash = get_sha256(refiner_model_path)
+
+ self.loras = []
+ for (lora_name, lora_weight) in loras:
+ if lora_name != 'None':
+ lora_path = get_file_from_folder_list(lora_name, modules.config.paths_loras)
+ lora_hash = get_sha256(lora_path)
+ self.loras.append((Path(lora_name).stem, lora_weight, lora_hash))
+ self.vae_name = Path(vae_name).stem
+
+
+class A1111MetadataParser(MetadataParser):
+ def get_scheme(self) -> MetadataScheme:
+ return MetadataScheme.A1111
+
+ fooocus_to_a1111 = {
+ 'raw_prompt': 'Raw prompt',
+ 'raw_negative_prompt': 'Raw negative prompt',
+ 'negative_prompt': 'Negative prompt',
+ 'styles': 'Styles',
+ 'performance': 'Performance',
+ 'steps': 'Steps',
+ 'sampler': 'Sampler',
+ 'scheduler': 'Scheduler',
+ 'vae': 'VAE',
+ 'guidance_scale': 'CFG scale',
+ 'seed': 'Seed',
+ 'resolution': 'Size',
+ 'sharpness': 'Sharpness',
+ 'adm_guidance': 'ADM Guidance',
+ 'refiner_swap_method': 'Refiner Swap Method',
+ 'adaptive_cfg': 'Adaptive CFG',
+ 'clip_skip': 'Clip skip',
+ 'overwrite_switch': 'Overwrite Switch',
+ 'freeu': 'FreeU',
+ 'base_model': 'Model',
+ 'base_model_hash': 'Model hash',
+ 'refiner_model': 'Refiner',
+ 'refiner_model_hash': 'Refiner hash',
+ 'lora_hashes': 'Lora hashes',
+ 'lora_weights': 'Lora weights',
+ 'created_by': 'User',
+ 'version': 'Version'
+ }
+
+ def to_json(self, metadata: str) -> dict:
+ metadata_prompt = ''
+ metadata_negative_prompt = ''
+
+ done_with_prompt = False
+
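+        # A1111 infotext layout: prompt lines, an optional 'Negative prompt:' block, then a final line of comma-separated parameters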
+ *lines, lastline = metadata.strip().split("\n")
+ if len(re_param.findall(lastline)) < 3:
+ lines.append(lastline)
+ lastline = ''
+
+ for line in lines:
+ line = line.strip()
+ if line.startswith(f"{self.fooocus_to_a1111['negative_prompt']}:"):
+ done_with_prompt = True
+ line = line[len(f"{self.fooocus_to_a1111['negative_prompt']}:"):].strip()
+ if done_with_prompt:
+ metadata_negative_prompt += ('' if metadata_negative_prompt == '' else "\n") + line
+ else:
+ metadata_prompt += ('' if metadata_prompt == '' else "\n") + line
+
+ found_styles, prompt, negative_prompt = extract_styles_from_prompt(metadata_prompt, metadata_negative_prompt)
+
+ data = {
+ 'prompt': prompt,
+ 'negative_prompt': negative_prompt
+ }
+
+ for k, v in re_param.findall(lastline):
+ try:
+ if v != '' and v[0] == '"' and v[-1] == '"':
+ v = unquote(v)
+
+ m = re_imagesize.match(v)
+ if m is not None:
+ data['resolution'] = str((m.group(1), m.group(2)))
+ else:
+ data[list(self.fooocus_to_a1111.keys())[list(self.fooocus_to_a1111.values()).index(k)]] = v
+ except Exception:
+ print(f"Error parsing \"{k}: {v}\"")
+
+ # workaround for multiline prompts
+ if 'raw_prompt' in data:
+ data['prompt'] = data['raw_prompt']
+ raw_prompt = data['raw_prompt'].replace("\n", ', ')
+ if metadata_prompt != raw_prompt and modules.sdxl_styles.fooocus_expansion not in found_styles:
+ found_styles.append(modules.sdxl_styles.fooocus_expansion)
+
+ if 'raw_negative_prompt' in data:
+ data['negative_prompt'] = data['raw_negative_prompt']
+
+ data['styles'] = str(found_styles)
+
+ # try to load performance based on steps, fallback for direct A1111 imports
+        if 'steps' in data and 'performance' not in data:
+            try:
+                data['performance'] = Performance.by_steps(data['steps']).value
+            except (ValueError, KeyError):
+ pass
+
+ if 'sampler' in data:
+ data['sampler'] = data['sampler'].replace(' Karras', '')
+            # map the A1111 sampler label back to its Fooocus sampler key
+ for k, v in SAMPLERS.items():
+ if v == data['sampler']:
+ data['sampler'] = k
+ break
+
+ for key in ['base_model', 'refiner_model', 'vae']:
+ if key in data:
+ if key == 'vae':
+ self.add_extension_to_filename(data, modules.config.vae_filenames, 'vae')
+ else:
+ self.add_extension_to_filename(data, modules.config.model_filenames, key)
+
+ lora_data = ''
+ if 'lora_weights' in data and data['lora_weights'] != '':
+ lora_data = data['lora_weights']
+ elif 'lora_hashes' in data and data['lora_hashes'] != '' and data['lora_hashes'].split(', ')[0].count(':') == 2:
+ lora_data = data['lora_hashes']
+
+ if lora_data != '':
+ for li, lora in enumerate(lora_data.split(', ')):
+ lora_split = lora.split(': ')
+ lora_name = lora_split[0]
+ lora_weight = lora_split[2] if len(lora_split) == 3 else lora_split[1]
+ for filename in modules.config.lora_filenames:
+ path = Path(filename)
+ if lora_name == path.stem:
+ data[f'lora_combined_{li + 1}'] = f'{filename} : {lora_weight}'
+ break
+
+ return data
+
+ def to_string(self, metadata: dict) -> str:
+ data = {k: v for _, k, v in metadata}
+
+ width, height = eval(data['resolution'])
+
+ sampler = data['sampler']
+ scheduler = data['scheduler']
+
+ if sampler in SAMPLERS and SAMPLERS[sampler] != '':
+ sampler = SAMPLERS[sampler]
+ if sampler not in CIVITAI_NO_KARRAS and scheduler == 'karras':
+            sampler += ' Karras'
+
+ generation_params = {
+ self.fooocus_to_a1111['steps']: self.steps,
+ self.fooocus_to_a1111['sampler']: sampler,
+ self.fooocus_to_a1111['seed']: data['seed'],
+ self.fooocus_to_a1111['resolution']: f'{width}x{height}',
+ self.fooocus_to_a1111['guidance_scale']: data['guidance_scale'],
+ self.fooocus_to_a1111['sharpness']: data['sharpness'],
+ self.fooocus_to_a1111['adm_guidance']: data['adm_guidance'],
+ self.fooocus_to_a1111['base_model']: Path(data['base_model']).stem,
+ self.fooocus_to_a1111['base_model_hash']: self.base_model_hash,
+
+ self.fooocus_to_a1111['performance']: data['performance'],
+ self.fooocus_to_a1111['scheduler']: scheduler,
+ self.fooocus_to_a1111['vae']: Path(data['vae']).stem,
+ # workaround for multiline prompts
+ self.fooocus_to_a1111['raw_prompt']: self.raw_prompt,
+ self.fooocus_to_a1111['raw_negative_prompt']: self.raw_negative_prompt,
+ }
+
+ if self.refiner_model_name not in ['', 'None']:
+ generation_params |= {
+ self.fooocus_to_a1111['refiner_model']: self.refiner_model_name,
+ self.fooocus_to_a1111['refiner_model_hash']: self.refiner_model_hash
+ }
+
+ for key in ['adaptive_cfg', 'clip_skip', 'overwrite_switch', 'refiner_swap_method', 'freeu']:
+ if key in data:
+ generation_params[self.fooocus_to_a1111[key]] = data[key]
+
+ if len(self.loras) > 0:
+ lora_hashes = []
+ lora_weights = []
+ for index, (lora_name, lora_weight, lora_hash) in enumerate(self.loras):
+ # workaround for Fooocus not knowing LoRA name in LoRA metadata
+ lora_hashes.append(f'{lora_name}: {lora_hash}')
+ lora_weights.append(f'{lora_name}: {lora_weight}')
+ lora_hashes_string = ', '.join(lora_hashes)
+ lora_weights_string = ', '.join(lora_weights)
+ generation_params[self.fooocus_to_a1111['lora_hashes']] = lora_hashes_string
+ generation_params[self.fooocus_to_a1111['lora_weights']] = lora_weights_string
+
+ generation_params[self.fooocus_to_a1111['version']] = data['version']
+
+ if modules.config.metadata_created_by != '':
+ generation_params[self.fooocus_to_a1111['created_by']] = modules.config.metadata_created_by
+
+ generation_params_text = ", ".join(
+ [k if k == v else f'{k}: {quote(v)}' for k, v in generation_params.items() if
+ v is not None])
+ positive_prompt_resolved = ', '.join(self.full_prompt)
+ negative_prompt_resolved = ', '.join(self.full_negative_prompt)
+ negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else ""
+ return f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip()
+
+ @staticmethod
+ def add_extension_to_filename(data, filenames, key):
+ for filename in filenames:
+ path = Path(filename)
+ if data[key] == path.stem:
+ data[key] = filename
+ break
+
+
+class FooocusMetadataParser(MetadataParser):
+ def get_scheme(self) -> MetadataScheme:
+ return MetadataScheme.FOOOCUS
+
+ def to_json(self, metadata: dict) -> dict:
+ for key, value in metadata.items():
+ if value in ['', 'None']:
+ continue
+ if key in ['base_model', 'refiner_model']:
+ metadata[key] = self.replace_value_with_filename(key, value, modules.config.model_filenames)
+ elif key.startswith('lora_combined_'):
+ metadata[key] = self.replace_value_with_filename(key, value, modules.config.lora_filenames)
+ elif key == 'vae':
+ metadata[key] = self.replace_value_with_filename(key, value, modules.config.vae_filenames)
+ else:
+ continue
+
+ return metadata
+
+ def to_string(self, metadata: list) -> str:
+ for li, (label, key, value) in enumerate(metadata):
+ # remove model folder paths from metadata
+ if key.startswith('lora_combined_'):
+ name, weight = value.split(' : ')
+ name = Path(name).stem
+ value = f'{name} : {weight}'
+ metadata[li] = (label, key, value)
+
+ res = {k: v for _, k, v in metadata}
+
+ res['full_prompt'] = self.full_prompt
+ res['full_negative_prompt'] = self.full_negative_prompt
+ res['steps'] = self.steps
+ res['base_model'] = self.base_model_name
+ res['base_model_hash'] = self.base_model_hash
+
+ if self.refiner_model_name not in ['', 'None']:
+ res['refiner_model'] = self.refiner_model_name
+ res['refiner_model_hash'] = self.refiner_model_hash
+
+ res['vae'] = self.vae_name
+ res['loras'] = self.loras
+
+ if modules.config.metadata_created_by != '':
+ res['created_by'] = modules.config.metadata_created_by
+
+ return json.dumps(dict(sorted(res.items())))
+
+ @staticmethod
+ def replace_value_with_filename(key, value, filenames):
+ for filename in filenames:
+ path = Path(filename)
+ if key.startswith('lora_combined_'):
+ name, weight = value.split(' : ')
+ if name == path.stem:
+ return f'{filename} : {weight}'
+ elif value == path.stem:
+ return filename
+
+ return None
+
+
+def get_metadata_parser(metadata_scheme: MetadataScheme) -> MetadataParser:
+ match metadata_scheme:
+ case MetadataScheme.FOOOCUS:
+ return FooocusMetadataParser()
+ case MetadataScheme.A1111:
+ return A1111MetadataParser()
+ case _:
+ raise NotImplementedError
+
+
+def read_info_from_image(filepath) -> tuple[str | None, MetadataScheme | None]:
+ with Image.open(filepath) as image:
+ items = (image.info or {}).copy()
+
+ parameters = items.pop('parameters', None)
+ metadata_scheme = items.pop('fooocus_scheme', None)
+ exif = items.pop('exif', None)
+
+ if parameters is not None and is_json(parameters):
+ parameters = json.loads(parameters)
+ elif exif is not None:
+ exif = image.getexif()
+ # 0x9286 = UserComment
+ parameters = exif.get(0x9286, None)
+ # 0x927C = MakerNote
+ metadata_scheme = exif.get(0x927C, None)
+
+ if is_json(parameters):
+ parameters = json.loads(parameters)
+
+ try:
+ metadata_scheme = MetadataScheme(metadata_scheme)
+ except ValueError:
+ metadata_scheme = None
+
+    # broad fallback: infer the scheme from the parameter type when no explicit scheme was stored
+ if isinstance(parameters, dict):
+ metadata_scheme = MetadataScheme.FOOOCUS
+
+ if isinstance(parameters, str):
+ metadata_scheme = MetadataScheme.A1111
+
+ return parameters, metadata_scheme
+
+
+def get_exif(metadata: str | None, metadata_scheme: str):
+ exif = Image.Exif()
+    # for tag IDs see https://github.com/python-pillow/Pillow/blob/9.2.x/src/PIL/ExifTags.py
+ # 0x9286 = UserComment
+ exif[0x9286] = metadata
+ # 0x0131 = Software
+ exif[0x0131] = 'Fooocus v' + fooocus_version.version
+ # 0x927C = MakerNote
+ exif[0x927C] = metadata_scheme
+ return exif
diff --git a/modules/model_loader.py b/modules/model_loader.py
index 8ba336a9..1143f75e 100644
--- a/modules/model_loader.py
+++ b/modules/model_loader.py
@@ -14,6 +14,8 @@ def load_file_from_url(
Returns the path to the downloaded file.
"""
+ domain = os.environ.get("HF_MIRROR", "https://huggingface.co").rstrip('/')
+ url = str.replace(url, "https://huggingface.co", domain, 1)
os.makedirs(model_dir, exist_ok=True)
if not file_name:
parts = urlparse(url)
diff --git a/modules/ops.py b/modules/ops.py
new file mode 100644
index 00000000..ee0e7756
--- /dev/null
+++ b/modules/ops.py
@@ -0,0 +1,19 @@
+import torch
+import contextlib
+
+
+@contextlib.contextmanager
+def use_patched_ops(operations):
+ op_names = ['Linear', 'Conv2d', 'Conv3d', 'GroupNorm', 'LayerNorm']
+ backups = {op_name: getattr(torch.nn, op_name) for op_name in op_names}
+
+ try:
+ for op_name in op_names:
+ setattr(torch.nn, op_name, getattr(operations, op_name))
+
+ yield
+
+ finally:
+ for op_name in op_names:
+ setattr(torch.nn, op_name, backups[op_name])
+ return
diff --git a/modules/patch.py b/modules/patch.py
index 66b243cb..3c2dd8f4 100644
--- a/modules/patch.py
+++ b/modules/patch.py
@@ -17,7 +17,6 @@ import ldm_patched.controlnet.cldm
import ldm_patched.modules.model_patcher
import ldm_patched.modules.samplers
import ldm_patched.modules.args_parser
-import modules.advanced_parameters as advanced_parameters
import warnings
import safetensors.torch
import modules.constants as constants
@@ -29,15 +28,25 @@ from modules.patch_precision import patch_all_precision
from modules.patch_clip import patch_all_clip
-sharpness = 2.0
+class PatchSettings:
+ def __init__(self,
+ sharpness=2.0,
+ adm_scaler_end=0.3,
+ positive_adm_scale=1.5,
+ negative_adm_scale=0.8,
+ controlnet_softness=0.25,
+ adaptive_cfg=7.0):
+ self.sharpness = sharpness
+ self.adm_scaler_end = adm_scaler_end
+ self.positive_adm_scale = positive_adm_scale
+ self.negative_adm_scale = negative_adm_scale
+ self.controlnet_softness = controlnet_softness
+ self.adaptive_cfg = adaptive_cfg
+ self.global_diffusion_progress = 0
+ self.eps_record = None
-adm_scaler_end = 0.3
-positive_adm_scale = 1.5
-negative_adm_scale = 0.8
-adaptive_cfg = 7.0
-global_diffusion_progress = 0
-eps_record = None
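+# per-process patch settings, keyed by os.getpid(); the patched functions below
+# look up the entry belonging to their own process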
+patch_settings = {}
def calculate_weight_patched(self, patches, weight, key):
@@ -201,14 +210,13 @@ class BrownianTreeNoiseSamplerPatched:
def compute_cfg(uncond, cond, cfg_scale, t):
- global adaptive_cfg
-
- mimic_cfg = float(adaptive_cfg)
+ pid = os.getpid()
+ mimic_cfg = float(patch_settings[pid].adaptive_cfg)
real_cfg = float(cfg_scale)
real_eps = uncond + real_cfg * (cond - uncond)
- if cfg_scale > adaptive_cfg:
+ if cfg_scale > patch_settings[pid].adaptive_cfg:
mimicked_eps = uncond + mimic_cfg * (cond - uncond)
return real_eps * t + mimicked_eps * (1 - t)
else:
@@ -216,13 +224,13 @@ def compute_cfg(uncond, cond, cfg_scale, t):
def patched_sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options=None, seed=None):
- global eps_record
+ pid = os.getpid()
- if math.isclose(cond_scale, 1.0):
+ if math.isclose(cond_scale, 1.0) and not model_options.get("disable_cfg1_optimization", False):
final_x0 = calc_cond_uncond_batch(model, cond, None, x, timestep, model_options)[0]
- if eps_record is not None:
- eps_record = ((x - final_x0) / timestep).cpu()
+ if patch_settings[pid].eps_record is not None:
+ patch_settings[pid].eps_record = ((x - final_x0) / timestep).cpu()
return final_x0
@@ -231,16 +239,16 @@ def patched_sampling_function(model, x, timestep, uncond, cond, cond_scale, mode
positive_eps = x - positive_x0
negative_eps = x - negative_x0
- alpha = 0.001 * sharpness * global_diffusion_progress
+ alpha = 0.001 * patch_settings[pid].sharpness * patch_settings[pid].global_diffusion_progress
positive_eps_degraded = anisotropic.adaptive_anisotropic_filter(x=positive_eps, g=positive_x0)
positive_eps_degraded_weighted = positive_eps_degraded * alpha + positive_eps * (1.0 - alpha)
final_eps = compute_cfg(uncond=negative_eps, cond=positive_eps_degraded_weighted,
- cfg_scale=cond_scale, t=global_diffusion_progress)
+ cfg_scale=cond_scale, t=patch_settings[pid].global_diffusion_progress)
- if eps_record is not None:
- eps_record = (final_eps / timestep).cpu()
+ if patch_settings[pid].eps_record is not None:
+ patch_settings[pid].eps_record = (final_eps / timestep).cpu()
return x - final_eps
@@ -255,20 +263,19 @@ def round_to_64(x):
def sdxl_encode_adm_patched(self, **kwargs):
- global positive_adm_scale, negative_adm_scale
-
clip_pooled = ldm_patched.modules.model_base.sdxl_pooled(kwargs, self.noise_augmentor)
width = kwargs.get("width", 1024)
height = kwargs.get("height", 1024)
target_width = width
target_height = height
+ pid = os.getpid()
if kwargs.get("prompt_type", "") == "negative":
- width = float(width) * negative_adm_scale
- height = float(height) * negative_adm_scale
+ width = float(width) * patch_settings[pid].negative_adm_scale
+ height = float(height) * patch_settings[pid].negative_adm_scale
elif kwargs.get("prompt_type", "") == "positive":
- width = float(width) * positive_adm_scale
- height = float(height) * positive_adm_scale
+ width = float(width) * patch_settings[pid].positive_adm_scale
+ height = float(height) * patch_settings[pid].positive_adm_scale
def embedder(number_list):
h = self.embedder(torch.tensor(number_list, dtype=torch.float32))
@@ -322,7 +329,7 @@ def patched_KSamplerX0Inpaint_forward(self, x, sigma, uncond, cond, cond_scale,
def timed_adm(y, timesteps):
if isinstance(y, torch.Tensor) and int(y.dim()) == 2 and int(y.shape[1]) == 5632:
- y_mask = (timesteps > 999.0 * (1.0 - float(adm_scaler_end))).to(y)[..., None]
+ y_mask = (timesteps > 999.0 * (1.0 - float(patch_settings[os.getpid()].adm_scaler_end))).to(y)[..., None]
y_with_adm = y[..., :2816].clone()
y_without_adm = y[..., 2816:].clone()
return y_with_adm * y_mask + y_without_adm * (1.0 - y_mask)
@@ -332,6 +339,7 @@ def timed_adm(y, timesteps):
def patched_cldm_forward(self, x, hint, timesteps, context, y=None, **kwargs):
t_emb = ldm_patched.ldm.modules.diffusionmodules.openaimodel.timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(x.dtype)
emb = self.time_embed(t_emb)
+ pid = os.getpid()
guided_hint = self.input_hint_block(hint, emb, context)
@@ -357,19 +365,17 @@ def patched_cldm_forward(self, x, hint, timesteps, context, y=None, **kwargs):
h = self.middle_block(h, emb, context)
outs.append(self.middle_block_out(h, emb, context))
- if advanced_parameters.controlnet_softness > 0:
+ if patch_settings[pid].controlnet_softness > 0:
for i in range(10):
k = 1.0 - float(i) / 9.0
- outs[i] = outs[i] * (1.0 - advanced_parameters.controlnet_softness * k)
+ outs[i] = outs[i] * (1.0 - patch_settings[pid].controlnet_softness * k)
return outs
def patched_unet_forward(self, x, timesteps=None, context=None, y=None, control=None, transformer_options={}, **kwargs):
- global global_diffusion_progress
-
self.current_step = 1.0 - timesteps.to(x) / 999.0
- global_diffusion_progress = float(self.current_step.detach().cpu().numpy().tolist()[0])
+ patch_settings[os.getpid()].global_diffusion_progress = float(self.current_step.detach().cpu().numpy().tolist()[0])
y = timed_adm(y, timesteps)
@@ -480,6 +486,10 @@ def build_loaded(module, loader_name):
def patch_all():
+ if ldm_patched.modules.model_management.directml_enabled:
+ ldm_patched.modules.model_management.lowvram_available = True
+ ldm_patched.modules.model_management.OOM_EXCEPTION = Exception
+
patch_all_precision()
patch_all_clip()
diff --git a/modules/patch_clip.py b/modules/patch_clip.py
index 74ee436a..06b7f01b 100644
--- a/modules/patch_clip.py
+++ b/modules/patch_clip.py
@@ -16,30 +16,12 @@ import ldm_patched.modules.samplers
import ldm_patched.modules.sd
import ldm_patched.modules.sd1_clip
import ldm_patched.modules.clip_vision
-import ldm_patched.modules.model_management as model_management
import ldm_patched.modules.ops as ops
-import contextlib
+from modules.ops import use_patched_ops
from transformers import CLIPTextModel, CLIPTextConfig, modeling_utils, CLIPVisionConfig, CLIPVisionModelWithProjection
-@contextlib.contextmanager
-def use_patched_ops(operations):
- op_names = ['Linear', 'Conv2d', 'Conv3d', 'GroupNorm', 'LayerNorm']
- backups = {op_name: getattr(torch.nn, op_name) for op_name in op_names}
-
- try:
- for op_name in op_names:
- setattr(torch.nn, op_name, getattr(operations, op_name))
-
- yield
-
- finally:
- for op_name in op_names:
- setattr(torch.nn, op_name, backups[op_name])
- return
-
-
def patched_encode_token_weights(self, token_weight_pairs):
to_encode = list()
max_token_len = 0
diff --git a/modules/patch_precision.py b/modules/patch_precision.py
index 83569bdd..22ffda0a 100644
--- a/modules/patch_precision.py
+++ b/modules/patch_precision.py
@@ -51,6 +51,8 @@ def patched_register_schedule(self, given_betas=None, beta_schedule="linear", ti
self.linear_end = linear_end
sigmas = torch.tensor(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, dtype=torch.float32)
self.set_sigmas(sigmas)
+ alphas_cumprod = torch.tensor(alphas_cumprod, dtype=torch.float32)
+ self.set_alphas_cumprod(alphas_cumprod)
return
diff --git a/modules/private_logger.py b/modules/private_logger.py
index 83ba9e36..6fdb680c 100644
--- a/modules/private_logger.py
+++ b/modules/private_logger.py
@@ -5,26 +5,49 @@ import json
import urllib.parse
from PIL import Image
+from PIL.PngImagePlugin import PngInfo
+from modules.flags import OutputFormat
+from modules.meta_parser import MetadataParser, get_exif
from modules.util import generate_temp_filename
-
log_cache = {}
-def get_current_html_path():
+def get_current_html_path(output_format=None):
+ output_format = output_format if output_format else modules.config.default_output_format
date_string, local_temp_filename, only_name = generate_temp_filename(folder=modules.config.path_outputs,
- extension='png')
+ extension=output_format)
html_name = os.path.join(os.path.dirname(local_temp_filename), 'log.html')
return html_name
-def log(img, dic):
- if args_manager.args.disable_image_log:
- return
-
- date_string, local_temp_filename, only_name = generate_temp_filename(folder=modules.config.path_outputs, extension='png')
+def log(img, metadata, metadata_parser: MetadataParser | None = None, output_format=None, task=None) -> str:
+ path_outputs = modules.config.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs
+ output_format = output_format if output_format else modules.config.default_output_format
+ date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format)
os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True)
- Image.fromarray(img).save(local_temp_filename)
+
+ parsed_parameters = metadata_parser.to_string(metadata.copy()) if metadata_parser is not None else ''
+ image = Image.fromarray(img)
+
+ if output_format == OutputFormat.PNG.value:
+ if parsed_parameters != '':
+ pnginfo = PngInfo()
+ pnginfo.add_text('parameters', parsed_parameters)
+ pnginfo.add_text('fooocus_scheme', metadata_parser.get_scheme().value)
+ else:
+ pnginfo = None
+ image.save(local_temp_filename, pnginfo=pnginfo)
+ elif output_format == OutputFormat.JPEG.value:
+ image.save(local_temp_filename, quality=95, optimize=True, progressive=True, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif())
+ elif output_format == OutputFormat.WEBP.value:
+ image.save(local_temp_filename, quality=95, lossless=False, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif())
+ else:
+ image.save(local_temp_filename)
+
+ if args_manager.args.disable_image_log:
+ return local_temp_filename
+
html_name = os.path.join(os.path.dirname(local_temp_filename), 'log.html')
css_styles = (
@@ -32,7 +55,7 @@ def log(img, dic):
"body { background-color: #121212; color: #E0E0E0; } "
"a { color: #BB86FC; } "
".metadata { border-collapse: collapse; width: 100%; } "
- ".metadata .key { width: 15%; } "
+ ".metadata .label { width: 15%; } "
".metadata .value { width: 85%; font-weight: bold; } "
".metadata th, .metadata td { border: 1px solid #4d4d4d; padding: 4px; } "
".image-container img { height: auto; max-width: 512px; display: block; padding-right:10px; } "
@@ -44,16 +67,31 @@ def log(img, dic):
)
js = (
- ""
+ """"""
)
- begin_part = f"Fooocus Log {date_string}{css_styles}{js}
Fooocus Log {date_string} (private)
\n
All images are clean, without any hidden data/meta, and safe to share with others.
"
- for key, value in dic:
+ for label, key, value in metadata:
value_txt = str(value).replace('\n', ' ')
- item += f"
{key}
{value_txt}
\n"
+ item += f"
{label}
{value_txt}
\n"
+
+ if task is not None and 'positive' in task and 'negative' in task:
+ full_prompt_details = f"""Positive{', '.join(task['positive'])}
+ Negative{', '.join(task['negative'])}"""
+ item += f"
Full raw prompt
{full_prompt_details}
\n"
+
item += "
"
- js_txt = urllib.parse.quote(json.dumps({k: v for k, v in dic}, indent=0), safe='')
+ js_txt = urllib.parse.quote(json.dumps({k: v for _, k, v, in metadata}, indent=0), safe='')
item += f""
item += "
"
@@ -90,4 +134,4 @@ def log(img, dic):
log_cache[html_name] = middle_part
- return
+ return local_temp_filename
diff --git a/modules/sample_hijack.py b/modules/sample_hijack.py
index 7d8f757b..84752ede 100644
--- a/modules/sample_hijack.py
+++ b/modules/sample_hijack.py
@@ -3,6 +3,7 @@ import ldm_patched.modules.samplers
import ldm_patched.modules.model_management
from collections import namedtuple
+from ldm_patched.contrib.external_align_your_steps import AlignYourStepsScheduler
from ldm_patched.contrib.external_custom_sampler import SDTurboScheduler
from ldm_patched.k_diffusion import sampling as k_diffusion_sampling
from ldm_patched.modules.samplers import normal_scheduler, simple_scheduler, ddim_scheduler
@@ -99,6 +100,13 @@ def sample_hacked(model, noise, positive, negative, cfg, device, sampler, sigmas
calculate_start_end_timesteps(model, negative)
calculate_start_end_timesteps(model, positive)
+ if latent_image is not None:
+ latent_image = model.process_latent_in(latent_image)
+
+ if hasattr(model, 'extra_conds'):
+ positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask)
+ negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask)
+
#make sure each cond area has an opposite one with the same area
for c in positive:
create_cond_with_same_area_if_none(negative, c)
@@ -111,13 +119,6 @@ def sample_hacked(model, noise, positive, negative, cfg, device, sampler, sigmas
apply_empty_x_to_equal_area(list(filter(lambda c: c.get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x])
apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x])
- if latent_image is not None:
- latent_image = model.process_latent_in(latent_image)
-
- if hasattr(model, 'extra_conds'):
- positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask)
- negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask)
-
extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": model_options, "seed":seed}
if current_refiner is not None and hasattr(current_refiner.model, 'extra_conds'):
@@ -174,7 +175,10 @@ def calculate_sigmas_scheduler_hacked(model, scheduler_name, steps):
elif scheduler_name == "sgm_uniform":
sigmas = normal_scheduler(model, steps, sgm=True)
elif scheduler_name == "turbo":
- sigmas = SDTurboScheduler().get_sigmas(namedtuple('Patcher', ['model'])(model=model), steps)[0]
+ sigmas = SDTurboScheduler().get_sigmas(model=model, steps=steps, denoise=1.0)[0]
+ elif scheduler_name == "align_your_steps":
+ model_type = 'SDXL' if isinstance(model.latent_format, ldm_patched.modules.latent_formats.SDXL) else 'SD1'
+ sigmas = AlignYourStepsScheduler().get_sigmas(model_type=model_type, steps=steps, denoise=1.0)[0]
else:
raise TypeError("error invalid scheduler")
return sigmas
diff --git a/modules/sdxl_styles.py b/modules/sdxl_styles.py
index f5bb6276..12ab6c5c 100644
--- a/modules/sdxl_styles.py
+++ b/modules/sdxl_styles.py
@@ -1,14 +1,13 @@
import os
import re
import json
+import math
-from modules.util import get_files_from_folder
-
+from modules.extra_utils import get_files_from_folder
+from random import Random
# cannot use modules.config - validators causing circular imports
styles_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../sdxl_styles/'))
-wildcards_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../wildcards/'))
-wildcards_max_bfs_depth = 64
def normalize_key(k):
@@ -24,7 +23,6 @@ def normalize_key(k):
styles = {}
-
styles_files = get_files_from_folder(styles_path, ['.json'])
for x in ['sdxl_styles_fooocus.json',
@@ -50,8 +48,13 @@ for styles_file in styles_files:
print(f'Failed to load style file {styles_file}')
style_keys = list(styles.keys())
-fooocus_expansion = "Fooocus V2"
-legal_style_names = [fooocus_expansion] + style_keys
+fooocus_expansion = 'Fooocus V2'
+random_style_name = 'Random Style'
+legal_style_names = [fooocus_expansion, random_style_name] + style_keys
+
+
+def get_random_style(rng: Random) -> str:
+ return rng.choice(list(styles.items()))[0]
def apply_style(style, positive):
@@ -59,24 +62,36 @@ def apply_style(style, positive):
return p.replace('{prompt}', positive).splitlines(), n.splitlines()
-def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
- for _ in range(wildcards_max_bfs_depth):
- placeholders = re.findall(r'__([\w-]+)__', wildcard_text)
- if len(placeholders) == 0:
- return wildcard_text
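+# [[word1, word2, ...]] arrays multiply the number of prompt variants; get_words
+# decodes a flat variant index into one word per bracketed group (mixed-radix indexing)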
+def get_words(arrays, total_mult, index):
+ if len(arrays) == 1:
+ return [arrays[0].split(',')[index]]
+ else:
+ words = arrays[0].split(',')
+ word = words[index % len(words)]
+ index -= index % len(words)
+ index /= len(words)
+ index = math.floor(index)
+ return [word] + get_words(arrays[1:], math.floor(total_mult / len(words)), index)
- print(f'[Wildcards] processing: {wildcard_text}')
- for placeholder in placeholders:
- try:
- words = open(os.path.join(directory, f'{placeholder}.txt'), encoding='utf-8').read().splitlines()
- words = [x for x in words if x != '']
- assert len(words) > 0
- wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1)
- except:
- print(f'[Wildcards] Warning: {placeholder}.txt missing or empty. '
- f'Using "{placeholder}" as a normal word.')
- wildcard_text = wildcard_text.replace(f'__{placeholder}__', placeholder)
- print(f'[Wildcards] {wildcard_text}')
- print(f'[Wildcards] BFS stack overflow. Current text: {wildcard_text}')
- return wildcard_text
+def apply_arrays(text, index):
+ arrays = re.findall(r'\[\[(.*?)\]\]', text)
+ if len(arrays) == 0:
+ return text
+
+ print(f'[Arrays] processing: {text}')
+ mult = 1
+ for arr in arrays:
+ words = arr.split(',')
+ mult *= len(words)
+
+ index %= mult
+ chosen_words = get_words(arrays, mult, index)
+
+ i = 0
+ for arr in arrays:
+ text = text.replace(f'[[{arr}]]', chosen_words[i], 1)
+ i = i+1
+
+ return text
+
diff --git a/modules/ui_gradio_extensions.py b/modules/ui_gradio_extensions.py
index e59b151b..409c7e33 100644
--- a/modules/ui_gradio_extensions.py
+++ b/modules/ui_gradio_extensions.py
@@ -30,6 +30,7 @@ def javascript_html():
edit_attention_js_path = webpath('javascript/edit-attention.js')
viewer_js_path = webpath('javascript/viewer.js')
image_viewer_js_path = webpath('javascript/imageviewer.js')
+ samples_path = webpath(os.path.abspath('./sdxl_styles/samples/fooocus_v2.jpg'))
head = f'\n'
head += f'\n'
head += f'\n'
@@ -38,6 +39,7 @@ def javascript_html():
head += f'\n'
head += f'\n'
head += f'\n'
+ head += f'\n'
if args_manager.args.theme:
head += f'\n'
diff --git a/modules/util.py b/modules/util.py
index fce7efd7..5003f79a 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -1,14 +1,41 @@
+from pathlib import Path
+
import numpy as np
import datetime
import random
import math
import os
+import cv2
+import re
+from typing import List, Tuple, AnyStr, NamedTuple
+
+import json
+import hashlib
from PIL import Image
+import modules.config
+import modules.sdxl_styles
+from modules.flags import Performance
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
+# Regexp compiled once. Matches LoRA prompt references with the following pattern,
+# as used in A1111-style prompts:
+# <lora:filename:weight>
+LORAS_PROMPT_PATTERN = re.compile(r"()", re.X)
+
+HASH_SHA256_LENGTH = 10
+
+
+def erode_or_dilate(x, k):
+ k = int(k)
+ if k > 0:
+ return cv2.dilate(x, kernel=np.ones(shape=(3, 3), dtype=np.uint8), iterations=k)
+ if k < 0:
+ return cv2.erode(x, kernel=np.ones(shape=(3, 3), dtype=np.uint8), iterations=-k)
+ return x
+
def resample_image(im, width, height):
im = Image.fromarray(im)
@@ -145,23 +172,350 @@ def generate_temp_filename(folder='./outputs/', extension='png'):
random_number = random.randint(1000, 9999)
filename = f"{time_string}_{random_number}.{extension}"
result = os.path.join(folder, date_string, filename)
- return date_string, os.path.abspath(os.path.realpath(result)), filename
+ return date_string, os.path.abspath(result), filename
-def get_files_from_folder(folder_path, exensions=None, name_filter=None):
- if not os.path.isdir(folder_path):
- raise ValueError("Folder path is not a valid directory.")
+def sha256(filename, use_addnet_hash=False, length=HASH_SHA256_LENGTH):
+ print(f"Calculating sha256 for {filename}: ", end='')
+ if use_addnet_hash:
+ with open(filename, "rb") as file:
+ sha256_value = addnet_hash_safetensors(file)
+ else:
+ sha256_value = calculate_sha256(filename)
+ print(f"{sha256_value}")
- filenames = []
+ return sha256_value[:length] if length is not None else sha256_value
- for root, dirs, files in os.walk(folder_path):
- relative_path = os.path.relpath(root, folder_path)
- if relative_path == ".":
- relative_path = ""
- for filename in files:
- _, file_extension = os.path.splitext(filename)
- if (exensions == None or file_extension.lower() in exensions) and (name_filter == None or name_filter in _):
- path = os.path.join(relative_path, filename)
- filenames.append(path)
- return sorted(filenames, key=lambda x: -1 if os.sep in x else 1)
+def addnet_hash_safetensors(b):
+ """kohya-ss hash for safetensors from https://github.com/kohya-ss/sd-scripts/blob/main/library/train_util.py"""
+ hash_sha256 = hashlib.sha256()
+ blksize = 1024 * 1024
+
+ b.seek(0)
+ header = b.read(8)
+ n = int.from_bytes(header, "little")
+
+ offset = n + 8
+ b.seek(offset)
+ for chunk in iter(lambda: b.read(blksize), b""):
+ hash_sha256.update(chunk)
+
+ return hash_sha256.hexdigest()
+
+
+def calculate_sha256(filename) -> str:
+ hash_sha256 = hashlib.sha256()
+ blksize = 1024 * 1024
+
+ with open(filename, "rb") as f:
+ for chunk in iter(lambda: f.read(blksize), b""):
+ hash_sha256.update(chunk)
+
+ return hash_sha256.hexdigest()
+
+
+def quote(text):
+ if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
+ return text
+
+ return json.dumps(text, ensure_ascii=False)
+
+
+def unquote(text):
+ if len(text) == 0 or text[0] != '"' or text[-1] != '"':
+ return text
+
+ try:
+ return json.loads(text)
+ except Exception:
+ return text
+
+
+def unwrap_style_text_from_prompt(style_text, prompt):
+ """
+ Checks the prompt to see if the style text is wrapped around it. If so,
+ returns True plus the prompt text without the style text. Otherwise, returns
+ False with the original prompt.
+
+ Note that the "cleaned" version of the style text is only used for matching
+ purposes here. It isn't returned; the original style text is not modified.
+ """
+ stripped_prompt = prompt
+ stripped_style_text = style_text
+ if "{prompt}" in stripped_style_text:
+ # Work out whether the prompt is wrapped in the style text. If so, we
+ # return True and the "inner" prompt text that isn't part of the style.
+ try:
+ left, right = stripped_style_text.split("{prompt}", 2)
+ except ValueError as e:
+            # If the style text has multiple "{prompt}"s, we can't split it into
+ # two parts. This is an error, but we can't do anything about it.
+ print(f"Unable to compare style text to prompt:\n{style_text}")
+ print(f"Error: {e}")
+ return False, prompt, ''
+
+ left_pos = stripped_prompt.find(left)
+ right_pos = stripped_prompt.find(right)
+ if 0 <= left_pos < right_pos:
+ real_prompt = stripped_prompt[left_pos + len(left):right_pos]
+ prompt = stripped_prompt.replace(left + real_prompt + right, '', 1)
+ if prompt.startswith(", "):
+ prompt = prompt[2:]
+ if prompt.endswith(", "):
+ prompt = prompt[:-2]
+ return True, prompt, real_prompt
+ else:
+ # Work out whether the given prompt starts with the style text. If so, we
+ # return True and the prompt text up to where the style text starts.
+ if stripped_prompt.endswith(stripped_style_text):
+ prompt = stripped_prompt[: len(stripped_prompt) - len(stripped_style_text)]
+ if prompt.endswith(", "):
+ prompt = prompt[:-2]
+ return True, prompt, prompt
+
+ return False, prompt, ''
+
+
+def extract_original_prompts(style, prompt, negative_prompt):
+ """
+ Takes a style and compares it to the prompt and negative prompt. If the style
+ matches, returns True plus the prompt and negative prompt with the style text
+ removed. Otherwise, returns False with the original prompt and negative prompt.
+ """
+ if not style.prompt and not style.negative_prompt:
+ return False, prompt, negative_prompt
+
+ match_positive, extracted_positive, real_prompt = unwrap_style_text_from_prompt(
+ style.prompt, prompt
+ )
+ if not match_positive:
+ return False, prompt, negative_prompt, ''
+
+ match_negative, extracted_negative, _ = unwrap_style_text_from_prompt(
+ style.negative_prompt, negative_prompt
+ )
+ if not match_negative:
+ return False, prompt, negative_prompt, ''
+
+ return True, extracted_positive, extracted_negative, real_prompt
+
+
+def extract_styles_from_prompt(prompt, negative_prompt):
+ extracted = []
+ applicable_styles = []
+
+ for style_name, (style_prompt, style_negative_prompt) in modules.sdxl_styles.styles.items():
+ applicable_styles.append(PromptStyle(name=style_name, prompt=style_prompt, negative_prompt=style_negative_prompt))
+
+ real_prompt = ''
+
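+    # repeatedly strip one matching style per pass; stop when no registered style wraps the remaining prompt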
+ while True:
+ found_style = None
+
+ for style in applicable_styles:
+ is_match, new_prompt, new_neg_prompt, new_real_prompt = extract_original_prompts(
+ style, prompt, negative_prompt
+ )
+ if is_match:
+ found_style = style
+ prompt = new_prompt
+ negative_prompt = new_neg_prompt
+ if real_prompt == '' and new_real_prompt != '' and new_real_prompt != prompt:
+ real_prompt = new_real_prompt
+ break
+
+ if not found_style:
+ break
+
+ applicable_styles.remove(found_style)
+ extracted.append(found_style.name)
+
+ # add prompt expansion if not all styles could be resolved
+ if prompt != '':
+ if real_prompt != '':
+ extracted.append(modules.sdxl_styles.fooocus_expansion)
+ else:
+ # find real_prompt when only prompt expansion is selected
+ first_word = prompt.split(', ')[0]
+ first_word_positions = [i for i in range(len(prompt)) if prompt.startswith(first_word, i)]
+ if len(first_word_positions) > 1:
+ real_prompt = prompt[:first_word_positions[-1]]
+ extracted.append(modules.sdxl_styles.fooocus_expansion)
+ if real_prompt.endswith(', '):
+ real_prompt = real_prompt[:-2]
+
+ return list(reversed(extracted)), real_prompt, negative_prompt
+
+
+class PromptStyle(NamedTuple):
+ name: str
+ prompt: str
+ negative_prompt: str
+
+
+def is_json(data: str) -> bool:
+ try:
+ loaded_json = json.loads(data)
+ assert isinstance(loaded_json, dict)
+ except (ValueError, AssertionError):
+ return False
+ return True
+
+
+def get_filname_by_stem(lora_name, filenames: List[str]) -> str | None:
+ for filename in filenames:
+ path = Path(filename)
+ if lora_name == path.stem:
+ return filename
+ return None
+
+
+def get_file_from_folder_list(name, folders):
+ if not isinstance(folders, list):
+ folders = [folders]
+
+ for folder in folders:
+ filename = os.path.abspath(os.path.realpath(os.path.join(folder, name)))
+ if os.path.isfile(filename):
+ return filename
+
+ return os.path.abspath(os.path.realpath(os.path.join(folders[0], name)))
+
+
+def makedirs_with_log(path):
+ try:
+ os.makedirs(path, exist_ok=True)
+ except OSError as error:
+ print(f'Directory {path} could not be created, reason: {error}')
+
+
+def get_enabled_loras(loras: list, remove_none=True) -> list:
+ return [(lora[1], lora[2]) for lora in loras if lora[0] and (lora[1] != 'None' if remove_none else True)]
+
+
+def parse_lora_references_from_prompt(prompt: str, loras: List[Tuple[AnyStr, float]], loras_limit: int = 5,
+ skip_file_check=False, prompt_cleanup=True, deduplicate_loras=True,
+ lora_filenames=None) -> tuple[List[Tuple[AnyStr, float]], str]:
+ if lora_filenames is None:
+ lora_filenames = []
+
+ found_loras = []
+ prompt_without_loras = ''
+ cleaned_prompt = ''
+
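+    # scan comma-separated prompt tokens for <lora:name:weight> tags, collect them and rebuild the prompt without them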
+ for token in prompt.split(','):
+ matches = LORAS_PROMPT_PATTERN.findall(token)
+
+ if len(matches) == 0:
+ prompt_without_loras += token + ', '
+ continue
+ for match in matches:
+ lora_name = match[1] + '.safetensors'
+ if not skip_file_check:
+ lora_name = get_filname_by_stem(match[1], lora_filenames)
+ if lora_name is not None:
+ found_loras.append((lora_name, float(match[2])))
+ token = token.replace(match[0], '')
+ prompt_without_loras += token + ', '
+
+ if prompt_without_loras != '':
+ cleaned_prompt = prompt_without_loras[:-2]
+
+ if prompt_cleanup:
+ cleaned_prompt = cleanup_prompt(prompt_without_loras)
+
+ new_loras = []
+ lora_names = [lora[0] for lora in loras]
+ for found_lora in found_loras:
+ if deduplicate_loras and (found_lora[0] in lora_names or found_lora in new_loras):
+ continue
+ new_loras.append(found_lora)
+
+ if len(new_loras) == 0:
+ return loras, cleaned_prompt
+
+ updated_loras = []
+ for lora in loras + new_loras:
+ if lora[0] != "None":
+ updated_loras.append(lora)
+
+ return updated_loras[:loras_limit], cleaned_prompt
+
+
+def remove_performance_lora(filenames: list, performance: Performance | None):
+ loras_without_performance = filenames.copy()
+
+ if performance is None:
+ return loras_without_performance
+
+ performance_lora = performance.lora_filename()
+
+ for filename in filenames:
+ path = Path(filename)
+ if performance_lora == path.name:
+ loras_without_performance.remove(filename)
+
+ return loras_without_performance
+
+
+def cleanup_prompt(prompt):
+ prompt = re.sub(' +', ' ', prompt)
+ prompt = re.sub(',+', ',', prompt)
+ cleaned_prompt = ''
+ for token in prompt.split(','):
+ token = token.strip()
+ if token == '':
+ continue
+ cleaned_prompt += token + ', '
+ return cleaned_prompt[:-2]
+
+
+def apply_wildcards(wildcard_text, rng, i, read_wildcards_in_order) -> str:
+ for _ in range(modules.config.wildcards_max_bfs_depth):
+ placeholders = re.findall(r'__([\w-]+)__', wildcard_text)
+ if len(placeholders) == 0:
+ return wildcard_text
+
+ print(f'[Wildcards] processing: {wildcard_text}')
+ for placeholder in placeholders:
+ try:
+ matches = [x for x in modules.config.wildcard_filenames if os.path.splitext(os.path.basename(x))[0] == placeholder]
+ words = open(os.path.join(modules.config.path_wildcards, matches[0]), encoding='utf-8').read().splitlines()
+ words = [x for x in words if x != '']
+ assert len(words) > 0
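+                # in-order mode steps through the wildcard entries using index i; otherwise one is chosen at random via rng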
+ if read_wildcards_in_order:
+ wildcard_text = wildcard_text.replace(f'__{placeholder}__', words[i % len(words)], 1)
+ else:
+ wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1)
+ except:
+ print(f'[Wildcards] Warning: {placeholder}.txt missing or empty. '
+ f'Using "{placeholder}" as a normal word.')
+ wildcard_text = wildcard_text.replace(f'__{placeholder}__', placeholder)
+ print(f'[Wildcards] {wildcard_text}')
+
+ print(f'[Wildcards] BFS stack overflow. Current text: {wildcard_text}')
+ return wildcard_text
+
+
+def get_image_size_info(image: np.ndarray, aspect_ratios: list) -> str:
+ try:
+ image = Image.fromarray(np.uint8(image))
+ width, height = image.size
+ ratio = round(width / height, 2)
+ gcd = math.gcd(width, height)
+ lcm_ratio = f'{width // gcd}:{height // gcd}'
+ size_info = f'Image Size: {width} x {height}, Ratio: {ratio}, {lcm_ratio}'
+
+ closest_ratio = min(aspect_ratios, key=lambda x: abs(ratio - float(x.split('*')[0]) / float(x.split('*')[1])))
+ recommended_width, recommended_height = map(int, closest_ratio.split('*'))
+ recommended_ratio = round(recommended_width / recommended_height, 2)
+ recommended_gcd = math.gcd(recommended_width, recommended_height)
+ recommended_lcm_ratio = f'{recommended_width // recommended_gcd}:{recommended_height // recommended_gcd}'
+
+ size_info = f'{width} x {height}, {ratio}, {lcm_ratio}'
+ size_info += f'\n{recommended_width} x {recommended_height}, {recommended_ratio}, {recommended_lcm_ratio}'
+
+ return size_info
+ except Exception as e:
+ return f'Error reading image: {e}'
diff --git a/presets/.gitignore b/presets/.gitignore
new file mode 100644
index 00000000..27e74136
--- /dev/null
+++ b/presets/.gitignore
@@ -0,0 +1,7 @@
+*.json
+!anime.json
+!default.json
+!lcm.json
+!playground_v2.5.json
+!realistic.json
+!sai.json
\ No newline at end of file
diff --git a/presets/anime.json b/presets/anime.json
index 32428a71..78607edb 100644
--- a/presets/anime.json
+++ b/presets/anime.json
@@ -1,53 +1,57 @@
{
- "default_model": "bluePencilXL_v050.safetensors",
- "default_refiner": "DreamShaper_8_pruned.safetensors",
- "default_refiner_switch": 0.667,
+ "default_model": "animaPencilXL_v310.safetensors",
+ "default_refiner": "None",
+ "default_refiner_switch": 0.5,
"default_loras": [
[
- "sd_xl_offset_example-lora_1.0.safetensors",
- 0.5
- ],
- [
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
+ "None",
+ 1.0
+ ],
+ [
+ true,
"None",
1.0
]
],
- "default_cfg_scale": 7.0,
+ "default_cfg_scale": 6.0,
"default_sample_sharpness": 2.0,
"default_sampler": "dpmpp_2m_sde_gpu",
"default_scheduler": "karras",
"default_performance": "Speed",
- "default_prompt": "1girl, ",
- "default_prompt_negative": "(embedding:unaestheticXLv31:0.8), low quality, watermark",
+ "default_prompt": "",
+ "default_prompt_negative": "",
"default_styles": [
"Fooocus V2",
- "Fooocus Masterpiece",
- "SAI Anime",
- "SAI Digital Art",
- "SAI Enhance",
- "SAI Fantasy Art"
+ "Fooocus Semi Realistic",
+ "Fooocus Masterpiece"
],
"default_aspect_ratio": "896*1152",
"checkpoint_downloads": {
- "bluePencilXL_v050.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/bluePencilXL_v050.safetensors",
- "DreamShaper_8_pruned.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/DreamShaper_8_pruned.safetensors"
+ "animaPencilXL_v310.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/animaPencilXL_v310.safetensors"
},
- "embeddings_downloads": {
- "unaestheticXLv31.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/unaestheticXLv31.safetensors"
- },
- "lora_downloads": {
- "sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors"
- }
+ "embeddings_downloads": {},
+ "lora_downloads": {},
+ "previous_default_models": [
+ "animaPencilXL_v300.safetensors",
+ "animaPencilXL_v260.safetensors",
+ "animaPencilXL_v210.safetensors",
+ "animaPencilXL_v200.safetensors",
+ "animaPencilXL_v100.safetensors"
+ ]
}
\ No newline at end of file
diff --git a/presets/default.json b/presets/default.json
index bc014035..d02bb8a4 100644
--- a/presets/default.json
+++ b/presets/default.json
@@ -1,25 +1,30 @@
{
- "default_model": "juggernautXL_version6Rundiffusion.safetensors",
+ "default_model": "juggernautXL_v8Rundiffusion.safetensors",
"default_refiner": "None",
"default_refiner_switch": 0.5,
"default_loras": [
[
+ true,
"sd_xl_offset_example-lora_1.0.safetensors",
0.1
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
]
@@ -38,10 +43,17 @@
],
"default_aspect_ratio": "1152*896",
"checkpoint_downloads": {
- "juggernautXL_version6Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors"
+ "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
},
"embeddings_downloads": {},
"lora_downloads": {
"sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors"
- }
+ },
+ "previous_default_models": [
+ "juggernautXL_version8Rundiffusion.safetensors",
+ "juggernautXL_version7Rundiffusion.safetensors",
+ "juggernautXL_v7Rundiffusion.safetensors",
+ "juggernautXL_version6Rundiffusion.safetensors",
+ "juggernautXL_v6Rundiffusion.safetensors"
+ ]
}
\ No newline at end of file
diff --git a/presets/lcm.json b/presets/lcm.json
index 8822172d..6713fdd5 100644
--- a/presets/lcm.json
+++ b/presets/lcm.json
@@ -1,25 +1,30 @@
{
- "default_model": "juggernautXL_version6Rundiffusion.safetensors",
+ "default_model": "juggernautXL_v8Rundiffusion.safetensors",
"default_refiner": "None",
"default_refiner_switch": 0.5,
"default_loras": [
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
]
@@ -38,8 +43,15 @@
],
"default_aspect_ratio": "1152*896",
"checkpoint_downloads": {
- "juggernautXL_version6Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors"
+ "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
},
"embeddings_downloads": {},
- "lora_downloads": {}
+ "lora_downloads": {},
+ "previous_default_models": [
+ "juggernautXL_version8Rundiffusion.safetensors",
+ "juggernautXL_version7Rundiffusion.safetensors",
+ "juggernautXL_v7Rundiffusion.safetensors",
+ "juggernautXL_version6Rundiffusion.safetensors",
+ "juggernautXL_v6Rundiffusion.safetensors"
+ ]
}
\ No newline at end of file
diff --git a/presets/lightning.json b/presets/lightning.json
new file mode 100644
index 00000000..d1466c10
--- /dev/null
+++ b/presets/lightning.json
@@ -0,0 +1,57 @@
+{
+ "default_model": "juggernautXL_v8Rundiffusion.safetensors",
+ "default_refiner": "None",
+ "default_refiner_switch": 0.5,
+ "default_loras": [
+ [
+ true,
+ "None",
+ 1.0
+ ],
+ [
+ true,
+ "None",
+ 1.0
+ ],
+ [
+ true,
+ "None",
+ 1.0
+ ],
+ [
+ true,
+ "None",
+ 1.0
+ ],
+ [
+ true,
+ "None",
+ 1.0
+ ]
+ ],
+ "default_cfg_scale": 4.0,
+ "default_sample_sharpness": 2.0,
+ "default_sampler": "dpmpp_2m_sde_gpu",
+ "default_scheduler": "karras",
+ "default_performance": "Lightning",
+ "default_prompt": "",
+ "default_prompt_negative": "",
+ "default_styles": [
+ "Fooocus V2",
+ "Fooocus Enhance",
+ "Fooocus Sharp"
+ ],
+ "default_aspect_ratio": "1152*896",
+ "checkpoint_downloads": {
+ "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
+ },
+ "embeddings_downloads": {},
+ "lora_downloads": {},
+ "previous_default_models": [
+ "juggernautXL_version8Rundiffusion.safetensors",
+ "juggernautXL_version7Rundiffusion.safetensors",
+ "juggernautXL_v7Rundiffusion.safetensors",
+ "juggernautXL_version6Rundiffusion.safetensors",
+ "juggernautXL_v6Rundiffusion.safetensors"
+ ]
+}
\ No newline at end of file
diff --git a/presets/playground_v2.5.json b/presets/playground_v2.5.json
new file mode 100644
index 00000000..5bc6059e
--- /dev/null
+++ b/presets/playground_v2.5.json
@@ -0,0 +1,49 @@
+{
+ "default_model": "playground-v2.5-1024px-aesthetic.fp16.safetensors",
+ "default_refiner": "None",
+ "default_refiner_switch": 0.5,
+ "default_loras": [
+ [
+ true,
+ "None",
+ 1.0
+ ],
+ [
+ true,
+ "None",
+ 1.0
+ ],
+ [
+ true,
+ "None",
+ 1.0
+ ],
+ [
+ true,
+ "None",
+ 1.0
+ ],
+ [
+ true,
+ "None",
+ 1.0
+ ]
+ ],
+ "default_cfg_scale": 2.0,
+ "default_sample_sharpness": 2.0,
+ "default_sampler": "dpmpp_2m",
+ "default_scheduler": "edm_playground_v2.5",
+ "default_performance": "Speed",
+ "default_prompt": "",
+ "default_prompt_negative": "",
+ "default_styles": [
+ "Fooocus V2"
+ ],
+ "default_aspect_ratio": "1024*1024",
+ "checkpoint_downloads": {
+ "playground-v2.5-1024px-aesthetic.fp16.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/playground-v2.5-1024px-aesthetic.fp16.safetensors"
+ },
+ "embeddings_downloads": {},
+ "lora_downloads": {},
+ "previous_default_models": []
+}
\ No newline at end of file
diff --git a/presets/realistic.json b/presets/realistic.json
index ed625d45..6db6d0b7 100644
--- a/presets/realistic.json
+++ b/presets/realistic.json
@@ -1,25 +1,30 @@
{
- "default_model": "realisticStockPhoto_v10.safetensors",
- "default_refiner": "",
+ "default_model": "realisticStockPhoto_v20.safetensors",
+ "default_refiner": "None",
"default_refiner_switch": 0.5,
"default_loras": [
[
+ true,
"SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors",
0.25
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
]
@@ -38,10 +43,11 @@
],
"default_aspect_ratio": "896*1152",
"checkpoint_downloads": {
- "realisticStockPhoto_v10.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/realisticStockPhoto_v10.safetensors"
+ "realisticStockPhoto_v20.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/realisticStockPhoto_v20.safetensors"
},
"embeddings_downloads": {},
"lora_downloads": {
"SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors"
- }
+ },
+ "previous_default_models": ["realisticStockPhoto_v10.safetensors"]
}
\ No newline at end of file
diff --git a/presets/sai.json b/presets/sai.json
index ac9c17d1..918028f3 100644
--- a/presets/sai.json
+++ b/presets/sai.json
@@ -4,22 +4,27 @@
"default_refiner_switch": 0.75,
"default_loras": [
[
+ true,
"sd_xl_offset_example-lora_1.0.safetensors",
0.5
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
]
@@ -43,5 +48,6 @@
"embeddings_downloads": {},
"lora_downloads": {
"sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors"
- }
+ },
+ "previous_default_models": []
}
\ No newline at end of file
diff --git a/readme.md b/readme.md
index 6b458e74..e79b72aa 100644
--- a/readme.md
+++ b/readme.md
@@ -5,7 +5,7 @@
without any parameter tweaking, without any strange prompt tags.
-See also **non-cherry-picked** generalization and diversity tests [here](https://github.com/lllyasviel/Fooocus/discussions/808) and [here](https://github.com/lllyasviel/Fooocus/discussions/679) and [here](https://github.com/lllyasviel/Fooocus/discussions/679#realistic).
+See also **non-cherry-picked** generalization and diversity tests [here](https://github.com/lllyasviel/Fooocus/discussions/2067) and [here](https://github.com/lllyasviel/Fooocus/discussions/808) and [here](https://github.com/lllyasviel/Fooocus/discussions/679) and [here](https://github.com/lllyasviel/Fooocus/discussions/679#realistic).
In the entire open source community, only Fooocus can achieve this level of **non-cherry-picked** quality.
@@ -38,7 +38,7 @@ Using Fooocus is as easy as (probably easier than) Midjourney – but this does
| Midjourney | Fooocus |
| - | - |
-| High-quality text-to-image without needing much prompt engineering or parameter tuning. (Unknown method) | High-quality text-to-image without needing much prompt engineering or parameter tuning. (Fooocus has offline GPT-2 based prompt processing engine and lots of sampling improvements so that results are always beautiful, no matter your prompt is as short as “house in garden” or as long as 1000 words) |
+| High-quality text-to-image without needing much prompt engineering or parameter tuning. (Unknown method) | High-quality text-to-image without needing much prompt engineering or parameter tuning. (Fooocus has an offline GPT-2 based prompt processing engine and lots of sampling improvements so that results are always beautiful, no matter if your prompt is as short as “house in garden” or as long as 1000 words) |
| V1 V2 V3 V4 | Input Image -> Upscale or Variation -> Vary (Subtle) / Vary (Strong)|
| U1 U2 U3 U4 | Input Image -> Upscale or Variation -> Upscale (1.5x) / Upscale (2x) |
| Inpaint / Up / Down / Left / Right (Pan) | Input Image -> Inpaint or Outpaint -> Inpaint / Up / Down / Left / Right (Fooocus uses its own inpaint algorithm and inpaint models so that results are more satisfying than all other software that uses standard SDXL inpaint method/model) |
@@ -73,16 +73,20 @@ You can directly download Fooocus with:
**[>>> Click here to download <<<](https://github.com/lllyasviel/Fooocus/releases/download/release/Fooocus_win64_2-1-831.7z)**
-After you download the file, please uncompress it, and then run the "run.bat".
+After you download the file, please uncompress it and then run the "run.bat".

-In the first time you launch the software, it will automatically download models:
+The first time you launch the software, it will automatically download models:
1. It will download [default models](#models) to the folder "Fooocus\models\checkpoints" given different presets. You can download them in advance if you do not want automatic download.
2. Note that if you use inpaint, at the first time you inpaint an image, it will download [Fooocus's own inpaint control model from here](https://huggingface.co/lllyasviel/fooocus_inpaint/resolve/main/inpaint_v26.fooocus.patch) as the file "Fooocus\models\inpaint\inpaint_v26.fooocus.patch" (the size of this file is 1.28GB).
-After Fooocus 2.1.60, you will also have `run_anime.bat` and `run_realistic.bat`. They are different model presets (and requires different models, but they will be automatically downloaded). [Check here for more details](https://github.com/lllyasviel/Fooocus/discussions/679).
+After Fooocus 2.1.60, you will also have `run_anime.bat` and `run_realistic.bat`. They are different model presets (and require different models, but they will be automatically downloaded). [Check here for more details](https://github.com/lllyasviel/Fooocus/discussions/679).
+
+After Fooocus 2.3.0 you can also switch presets directly in the browser. Keep in mind that you need to add these arguments if you want to change the default behavior (see the launch example after this list):
+* Use `--disable-preset-selection` to disable preset selection in the browser.
+* Use `--always-download-new-model` to download missing models when switching presets. By default, Fooocus falls back to the `previous_default_models` defined in the corresponding preset; also see the terminal output.
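+
+For example, a launch command that combines a preset with the download behavior above could look like the following sketch (the preset name is only an illustration; use whichever preset you actually want):
+
+```
+python entry_with_update.py --preset realistic --always-download-new-model
+```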

@@ -99,7 +103,7 @@ Besides, recently many other software report that Nvidia driver above 532 is som
Note that the minimal requirement is **4GB Nvidia GPU memory (4GB VRAM)** and **8GB system memory (8GB RAM)**. This requires using Microsoft’s Virtual Swap technique, which is automatically enabled by your Windows installation in most cases, so you often do not need to do anything about it. However, if you are not sure, or if you manually turned it off (would anyone really do that?), or **if you see any "RuntimeError: CPUAllocator"**, you can enable it here:
-Click here to the see the image instruction.
+Click here to see the image instructions.

@@ -115,17 +119,21 @@ See also the common problems and troubleshoots [here](troubleshoot.md).
### Colab
-(Last tested - 2023 Dec 12)
+(Last tested - 2024 Mar 18 by [mashb1t](https://github.com/mashb1t))
| Colab | Info
| --- | --- |
[](https://colab.research.google.com/github/lllyasviel/Fooocus/blob/main/fooocus_colab.ipynb) | Fooocus Official
-In Colab, you can modify the last line to `!python entry_with_update.py --share` or `!python entry_with_update.py --preset anime --share` or `!python entry_with_update.py --preset realistic --share` for Fooocus Default/Anime/Realistic Edition.
+In Colab, you can modify the last line to `!python entry_with_update.py --share --always-high-vram` or `!python entry_with_update.py --share --always-high-vram --preset anime` or `!python entry_with_update.py --share --always-high-vram --preset realistic` for Fooocus Default/Anime/Realistic Edition.
-Note that this Colab will disable refiner by default because Colab free's resource is relatively limited (and some "big" features like image prompt may cause free-tier Colab to disconnect). We make sure that basic text-to-image is always working on free-tier Colab.
+You can also change the preset in the UI. Please be aware that this may lead to timeouts after 60 seconds. If this happens, please wait until the download has finished, then switch the preset back to the initial one and again to the one you selected, or reload the page.
-Thanks to [camenduru](https://github.com/camenduru)!
+Note that this Colab will disable refiner by default because Colab free's resources are relatively limited (and some "big" features like image prompt may cause free-tier Colab to disconnect). We make sure that basic text-to-image is always working on free-tier Colab.
+
+Using `--always-high-vram` shifts resource allocation from RAM to VRAM and achieves the overall best balance between performance, flexibility and stability on the default T4 instance. Please find more information [here](https://github.com/lllyasviel/Fooocus/pull/1710#issuecomment-1989185346).
+
+Thanks to [camenduru](https://github.com/camenduru) for the template!
### Linux (Using Anaconda)
@@ -142,7 +150,7 @@ Then download the models: download [default models](#models) to the folder "Fooo
conda activate fooocus
python entry_with_update.py
-Or if you want to open a remote port, use
+Or, if you want to open a remote port, use
conda activate fooocus
python entry_with_update.py --listen
@@ -151,7 +159,7 @@ Use `python entry_with_update.py --preset anime` or `python entry_with_update.py
### Linux (Using Python Venv)
-Your Linux needs to have **Python 3.10** installed, and lets say your Python can be called with command **python3** with your venv system working, you can
+Your Linux needs to have **Python 3.10** installed, and let's say your Python can be called with the command **python3** with your venv system working; you can
git clone https://github.com/lllyasviel/Fooocus.git
cd Fooocus
@@ -164,7 +172,7 @@ See the above sections for model downloads. You can launch the software with:
source fooocus_env/bin/activate
python entry_with_update.py
-Or if you want to open a remote port, use
+Or, if you want to open a remote port, use
source fooocus_env/bin/activate
python entry_with_update.py --listen
@@ -173,7 +181,7 @@ Use `python entry_with_update.py --preset anime` or `python entry_with_update.py
### Linux (Using native system Python)
-If you know what you are doing, and your Linux already has **Python 3.10** installed, and your Python can be called with command **python3** (and Pip with **pip3**), you can
+If you know what you are doing, and your Linux already has **Python 3.10** installed, and your Python can be called with the command **python3** (and Pip with **pip3**), you can
git clone https://github.com/lllyasviel/Fooocus.git
cd Fooocus
@@ -183,7 +191,7 @@ See the above sections for model downloads. You can launch the software with:
python3 entry_with_update.py
-Or if you want to open a remote port, use
+Or, if you want to open a remote port, use
python3 entry_with_update.py --listen
@@ -193,7 +201,7 @@ Use `python entry_with_update.py --preset anime` or `python entry_with_update.py
Note that the [minimal requirement](#minimal-requirement) for different platforms is different.
-Same with the above instructions. You need to change torch to AMD version
+Same with the above instructions. You need to change torch to the AMD version
pip uninstall torch torchvision torchaudio torchtext functorch xformers
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.6
@@ -202,11 +210,11 @@ AMD is not intensively tested, however. The AMD support is in beta.
Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition.
-### Windows(AMD GPUs)
+### Windows (AMD GPUs)
Note that the [minimal requirement](#minimal-requirement) for different platforms is different.
-Same with Windows. Download the software, edit the content of `run.bat` as:
+Same with Windows. Download the software and edit the content of `run.bat` as:
.\python_embeded\python.exe -m pip uninstall torch torchvision torchaudio torchtext functorch xformers -y
.\python_embeded\python.exe -m pip install torch-directml
@@ -217,7 +225,7 @@ Then run the `run.bat`.
AMD is not intensively tested, however. The AMD support is in beta.
-For AMD, use `python entry_with_update.py --directml --preset anime` or `python entry_with_update.py --directml --preset realistic` for Fooocus Anime/Realistic Edition.
+For AMD, use `.\python_embeded\python.exe entry_with_update.py --directml --preset anime` or `.\python_embeded\python.exe entry_with_update.py --directml --preset realistic` for Fooocus Anime/Realistic Edition.
### Mac
@@ -233,10 +241,14 @@ You can install Fooocus on Apple Mac silicon (M1 or M2) with macOS 'Catalina' or
1. Create a new conda environment, `conda env create -f environment.yaml`.
1. Activate your new conda environment, `conda activate fooocus`.
1. Install the packages required by Fooocus, `pip install -r requirements_versions.txt`.
-1. Launch Fooocus by running `python entry_with_update.py`. (Some Mac M2 users may need `python entry_with_update.py --disable-offload-from-vram` to speed up model loading/unloading.) The first time you run Fooocus, it will automatically download the Stable Diffusion SDXL models and will take a significant time, depending on your internet connection.
+1. Launch Fooocus by running `python entry_with_update.py`. (Some Mac M2 users may need `python entry_with_update.py --disable-offload-from-vram` to speed up model loading/unloading.) The first time you run Fooocus, it will automatically download the Stable Diffusion SDXL models and will take a significant amount of time, depending on your internet connection.
Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition.
+### Docker
+
+See [docker.md](docker.md)
+
### Download Previous Version
See the guidelines [here](https://github.com/lllyasviel/Fooocus/discussions/1405).
@@ -245,23 +257,22 @@ See the guidelines [here](https://github.com/lllyasviel/Fooocus/discussions/1405
Below is the minimal requirement for running Fooocus locally. If your device capability is lower than this spec, you may not be able to use Fooocus locally. (Please let us know, in any case, if your device capability is lower but Fooocus still works.)
-| Operating System | GPU | Minimal GPU Memory | Minimal System Memory | [System Swap](troubleshoot.md) | Note |
-|-------------------|------------------------------|---------------------------|---------------------------|--------------------------------|---------------------------------------|
-| Windows/Linux | Nvidia RTX 4XXX | 4GB | 8GB | Required | fastest |
-| Windows/Linux | Nvidia RTX 3XXX | 4GB | 8GB | Required | usually faster than RTX 2XXX |
-| Windows/Linux | Nvidia RTX 2XXX | 4GB | 8GB | Required | usually faster than GTX 1XXX |
-| Windows/Linux | Nvidia GTX 1XXX | 8GB (* 6GB uncertain) | 8GB | Required | only marginally faster than CPU |
-| Windows/Linux | Nvidia GTX 9XX | 8GB | 8GB | Required | faster or slower than CPU |
-| Windows/Linux | Nvidia GTX < 9XX | Not supported | / | / | / |
-| Windows | AMD GPU | 16GB | 8GB | Required | via DirectML |
-| Linux | AMD GPU | 8GB | 8GB | Required | via ROCm |
-| Windows | * AMD GPU ROCm (on hold) | 8GB (on hold) | 8GB (on hold) | Required (on hold) | via ROCm (on hold) |
-| Mac | M1/M2 MPS | Shared | Shared | Shared | about 9x slower than Nvidia RTX 3XXX |
-| Windows/Linux/Mac | only use CPU | 0GB | 32GB | Required | about 17x slower than Nvidia RTX 3XXX |
+| Operating System | GPU | Minimal GPU Memory | Minimal System Memory | [System Swap](troubleshoot.md) | Note |
+|-------------------|------------------------------|------------------------------|---------------------------|--------------------------------|----------------------------------------------------------------------------|
+| Windows/Linux | Nvidia RTX 4XXX | 4GB | 8GB | Required | fastest |
+| Windows/Linux | Nvidia RTX 3XXX | 4GB | 8GB | Required | usually faster than RTX 2XXX |
+| Windows/Linux | Nvidia RTX 2XXX | 4GB | 8GB | Required | usually faster than GTX 1XXX |
+| Windows/Linux | Nvidia GTX 1XXX | 8GB (* 6GB uncertain) | 8GB | Required | only marginally faster than CPU |
+| Windows/Linux | Nvidia GTX 9XX | 8GB | 8GB | Required | faster or slower than CPU |
+| Windows/Linux | Nvidia GTX < 9XX | Not supported | / | / | / |
+| Windows | AMD GPU | 8GB (updated 2023 Dec 30) | 8GB | Required | via DirectML (* ROCm is on hold), about 3x slower than Nvidia RTX 3XXX |
+| Linux | AMD GPU | 8GB | 8GB | Required | via ROCm, about 1.5x slower than Nvidia RTX 3XXX |
+| Mac | M1/M2 MPS | Shared | Shared | Shared | about 9x slower than Nvidia RTX 3XXX |
+| Windows/Linux/Mac | only use CPU | 0GB | 32GB | Required | about 17x slower than Nvidia RTX 3XXX |
* AMD GPU ROCm (on hold): AMD is still working on supporting ROCm on Windows.
-* Nvidia GTX 1XXX 6GB uncertain: Some people reports 6GB success on GTX 10XX but some other people reports failure cases.
+* Nvidia GTX 1XXX 6GB uncertain: Some people report 6GB success on GTX 10XX, but some other people report failure cases.
*Note that Fooocus is only for extremely high-quality image generation. We will not support smaller models to reduce the requirements and sacrifice result quality.*
@@ -272,39 +283,46 @@ See the common problems [here](troubleshoot.md).
## Default Models
-Given different goals, the default models and configs of Fooocus is different:
+Given different goals, the default models and configs of Fooocus are different:
-| Task | Windows | Linux args | Main Model | Refiner | Config |
-| --- | --- | --- | --- | --- | --- |
-| General | run.bat | | [juggernautXL v6_RunDiffusion](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors) | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/modules/path.py) |
-| Realistic | run_realistic.bat | --preset realistic | [realistic_stock_photo](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/realisticStockPhoto_v10.safetensors) | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/realistic.json) |
-| Anime | run_anime.bat | --preset anime | [bluepencil_v50](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/bluePencilXL_v050.safetensors) | [dreamsharper_v8](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/DreamShaper_8_pruned.safetensors) (SD1.5) | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/anime.json) |
+| Task | Windows | Linux args | Main Model | Refiner | Config |
+| --- | --- | --- | --- | --- |--------------------------------------------------------------------------------|
+| General | run.bat | | juggernautXL_v8Rundiffusion | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/default.json) |
+| Realistic | run_realistic.bat | --preset realistic | realisticStockPhoto_v20 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/realistic.json) |
+| Anime | run_anime.bat | --preset anime | animaPencilXL_v100 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/anime.json) |
Note that the download is **automatic** - you do not need to do anything if the internet connection is okay. However, you can also download them manually (or move them from somewhere else) if you have your own preparation.
+## UI Access and Authentication
+In addition to running on localhost, Fooocus can also expose its UI in two ways:
+* Local UI listener: use `--listen` (specify port e.g. with `--port 8888`).
+* API access: use `--share` (registers an endpoint at `.gradio.live`).
+
+In both cases, access is unauthenticated by default. You can add basic authentication by creating a file called `auth.json` in the main directory, which contains a list of JSON objects with the keys `user` and `pass` (see example in [auth-example.json](./auth-example.json)).
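+
+A minimal `auth.json` could look like the following sketch (the username and password are placeholders for illustration, not values shipped with Fooocus):
+
+```
+[
+    {"user": "example_user", "pass": "example_password"}
+]
+```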
+
## List of "Hidden" Tricks
-Below things are already inside the software, and **users do not need to do anything about these**.
+The things below are already inside the software, and **users do not need to do anything about them**.
-1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processsing and "raw" mode, or the LeonardoAI's Prompt Magic).
-2. Native refiner swap inside one single k-sampler. The advantage is that now the refiner model can reuse the base model's momentum (or ODE's history parameters) collected from k-sampling to achieve more coherent sampling. In Automatic1111's high-res fix and ComfyUI's node system, the base model and refiner use two independent k-samplers, which means the momentum is largely wasted, and the sampling continuity is broken. Fooocus uses its own advanced k-diffusion sampling that ensures seamless, native, and continuous swap in a refiner setup. (Update Aug 13: Actually I discussed this with Automatic1111 several days ago and it seems that the “native refiner swap inside one single k-sampler” is [merged]( https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371) into the dev branch of webui. Great!)
-3. Negative ADM guidance. Because the highest resolution level of XL Base does not have cross attentions, the positive and negative signals for XL's highest resolution level cannot receive enough contrasts during the CFG sampling, causing the results look a bit plastic or overly smooth in certain cases. Fortunately, since the XL's highest resolution level is still conditioned on image aspect ratios (ADM), we can modify the adm on the positive/negative side to compensate for the lack of CFG contrast in the highest resolution level. (Update Aug 16, the IOS App [Drawing Things](https://apps.apple.com/us/app/draw-things-ai-generation/id6444050820) will support Negative ADM Guidance. Great!)
-4. We implemented a carefully tuned variation of the Section 5.1 of ["Improving Sample Quality of Diffusion Models Using Self-Attention Guidance"](https://arxiv.org/pdf/2210.00939.pdf). The weight is set to very low, but this is Fooocus's final guarantee to make sure that the XL will never yield overly smooth or plastic appearance (examples [here](https://github.com/lllyasviel/Fooocus/discussions/117#sharpness)). This can almostly eliminate all cases that XL still occasionally produce overly smooth results even with negative ADM guidance. (Update 2023 Aug 18, the Gaussian kernel of SAG is changed to an anisotropic kernel for better structure preservation and fewer artifacts.)
+1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processing and "raw" mode, or the LeonardoAI's Prompt Magic).
+2. Native refiner swap inside one single k-sampler. The advantage is that the refiner model can now reuse the base model's momentum (or ODE's history parameters) collected from k-sampling to achieve more coherent sampling. In Automatic1111's high-res fix and ComfyUI's node system, the base model and refiner use two independent k-samplers, which means the momentum is largely wasted, and the sampling continuity is broken. Fooocus uses its own advanced k-diffusion sampling that ensures seamless, native, and continuous swap in a refiner setup. (Update Aug 13: Actually, I discussed this with Automatic1111 several days ago, and it seems that the “native refiner swap inside one single k-sampler” is [merged]( https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371) into the dev branch of webui. Great!)
+3. Negative ADM guidance. Because the highest resolution level of XL Base does not have cross attentions, the positive and negative signals for XL's highest resolution level cannot receive enough contrasts during the CFG sampling, causing the results to look a bit plastic or overly smooth in certain cases. Fortunately, since the XL's highest resolution level is still conditioned on image aspect ratios (ADM), we can modify the adm on the positive/negative side to compensate for the lack of CFG contrast in the highest resolution level. (Update Aug 16, the IOS App [Draw Things](https://apps.apple.com/us/app/draw-things-ai-generation/id6444050820) will support Negative ADM Guidance. Great!)
+4. We implemented a carefully tuned variation of Section 5.1 of ["Improving Sample Quality of Diffusion Models Using Self-Attention Guidance"](https://arxiv.org/pdf/2210.00939.pdf). The weight is set to very low, but this is Fooocus's final guarantee to make sure that the XL will never yield an overly smooth or plastic appearance (examples [here](https://github.com/lllyasviel/Fooocus/discussions/117#sharpness)). This can almost eliminate all cases for which XL still occasionally produces overly smooth results, even with negative ADM guidance. (Update 2023 Aug 18, the Gaussian kernel of SAG is changed to an anisotropic kernel for better structure preservation and fewer artifacts.)
5. We modified the style templates a bit and added the "cinematic-default".
6. We tested the "sd_xl_offset_example-lora_1.0.safetensors" and it seems that when the lora weight is below 0.5, the results are always better than XL without lora.
7. The parameters of samplers are carefully tuned.
-8. Because XL uses positional encoding for generation resolution, images generated by several fixed resolutions look a bit better than that from arbitrary resolutions (because the positional encoding is not very good at handling int numbers that are unseen during training). This suggests that the resolutions in UI may be hard coded for best results.
-9. Separated prompts for two different text encoders seem unnecessary. Separated prompts for base model and refiner may work but the effects are random, and we refrain from implement this.
-10. DPM family seems well-suited for XL, since XL sometimes generates overly smooth texture but DPM family sometimes generate overly dense detail in texture. Their joint effect looks neutral and appealing to human perception.
+8. Because XL uses positional encoding for generation resolution, images generated by several fixed resolutions look a bit better than those from arbitrary resolutions (because the positional encoding is not very good at handling int numbers that are unseen during training). This suggests that the resolutions in UI may be hard coded for best results.
+9. Separated prompts for two different text encoders seem unnecessary. Separated prompts for the base model and refiner may work, but the effects are random, and we refrain from implementing this.
+10. The DPM family seems well-suited for XL since XL sometimes generates overly smooth texture, but the DPM family sometimes generates overly dense detail in texture. Their joint effect looks neutral and appealing to human perception.
11. A carefully designed system for balancing multiple styles as well as prompt expansion.
-12. Using automatic1111's method to normalize prompt emphasizing. This significantly improve results when users directly copy prompts from civitai.
-13. The joint swap system of refiner now also support img2img and upscale in a seamless way.
+12. Using automatic1111's method to normalize prompt emphasizing. This significantly improves results when users directly copy prompts from civitai.
+13. The joint swap system of the refiner now also supports img2img and upscale in a seamless way.
14. CFG Scale and TSNR correction (tuned for SDXL) when CFG is bigger than 10.
## Customization
-After the first time you run Fooocus, a config file will be generated at `Fooocus\config.txt`. This file can be edited for changing the model path or default parameters.
+After the first time you run Fooocus, a config file will be generated at `Fooocus\config.txt`. This file can be edited to change the model path or default parameters.
For example, an edited `Fooocus\config.txt` (this file will be generated after the first launch) may look like this:
@@ -340,7 +358,7 @@ Many other keys, formats, and examples are in `Fooocus\config_modification_tutor
Consider twice before you really change the config. If you find yourself breaking things, just delete `Fooocus\config.txt`. Fooocus will go back to default.
-A safter way is just to try "run_anime.bat" or "run_realistic.bat" - they should be already good enough for different tasks.
+A safer way is just to try "run_anime.bat" or "run_realistic.bat" - they should already be good enough for different tasks.
~Note that `user_path_config.txt` is deprecated and will be removed soon.~ (Edit: it is already removed.)
@@ -350,26 +368,38 @@ A safter way is just to try "run_anime.bat" or "run_realistic.bat" - they should
entry_with_update.py [-h] [--listen [IP]] [--port PORT]
[--disable-header-check [ORIGIN]]
[--web-upload-size WEB_UPLOAD_SIZE]
+ [--hf-mirror HF_MIRROR]
[--external-working-path PATH [PATH ...]]
- [--output-path OUTPUT_PATH] [--temp-path TEMP_PATH]
+ [--output-path OUTPUT_PATH]
+ [--temp-path TEMP_PATH]
[--cache-path CACHE_PATH] [--in-browser]
- [--disable-in-browser] [--gpu-device-id DEVICE_ID]
+ [--disable-in-browser]
+ [--gpu-device-id DEVICE_ID]
[--async-cuda-allocation | --disable-async-cuda-allocation]
- [--disable-attention-upcast] [--all-in-fp32 | --all-in-fp16]
+ [--disable-attention-upcast]
+ [--all-in-fp32 | --all-in-fp16]
[--unet-in-bf16 | --unet-in-fp16 | --unet-in-fp8-e4m3fn | --unet-in-fp8-e5m2]
- [--vae-in-fp16 | --vae-in-fp32 | --vae-in-bf16]
+ [--vae-in-fp16 | --vae-in-fp32 | --vae-in-bf16]
+ [--vae-in-cpu]
[--clip-in-fp8-e4m3fn | --clip-in-fp8-e5m2 | --clip-in-fp16 | --clip-in-fp32]
- [--directml [DIRECTML_DEVICE]] [--disable-ipex-hijack]
+ [--directml [DIRECTML_DEVICE]]
+ [--disable-ipex-hijack]
[--preview-option [none,auto,fast,taesd]]
[--attention-split | --attention-quad | --attention-pytorch]
[--disable-xformers]
- [--always-gpu | --always-high-vram | --always-normal-vram |
- --always-low-vram | --always-no-vram | --always-cpu]
- [--always-offload-from-vram] [--disable-server-log]
- [--debug-mode] [--is-windows-embedded-python]
- [--disable-server-info] [--share] [--preset PRESET]
- [--language LANGUAGE] [--disable-offload-from-vram]
- [--theme THEME] [--disable-image-log]
+ [--always-gpu | --always-high-vram | --always-normal-vram |
+ --always-low-vram | --always-no-vram | --always-cpu [CPU_NUM_THREADS]]
+ [--always-offload-from-vram]
+ [--pytorch-deterministic] [--disable-server-log]
+ [--debug-mode] [--is-windows-embedded-python]
+ [--disable-server-info] [--multi-user] [--share]
+ [--preset PRESET] [--disable-preset-selection]
+ [--language LANGUAGE]
+ [--disable-offload-from-vram] [--theme THEME]
+ [--disable-image-log] [--disable-analytics]
+ [--disable-metadata] [--disable-preset-download]
+ [--enable-describe-uov-image]
+ [--always-download-new-model]
```
## Advanced Features
@@ -394,7 +424,7 @@ The log is [here](update_log.md).
## Localization/Translation/I18N
-**We need your help!** Please help with translating Fooocus to international languages.
+**We need your help!** Please help translate Fooocus into international languages.
You can put json files in the `language` folder to translate the user interface.
diff --git a/requirements_docker.txt b/requirements_docker.txt
new file mode 100644
index 00000000..21883adf
--- /dev/null
+++ b/requirements_docker.txt
@@ -0,0 +1,2 @@
+torch==2.1.0
+torchvision==0.16.0
diff --git a/sdxl_styles/samples/abstract_expressionism.jpg b/sdxl_styles/samples/abstract_expressionism.jpg
new file mode 100644
index 00000000..226b8fa7
Binary files /dev/null and b/sdxl_styles/samples/abstract_expressionism.jpg differ
diff --git a/sdxl_styles/samples/academia.jpg b/sdxl_styles/samples/academia.jpg
new file mode 100644
index 00000000..26a700d0
Binary files /dev/null and b/sdxl_styles/samples/academia.jpg differ
diff --git a/sdxl_styles/samples/action_figure.jpg b/sdxl_styles/samples/action_figure.jpg
new file mode 100644
index 00000000..fcd1c092
Binary files /dev/null and b/sdxl_styles/samples/action_figure.jpg differ
diff --git a/sdxl_styles/samples/adorable_3d_character.jpg b/sdxl_styles/samples/adorable_3d_character.jpg
new file mode 100644
index 00000000..493bfb8f
Binary files /dev/null and b/sdxl_styles/samples/adorable_3d_character.jpg differ
diff --git a/sdxl_styles/samples/adorable_kawaii.jpg b/sdxl_styles/samples/adorable_kawaii.jpg
new file mode 100644
index 00000000..52bc7733
Binary files /dev/null and b/sdxl_styles/samples/adorable_kawaii.jpg differ
diff --git a/sdxl_styles/samples/ads_advertising.jpg b/sdxl_styles/samples/ads_advertising.jpg
new file mode 100644
index 00000000..40631d4c
Binary files /dev/null and b/sdxl_styles/samples/ads_advertising.jpg differ
diff --git a/sdxl_styles/samples/ads_automotive.jpg b/sdxl_styles/samples/ads_automotive.jpg
new file mode 100644
index 00000000..ceea6c4d
Binary files /dev/null and b/sdxl_styles/samples/ads_automotive.jpg differ
diff --git a/sdxl_styles/samples/ads_corporate.jpg b/sdxl_styles/samples/ads_corporate.jpg
new file mode 100644
index 00000000..1d590743
Binary files /dev/null and b/sdxl_styles/samples/ads_corporate.jpg differ
diff --git a/sdxl_styles/samples/ads_fashion_editorial.jpg b/sdxl_styles/samples/ads_fashion_editorial.jpg
new file mode 100644
index 00000000..22fdd232
Binary files /dev/null and b/sdxl_styles/samples/ads_fashion_editorial.jpg differ
diff --git a/sdxl_styles/samples/ads_food_photography.jpg b/sdxl_styles/samples/ads_food_photography.jpg
new file mode 100644
index 00000000..64c38aff
Binary files /dev/null and b/sdxl_styles/samples/ads_food_photography.jpg differ
diff --git a/sdxl_styles/samples/ads_gourmet_food_photography.jpg b/sdxl_styles/samples/ads_gourmet_food_photography.jpg
new file mode 100644
index 00000000..305770b0
Binary files /dev/null and b/sdxl_styles/samples/ads_gourmet_food_photography.jpg differ
diff --git a/sdxl_styles/samples/ads_luxury.jpg b/sdxl_styles/samples/ads_luxury.jpg
new file mode 100644
index 00000000..54248568
Binary files /dev/null and b/sdxl_styles/samples/ads_luxury.jpg differ
diff --git a/sdxl_styles/samples/ads_real_estate.jpg b/sdxl_styles/samples/ads_real_estate.jpg
new file mode 100644
index 00000000..438b9fd1
Binary files /dev/null and b/sdxl_styles/samples/ads_real_estate.jpg differ
diff --git a/sdxl_styles/samples/ads_retail.jpg b/sdxl_styles/samples/ads_retail.jpg
new file mode 100644
index 00000000..93aea1e7
Binary files /dev/null and b/sdxl_styles/samples/ads_retail.jpg differ
diff --git a/sdxl_styles/samples/art_deco.jpg b/sdxl_styles/samples/art_deco.jpg
new file mode 100644
index 00000000..7a37c722
Binary files /dev/null and b/sdxl_styles/samples/art_deco.jpg differ
diff --git a/sdxl_styles/samples/art_nouveau.jpg b/sdxl_styles/samples/art_nouveau.jpg
new file mode 100644
index 00000000..e318db83
Binary files /dev/null and b/sdxl_styles/samples/art_nouveau.jpg differ
diff --git a/sdxl_styles/samples/artstyle_abstract.jpg b/sdxl_styles/samples/artstyle_abstract.jpg
new file mode 100644
index 00000000..d1c3223b
Binary files /dev/null and b/sdxl_styles/samples/artstyle_abstract.jpg differ
diff --git a/sdxl_styles/samples/artstyle_abstract_expressionism.jpg b/sdxl_styles/samples/artstyle_abstract_expressionism.jpg
new file mode 100644
index 00000000..c2a9db02
Binary files /dev/null and b/sdxl_styles/samples/artstyle_abstract_expressionism.jpg differ
diff --git a/sdxl_styles/samples/artstyle_art_deco.jpg b/sdxl_styles/samples/artstyle_art_deco.jpg
new file mode 100644
index 00000000..d466541e
Binary files /dev/null and b/sdxl_styles/samples/artstyle_art_deco.jpg differ
diff --git a/sdxl_styles/samples/artstyle_art_nouveau.jpg b/sdxl_styles/samples/artstyle_art_nouveau.jpg
new file mode 100644
index 00000000..1f34ae95
Binary files /dev/null and b/sdxl_styles/samples/artstyle_art_nouveau.jpg differ
diff --git a/sdxl_styles/samples/artstyle_constructivist.jpg b/sdxl_styles/samples/artstyle_constructivist.jpg
new file mode 100644
index 00000000..161161a5
Binary files /dev/null and b/sdxl_styles/samples/artstyle_constructivist.jpg differ
diff --git a/sdxl_styles/samples/artstyle_cubist.jpg b/sdxl_styles/samples/artstyle_cubist.jpg
new file mode 100644
index 00000000..016cce7d
Binary files /dev/null and b/sdxl_styles/samples/artstyle_cubist.jpg differ
diff --git a/sdxl_styles/samples/artstyle_expressionist.jpg b/sdxl_styles/samples/artstyle_expressionist.jpg
new file mode 100644
index 00000000..40eec1db
Binary files /dev/null and b/sdxl_styles/samples/artstyle_expressionist.jpg differ
diff --git a/sdxl_styles/samples/artstyle_graffiti.jpg b/sdxl_styles/samples/artstyle_graffiti.jpg
new file mode 100644
index 00000000..12c6c5fa
Binary files /dev/null and b/sdxl_styles/samples/artstyle_graffiti.jpg differ
diff --git a/sdxl_styles/samples/artstyle_hyperrealism.jpg b/sdxl_styles/samples/artstyle_hyperrealism.jpg
new file mode 100644
index 00000000..8ab9e619
Binary files /dev/null and b/sdxl_styles/samples/artstyle_hyperrealism.jpg differ
diff --git a/sdxl_styles/samples/artstyle_impressionist.jpg b/sdxl_styles/samples/artstyle_impressionist.jpg
new file mode 100644
index 00000000..a932fb99
Binary files /dev/null and b/sdxl_styles/samples/artstyle_impressionist.jpg differ
diff --git a/sdxl_styles/samples/artstyle_pointillism.jpg b/sdxl_styles/samples/artstyle_pointillism.jpg
new file mode 100644
index 00000000..902ee1c7
Binary files /dev/null and b/sdxl_styles/samples/artstyle_pointillism.jpg differ
diff --git a/sdxl_styles/samples/artstyle_pop_art.jpg b/sdxl_styles/samples/artstyle_pop_art.jpg
new file mode 100644
index 00000000..1c9864b0
Binary files /dev/null and b/sdxl_styles/samples/artstyle_pop_art.jpg differ
diff --git a/sdxl_styles/samples/artstyle_psychedelic.jpg b/sdxl_styles/samples/artstyle_psychedelic.jpg
new file mode 100644
index 00000000..42b7c990
Binary files /dev/null and b/sdxl_styles/samples/artstyle_psychedelic.jpg differ
diff --git a/sdxl_styles/samples/artstyle_renaissance.jpg b/sdxl_styles/samples/artstyle_renaissance.jpg
new file mode 100644
index 00000000..322b758d
Binary files /dev/null and b/sdxl_styles/samples/artstyle_renaissance.jpg differ
diff --git a/sdxl_styles/samples/artstyle_steampunk.jpg b/sdxl_styles/samples/artstyle_steampunk.jpg
new file mode 100644
index 00000000..0ecf4ff9
Binary files /dev/null and b/sdxl_styles/samples/artstyle_steampunk.jpg differ
diff --git a/sdxl_styles/samples/artstyle_surrealist.jpg b/sdxl_styles/samples/artstyle_surrealist.jpg
new file mode 100644
index 00000000..44c48215
Binary files /dev/null and b/sdxl_styles/samples/artstyle_surrealist.jpg differ
diff --git a/sdxl_styles/samples/artstyle_typography.jpg b/sdxl_styles/samples/artstyle_typography.jpg
new file mode 100644
index 00000000..5a36ae50
Binary files /dev/null and b/sdxl_styles/samples/artstyle_typography.jpg differ
diff --git a/sdxl_styles/samples/artstyle_watercolor.jpg b/sdxl_styles/samples/artstyle_watercolor.jpg
new file mode 100644
index 00000000..f7d9cc30
Binary files /dev/null and b/sdxl_styles/samples/artstyle_watercolor.jpg differ
diff --git a/sdxl_styles/samples/astral_aura.jpg b/sdxl_styles/samples/astral_aura.jpg
new file mode 100644
index 00000000..e13f8493
Binary files /dev/null and b/sdxl_styles/samples/astral_aura.jpg differ
diff --git a/sdxl_styles/samples/avant_garde.jpg b/sdxl_styles/samples/avant_garde.jpg
new file mode 100644
index 00000000..f1e29b89
Binary files /dev/null and b/sdxl_styles/samples/avant_garde.jpg differ
diff --git a/sdxl_styles/samples/baroque.jpg b/sdxl_styles/samples/baroque.jpg
new file mode 100644
index 00000000..718aef7a
Binary files /dev/null and b/sdxl_styles/samples/baroque.jpg differ
diff --git a/sdxl_styles/samples/bauhaus_style_poster.jpg b/sdxl_styles/samples/bauhaus_style_poster.jpg
new file mode 100644
index 00000000..087fe3b5
Binary files /dev/null and b/sdxl_styles/samples/bauhaus_style_poster.jpg differ
diff --git a/sdxl_styles/samples/blueprint_schematic_drawing.jpg b/sdxl_styles/samples/blueprint_schematic_drawing.jpg
new file mode 100644
index 00000000..e3012010
Binary files /dev/null and b/sdxl_styles/samples/blueprint_schematic_drawing.jpg differ
diff --git a/sdxl_styles/samples/caricature.jpg b/sdxl_styles/samples/caricature.jpg
new file mode 100644
index 00000000..2ff3ee35
Binary files /dev/null and b/sdxl_styles/samples/caricature.jpg differ
diff --git a/sdxl_styles/samples/cel_shaded_art.jpg b/sdxl_styles/samples/cel_shaded_art.jpg
new file mode 100644
index 00000000..8a69ac22
Binary files /dev/null and b/sdxl_styles/samples/cel_shaded_art.jpg differ
diff --git a/sdxl_styles/samples/character_design_sheet.jpg b/sdxl_styles/samples/character_design_sheet.jpg
new file mode 100644
index 00000000..6f8fb665
Binary files /dev/null and b/sdxl_styles/samples/character_design_sheet.jpg differ
diff --git a/sdxl_styles/samples/cinematic_diva.jpg b/sdxl_styles/samples/cinematic_diva.jpg
new file mode 100644
index 00000000..74483019
Binary files /dev/null and b/sdxl_styles/samples/cinematic_diva.jpg differ
diff --git a/sdxl_styles/samples/classicism_art.jpg b/sdxl_styles/samples/classicism_art.jpg
new file mode 100644
index 00000000..bf8e7033
Binary files /dev/null and b/sdxl_styles/samples/classicism_art.jpg differ
diff --git a/sdxl_styles/samples/color_field_painting.jpg b/sdxl_styles/samples/color_field_painting.jpg
new file mode 100644
index 00000000..92b4e098
Binary files /dev/null and b/sdxl_styles/samples/color_field_painting.jpg differ
diff --git a/sdxl_styles/samples/colored_pencil_art.jpg b/sdxl_styles/samples/colored_pencil_art.jpg
new file mode 100644
index 00000000..1a7c590e
Binary files /dev/null and b/sdxl_styles/samples/colored_pencil_art.jpg differ
diff --git a/sdxl_styles/samples/conceptual_art.jpg b/sdxl_styles/samples/conceptual_art.jpg
new file mode 100644
index 00000000..06882a20
Binary files /dev/null and b/sdxl_styles/samples/conceptual_art.jpg differ
diff --git a/sdxl_styles/samples/constructivism.jpg b/sdxl_styles/samples/constructivism.jpg
new file mode 100644
index 00000000..d49c6828
Binary files /dev/null and b/sdxl_styles/samples/constructivism.jpg differ
diff --git a/sdxl_styles/samples/cubism.jpg b/sdxl_styles/samples/cubism.jpg
new file mode 100644
index 00000000..2ca0f286
Binary files /dev/null and b/sdxl_styles/samples/cubism.jpg differ
diff --git a/sdxl_styles/samples/dadaism.jpg b/sdxl_styles/samples/dadaism.jpg
new file mode 100644
index 00000000..5573cb07
Binary files /dev/null and b/sdxl_styles/samples/dadaism.jpg differ
diff --git a/sdxl_styles/samples/dark_fantasy.jpg b/sdxl_styles/samples/dark_fantasy.jpg
new file mode 100644
index 00000000..7d60f6dd
Binary files /dev/null and b/sdxl_styles/samples/dark_fantasy.jpg differ
diff --git a/sdxl_styles/samples/dark_moody_atmosphere.jpg b/sdxl_styles/samples/dark_moody_atmosphere.jpg
new file mode 100644
index 00000000..38921c62
Binary files /dev/null and b/sdxl_styles/samples/dark_moody_atmosphere.jpg differ
diff --git a/sdxl_styles/samples/dmt_art_style.jpg b/sdxl_styles/samples/dmt_art_style.jpg
new file mode 100644
index 00000000..a7ffae0b
Binary files /dev/null and b/sdxl_styles/samples/dmt_art_style.jpg differ
diff --git a/sdxl_styles/samples/doodle_art.jpg b/sdxl_styles/samples/doodle_art.jpg
new file mode 100644
index 00000000..8944eb0b
Binary files /dev/null and b/sdxl_styles/samples/doodle_art.jpg differ
diff --git a/sdxl_styles/samples/double_exposure.jpg b/sdxl_styles/samples/double_exposure.jpg
new file mode 100644
index 00000000..15b6fbb4
Binary files /dev/null and b/sdxl_styles/samples/double_exposure.jpg differ
diff --git a/sdxl_styles/samples/dripping_paint_splatter_art.jpg b/sdxl_styles/samples/dripping_paint_splatter_art.jpg
new file mode 100644
index 00000000..697c4438
Binary files /dev/null and b/sdxl_styles/samples/dripping_paint_splatter_art.jpg differ
diff --git a/sdxl_styles/samples/expressionism.jpg b/sdxl_styles/samples/expressionism.jpg
new file mode 100644
index 00000000..df5e7770
Binary files /dev/null and b/sdxl_styles/samples/expressionism.jpg differ
diff --git a/sdxl_styles/samples/faded_polaroid_photo.jpg b/sdxl_styles/samples/faded_polaroid_photo.jpg
new file mode 100644
index 00000000..51b2a135
Binary files /dev/null and b/sdxl_styles/samples/faded_polaroid_photo.jpg differ
diff --git a/sdxl_styles/samples/fauvism.jpg b/sdxl_styles/samples/fauvism.jpg
new file mode 100644
index 00000000..5afaaf5e
Binary files /dev/null and b/sdxl_styles/samples/fauvism.jpg differ
diff --git a/sdxl_styles/samples/flat_2d_art.jpg b/sdxl_styles/samples/flat_2d_art.jpg
new file mode 100644
index 00000000..9fba930e
Binary files /dev/null and b/sdxl_styles/samples/flat_2d_art.jpg differ
diff --git a/sdxl_styles/samples/fooocus_cinematic.jpg b/sdxl_styles/samples/fooocus_cinematic.jpg
new file mode 100644
index 00000000..1521f740
Binary files /dev/null and b/sdxl_styles/samples/fooocus_cinematic.jpg differ
diff --git a/sdxl_styles/samples/fooocus_enhance.jpg b/sdxl_styles/samples/fooocus_enhance.jpg
new file mode 100644
index 00000000..20e5ba2f
Binary files /dev/null and b/sdxl_styles/samples/fooocus_enhance.jpg differ
diff --git a/sdxl_styles/samples/fooocus_masterpiece.jpg b/sdxl_styles/samples/fooocus_masterpiece.jpg
new file mode 100644
index 00000000..e57b1fd0
Binary files /dev/null and b/sdxl_styles/samples/fooocus_masterpiece.jpg differ
diff --git a/sdxl_styles/samples/fooocus_negative.jpg b/sdxl_styles/samples/fooocus_negative.jpg
new file mode 100644
index 00000000..b025c43f
Binary files /dev/null and b/sdxl_styles/samples/fooocus_negative.jpg differ
diff --git a/sdxl_styles/samples/fooocus_photograph.jpg b/sdxl_styles/samples/fooocus_photograph.jpg
new file mode 100644
index 00000000..3f28b857
Binary files /dev/null and b/sdxl_styles/samples/fooocus_photograph.jpg differ
diff --git a/sdxl_styles/samples/fooocus_semi_realistic.jpg b/sdxl_styles/samples/fooocus_semi_realistic.jpg
new file mode 100644
index 00000000..b07555a7
Binary files /dev/null and b/sdxl_styles/samples/fooocus_semi_realistic.jpg differ
diff --git a/sdxl_styles/samples/fooocus_sharp.jpg b/sdxl_styles/samples/fooocus_sharp.jpg
new file mode 100644
index 00000000..12f7145c
Binary files /dev/null and b/sdxl_styles/samples/fooocus_sharp.jpg differ
diff --git a/sdxl_styles/samples/fooocus_v2.jpg b/sdxl_styles/samples/fooocus_v2.jpg
new file mode 100644
index 00000000..6e94d5b0
Binary files /dev/null and b/sdxl_styles/samples/fooocus_v2.jpg differ
diff --git a/sdxl_styles/samples/fortnite_art_style.jpg b/sdxl_styles/samples/fortnite_art_style.jpg
new file mode 100644
index 00000000..e90a4f64
Binary files /dev/null and b/sdxl_styles/samples/fortnite_art_style.jpg differ
diff --git a/sdxl_styles/samples/futurism.jpg b/sdxl_styles/samples/futurism.jpg
new file mode 100644
index 00000000..85267a62
Binary files /dev/null and b/sdxl_styles/samples/futurism.jpg differ
diff --git a/sdxl_styles/samples/futuristic_biomechanical.jpg b/sdxl_styles/samples/futuristic_biomechanical.jpg
new file mode 100644
index 00000000..f8c5c082
Binary files /dev/null and b/sdxl_styles/samples/futuristic_biomechanical.jpg differ
diff --git a/sdxl_styles/samples/futuristic_biomechanical_cyberpunk.jpg b/sdxl_styles/samples/futuristic_biomechanical_cyberpunk.jpg
new file mode 100644
index 00000000..e29a9b5b
Binary files /dev/null and b/sdxl_styles/samples/futuristic_biomechanical_cyberpunk.jpg differ
diff --git a/sdxl_styles/samples/futuristic_cybernetic.jpg b/sdxl_styles/samples/futuristic_cybernetic.jpg
new file mode 100644
index 00000000..f8042285
Binary files /dev/null and b/sdxl_styles/samples/futuristic_cybernetic.jpg differ
diff --git a/sdxl_styles/samples/futuristic_cybernetic_robot.jpg b/sdxl_styles/samples/futuristic_cybernetic_robot.jpg
new file mode 100644
index 00000000..6f988b78
Binary files /dev/null and b/sdxl_styles/samples/futuristic_cybernetic_robot.jpg differ
diff --git a/sdxl_styles/samples/futuristic_cyberpunk_cityscape.jpg b/sdxl_styles/samples/futuristic_cyberpunk_cityscape.jpg
new file mode 100644
index 00000000..c05280b7
Binary files /dev/null and b/sdxl_styles/samples/futuristic_cyberpunk_cityscape.jpg differ
diff --git a/sdxl_styles/samples/futuristic_futuristic.jpg b/sdxl_styles/samples/futuristic_futuristic.jpg
new file mode 100644
index 00000000..da8d4ccf
Binary files /dev/null and b/sdxl_styles/samples/futuristic_futuristic.jpg differ
diff --git a/sdxl_styles/samples/futuristic_retro_cyberpunk.jpg b/sdxl_styles/samples/futuristic_retro_cyberpunk.jpg
new file mode 100644
index 00000000..7686243e
Binary files /dev/null and b/sdxl_styles/samples/futuristic_retro_cyberpunk.jpg differ
diff --git a/sdxl_styles/samples/futuristic_retro_futurism.jpg b/sdxl_styles/samples/futuristic_retro_futurism.jpg
new file mode 100644
index 00000000..f0fa6e94
Binary files /dev/null and b/sdxl_styles/samples/futuristic_retro_futurism.jpg differ
diff --git a/sdxl_styles/samples/futuristic_sci_fi.jpg b/sdxl_styles/samples/futuristic_sci_fi.jpg
new file mode 100644
index 00000000..571c6141
Binary files /dev/null and b/sdxl_styles/samples/futuristic_sci_fi.jpg differ
diff --git a/sdxl_styles/samples/futuristic_vaporwave.jpg b/sdxl_styles/samples/futuristic_vaporwave.jpg
new file mode 100644
index 00000000..f8a77fe6
Binary files /dev/null and b/sdxl_styles/samples/futuristic_vaporwave.jpg differ
diff --git a/sdxl_styles/samples/game_bubble_bobble.jpg b/sdxl_styles/samples/game_bubble_bobble.jpg
new file mode 100644
index 00000000..1111de9e
Binary files /dev/null and b/sdxl_styles/samples/game_bubble_bobble.jpg differ
diff --git a/sdxl_styles/samples/game_cyberpunk_game.jpg b/sdxl_styles/samples/game_cyberpunk_game.jpg
new file mode 100644
index 00000000..e87451de
Binary files /dev/null and b/sdxl_styles/samples/game_cyberpunk_game.jpg differ
diff --git a/sdxl_styles/samples/game_fighting_game.jpg b/sdxl_styles/samples/game_fighting_game.jpg
new file mode 100644
index 00000000..b12c07d3
Binary files /dev/null and b/sdxl_styles/samples/game_fighting_game.jpg differ
diff --git a/sdxl_styles/samples/game_gta.jpg b/sdxl_styles/samples/game_gta.jpg
new file mode 100644
index 00000000..6458c6d8
Binary files /dev/null and b/sdxl_styles/samples/game_gta.jpg differ
diff --git a/sdxl_styles/samples/game_mario.jpg b/sdxl_styles/samples/game_mario.jpg
new file mode 100644
index 00000000..17cff4c4
Binary files /dev/null and b/sdxl_styles/samples/game_mario.jpg differ
diff --git a/sdxl_styles/samples/game_minecraft.jpg b/sdxl_styles/samples/game_minecraft.jpg
new file mode 100644
index 00000000..4e20641f
Binary files /dev/null and b/sdxl_styles/samples/game_minecraft.jpg differ
diff --git a/sdxl_styles/samples/game_pokemon.jpg b/sdxl_styles/samples/game_pokemon.jpg
new file mode 100644
index 00000000..20071f80
Binary files /dev/null and b/sdxl_styles/samples/game_pokemon.jpg differ
diff --git a/sdxl_styles/samples/game_retro_arcade.jpg b/sdxl_styles/samples/game_retro_arcade.jpg
new file mode 100644
index 00000000..c3836dc8
Binary files /dev/null and b/sdxl_styles/samples/game_retro_arcade.jpg differ
diff --git a/sdxl_styles/samples/game_retro_game.jpg b/sdxl_styles/samples/game_retro_game.jpg
new file mode 100644
index 00000000..ff81488a
Binary files /dev/null and b/sdxl_styles/samples/game_retro_game.jpg differ
diff --git a/sdxl_styles/samples/game_rpg_fantasy_game.jpg b/sdxl_styles/samples/game_rpg_fantasy_game.jpg
new file mode 100644
index 00000000..c32a2cc7
Binary files /dev/null and b/sdxl_styles/samples/game_rpg_fantasy_game.jpg differ
diff --git a/sdxl_styles/samples/game_strategy_game.jpg b/sdxl_styles/samples/game_strategy_game.jpg
new file mode 100644
index 00000000..a55eff5c
Binary files /dev/null and b/sdxl_styles/samples/game_strategy_game.jpg differ
diff --git a/sdxl_styles/samples/game_streetfighter.jpg b/sdxl_styles/samples/game_streetfighter.jpg
new file mode 100644
index 00000000..f389e0d3
Binary files /dev/null and b/sdxl_styles/samples/game_streetfighter.jpg differ
diff --git a/sdxl_styles/samples/game_zelda.jpg b/sdxl_styles/samples/game_zelda.jpg
new file mode 100644
index 00000000..f9b875d7
Binary files /dev/null and b/sdxl_styles/samples/game_zelda.jpg differ
diff --git a/sdxl_styles/samples/glitchcore.jpg b/sdxl_styles/samples/glitchcore.jpg
new file mode 100644
index 00000000..3122cda8
Binary files /dev/null and b/sdxl_styles/samples/glitchcore.jpg differ
diff --git a/sdxl_styles/samples/glo_fi.jpg b/sdxl_styles/samples/glo_fi.jpg
new file mode 100644
index 00000000..816b2244
Binary files /dev/null and b/sdxl_styles/samples/glo_fi.jpg differ
diff --git a/sdxl_styles/samples/googie_art_style.jpg b/sdxl_styles/samples/googie_art_style.jpg
new file mode 100644
index 00000000..e9a08c20
Binary files /dev/null and b/sdxl_styles/samples/googie_art_style.jpg differ
diff --git a/sdxl_styles/samples/graffiti_art.jpg b/sdxl_styles/samples/graffiti_art.jpg
new file mode 100644
index 00000000..87aebdda
Binary files /dev/null and b/sdxl_styles/samples/graffiti_art.jpg differ
diff --git a/sdxl_styles/samples/harlem_renaissance_art.jpg b/sdxl_styles/samples/harlem_renaissance_art.jpg
new file mode 100644
index 00000000..bd335494
Binary files /dev/null and b/sdxl_styles/samples/harlem_renaissance_art.jpg differ
diff --git a/sdxl_styles/samples/high_fashion.jpg b/sdxl_styles/samples/high_fashion.jpg
new file mode 100644
index 00000000..4dfc404d
Binary files /dev/null and b/sdxl_styles/samples/high_fashion.jpg differ
diff --git a/sdxl_styles/samples/idyllic.jpg b/sdxl_styles/samples/idyllic.jpg
new file mode 100644
index 00000000..660e9cac
Binary files /dev/null and b/sdxl_styles/samples/idyllic.jpg differ
diff --git a/sdxl_styles/samples/impressionism.jpg b/sdxl_styles/samples/impressionism.jpg
new file mode 100644
index 00000000..52522233
Binary files /dev/null and b/sdxl_styles/samples/impressionism.jpg differ
diff --git a/sdxl_styles/samples/infographic_drawing.jpg b/sdxl_styles/samples/infographic_drawing.jpg
new file mode 100644
index 00000000..41fdf2e9
Binary files /dev/null and b/sdxl_styles/samples/infographic_drawing.jpg differ
diff --git a/sdxl_styles/samples/ink_dripping_drawing.jpg b/sdxl_styles/samples/ink_dripping_drawing.jpg
new file mode 100644
index 00000000..6b88b62d
Binary files /dev/null and b/sdxl_styles/samples/ink_dripping_drawing.jpg differ
diff --git a/sdxl_styles/samples/japanese_ink_drawing.jpg b/sdxl_styles/samples/japanese_ink_drawing.jpg
new file mode 100644
index 00000000..ec90c8d0
Binary files /dev/null and b/sdxl_styles/samples/japanese_ink_drawing.jpg differ
diff --git a/sdxl_styles/samples/knolling_photography.jpg b/sdxl_styles/samples/knolling_photography.jpg
new file mode 100644
index 00000000..2f1b7f1e
Binary files /dev/null and b/sdxl_styles/samples/knolling_photography.jpg differ
diff --git a/sdxl_styles/samples/light_cheery_atmosphere.jpg b/sdxl_styles/samples/light_cheery_atmosphere.jpg
new file mode 100644
index 00000000..e769c892
Binary files /dev/null and b/sdxl_styles/samples/light_cheery_atmosphere.jpg differ
diff --git a/sdxl_styles/samples/logo_design.jpg b/sdxl_styles/samples/logo_design.jpg
new file mode 100644
index 00000000..8d71ea76
Binary files /dev/null and b/sdxl_styles/samples/logo_design.jpg differ
diff --git a/sdxl_styles/samples/luxurious_elegance.jpg b/sdxl_styles/samples/luxurious_elegance.jpg
new file mode 100644
index 00000000..515a01d8
Binary files /dev/null and b/sdxl_styles/samples/luxurious_elegance.jpg differ
diff --git a/sdxl_styles/samples/macro_photography.jpg b/sdxl_styles/samples/macro_photography.jpg
new file mode 100644
index 00000000..c775121a
Binary files /dev/null and b/sdxl_styles/samples/macro_photography.jpg differ
diff --git a/sdxl_styles/samples/mandola_art.jpg b/sdxl_styles/samples/mandola_art.jpg
new file mode 100644
index 00000000..1d9619b5
Binary files /dev/null and b/sdxl_styles/samples/mandola_art.jpg differ
diff --git a/sdxl_styles/samples/marker_drawing.jpg b/sdxl_styles/samples/marker_drawing.jpg
new file mode 100644
index 00000000..37f37fe1
Binary files /dev/null and b/sdxl_styles/samples/marker_drawing.jpg differ
diff --git a/sdxl_styles/samples/medievalism.jpg b/sdxl_styles/samples/medievalism.jpg
new file mode 100644
index 00000000..f26e28cf
Binary files /dev/null and b/sdxl_styles/samples/medievalism.jpg differ
diff --git a/sdxl_styles/samples/minimalism.jpg b/sdxl_styles/samples/minimalism.jpg
new file mode 100644
index 00000000..5c4f1848
Binary files /dev/null and b/sdxl_styles/samples/minimalism.jpg differ
diff --git a/sdxl_styles/samples/misc_architectural.jpg b/sdxl_styles/samples/misc_architectural.jpg
new file mode 100644
index 00000000..8db96999
Binary files /dev/null and b/sdxl_styles/samples/misc_architectural.jpg differ
diff --git a/sdxl_styles/samples/misc_disco.jpg b/sdxl_styles/samples/misc_disco.jpg
new file mode 100644
index 00000000..665dc347
Binary files /dev/null and b/sdxl_styles/samples/misc_disco.jpg differ
diff --git a/sdxl_styles/samples/misc_dreamscape.jpg b/sdxl_styles/samples/misc_dreamscape.jpg
new file mode 100644
index 00000000..cb2c6021
Binary files /dev/null and b/sdxl_styles/samples/misc_dreamscape.jpg differ
diff --git a/sdxl_styles/samples/misc_dystopian.jpg b/sdxl_styles/samples/misc_dystopian.jpg
new file mode 100644
index 00000000..2a8e21ca
Binary files /dev/null and b/sdxl_styles/samples/misc_dystopian.jpg differ
diff --git a/sdxl_styles/samples/misc_fairy_tale.jpg b/sdxl_styles/samples/misc_fairy_tale.jpg
new file mode 100644
index 00000000..effaa2ea
Binary files /dev/null and b/sdxl_styles/samples/misc_fairy_tale.jpg differ
diff --git a/sdxl_styles/samples/misc_gothic.jpg b/sdxl_styles/samples/misc_gothic.jpg
new file mode 100644
index 00000000..e47b38dc
Binary files /dev/null and b/sdxl_styles/samples/misc_gothic.jpg differ
diff --git a/sdxl_styles/samples/misc_grunge.jpg b/sdxl_styles/samples/misc_grunge.jpg
new file mode 100644
index 00000000..db85f75d
Binary files /dev/null and b/sdxl_styles/samples/misc_grunge.jpg differ
diff --git a/sdxl_styles/samples/misc_horror.jpg b/sdxl_styles/samples/misc_horror.jpg
new file mode 100644
index 00000000..f188b854
Binary files /dev/null and b/sdxl_styles/samples/misc_horror.jpg differ
diff --git a/sdxl_styles/samples/misc_kawaii.jpg b/sdxl_styles/samples/misc_kawaii.jpg
new file mode 100644
index 00000000..6897ed0a
Binary files /dev/null and b/sdxl_styles/samples/misc_kawaii.jpg differ
diff --git a/sdxl_styles/samples/misc_lovecraftian.jpg b/sdxl_styles/samples/misc_lovecraftian.jpg
new file mode 100644
index 00000000..835848e2
Binary files /dev/null and b/sdxl_styles/samples/misc_lovecraftian.jpg differ
diff --git a/sdxl_styles/samples/misc_macabre.jpg b/sdxl_styles/samples/misc_macabre.jpg
new file mode 100644
index 00000000..eeeb14c5
Binary files /dev/null and b/sdxl_styles/samples/misc_macabre.jpg differ
diff --git a/sdxl_styles/samples/misc_manga.jpg b/sdxl_styles/samples/misc_manga.jpg
new file mode 100644
index 00000000..aaecd109
Binary files /dev/null and b/sdxl_styles/samples/misc_manga.jpg differ
diff --git a/sdxl_styles/samples/misc_metropolis.jpg b/sdxl_styles/samples/misc_metropolis.jpg
new file mode 100644
index 00000000..51390016
Binary files /dev/null and b/sdxl_styles/samples/misc_metropolis.jpg differ
diff --git a/sdxl_styles/samples/misc_minimalist.jpg b/sdxl_styles/samples/misc_minimalist.jpg
new file mode 100644
index 00000000..45c70f62
Binary files /dev/null and b/sdxl_styles/samples/misc_minimalist.jpg differ
diff --git a/sdxl_styles/samples/misc_monochrome.jpg b/sdxl_styles/samples/misc_monochrome.jpg
new file mode 100644
index 00000000..9230e2e1
Binary files /dev/null and b/sdxl_styles/samples/misc_monochrome.jpg differ
diff --git a/sdxl_styles/samples/misc_nautical.jpg b/sdxl_styles/samples/misc_nautical.jpg
new file mode 100644
index 00000000..76ce3ac6
Binary files /dev/null and b/sdxl_styles/samples/misc_nautical.jpg differ
diff --git a/sdxl_styles/samples/misc_space.jpg b/sdxl_styles/samples/misc_space.jpg
new file mode 100644
index 00000000..b57c161f
Binary files /dev/null and b/sdxl_styles/samples/misc_space.jpg differ
diff --git a/sdxl_styles/samples/misc_stained_glass.jpg b/sdxl_styles/samples/misc_stained_glass.jpg
new file mode 100644
index 00000000..c2edf80c
Binary files /dev/null and b/sdxl_styles/samples/misc_stained_glass.jpg differ
diff --git a/sdxl_styles/samples/misc_techwear_fashion.jpg b/sdxl_styles/samples/misc_techwear_fashion.jpg
new file mode 100644
index 00000000..abdef86a
Binary files /dev/null and b/sdxl_styles/samples/misc_techwear_fashion.jpg differ
diff --git a/sdxl_styles/samples/misc_tribal.jpg b/sdxl_styles/samples/misc_tribal.jpg
new file mode 100644
index 00000000..436af144
Binary files /dev/null and b/sdxl_styles/samples/misc_tribal.jpg differ
diff --git a/sdxl_styles/samples/misc_zentangle.jpg b/sdxl_styles/samples/misc_zentangle.jpg
new file mode 100644
index 00000000..0aea7d40
Binary files /dev/null and b/sdxl_styles/samples/misc_zentangle.jpg differ
diff --git a/sdxl_styles/samples/mk_adnate_style.jpg b/sdxl_styles/samples/mk_adnate_style.jpg
new file mode 100644
index 00000000..642ea85b
Binary files /dev/null and b/sdxl_styles/samples/mk_adnate_style.jpg differ
diff --git a/sdxl_styles/samples/mk_afrofuturism.jpg b/sdxl_styles/samples/mk_afrofuturism.jpg
new file mode 100644
index 00000000..279c1db1
Binary files /dev/null and b/sdxl_styles/samples/mk_afrofuturism.jpg differ
diff --git a/sdxl_styles/samples/mk_albumen_print.jpg b/sdxl_styles/samples/mk_albumen_print.jpg
new file mode 100644
index 00000000..9bc89526
Binary files /dev/null and b/sdxl_styles/samples/mk_albumen_print.jpg differ
diff --git a/sdxl_styles/samples/mk_alcohol_ink_art.jpg b/sdxl_styles/samples/mk_alcohol_ink_art.jpg
new file mode 100644
index 00000000..daac2c95
Binary files /dev/null and b/sdxl_styles/samples/mk_alcohol_ink_art.jpg differ
diff --git a/sdxl_styles/samples/mk_andy_warhol.jpg b/sdxl_styles/samples/mk_andy_warhol.jpg
new file mode 100644
index 00000000..bfdd38e4
Binary files /dev/null and b/sdxl_styles/samples/mk_andy_warhol.jpg differ
diff --git a/sdxl_styles/samples/mk_anthotype_print.jpg b/sdxl_styles/samples/mk_anthotype_print.jpg
new file mode 100644
index 00000000..8de4085b
Binary files /dev/null and b/sdxl_styles/samples/mk_anthotype_print.jpg differ
diff --git a/sdxl_styles/samples/mk_aquatint_print.jpg b/sdxl_styles/samples/mk_aquatint_print.jpg
new file mode 100644
index 00000000..6f0f0e15
Binary files /dev/null and b/sdxl_styles/samples/mk_aquatint_print.jpg differ
diff --git a/sdxl_styles/samples/mk_atompunk.jpg b/sdxl_styles/samples/mk_atompunk.jpg
new file mode 100644
index 00000000..7da970ad
Binary files /dev/null and b/sdxl_styles/samples/mk_atompunk.jpg differ
diff --git a/sdxl_styles/samples/mk_basquiat.jpg b/sdxl_styles/samples/mk_basquiat.jpg
new file mode 100644
index 00000000..20a67367
Binary files /dev/null and b/sdxl_styles/samples/mk_basquiat.jpg differ
diff --git a/sdxl_styles/samples/mk_bauhaus_style.jpg b/sdxl_styles/samples/mk_bauhaus_style.jpg
new file mode 100644
index 00000000..be1b7820
Binary files /dev/null and b/sdxl_styles/samples/mk_bauhaus_style.jpg differ
diff --git a/sdxl_styles/samples/mk_blacklight_paint.jpg b/sdxl_styles/samples/mk_blacklight_paint.jpg
new file mode 100644
index 00000000..f185b904
Binary files /dev/null and b/sdxl_styles/samples/mk_blacklight_paint.jpg differ
diff --git a/sdxl_styles/samples/mk_bromoil_print.jpg b/sdxl_styles/samples/mk_bromoil_print.jpg
new file mode 100644
index 00000000..14445691
Binary files /dev/null and b/sdxl_styles/samples/mk_bromoil_print.jpg differ
diff --git a/sdxl_styles/samples/mk_calotype_print.jpg b/sdxl_styles/samples/mk_calotype_print.jpg
new file mode 100644
index 00000000..13a5f310
Binary files /dev/null and b/sdxl_styles/samples/mk_calotype_print.jpg differ
diff --git a/sdxl_styles/samples/mk_carnival_glass.jpg b/sdxl_styles/samples/mk_carnival_glass.jpg
new file mode 100644
index 00000000..62428739
Binary files /dev/null and b/sdxl_styles/samples/mk_carnival_glass.jpg differ
diff --git a/sdxl_styles/samples/mk_chicano_art.jpg b/sdxl_styles/samples/mk_chicano_art.jpg
new file mode 100644
index 00000000..66d29311
Binary files /dev/null and b/sdxl_styles/samples/mk_chicano_art.jpg differ
diff --git a/sdxl_styles/samples/mk_chromolithography.jpg b/sdxl_styles/samples/mk_chromolithography.jpg
new file mode 100644
index 00000000..27163c79
Binary files /dev/null and b/sdxl_styles/samples/mk_chromolithography.jpg differ
diff --git a/sdxl_styles/samples/mk_cibulak_porcelain.jpg b/sdxl_styles/samples/mk_cibulak_porcelain.jpg
new file mode 100644
index 00000000..30ae6205
Binary files /dev/null and b/sdxl_styles/samples/mk_cibulak_porcelain.jpg differ
diff --git a/sdxl_styles/samples/mk_color_sketchnote.jpg b/sdxl_styles/samples/mk_color_sketchnote.jpg
new file mode 100644
index 00000000..e8d2e4d9
Binary files /dev/null and b/sdxl_styles/samples/mk_color_sketchnote.jpg differ
diff --git a/sdxl_styles/samples/mk_coloring_book.jpg b/sdxl_styles/samples/mk_coloring_book.jpg
new file mode 100644
index 00000000..377f7c74
Binary files /dev/null and b/sdxl_styles/samples/mk_coloring_book.jpg differ
diff --git a/sdxl_styles/samples/mk_constructivism.jpg b/sdxl_styles/samples/mk_constructivism.jpg
new file mode 100644
index 00000000..374a62dc
Binary files /dev/null and b/sdxl_styles/samples/mk_constructivism.jpg differ
diff --git a/sdxl_styles/samples/mk_cross_processing_print.jpg b/sdxl_styles/samples/mk_cross_processing_print.jpg
new file mode 100644
index 00000000..234d809c
Binary files /dev/null and b/sdxl_styles/samples/mk_cross_processing_print.jpg differ
diff --git a/sdxl_styles/samples/mk_cross_stitching.jpg b/sdxl_styles/samples/mk_cross_stitching.jpg
new file mode 100644
index 00000000..07c7e352
Binary files /dev/null and b/sdxl_styles/samples/mk_cross_stitching.jpg differ
diff --git a/sdxl_styles/samples/mk_cyanotype_print.jpg b/sdxl_styles/samples/mk_cyanotype_print.jpg
new file mode 100644
index 00000000..9327227b
Binary files /dev/null and b/sdxl_styles/samples/mk_cyanotype_print.jpg differ
diff --git a/sdxl_styles/samples/mk_dayak_art.jpg b/sdxl_styles/samples/mk_dayak_art.jpg
new file mode 100644
index 00000000..3d27b0f0
Binary files /dev/null and b/sdxl_styles/samples/mk_dayak_art.jpg differ
diff --git a/sdxl_styles/samples/mk_de_stijl.jpg b/sdxl_styles/samples/mk_de_stijl.jpg
new file mode 100644
index 00000000..1260553a
Binary files /dev/null and b/sdxl_styles/samples/mk_de_stijl.jpg differ
diff --git a/sdxl_styles/samples/mk_dufaycolor_photograph.jpg b/sdxl_styles/samples/mk_dufaycolor_photograph.jpg
new file mode 100644
index 00000000..e18942b2
Binary files /dev/null and b/sdxl_styles/samples/mk_dufaycolor_photograph.jpg differ
diff --git a/sdxl_styles/samples/mk_embroidery.jpg b/sdxl_styles/samples/mk_embroidery.jpg
new file mode 100644
index 00000000..63f4e7c7
Binary files /dev/null and b/sdxl_styles/samples/mk_embroidery.jpg differ
diff --git a/sdxl_styles/samples/mk_encaustic_paint.jpg b/sdxl_styles/samples/mk_encaustic_paint.jpg
new file mode 100644
index 00000000..5c9844cf
Binary files /dev/null and b/sdxl_styles/samples/mk_encaustic_paint.jpg differ
diff --git a/sdxl_styles/samples/mk_fayum_portrait.jpg b/sdxl_styles/samples/mk_fayum_portrait.jpg
new file mode 100644
index 00000000..26427929
Binary files /dev/null and b/sdxl_styles/samples/mk_fayum_portrait.jpg differ
diff --git a/sdxl_styles/samples/mk_gond_painting.jpg b/sdxl_styles/samples/mk_gond_painting.jpg
new file mode 100644
index 00000000..3947f6cd
Binary files /dev/null and b/sdxl_styles/samples/mk_gond_painting.jpg differ
diff --git a/sdxl_styles/samples/mk_gyotaku.jpg b/sdxl_styles/samples/mk_gyotaku.jpg
new file mode 100644
index 00000000..650b4553
Binary files /dev/null and b/sdxl_styles/samples/mk_gyotaku.jpg differ
diff --git a/sdxl_styles/samples/mk_halftone_print.jpg b/sdxl_styles/samples/mk_halftone_print.jpg
new file mode 100644
index 00000000..37d977db
Binary files /dev/null and b/sdxl_styles/samples/mk_halftone_print.jpg differ
diff --git a/sdxl_styles/samples/mk_herbarium.jpg b/sdxl_styles/samples/mk_herbarium.jpg
new file mode 100644
index 00000000..01209be2
Binary files /dev/null and b/sdxl_styles/samples/mk_herbarium.jpg differ
diff --git a/sdxl_styles/samples/mk_illuminated_manuscript.jpg b/sdxl_styles/samples/mk_illuminated_manuscript.jpg
new file mode 100644
index 00000000..2b3765ac
Binary files /dev/null and b/sdxl_styles/samples/mk_illuminated_manuscript.jpg differ
diff --git a/sdxl_styles/samples/mk_inuit_carving.jpg b/sdxl_styles/samples/mk_inuit_carving.jpg
new file mode 100644
index 00000000..2cadd30a
Binary files /dev/null and b/sdxl_styles/samples/mk_inuit_carving.jpg differ
diff --git a/sdxl_styles/samples/mk_kalighat_painting.jpg b/sdxl_styles/samples/mk_kalighat_painting.jpg
new file mode 100644
index 00000000..7049b499
Binary files /dev/null and b/sdxl_styles/samples/mk_kalighat_painting.jpg differ
diff --git a/sdxl_styles/samples/mk_lite_brite_art.jpg b/sdxl_styles/samples/mk_lite_brite_art.jpg
new file mode 100644
index 00000000..0d348dfb
Binary files /dev/null and b/sdxl_styles/samples/mk_lite_brite_art.jpg differ
diff --git a/sdxl_styles/samples/mk_luminogram.jpg b/sdxl_styles/samples/mk_luminogram.jpg
new file mode 100644
index 00000000..011ce9b9
Binary files /dev/null and b/sdxl_styles/samples/mk_luminogram.jpg differ
diff --git a/sdxl_styles/samples/mk_madhubani_painting.jpg b/sdxl_styles/samples/mk_madhubani_painting.jpg
new file mode 100644
index 00000000..f959a0e5
Binary files /dev/null and b/sdxl_styles/samples/mk_madhubani_painting.jpg differ
diff --git a/sdxl_styles/samples/mk_mokume_gane.jpg b/sdxl_styles/samples/mk_mokume_gane.jpg
new file mode 100644
index 00000000..91bf90c7
Binary files /dev/null and b/sdxl_styles/samples/mk_mokume_gane.jpg differ
diff --git a/sdxl_styles/samples/mk_mosaic.jpg b/sdxl_styles/samples/mk_mosaic.jpg
new file mode 100644
index 00000000..f9d83075
Binary files /dev/null and b/sdxl_styles/samples/mk_mosaic.jpg differ
diff --git a/sdxl_styles/samples/mk_one_line_art.jpg b/sdxl_styles/samples/mk_one_line_art.jpg
new file mode 100644
index 00000000..62fb3593
Binary files /dev/null and b/sdxl_styles/samples/mk_one_line_art.jpg differ
diff --git a/sdxl_styles/samples/mk_palekh.jpg b/sdxl_styles/samples/mk_palekh.jpg
new file mode 100644
index 00000000..2c4453a7
Binary files /dev/null and b/sdxl_styles/samples/mk_palekh.jpg differ
diff --git a/sdxl_styles/samples/mk_patachitra_painting.jpg b/sdxl_styles/samples/mk_patachitra_painting.jpg
new file mode 100644
index 00000000..1fd21ea9
Binary files /dev/null and b/sdxl_styles/samples/mk_patachitra_painting.jpg differ
diff --git a/sdxl_styles/samples/mk_pichwai_painting.jpg b/sdxl_styles/samples/mk_pichwai_painting.jpg
new file mode 100644
index 00000000..3212f195
Binary files /dev/null and b/sdxl_styles/samples/mk_pichwai_painting.jpg differ
diff --git a/sdxl_styles/samples/mk_pictorialism.jpg b/sdxl_styles/samples/mk_pictorialism.jpg
new file mode 100644
index 00000000..7ed77422
Binary files /dev/null and b/sdxl_styles/samples/mk_pictorialism.jpg differ
diff --git a/sdxl_styles/samples/mk_pollock.jpg b/sdxl_styles/samples/mk_pollock.jpg
new file mode 100644
index 00000000..ecad511a
Binary files /dev/null and b/sdxl_styles/samples/mk_pollock.jpg differ
diff --git a/sdxl_styles/samples/mk_punk_collage.jpg b/sdxl_styles/samples/mk_punk_collage.jpg
new file mode 100644
index 00000000..5704a0f3
Binary files /dev/null and b/sdxl_styles/samples/mk_punk_collage.jpg differ
diff --git a/sdxl_styles/samples/mk_ron_english_style.jpg b/sdxl_styles/samples/mk_ron_english_style.jpg
new file mode 100644
index 00000000..14cc3ce5
Binary files /dev/null and b/sdxl_styles/samples/mk_ron_english_style.jpg differ
diff --git a/sdxl_styles/samples/mk_samoan_art_inspired.jpg b/sdxl_styles/samples/mk_samoan_art_inspired.jpg
new file mode 100644
index 00000000..570481d4
Binary files /dev/null and b/sdxl_styles/samples/mk_samoan_art_inspired.jpg differ
diff --git a/sdxl_styles/samples/mk_scrimshaw.jpg b/sdxl_styles/samples/mk_scrimshaw.jpg
new file mode 100644
index 00000000..cad08a21
Binary files /dev/null and b/sdxl_styles/samples/mk_scrimshaw.jpg differ
diff --git a/sdxl_styles/samples/mk_shepard_fairey_style.jpg b/sdxl_styles/samples/mk_shepard_fairey_style.jpg
new file mode 100644
index 00000000..7e5d1c17
Binary files /dev/null and b/sdxl_styles/samples/mk_shepard_fairey_style.jpg differ
diff --git a/sdxl_styles/samples/mk_shibori.jpg b/sdxl_styles/samples/mk_shibori.jpg
new file mode 100644
index 00000000..6dff3a6f
Binary files /dev/null and b/sdxl_styles/samples/mk_shibori.jpg differ
diff --git a/sdxl_styles/samples/mk_singer_sargent.jpg b/sdxl_styles/samples/mk_singer_sargent.jpg
new file mode 100644
index 00000000..1cef543e
Binary files /dev/null and b/sdxl_styles/samples/mk_singer_sargent.jpg differ
diff --git a/sdxl_styles/samples/mk_suminagashi.jpg b/sdxl_styles/samples/mk_suminagashi.jpg
new file mode 100644
index 00000000..5294cb9b
Binary files /dev/null and b/sdxl_styles/samples/mk_suminagashi.jpg differ
diff --git a/sdxl_styles/samples/mk_tlingit_art.jpg b/sdxl_styles/samples/mk_tlingit_art.jpg
new file mode 100644
index 00000000..60695e7a
Binary files /dev/null and b/sdxl_styles/samples/mk_tlingit_art.jpg differ
diff --git a/sdxl_styles/samples/mk_ukiyo_e.jpg b/sdxl_styles/samples/mk_ukiyo_e.jpg
new file mode 100644
index 00000000..2205c806
Binary files /dev/null and b/sdxl_styles/samples/mk_ukiyo_e.jpg differ
diff --git a/sdxl_styles/samples/mk_van_gogh.jpg b/sdxl_styles/samples/mk_van_gogh.jpg
new file mode 100644
index 00000000..96109a28
Binary files /dev/null and b/sdxl_styles/samples/mk_van_gogh.jpg differ
diff --git a/sdxl_styles/samples/mk_vintage_airline_poster.jpg b/sdxl_styles/samples/mk_vintage_airline_poster.jpg
new file mode 100644
index 00000000..e4c1fd5d
Binary files /dev/null and b/sdxl_styles/samples/mk_vintage_airline_poster.jpg differ
diff --git a/sdxl_styles/samples/mk_vintage_travel_poster.jpg b/sdxl_styles/samples/mk_vintage_travel_poster.jpg
new file mode 100644
index 00000000..bd3f2b7d
Binary files /dev/null and b/sdxl_styles/samples/mk_vintage_travel_poster.jpg differ
diff --git a/sdxl_styles/samples/mk_vitreous_enamel.jpg b/sdxl_styles/samples/mk_vitreous_enamel.jpg
new file mode 100644
index 00000000..afc5d14a
Binary files /dev/null and b/sdxl_styles/samples/mk_vitreous_enamel.jpg differ
diff --git a/sdxl_styles/samples/mre_ancient_illustration.jpg b/sdxl_styles/samples/mre_ancient_illustration.jpg
new file mode 100644
index 00000000..1583b72c
Binary files /dev/null and b/sdxl_styles/samples/mre_ancient_illustration.jpg differ
diff --git a/sdxl_styles/samples/mre_anime.jpg b/sdxl_styles/samples/mre_anime.jpg
new file mode 100644
index 00000000..be9a4058
Binary files /dev/null and b/sdxl_styles/samples/mre_anime.jpg differ
diff --git a/sdxl_styles/samples/mre_artistic_vision.jpg b/sdxl_styles/samples/mre_artistic_vision.jpg
new file mode 100644
index 00000000..eebd9fb6
Binary files /dev/null and b/sdxl_styles/samples/mre_artistic_vision.jpg differ
diff --git a/sdxl_styles/samples/mre_bad_dream.jpg b/sdxl_styles/samples/mre_bad_dream.jpg
new file mode 100644
index 00000000..125a27b4
Binary files /dev/null and b/sdxl_styles/samples/mre_bad_dream.jpg differ
diff --git a/sdxl_styles/samples/mre_brave_art.jpg b/sdxl_styles/samples/mre_brave_art.jpg
new file mode 100644
index 00000000..7b6ab272
Binary files /dev/null and b/sdxl_styles/samples/mre_brave_art.jpg differ
diff --git a/sdxl_styles/samples/mre_cinematic_dynamic.jpg b/sdxl_styles/samples/mre_cinematic_dynamic.jpg
new file mode 100644
index 00000000..46b6b845
Binary files /dev/null and b/sdxl_styles/samples/mre_cinematic_dynamic.jpg differ
diff --git a/sdxl_styles/samples/mre_comic.jpg b/sdxl_styles/samples/mre_comic.jpg
new file mode 100644
index 00000000..710208a8
Binary files /dev/null and b/sdxl_styles/samples/mre_comic.jpg differ
diff --git a/sdxl_styles/samples/mre_dark_cyberpunk.jpg b/sdxl_styles/samples/mre_dark_cyberpunk.jpg
new file mode 100644
index 00000000..18614e53
Binary files /dev/null and b/sdxl_styles/samples/mre_dark_cyberpunk.jpg differ
diff --git a/sdxl_styles/samples/mre_dark_dream.jpg b/sdxl_styles/samples/mre_dark_dream.jpg
new file mode 100644
index 00000000..af61310b
Binary files /dev/null and b/sdxl_styles/samples/mre_dark_dream.jpg differ
diff --git a/sdxl_styles/samples/mre_dynamic_illustration.jpg b/sdxl_styles/samples/mre_dynamic_illustration.jpg
new file mode 100644
index 00000000..66c78b3b
Binary files /dev/null and b/sdxl_styles/samples/mre_dynamic_illustration.jpg differ
diff --git a/sdxl_styles/samples/mre_elemental_art.jpg b/sdxl_styles/samples/mre_elemental_art.jpg
new file mode 100644
index 00000000..b55f9515
Binary files /dev/null and b/sdxl_styles/samples/mre_elemental_art.jpg differ
diff --git a/sdxl_styles/samples/mre_gloomy_art.jpg b/sdxl_styles/samples/mre_gloomy_art.jpg
new file mode 100644
index 00000000..9dbe72a4
Binary files /dev/null and b/sdxl_styles/samples/mre_gloomy_art.jpg differ
diff --git a/sdxl_styles/samples/mre_heroic_fantasy.jpg b/sdxl_styles/samples/mre_heroic_fantasy.jpg
new file mode 100644
index 00000000..7eff049e
Binary files /dev/null and b/sdxl_styles/samples/mre_heroic_fantasy.jpg differ
diff --git a/sdxl_styles/samples/mre_lyrical_geometry.jpg b/sdxl_styles/samples/mre_lyrical_geometry.jpg
new file mode 100644
index 00000000..fdd23018
Binary files /dev/null and b/sdxl_styles/samples/mre_lyrical_geometry.jpg differ
diff --git a/sdxl_styles/samples/mre_manga.jpg b/sdxl_styles/samples/mre_manga.jpg
new file mode 100644
index 00000000..891cadc0
Binary files /dev/null and b/sdxl_styles/samples/mre_manga.jpg differ
diff --git a/sdxl_styles/samples/mre_space_art.jpg b/sdxl_styles/samples/mre_space_art.jpg
new file mode 100644
index 00000000..f5cb31ab
Binary files /dev/null and b/sdxl_styles/samples/mre_space_art.jpg differ
diff --git a/sdxl_styles/samples/mre_spontaneous_picture.jpg b/sdxl_styles/samples/mre_spontaneous_picture.jpg
new file mode 100644
index 00000000..74cbcd39
Binary files /dev/null and b/sdxl_styles/samples/mre_spontaneous_picture.jpg differ
diff --git a/sdxl_styles/samples/mre_sumi_e_detailed.jpg b/sdxl_styles/samples/mre_sumi_e_detailed.jpg
new file mode 100644
index 00000000..bea50fa2
Binary files /dev/null and b/sdxl_styles/samples/mre_sumi_e_detailed.jpg differ
diff --git a/sdxl_styles/samples/mre_sumi_e_symbolic.jpg b/sdxl_styles/samples/mre_sumi_e_symbolic.jpg
new file mode 100644
index 00000000..81e4aa3b
Binary files /dev/null and b/sdxl_styles/samples/mre_sumi_e_symbolic.jpg differ
diff --git a/sdxl_styles/samples/mre_surreal_painting.jpg b/sdxl_styles/samples/mre_surreal_painting.jpg
new file mode 100644
index 00000000..82fa66db
Binary files /dev/null and b/sdxl_styles/samples/mre_surreal_painting.jpg differ
diff --git a/sdxl_styles/samples/mre_undead_art.jpg b/sdxl_styles/samples/mre_undead_art.jpg
new file mode 100644
index 00000000..d306d2cb
Binary files /dev/null and b/sdxl_styles/samples/mre_undead_art.jpg differ
diff --git a/sdxl_styles/samples/mre_underground.jpg b/sdxl_styles/samples/mre_underground.jpg
new file mode 100644
index 00000000..d01bc6cd
Binary files /dev/null and b/sdxl_styles/samples/mre_underground.jpg differ
diff --git a/sdxl_styles/samples/neo_baroque.jpg b/sdxl_styles/samples/neo_baroque.jpg
new file mode 100644
index 00000000..05ee36da
Binary files /dev/null and b/sdxl_styles/samples/neo_baroque.jpg differ
diff --git a/sdxl_styles/samples/neo_byzantine.jpg b/sdxl_styles/samples/neo_byzantine.jpg
new file mode 100644
index 00000000..f0d50aac
Binary files /dev/null and b/sdxl_styles/samples/neo_byzantine.jpg differ
diff --git a/sdxl_styles/samples/neo_futurism.jpg b/sdxl_styles/samples/neo_futurism.jpg
new file mode 100644
index 00000000..44cfa98e
Binary files /dev/null and b/sdxl_styles/samples/neo_futurism.jpg differ
diff --git a/sdxl_styles/samples/neo_impressionism.jpg b/sdxl_styles/samples/neo_impressionism.jpg
new file mode 100644
index 00000000..d11554df
Binary files /dev/null and b/sdxl_styles/samples/neo_impressionism.jpg differ
diff --git a/sdxl_styles/samples/neo_rococo.jpg b/sdxl_styles/samples/neo_rococo.jpg
new file mode 100644
index 00000000..0de1eaee
Binary files /dev/null and b/sdxl_styles/samples/neo_rococo.jpg differ
diff --git a/sdxl_styles/samples/neoclassicism.jpg b/sdxl_styles/samples/neoclassicism.jpg
new file mode 100644
index 00000000..cffc679b
Binary files /dev/null and b/sdxl_styles/samples/neoclassicism.jpg differ
diff --git a/sdxl_styles/samples/op_art.jpg b/sdxl_styles/samples/op_art.jpg
new file mode 100644
index 00000000..ee70c23b
Binary files /dev/null and b/sdxl_styles/samples/op_art.jpg differ
diff --git a/sdxl_styles/samples/ornate_and_intricate.jpg b/sdxl_styles/samples/ornate_and_intricate.jpg
new file mode 100644
index 00000000..765fec01
Binary files /dev/null and b/sdxl_styles/samples/ornate_and_intricate.jpg differ
diff --git a/sdxl_styles/samples/papercraft_collage.jpg b/sdxl_styles/samples/papercraft_collage.jpg
new file mode 100644
index 00000000..dba524c9
Binary files /dev/null and b/sdxl_styles/samples/papercraft_collage.jpg differ
diff --git a/sdxl_styles/samples/papercraft_flat_papercut.jpg b/sdxl_styles/samples/papercraft_flat_papercut.jpg
new file mode 100644
index 00000000..3608636c
Binary files /dev/null and b/sdxl_styles/samples/papercraft_flat_papercut.jpg differ
diff --git a/sdxl_styles/samples/papercraft_kirigami.jpg b/sdxl_styles/samples/papercraft_kirigami.jpg
new file mode 100644
index 00000000..f8a8c6f1
Binary files /dev/null and b/sdxl_styles/samples/papercraft_kirigami.jpg differ
diff --git a/sdxl_styles/samples/papercraft_paper_mache.jpg b/sdxl_styles/samples/papercraft_paper_mache.jpg
new file mode 100644
index 00000000..90122cac
Binary files /dev/null and b/sdxl_styles/samples/papercraft_paper_mache.jpg differ
diff --git a/sdxl_styles/samples/papercraft_paper_quilling.jpg b/sdxl_styles/samples/papercraft_paper_quilling.jpg
new file mode 100644
index 00000000..0b017ff3
Binary files /dev/null and b/sdxl_styles/samples/papercraft_paper_quilling.jpg differ
diff --git a/sdxl_styles/samples/papercraft_papercut_collage.jpg b/sdxl_styles/samples/papercraft_papercut_collage.jpg
new file mode 100644
index 00000000..0d0d60db
Binary files /dev/null and b/sdxl_styles/samples/papercraft_papercut_collage.jpg differ
diff --git a/sdxl_styles/samples/papercraft_papercut_shadow_box.jpg b/sdxl_styles/samples/papercraft_papercut_shadow_box.jpg
new file mode 100644
index 00000000..da088610
Binary files /dev/null and b/sdxl_styles/samples/papercraft_papercut_shadow_box.jpg differ
diff --git a/sdxl_styles/samples/papercraft_stacked_papercut.jpg b/sdxl_styles/samples/papercraft_stacked_papercut.jpg
new file mode 100644
index 00000000..503d78bf
Binary files /dev/null and b/sdxl_styles/samples/papercraft_stacked_papercut.jpg differ
diff --git a/sdxl_styles/samples/papercraft_thick_layered_papercut.jpg b/sdxl_styles/samples/papercraft_thick_layered_papercut.jpg
new file mode 100644
index 00000000..cd649505
Binary files /dev/null and b/sdxl_styles/samples/papercraft_thick_layered_papercut.jpg differ
diff --git a/sdxl_styles/samples/pebble_art.jpg b/sdxl_styles/samples/pebble_art.jpg
new file mode 100644
index 00000000..12e8c184
Binary files /dev/null and b/sdxl_styles/samples/pebble_art.jpg differ
diff --git a/sdxl_styles/samples/pencil_sketch_drawing.jpg b/sdxl_styles/samples/pencil_sketch_drawing.jpg
new file mode 100644
index 00000000..dc753e45
Binary files /dev/null and b/sdxl_styles/samples/pencil_sketch_drawing.jpg differ
diff --git a/sdxl_styles/samples/photo_alien.jpg b/sdxl_styles/samples/photo_alien.jpg
new file mode 100644
index 00000000..5fea0abb
Binary files /dev/null and b/sdxl_styles/samples/photo_alien.jpg differ
diff --git a/sdxl_styles/samples/photo_film_noir.jpg b/sdxl_styles/samples/photo_film_noir.jpg
new file mode 100644
index 00000000..961009af
Binary files /dev/null and b/sdxl_styles/samples/photo_film_noir.jpg differ
diff --git a/sdxl_styles/samples/photo_glamour.jpg b/sdxl_styles/samples/photo_glamour.jpg
new file mode 100644
index 00000000..9e136066
Binary files /dev/null and b/sdxl_styles/samples/photo_glamour.jpg differ
diff --git a/sdxl_styles/samples/photo_hdr.jpg b/sdxl_styles/samples/photo_hdr.jpg
new file mode 100644
index 00000000..a36bb175
Binary files /dev/null and b/sdxl_styles/samples/photo_hdr.jpg differ
diff --git a/sdxl_styles/samples/photo_iphone_photographic.jpg b/sdxl_styles/samples/photo_iphone_photographic.jpg
new file mode 100644
index 00000000..5e1830d4
Binary files /dev/null and b/sdxl_styles/samples/photo_iphone_photographic.jpg differ
diff --git a/sdxl_styles/samples/photo_long_exposure.jpg b/sdxl_styles/samples/photo_long_exposure.jpg
new file mode 100644
index 00000000..7a747fd1
Binary files /dev/null and b/sdxl_styles/samples/photo_long_exposure.jpg differ
diff --git a/sdxl_styles/samples/photo_neon_noir.jpg b/sdxl_styles/samples/photo_neon_noir.jpg
new file mode 100644
index 00000000..6e6d093b
Binary files /dev/null and b/sdxl_styles/samples/photo_neon_noir.jpg differ
diff --git a/sdxl_styles/samples/photo_silhouette.jpg b/sdxl_styles/samples/photo_silhouette.jpg
new file mode 100644
index 00000000..cf0a13c1
Binary files /dev/null and b/sdxl_styles/samples/photo_silhouette.jpg differ
diff --git a/sdxl_styles/samples/photo_tilt_shift.jpg b/sdxl_styles/samples/photo_tilt_shift.jpg
new file mode 100644
index 00000000..85fc2ba2
Binary files /dev/null and b/sdxl_styles/samples/photo_tilt_shift.jpg differ
diff --git a/sdxl_styles/samples/pop_art_2.jpg b/sdxl_styles/samples/pop_art_2.jpg
new file mode 100644
index 00000000..77c9a853
Binary files /dev/null and b/sdxl_styles/samples/pop_art_2.jpg differ
diff --git a/sdxl_styles/samples/random_style.jpg b/sdxl_styles/samples/random_style.jpg
new file mode 100644
index 00000000..9f685108
Binary files /dev/null and b/sdxl_styles/samples/random_style.jpg differ
diff --git a/sdxl_styles/samples/rococo.jpg b/sdxl_styles/samples/rococo.jpg
new file mode 100644
index 00000000..63a97bd3
Binary files /dev/null and b/sdxl_styles/samples/rococo.jpg differ
diff --git a/sdxl_styles/samples/sai_3d_model.jpg b/sdxl_styles/samples/sai_3d_model.jpg
new file mode 100644
index 00000000..273ab40c
Binary files /dev/null and b/sdxl_styles/samples/sai_3d_model.jpg differ
diff --git a/sdxl_styles/samples/sai_analog_film.jpg b/sdxl_styles/samples/sai_analog_film.jpg
new file mode 100644
index 00000000..7dea7a69
Binary files /dev/null and b/sdxl_styles/samples/sai_analog_film.jpg differ
diff --git a/sdxl_styles/samples/sai_anime.jpg b/sdxl_styles/samples/sai_anime.jpg
new file mode 100644
index 00000000..a26f57e0
Binary files /dev/null and b/sdxl_styles/samples/sai_anime.jpg differ
diff --git a/sdxl_styles/samples/sai_cinematic.jpg b/sdxl_styles/samples/sai_cinematic.jpg
new file mode 100644
index 00000000..e6546d5e
Binary files /dev/null and b/sdxl_styles/samples/sai_cinematic.jpg differ
diff --git a/sdxl_styles/samples/sai_comic_book.jpg b/sdxl_styles/samples/sai_comic_book.jpg
new file mode 100644
index 00000000..2b82ed27
Binary files /dev/null and b/sdxl_styles/samples/sai_comic_book.jpg differ
diff --git a/sdxl_styles/samples/sai_craft_clay.jpg b/sdxl_styles/samples/sai_craft_clay.jpg
new file mode 100644
index 00000000..ad75d09f
Binary files /dev/null and b/sdxl_styles/samples/sai_craft_clay.jpg differ
diff --git a/sdxl_styles/samples/sai_digital_art.jpg b/sdxl_styles/samples/sai_digital_art.jpg
new file mode 100644
index 00000000..55af0120
Binary files /dev/null and b/sdxl_styles/samples/sai_digital_art.jpg differ
diff --git a/sdxl_styles/samples/sai_enhance.jpg b/sdxl_styles/samples/sai_enhance.jpg
new file mode 100644
index 00000000..f44c9000
Binary files /dev/null and b/sdxl_styles/samples/sai_enhance.jpg differ
diff --git a/sdxl_styles/samples/sai_fantasy_art.jpg b/sdxl_styles/samples/sai_fantasy_art.jpg
new file mode 100644
index 00000000..1792de0e
Binary files /dev/null and b/sdxl_styles/samples/sai_fantasy_art.jpg differ
diff --git a/sdxl_styles/samples/sai_isometric.jpg b/sdxl_styles/samples/sai_isometric.jpg
new file mode 100644
index 00000000..34a75225
Binary files /dev/null and b/sdxl_styles/samples/sai_isometric.jpg differ
diff --git a/sdxl_styles/samples/sai_line_art.jpg b/sdxl_styles/samples/sai_line_art.jpg
new file mode 100644
index 00000000..f137c033
Binary files /dev/null and b/sdxl_styles/samples/sai_line_art.jpg differ
diff --git a/sdxl_styles/samples/sai_lowpoly.jpg b/sdxl_styles/samples/sai_lowpoly.jpg
new file mode 100644
index 00000000..058dfe94
Binary files /dev/null and b/sdxl_styles/samples/sai_lowpoly.jpg differ
diff --git a/sdxl_styles/samples/sai_neonpunk.jpg b/sdxl_styles/samples/sai_neonpunk.jpg
new file mode 100644
index 00000000..4c32008f
Binary files /dev/null and b/sdxl_styles/samples/sai_neonpunk.jpg differ
diff --git a/sdxl_styles/samples/sai_origami.jpg b/sdxl_styles/samples/sai_origami.jpg
new file mode 100644
index 00000000..c5c5ffd2
Binary files /dev/null and b/sdxl_styles/samples/sai_origami.jpg differ
diff --git a/sdxl_styles/samples/sai_photographic.jpg b/sdxl_styles/samples/sai_photographic.jpg
new file mode 100644
index 00000000..5086895d
Binary files /dev/null and b/sdxl_styles/samples/sai_photographic.jpg differ
diff --git a/sdxl_styles/samples/sai_pixel_art.jpg b/sdxl_styles/samples/sai_pixel_art.jpg
new file mode 100644
index 00000000..dbb6f9fc
Binary files /dev/null and b/sdxl_styles/samples/sai_pixel_art.jpg differ
diff --git a/sdxl_styles/samples/sai_texture.jpg b/sdxl_styles/samples/sai_texture.jpg
new file mode 100644
index 00000000..cd34f537
Binary files /dev/null and b/sdxl_styles/samples/sai_texture.jpg differ
diff --git a/sdxl_styles/samples/silhouette_art.jpg b/sdxl_styles/samples/silhouette_art.jpg
new file mode 100644
index 00000000..e28c6616
Binary files /dev/null and b/sdxl_styles/samples/silhouette_art.jpg differ
diff --git a/sdxl_styles/samples/simple_vector_art.jpg b/sdxl_styles/samples/simple_vector_art.jpg
new file mode 100644
index 00000000..cecdf09c
Binary files /dev/null and b/sdxl_styles/samples/simple_vector_art.jpg differ
diff --git a/sdxl_styles/samples/sketchup.jpg b/sdxl_styles/samples/sketchup.jpg
new file mode 100644
index 00000000..c077400e
Binary files /dev/null and b/sdxl_styles/samples/sketchup.jpg differ
diff --git a/sdxl_styles/samples/steampunk_2.jpg b/sdxl_styles/samples/steampunk_2.jpg
new file mode 100644
index 00000000..b636c620
Binary files /dev/null and b/sdxl_styles/samples/steampunk_2.jpg differ
diff --git a/sdxl_styles/samples/sticker_designs.jpg b/sdxl_styles/samples/sticker_designs.jpg
new file mode 100644
index 00000000..1e03d7ae
Binary files /dev/null and b/sdxl_styles/samples/sticker_designs.jpg differ
diff --git a/sdxl_styles/samples/suprematism.jpg b/sdxl_styles/samples/suprematism.jpg
new file mode 100644
index 00000000..b8ddc3ad
Binary files /dev/null and b/sdxl_styles/samples/suprematism.jpg differ
diff --git a/sdxl_styles/samples/surrealism.jpg b/sdxl_styles/samples/surrealism.jpg
new file mode 100644
index 00000000..12a7cac0
Binary files /dev/null and b/sdxl_styles/samples/surrealism.jpg differ
diff --git a/sdxl_styles/samples/terragen.jpg b/sdxl_styles/samples/terragen.jpg
new file mode 100644
index 00000000..f83417f6
Binary files /dev/null and b/sdxl_styles/samples/terragen.jpg differ
diff --git a/sdxl_styles/samples/tranquil_relaxing_atmosphere.jpg b/sdxl_styles/samples/tranquil_relaxing_atmosphere.jpg
new file mode 100644
index 00000000..52ae6f5c
Binary files /dev/null and b/sdxl_styles/samples/tranquil_relaxing_atmosphere.jpg differ
diff --git a/sdxl_styles/samples/vibrant_rim_light.jpg b/sdxl_styles/samples/vibrant_rim_light.jpg
new file mode 100644
index 00000000..47b47316
Binary files /dev/null and b/sdxl_styles/samples/vibrant_rim_light.jpg differ
diff --git a/sdxl_styles/samples/volumetric_lighting.jpg b/sdxl_styles/samples/volumetric_lighting.jpg
new file mode 100644
index 00000000..b6fb6958
Binary files /dev/null and b/sdxl_styles/samples/volumetric_lighting.jpg differ
diff --git a/sdxl_styles/samples/watercolor_2.jpg b/sdxl_styles/samples/watercolor_2.jpg
new file mode 100644
index 00000000..1afb96e2
Binary files /dev/null and b/sdxl_styles/samples/watercolor_2.jpg differ
diff --git a/sdxl_styles/samples/whimsical_and_playful.jpg b/sdxl_styles/samples/whimsical_and_playful.jpg
new file mode 100644
index 00000000..d5afcb47
Binary files /dev/null and b/sdxl_styles/samples/whimsical_and_playful.jpg differ
diff --git a/sdxl_styles/sdxl_styles_fooocus.json b/sdxl_styles/sdxl_styles_fooocus.json
index 81d6442e..cf64eab4 100644
--- a/sdxl_styles/sdxl_styles_fooocus.json
+++ b/sdxl_styles/sdxl_styles_fooocus.json
@@ -3,6 +3,10 @@
"name": "Fooocus Enhance",
"negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)"
},
+ {
+ "name": "Fooocus Semi Realistic",
+ "negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)"
+ },
{
"name": "Fooocus Sharp",
"prompt": "cinematic still {prompt} . emotional, harmonious, vignette, 4k epic detailed, shot on kodak, 35mm photo, sharp focus, high budget, cinemascope, moody, epic, gorgeous, film grain, grainy",
diff --git a/shared.py b/shared.py
index 269809e3..21a2a864 100644
--- a/shared.py
+++ b/shared.py
@@ -1,2 +1 @@
-gradio_root = None
-last_stop = None
+gradio_root = None
\ No newline at end of file
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..c424468f
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,4 @@
+import sys
+import pathlib
+
+sys.path.append(pathlib.Path(f'{__file__}/../modules').parent.resolve())
diff --git a/tests/test_extra_utils.py b/tests/test_extra_utils.py
new file mode 100644
index 00000000..a849aa16
--- /dev/null
+++ b/tests/test_extra_utils.py
@@ -0,0 +1,74 @@
+import numbers
+import os
+import unittest
+
+import modules.flags
+from modules import extra_utils
+
+
+class TestUtils(unittest.TestCase):
+ def test_try_eval_env_var(self):
+ test_cases = [
+ {
+ "input": ("foo", str),
+ "output": "foo"
+ },
+ {
+ "input": ("1", int),
+ "output": 1
+ },
+ {
+ "input": ("1.0", float),
+ "output": 1.0
+ },
+ {
+ "input": ("1", numbers.Number),
+ "output": 1
+ },
+ {
+ "input": ("1.0", numbers.Number),
+ "output": 1.0
+ },
+ {
+ "input": ("true", bool),
+ "output": True
+ },
+ {
+ "input": ("True", bool),
+ "output": True
+ },
+ {
+ "input": ("false", bool),
+ "output": False
+ },
+ {
+ "input": ("False", bool),
+ "output": False
+ },
+ {
+ "input": ("True", str),
+ "output": "True"
+ },
+ {
+ "input": ("False", str),
+ "output": "False"
+ },
+ {
+ "input": ("['a', 'b', 'c']", list),
+ "output": ['a', 'b', 'c']
+ },
+ {
+ "input": ("{'a':1}", dict),
+ "output": {'a': 1}
+ },
+ {
+ "input": ("('foo', 1)", tuple),
+ "output": ('foo', 1)
+ }
+ ]
+
+ for test in test_cases:
+ value, expected_type = test["input"]
+ expected = test["output"]
+ actual = extra_utils.try_eval_env_var(value, expected_type)
+ self.assertEqual(expected, actual)
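The cases above define the contract of `extra_utils.try_eval_env_var` without showing its body. A minimal sketch with the same behaviour (an assumption for illustration, not the actual `modules/extra_utils.py` code) might be:

```python
import ast
import numbers

def try_eval_env_var(value: str, expected_type: type):
    # Strings always pass through unchanged, even "True"/"False".
    if expected_type is str:
        return value
    # Booleans accept "true"/"false" in any letter case.
    if expected_type is bool and value.lower() in ('true', 'false'):
        return value.lower() == 'true'
    # Numbers, lists, dicts and tuples are recovered with a safe literal parse.
    try:
        parsed = ast.literal_eval(value)
    except (ValueError, SyntaxError):
        return value
    # Abstract types such as numbers.Number are accepted as well as concrete ones.
    return parsed if isinstance(parsed, expected_type) else value
```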
diff --git a/tests/test_utils.py b/tests/test_utils.py
new file mode 100644
index 00000000..c1f49c13
--- /dev/null
+++ b/tests/test_utils.py
@@ -0,0 +1,137 @@
+import os
+import unittest
+
+import modules.flags
+from modules import util
+
+
+class TestUtils(unittest.TestCase):
+ def test_can_parse_tokens_with_lora(self):
+ test_cases = [
+ {
+ "input": ("some prompt, very cool, , cool ", [], 5, True),
+ "output": (
+ [('hey-lora.safetensors', 0.4), ('you-lora.safetensors', 0.2)], 'some prompt, very cool, cool'),
+ },
+ # Test can not exceed limit
+ {
+ "input": ("some prompt, very cool, , cool ", [], 1, True),
+ "output": (
+ [('hey-lora.safetensors', 0.4)],
+ 'some prompt, very cool, cool'
+ ),
+ },
+ # test Loras from UI take precedence over prompt
+ {
+ "input": (
+ "some prompt, very cool, , , , , , ",
+ [("hey-lora.safetensors", 0.4)],
+ 5,
+ True
+ ),
+ "output": (
+ [
+ ('hey-lora.safetensors', 0.4),
+ ('l1.safetensors', 0.4),
+ ('l2.safetensors', -0.2),
+ ('l3.safetensors', 0.3),
+ ('l4.safetensors', 0.5)
+ ],
+ 'some prompt, very cool'
+ )
+ },
+ # test correct matching even if there is no space separating loras in the same token
+ {
+ "input": ("some prompt, very cool, ", [], 3, True),
+ "output": (
+ [
+ ('hey-lora.safetensors', 0.4),
+ ('you-lora.safetensors', 0.2)
+ ],
+ 'some prompt, very cool'
+ ),
+ },
+ # test deduplication, also selected loras are never overridden with loras in prompt
+ {
+ "input": (
+ "some prompt, very cool, ",
+ [('you-lora.safetensors', 0.3)],
+ 3,
+ True
+ ),
+ "output": (
+ [
+ ('you-lora.safetensors', 0.3),
+ ('hey-lora.safetensors', 0.4)
+ ],
+ 'some prompt, very cool'
+ ),
+ },
+ {
+ "input": (", , , and ", [], 6, True),
+ "output": (
+ [],
+ ', , , and '
+ )
+ }
+ ]
+
+ for test in test_cases:
+ prompt, loras, loras_limit, skip_file_check = test["input"]
+ expected = test["output"]
+ actual = util.parse_lora_references_from_prompt(prompt, loras, loras_limit=loras_limit,
+ skip_file_check=skip_file_check)
+ self.assertEqual(expected, actual)
+
+ def test_can_parse_tokens_and_strip_performance_lora(self):
+ lora_filenames = [
+ 'hey-lora.safetensors',
+ modules.flags.PerformanceLoRA.EXTREME_SPEED.value,
+ modules.flags.PerformanceLoRA.LIGHTNING.value,
+ os.path.join('subfolder', modules.flags.PerformanceLoRA.HYPER_SD.value)
+ ]
+
+ test_cases = [
+ {
+ "input": ("some prompt, ", [], 5, True, modules.flags.Performance.QUALITY),
+ "output": (
+ [('hey-lora.safetensors', 0.4)],
+ 'some prompt'
+ ),
+ },
+ {
+ "input": ("some prompt, ", [], 5, True, modules.flags.Performance.SPEED),
+ "output": (
+ [('hey-lora.safetensors', 0.4)],
+ 'some prompt'
+ ),
+ },
+ {
+ "input": ("some prompt, , ", [], 5, True, modules.flags.Performance.EXTREME_SPEED),
+ "output": (
+ [('hey-lora.safetensors', 0.4)],
+ 'some prompt'
+ ),
+ },
+ {
+ "input": ("some prompt, , ", [], 5, True, modules.flags.Performance.LIGHTNING),
+ "output": (
+ [('hey-lora.safetensors', 0.4)],
+ 'some prompt'
+ ),
+ },
+ {
+ "input": ("some prompt, , ", [], 5, True, modules.flags.Performance.HYPER_SD),
+ "output": (
+ [('hey-lora.safetensors', 0.4)],
+ 'some prompt'
+ ),
+ }
+ ]
+
+ for test in test_cases:
+ prompt, loras, loras_limit, skip_file_check, performance = test["input"]
+ lora_filenames = modules.util.remove_performance_lora(lora_filenames, performance)
+ expected = test["output"]
+ actual = util.parse_lora_references_from_prompt(prompt, loras, loras_limit=loras_limit, lora_filenames=lora_filenames)
+ self.assertEqual(expected, actual)
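The expectations above likewise pin down `parse_lora_references_from_prompt`: inline `<lora:filename:weight>` tokens are extracted from the prompt, LoRAs already selected in the UI take precedence, duplicates are dropped, and the total is capped at `loras_limit`. A rough sketch of that core logic (an assumption with a made-up helper name, not the actual `modules/util.py` implementation):

```python
import re

# Inline tokens look like <lora:filename:weight>; the weight may be negative or fractional.
LORA_TOKEN = re.compile(r'<lora:([^<>:]+):(-?\d+(?:\.\d+)?)>')

def parse_lora_references(prompt: str, selected: list, loras_limit: int = 5):
    loras = list(selected)                      # LoRAs chosen in the UI take precedence
    seen = {name for name, _ in loras}
    for name, weight in LORA_TOKEN.findall(prompt):
        if not name.endswith('.safetensors'):
            name += '.safetensors'
        if name not in seen and len(loras) < loras_limit:
            loras.append((name, float(weight)))
            seen.add(name)
    # Strip the tokens from the prompt and tidy the commas they leave behind.
    cleaned = LORA_TOKEN.sub('', prompt)
    cleaned = ', '.join(part.strip() for part in cleaned.split(',') if part.strip())
    return loras, cleaned

# parse_lora_references('some prompt, very cool, <lora:hey-lora:0.4>, cool <lora:you-lora:0.2>', [])
# -> ([('hey-lora.safetensors', 0.4), ('you-lora.safetensors', 0.2)], 'some prompt, very cool, cool')
```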
diff --git a/update_log.md b/update_log.md
index 216a2bc4..8aa43647 100644
--- a/update_log.md
+++ b/update_log.md
@@ -1,4 +1,95 @@
-**(2023 Dec 21) Hi all, the feature updating of Fooocus will be paused for about two or three weeks because we have some other workloads. See you soon and we will come back in mid or late Jan. However, you may still see updates if other collaborators are fixing bugs or solving problems.**
+# [2.4.3](https://github.com/lllyasviel/Fooocus/releases/tag/v2.4.3)
+
+* Fix alphas_cumprod setter for TCD sampler
+* Add parser for env var strings to expected config value types to allow override of all non-path config keys
+
+# [2.4.2](https://github.com/lllyasviel/Fooocus/releases/tag/v2.4.2)
+
+* Fix some small bugs (tcd scheduler when gamma is 0, chown in Dockerfile, update cmd args in readme, translation for aspect ratios, vae default after file reload)
+* Fix performance LoRA replacement when data is loaded from history log and inline prompt
+* Add support and preset for playground v2.5 (only works with performance Quality or Speed, use with scheduler edm_playground_v2)
+* Make textboxes (incl. positive prompt) resizable
+* Hide intermediate images when Gradio previews would bottleneck the generation process (Extreme Speed, Lightning, Hyper-SD)
+
+# [2.4.1](https://github.com/lllyasviel/Fooocus/releases/tag/v2.4.1)
+
+* Fix some small bugs (e.g. adjust clip skip default value from 1 to 2, add type check to aspect ratios js update function)
+* Add automated docker build on push to main, tagged with `edge`. See [available docker images](https://github.com/lllyasviel/Fooocus/pkgs/container/fooocus).
+
+# [2.4.0](https://github.com/lllyasviel/Fooocus/releases/tag/v2.4.0)
+
+* Change settings tab elements to be more compact
+* Add clip skip slider
+* Add select for custom VAE
+* Add new style "Random Style"
+* Update default anime model to animaPencilXL_v310
+* Add button to reconnect the UI after Fooocus crashed without having to configure everything again (no page reload required)
+* Add performance "hyper-sd" (based on [Hyper-SDXL 4 step LoRA](https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SDXL-4steps-lora.safetensors))
+* Add [AlignYourSteps](https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/) scheduler by Nvidia, see
+* Add [TCD](https://github.com/jabir-zheng/TCD) sampler and scheduler (based on sgm_uniform)
+* Add NSFW image censoring (disables intermediate image preview while generating). Set config value `default_black_out_nsfw` to True to always enable.
+* Add argument `--enable-describe-uov-image` to automatically describe uploaded images for upscaling
+* Add inline lora prompt references with subfolder support, example prompt: `colorful bird <lora:subfolder/lora_filename:0.5>`
+* Add size and aspect ratio recommendation on image describe
+* Add inpaint brush color picker, helpful when image and mask brush have the same color
+* Add automated Docker image build using Github Actions on each release.
+* Add full raw prompts to history logs
+* Change code ownership from @lllyasviel to @mashb1t for automated issue / MR notification
+
+# [2.3.1](https://github.com/lllyasviel/Fooocus/releases/tag/2.3.1)
+
+* Remove positive prompt from anime prefix to not reset prompt after switching presets
+* Fix image number being reset to 1 when switching preset, now doesn't reset anymore
+* Fix outpainting dimension calculation when extending left/right
+* Fix LoRA compatibility for LoRAs in a1111 metadata scheme
+
+# [2.3.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.3.0)
+
+* Add performance "lightning" (based on [SDXL-Lightning 4 step LoRA](https://huggingface.co/ByteDance/SDXL-Lightning/blob/main/sdxl_lightning_4step_lora.safetensors))
+* Add preset selection to UI, disable with argument `--disable-preset-selection`. Use `--always-download-new-model` to download missing models on preset switch.
+* Improve face swap consistency by switching later in the process to (synthetic) refiner
+* Add temp path cleanup on startup
+* Add support for wildcard subdirectories
+* Add scrollable 2 column layout for styles for better structure
+* Improve Colab resource needs for T4 instances (default), positively tested with all image prompt features
+* Improve anime preset, now uses style `Fooocus Semi Realistic` instead of `Fooocus Negative` (less wet look images)
+
+# [2.2.1](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.1)
+
+* Fix some small bugs (e.g. image grid, upscale fast 2x, LoRA weight width in Firefox)
+* Allow prompt weights in array syntax
+* Add steps override and metadata scheme to history log
+
+# [2.2.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.0)
+
+* Isolate every image generation to truly allow multi-user usage
+* Add array support, which changes the main prompt as the image number increases. Syntax: `[[red, green, blue]] flower` (see the sketch after this changelog)
+* Add optional metadata to images, allowing you to regenerate and modify them later with the same parameters
+* Now supports native PNG, JPG and WEBP image generation
+* Add Docker support
+
+# [2.1.865](https://github.com/lllyasviel/Fooocus/releases/tag/2.1.865)
+
+* Various bugfixes
+* Add authentication to --listen
+
+# 2.1.864
+
+* New model list. See also discussions.
+
+# 2.1.861 (requested update)
+
+(2023 Dec 21) Hi all, the feature updating of Fooocus will be paused for about two or three weeks because we have some other workloads. See you soon and we will come back in mid or late Jan. However, you may still see updates if other collaborators are fixing bugs or solving problems.
+
+* Show image preview for styles on mouse hover.
+
+# 2.1.860 (requested update)
+
+* Allow uploading an inpaint mask in developer mode.
+
+# 2.1.857 (requested update)
+
+* Begin to support 8GB AMD GPU on Windows.
# 2.1.854
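One item in the 2.2.0 entry above, the prompt array syntax `[[red, green, blue]] flower`, is compact enough to sketch. Assuming the documented behaviour (this is an illustration only, not the actual Fooocus expansion code), the bracketed group cycles through its values as the image number increases:

```python
import re

ARRAY_GROUP = re.compile(r'\[\[(.*?)\]\]')

def expand_prompt_array(prompt: str, image_number: int) -> list[str]:
    """Sketch: expand the first [[a, b, c]] group into one prompt per image."""
    match = ARRAY_GROUP.search(prompt)
    if match is None:
        return [prompt] * image_number
    values = [v.strip() for v in match.group(1).split(',')]
    return [prompt[:match.start()] + values[i % len(values)] + prompt[match.end():]
            for i in range(image_number)]

# expand_prompt_array('[[red, green, blue]] flower', 3)
# -> ['red flower', 'green flower', 'blue flower']
```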
diff --git a/webui.py b/webui.py
index a5138abf..a0982cae 100644
--- a/webui.py
+++ b/webui.py
@@ -11,28 +11,35 @@ import modules.async_worker as worker
import modules.constants as constants
import modules.flags as flags
import modules.gradio_hijack as grh
-import modules.advanced_parameters as advanced_parameters
import modules.style_sorter as style_sorter
import modules.meta_parser
import args_manager
import copy
+import launch
from modules.sdxl_styles import legal_style_names
from modules.private_logger import get_current_html_path
from modules.ui_gradio_extensions import reload_javascript
from modules.auth import auth_enabled, check_auth
+from modules.util import is_json
-def generate_clicked(*args):
+def get_task(*args):
+    args = list(args)
+    args.pop(0)
+
+    return worker.AsyncTask(args=args)
+
+
+def generate_clicked(task: worker.AsyncTask):
import ldm_patched.modules.model_management as model_management
with model_management.interrupt_processing_mutex:
model_management.interrupt_processing = False
-
# outputs=[progress_html, progress_window, progress_gallery, gallery]
+ if len(task.args) == 0:
+ return
+
execution_start_time = time.perf_counter()
- task = worker.AsyncTask(args=list(args))
finished = False
yield gr.update(visible=True, value=modules.html.make_progress_html(1, 'Waiting for task to start ...')), \
@@ -71,6 +78,12 @@ def generate_clicked(*args):
gr.update(visible=True, value=product)
finished = True
+ # delete Fooocus temp images, only keep gradio temp images
+ if args_manager.args.disable_image_log:
+ for filepath in product:
+ if isinstance(filepath, str) and os.path.exists(filepath):
+ os.remove(filepath)
+
execution_time = time.perf_counter() - execution_start_time
print(f'Total time: {execution_time:.2f} seconds')
return
@@ -83,11 +96,10 @@ title = f'Fooocus {fooocus_version.version}'
if isinstance(args_manager.args.preset, str):
title += ' ' + args_manager.args.preset
-shared.gradio_root = gr.Blocks(
- title=title,
- css=modules.html.css).queue()
+shared.gradio_root = gr.Blocks(title=title).queue()
with shared.gradio_root:
+ currentTask = gr.State(worker.AsyncTask(args=[]))
with gr.Row():
with gr.Column(scale=2):
with gr.Row():
@@ -100,10 +112,10 @@ with shared.gradio_root:
gallery = gr.Gallery(label='Gallery', show_label=False, object_fit='contain', visible=True, height=768,
elem_classes=['resizable_area', 'main_view', 'final_gallery', 'image_gallery'],
elem_id='final_gallery')
- with gr.Row(elem_classes='type_row'):
+ with gr.Row():
with gr.Column(scale=17):
prompt = gr.Textbox(show_label=False, placeholder="Type prompt here or paste parameters.", elem_id='positive_prompt',
- container=False, autofocus=True, elem_classes='type_row', lines=1024)
+ autofocus=True, lines=3)
default_prompt = modules.config.default_prompt
if isinstance(default_prompt, str) and default_prompt != '':
@@ -111,25 +123,27 @@ with shared.gradio_root:
with gr.Column(scale=3, min_width=0):
generate_button = gr.Button(label="Generate", value="Generate", elem_classes='type_row', elem_id='generate_button', visible=True)
+ reset_button = gr.Button(label="Reconnect", value="Reconnect", elem_classes='type_row', elem_id='reset_button', visible=False)
load_parameter_button = gr.Button(label="Load Parameters", value="Load Parameters", elem_classes='type_row', elem_id='load_parameter_button', visible=False)
- skip_button = gr.Button(label="Skip", value="Skip", elem_classes='type_row_half', visible=False)
+ skip_button = gr.Button(label="Skip", value="Skip", elem_classes='type_row_half', elem_id='skip_button', visible=False)
stop_button = gr.Button(label="Stop", value="Stop", elem_classes='type_row_half', elem_id='stop_button', visible=False)
- def stop_clicked():
+ def stop_clicked(currentTask):
import ldm_patched.modules.model_management as model_management
- shared.last_stop = 'stop'
- model_management.interrupt_current_processing()
- return [gr.update(interactive=False)] * 2
+ currentTask.last_stop = 'stop'
+ if (currentTask.processing):
+ model_management.interrupt_current_processing()
+ return currentTask
- def skip_clicked():
+ def skip_clicked(currentTask):
import ldm_patched.modules.model_management as model_management
- shared.last_stop = 'skip'
- model_management.interrupt_current_processing()
- return
+ currentTask.last_stop = 'skip'
+ if (currentTask.processing):
+ model_management.interrupt_current_processing()
+ return currentTask
- stop_button.click(stop_clicked, outputs=[skip_button, stop_button],
- queue=False, show_progress=False, _js='cancelGenerateForever')
- skip_button.click(skip_clicked, queue=False, show_progress=False)
+ stop_button.click(stop_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False, _js='cancelGenerateForever')
+ skip_button.click(skip_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False)
with gr.Row(elem_classes='advanced_check_row'):
input_image_checkbox = gr.Checkbox(label='Input Image', value=False, container=False, elem_classes='min_check')
advanced_checkbox = gr.Checkbox(label='Advanced', value=modules.config.default_advanced_checkbox, container=False, elem_classes='min_check')
@@ -138,7 +152,7 @@ with shared.gradio_root:
with gr.TabItem(label='Upscale or Variation') as uov_tab:
with gr.Row():
with gr.Column():
- uov_input_image = grh.Image(label='Drag above image to here', source='upload', type='numpy')
+ uov_input_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False)
with gr.Column():
uov_method = gr.Radio(label='Upscale or Variation:', choices=flags.uov_list, value=flags.disabled)
gr.HTML('\U0001F4D4 Document')
@@ -150,7 +164,7 @@ with shared.gradio_root:
ip_weights = []
ip_ctrls = []
ip_ad_cols = []
- for _ in range(4):
+ for _ in range(flags.controlnet_image_count):
with gr.Column():
ip_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False, height=300)
ip_images.append(ip_image)
@@ -186,7 +200,10 @@ with shared.gradio_root:
outputs=ip_ad_cols + ip_types + ip_stops + ip_weights,
queue=False, show_progress=False)
with gr.TabItem(label='Inpaint or Outpaint') as inpaint_tab:
- inpaint_input_image = grh.Image(label='Drag above image to here', source='upload', type='numpy', tool='sketch', height=500, brush_color="#FFFFFF", elem_id='inpaint_canvas')
+ with gr.Row():
+ inpaint_input_image = grh.Image(label='Image', source='upload', type='numpy', tool='sketch', height=500, brush_color="#FFFFFF", elem_id='inpaint_canvas', show_label=False)
+ inpaint_mask_image = grh.Image(label='Mask Upload', source='upload', type='numpy', height=500, visible=False)
+
with gr.Row():
inpaint_additional_prompt = gr.Textbox(placeholder="Describe what you want to inpaint.", elem_id='inpaint_additional_prompt', label='Inpaint Additional Prompt', visible=False)
outpaint_selections = gr.CheckboxGroup(choices=['Left', 'Right', 'Top', 'Bottom'], value=[], label='Outpaint Direction')
@@ -197,14 +214,44 @@ with shared.gradio_root:
with gr.TabItem(label='Describe') as desc_tab:
with gr.Row():
with gr.Column():
- desc_input_image = grh.Image(label='Drag any image to here', source='upload', type='numpy')
+ desc_input_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False)
with gr.Column():
desc_method = gr.Radio(
label='Content Type',
choices=[flags.desc_type_photo, flags.desc_type_anime],
value=flags.desc_type_photo)
desc_btn = gr.Button(value='Describe this Image into Prompt')
+ desc_image_size = gr.Textbox(label='Image Size and Recommended Size', elem_id='desc_image_size', visible=False)
gr.HTML('\U0001F4D4 Document')
+
+ def trigger_show_image_properties(image):
+ value = modules.util.get_image_size_info(image, modules.flags.sdxl_aspect_ratios)
+ return gr.update(value=value, visible=True)
+
+ desc_input_image.upload(trigger_show_image_properties, inputs=desc_input_image,
+ outputs=desc_image_size, show_progress=False, queue=False)
+
+ with gr.TabItem(label='Metadata') as metadata_tab:
+ with gr.Column():
+ metadata_input_image = grh.Image(label='For images created by Fooocus', source='upload', type='filepath')
+ metadata_json = gr.JSON(label='Metadata')
+ metadata_import_button = gr.Button(value='Apply Metadata')
+
+ def trigger_metadata_preview(filepath):
+ parameters, metadata_scheme = modules.meta_parser.read_info_from_image(filepath)
+
+ results = {}
+ if parameters is not None:
+ results['parameters'] = parameters
+
+ if isinstance(metadata_scheme, flags.MetadataScheme):
+ results['metadata_scheme'] = metadata_scheme.value
+
+ return results
+
+ metadata_input_image.upload(trigger_metadata_preview, inputs=metadata_input_image,
+ outputs=metadata_json, queue=False, show_progress=True)
+
switch_js = "(x) => {if(x){viewer_to_bottom(100);viewer_to_bottom(500);}else{viewer_to_top();} return x;}"
down_js = "() => {viewer_to_bottom();}"
@@ -217,16 +264,35 @@ with shared.gradio_root:
inpaint_tab.select(lambda: 'inpaint', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
ip_tab.select(lambda: 'ip', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
desc_tab.select(lambda: 'desc', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
+ metadata_tab.select(lambda: 'metadata', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
with gr.Column(scale=1, visible=modules.config.default_advanced_checkbox) as advanced_column:
with gr.Tab(label='Setting'):
+ if not args_manager.args.disable_preset_selection:
+ preset_selection = gr.Dropdown(label='Preset',
+ choices=modules.config.available_presets,
+ value=args_manager.args.preset if args_manager.args.preset else "initial",
+ interactive=True)
performance_selection = gr.Radio(label='Performance',
- choices=modules.flags.performance_selections,
- value=modules.config.default_performance)
- aspect_ratios_selection = gr.Radio(label='Aspect Ratios', choices=modules.config.available_aspect_ratios,
- value=modules.config.default_aspect_ratio, info='width × height',
- elem_classes='aspect_ratios')
- image_number = gr.Slider(label='Image Number', minimum=1, maximum=32, step=1, value=modules.config.default_image_number)
+ choices=flags.Performance.list(),
+ value=modules.config.default_performance,
+ elem_classes=['performance_selection'])
+ with gr.Accordion(label='Aspect Ratios', open=False, elem_id='aspect_ratios_accordion') as aspect_ratios_accordion:
+ aspect_ratios_selection = gr.Radio(label='Aspect Ratios', show_label=False,
+ choices=modules.config.available_aspect_ratios_labels,
+ value=modules.config.default_aspect_ratio,
+ info='width × height',
+ elem_classes='aspect_ratios')
+
+ aspect_ratios_selection.change(lambda x: None, inputs=aspect_ratios_selection, queue=False, show_progress=False, _js='(x)=>{refresh_aspect_ratios_label(x);}')
+ shared.gradio_root.load(lambda x: None, inputs=aspect_ratios_selection, queue=False, show_progress=False, _js='(x)=>{refresh_aspect_ratios_label(x);}')
+
+ image_number = gr.Slider(label='Image Number', minimum=1, maximum=modules.config.default_max_image_number, step=1, value=modules.config.default_image_number)
+
+ output_format = gr.Radio(label='Output Format',
+ choices=flags.OutputFormat.list(),
+ value=modules.config.default_output_format)
+
negative_prompt = gr.Textbox(label='Negative Prompt', show_label=True, placeholder="Type prompt here.",
info='Describing what you do not want to see.', lines=2,
elem_id='negative_prompt',
@@ -252,10 +318,16 @@ with shared.gradio_root:
seed_random.change(random_checked, inputs=[seed_random], outputs=[image_seed],
queue=False, show_progress=False)
- if not args_manager.args.disable_image_log:
- gr.HTML(f'\U0001F4DA History Log')
+ def update_history_link():
+ if args_manager.args.disable_image_log:
+ return gr.update(value='')
+
+ return gr.update(value=f'\U0001F4DA History Log')
- with gr.Tab(label='Style'):
+ history_link = gr.HTML()
+ shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False)
+
+ with gr.Tab(label='Style', elem_classes=['style_selections_tab']):
style_sorter.try_load_sorted_styles(
style_names=legal_style_names,
default_selected=modules.config.default_styles)
@@ -308,16 +380,20 @@ with shared.gradio_root:
with gr.Group():
lora_ctrls = []
- for i, (n, v) in enumerate(modules.config.default_loras):
+ for i, (enabled, filename, weight) in enumerate(modules.config.default_loras):
with gr.Row():
+ lora_enabled = gr.Checkbox(label='Enable', value=enabled,
+ elem_classes=['lora_enable', 'min_check'], scale=1)
lora_model = gr.Dropdown(label=f'LoRA {i + 1}',
- choices=['None'] + modules.config.lora_filenames, value=n)
- lora_weight = gr.Slider(label='Weight', minimum=-2, maximum=2, step=0.01, value=v,
- elem_classes='lora_weight')
- lora_ctrls += [lora_model, lora_weight]
+ choices=['None'] + modules.config.lora_filenames, value=filename,
+ elem_classes='lora_model', scale=5)
+ lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight,
+ maximum=modules.config.default_loras_max_weight, step=0.01, value=weight,
+ elem_classes='lora_weight', scale=5)
+ lora_ctrls += [lora_enabled, lora_model, lora_weight]
with gr.Row():
- model_refresh = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button')
+ refresh_files = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button')
with gr.Tab(label='Advanced'):
guidance_scale = gr.Slider(label='Guidance Scale', minimum=1.0, maximum=30.0, step=0.01,
value=modules.config.default_cfg_scale,
@@ -338,17 +414,22 @@ with shared.gradio_root:
step=0.001, value=0.3,
info='When to end the guidance from positive/negative ADM. ')
- refiner_swap_method = gr.Dropdown(label='Refiner swap method', value='joint',
+ refiner_swap_method = gr.Dropdown(label='Refiner swap method', value=flags.refiner_swap_method,
choices=['joint', 'separate', 'vae'])
adaptive_cfg = gr.Slider(label='CFG Mimicking from TSNR', minimum=1.0, maximum=30.0, step=0.01,
value=modules.config.default_cfg_tsnr,
info='Enabling Fooocus\'s implementation of CFG mimicking for TSNR '
'(effective when real CFG > mimicked CFG).')
+ clip_skip = gr.Slider(label='CLIP Skip', minimum=1, maximum=flags.clip_skip_max, step=1,
+ value=modules.config.default_clip_skip,
+ info='Bypass CLIP layers to avoid overfitting (use 1 to not skip any layers, 2 is recommended).')
sampler_name = gr.Dropdown(label='Sampler', choices=flags.sampler_list,
value=modules.config.default_sampler)
scheduler_name = gr.Dropdown(label='Scheduler', choices=flags.scheduler_list,
value=modules.config.default_scheduler)
+ vae_name = gr.Dropdown(label='VAE', choices=[modules.flags.default_vae] + modules.config.vae_filenames,
+ value=modules.config.default_vae, show_label=True)
generate_image_grid = gr.Checkbox(label='Generate Image Grid for Each Batch',
info='(Experimental) This may cause performance problems on some computers and certain internet conditions.',
@@ -376,8 +457,35 @@ with shared.gradio_root:
overwrite_upscale_strength = gr.Slider(label='Forced Overwrite of Denoising Strength of "Upscale"',
minimum=-1, maximum=1.0, step=0.001, value=-1,
info='Set as negative number to disable. For developer debugging.')
- disable_preview = gr.Checkbox(label='Disable Preview', value=False,
+ disable_preview = gr.Checkbox(label='Disable Preview', value=modules.config.default_black_out_nsfw,
+ interactive=not modules.config.default_black_out_nsfw,
info='Disable preview during generation.')
+ disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
+ value=flags.Performance.has_restricted_features(modules.config.default_performance),
+ info='Disable intermediate results during generation, only show final gallery.')
+ disable_seed_increment = gr.Checkbox(label='Disable seed increment',
+ info='Disable automatic seed increment when image number is > 1.',
+ value=False)
+ read_wildcards_in_order = gr.Checkbox(label="Read wildcards in order", value=False)
+
+ black_out_nsfw = gr.Checkbox(label='Black Out NSFW',
+ value=modules.config.default_black_out_nsfw,
+ interactive=not modules.config.default_black_out_nsfw,
+ info='Use black image if NSFW is detected.')
+
+ black_out_nsfw.change(lambda x: gr.update(value=x, interactive=not x),
+ inputs=black_out_nsfw, outputs=disable_preview, queue=False,
+ show_progress=False)
+
+ if not args_manager.args.disable_metadata:
+ save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images,
+ info='Adds parameters to generated images allowing manual regeneration.')
+ metadata_scheme = gr.Radio(label='Metadata Scheme', choices=flags.metadata_scheme, value=modules.config.default_metadata_scheme,
+ info='Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.',
+ visible=modules.config.default_save_metadata_to_images)
+
+ save_metadata_to_images.change(lambda x: gr.update(visible=x), inputs=[save_metadata_to_images], outputs=[metadata_scheme],
+ queue=False, show_progress=False)
with gr.Tab(label='Control'):
debugging_cn_preprocessor = gr.Checkbox(label='Debug Preprocessors', value=False,
@@ -419,7 +527,28 @@ with shared.gradio_root:
'Value 1 is same as "Whole Image" in A1111. '
'Only used in inpaint, not used in outpaint. '
'(Outpaint always use 1.0)')
- inpaint_ctrls = [debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field]
+ inpaint_erode_or_dilate = gr.Slider(label='Mask Erode or Dilate',
+ minimum=-64, maximum=64, step=1, value=0,
+ info='Positive value will make white area in the mask larger, '
+                                                    'negative value will make white area smaller. '
+ '(default is 0, always process before any mask invert)')
+ inpaint_mask_upload_checkbox = gr.Checkbox(label='Enable Mask Upload', value=False)
+ invert_mask_checkbox = gr.Checkbox(label='Invert Mask', value=False)
+
+ inpaint_mask_color = gr.ColorPicker(label='Inpaint brush color', value='#FFFFFF', elem_id='inpaint_brush_color')
+
+ inpaint_ctrls = [debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine,
+ inpaint_strength, inpaint_respective_field,
+ inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate]
+
+ inpaint_mask_upload_checkbox.change(lambda x: gr.update(visible=x),
+ inputs=inpaint_mask_upload_checkbox,
+ outputs=inpaint_mask_image, queue=False,
+ show_progress=False)
+
+ inpaint_mask_color.change(lambda x: gr.update(brush_color=x), inputs=inpaint_mask_color,
+ outputs=inpaint_input_image,
+ queue=False, show_progress=False)
with gr.Tab(label='FreeU'):
freeu_enabled = gr.Checkbox(label='Enabled', value=False)
@@ -429,42 +558,73 @@ with shared.gradio_root:
freeu_s2 = gr.Slider(label='S2', minimum=0, maximum=4, step=0.01, value=0.95)
freeu_ctrls = [freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2]
- adps = [disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name,
- scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height,
- overwrite_vary_strength, overwrite_upscale_strength,
- mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint,
- debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness,
- canny_low_threshold, canny_high_threshold, refiner_swap_method]
- adps += freeu_ctrls
- adps += inpaint_ctrls
-
def dev_mode_checked(r):
return gr.update(visible=r)
-
dev_mode.change(dev_mode_checked, inputs=[dev_mode], outputs=[dev_tools],
queue=False, show_progress=False)
- def model_refresh_clicked():
- modules.config.update_all_model_names()
- results = []
- results += [gr.update(choices=modules.config.model_filenames), gr.update(choices=['None'] + modules.config.model_filenames)]
- for i in range(5):
- results += [gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
+ def refresh_files_clicked():
+ modules.config.update_files()
+ results = [gr.update(choices=modules.config.model_filenames)]
+ results += [gr.update(choices=['None'] + modules.config.model_filenames)]
+ results += [gr.update(choices=[flags.default_vae] + modules.config.vae_filenames)]
+ if not args_manager.args.disable_preset_selection:
+ results += [gr.update(choices=modules.config.available_presets)]
+ for i in range(modules.config.default_max_lora_number):
+ results += [gr.update(interactive=True),
+ gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
return results
- model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls,
+ refresh_files_output = [base_model, refiner_model, vae_name]
+ if not args_manager.args.disable_preset_selection:
+ refresh_files_output += [preset_selection]
+ refresh_files.click(refresh_files_clicked, [], refresh_files_output + lora_ctrls,
queue=False, show_progress=False)
- performance_selection.change(lambda x: [gr.update(interactive=x != 'Extreme Speed')] * 11 +
- [gr.update(visible=x != 'Extreme Speed')] * 1,
+ state_is_generating = gr.State(False)
+
+ load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections,
+ performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection,
+ overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive,
+ adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, clip_skip,
+ base_model, refiner_model, refiner_switch, sampler_name, scheduler_name, vae_name,
+ seed_random, image_seed, generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls
+
+ if not args_manager.args.disable_preset_selection:
+ def preset_selection_change(preset, is_generating):
+ preset_content = modules.config.try_get_preset_content(preset) if preset != 'initial' else {}
+ preset_prepared = modules.meta_parser.parse_meta_from_preset(preset_content)
+
+ default_model = preset_prepared.get('base_model')
+ previous_default_models = preset_prepared.get('previous_default_models', [])
+ checkpoint_downloads = preset_prepared.get('checkpoint_downloads', {})
+ embeddings_downloads = preset_prepared.get('embeddings_downloads', {})
+ lora_downloads = preset_prepared.get('lora_downloads', {})
+
+ preset_prepared['base_model'], preset_prepared['lora_downloads'] = launch.download_models(
+ default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads)
+
+ if 'prompt' in preset_prepared and preset_prepared.get('prompt') == '':
+ del preset_prepared['prompt']
+
+ return modules.meta_parser.load_parameter_button_click(json.dumps(preset_prepared), is_generating)
+
+ preset_selection.change(preset_selection_change, inputs=[preset_selection, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
+ .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)
+
+ performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 +
+ [gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 +
+ [gr.update(value=flags.Performance.has_restricted_features(x))] * 1,
inputs=performance_selection,
outputs=[
guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive,
adm_scaler_negative, refiner_switch, refiner_model, sampler_name,
- scheduler_name, adaptive_cfg, refiner_swap_method, negative_prompt
+ scheduler_name, adaptive_cfg, refiner_swap_method, negative_prompt, disable_intermediate_results
], queue=False, show_progress=False)
-
+
+ output_format.input(lambda x: gr.update(output_format=x), inputs=output_format)
+
advanced_checkbox.change(lambda x: gr.update(visible=x), advanced_checkbox, advanced_column,
queue=False, show_progress=False) \
.then(fn=lambda: None, _js='refresh_grid_delayed', queue=False, show_progress=False)
@@ -502,68 +662,81 @@ with shared.gradio_root:
inpaint_strength, inpaint_respective_field
], show_progress=False, queue=False)
- ctrls = [
+ ctrls = [currentTask, generate_image_grid]
+ ctrls += [
prompt, negative_prompt, style_selections,
- performance_selection, aspect_ratios_selection, image_number, image_seed, sharpness, guidance_scale
+ performance_selection, aspect_ratios_selection, image_number, output_format, image_seed,
+ read_wildcards_in_order, sharpness, guidance_scale
]
ctrls += [base_model, refiner_model, refiner_switch] + lora_ctrls
ctrls += [input_image_checkbox, current_tab]
ctrls += [uov_method, uov_input_image]
- ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt]
+ ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt, inpaint_mask_image]
+ ctrls += [disable_preview, disable_intermediate_results, disable_seed_increment, black_out_nsfw]
+ ctrls += [adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, clip_skip]
+ ctrls += [sampler_name, scheduler_name, vae_name]
+ ctrls += [overwrite_step, overwrite_switch, overwrite_width, overwrite_height, overwrite_vary_strength]
+ ctrls += [overwrite_upscale_strength, mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint]
+ ctrls += [debugging_cn_preprocessor, skipping_cn_preprocessor, canny_low_threshold, canny_high_threshold]
+ ctrls += [refiner_swap_method, controlnet_softness]
+ ctrls += freeu_ctrls
+ ctrls += inpaint_ctrls
+
+ if not args_manager.args.disable_metadata:
+ ctrls += [save_metadata_to_images, metadata_scheme]
+
ctrls += ip_ctrls
- def parse_meta(raw_prompt_txt):
+ def parse_meta(raw_prompt_txt, is_generating):
loaded_json = None
- try:
- if '{' in raw_prompt_txt:
- if '}' in raw_prompt_txt:
- if ':' in raw_prompt_txt:
- loaded_json = json.loads(raw_prompt_txt)
- assert isinstance(loaded_json, dict)
- except:
- loaded_json = None
+ if is_json(raw_prompt_txt):
+ loaded_json = json.loads(raw_prompt_txt)
if loaded_json is None:
- return gr.update(), gr.update(visible=True), gr.update(visible=False)
+ if is_generating:
+ return gr.update(), gr.update(), gr.update()
+ else:
+ return gr.update(), gr.update(visible=True), gr.update(visible=False)
return json.dumps(loaded_json), gr.update(visible=False), gr.update(visible=True)
- prompt.input(parse_meta, inputs=prompt, outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False)
+ prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False)
- load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=prompt, outputs=[
- advanced_checkbox,
- image_number,
- prompt,
- negative_prompt,
- style_selections,
- performance_selection,
- aspect_ratios_selection,
- overwrite_width,
- overwrite_height,
- sharpness,
- guidance_scale,
- adm_scaler_positive,
- adm_scaler_negative,
- adm_scaler_end,
- base_model,
- refiner_model,
- refiner_switch,
- sampler_name,
- scheduler_name,
- seed_random,
- image_seed,
- generate_button,
- load_parameter_button
- ] + lora_ctrls, queue=False, show_progress=False)
+ load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False)
- generate_button.click(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=True, interactive=True), gr.update(visible=False), []), outputs=[stop_button, skip_button, generate_button, gallery]) \
+ def trigger_metadata_import(filepath, state_is_generating):
+ parameters, metadata_scheme = modules.meta_parser.read_info_from_image(filepath)
+ if parameters is None:
+ print('Could not find metadata in the image!')
+ parsed_parameters = {}
+ else:
+ metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
+ parsed_parameters = metadata_parser.to_json(parameters)
+
+ return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating)
+
+ metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
+ .then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)
+
+ generate_button.click(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), [], True),
+ outputs=[stop_button, skip_button, generate_button, gallery, state_is_generating]) \
.then(fn=refresh_seed, inputs=[seed_random, image_seed], outputs=image_seed) \
- .then(advanced_parameters.set_all_advanced_parameters, inputs=adps) \
- .then(fn=generate_clicked, inputs=ctrls, outputs=[progress_html, progress_window, progress_gallery, gallery]) \
- .then(lambda: (gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)), outputs=[generate_button, stop_button, skip_button]) \
+ .then(fn=get_task, inputs=ctrls, outputs=currentTask) \
+ .then(fn=generate_clicked, inputs=currentTask, outputs=[progress_html, progress_window, progress_gallery, gallery]) \
+ .then(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), gr.update(visible=False, interactive=False), False),
+ outputs=[generate_button, stop_button, skip_button, state_is_generating]) \
+ .then(fn=update_history_link, outputs=history_link) \
.then(fn=lambda: None, _js='playNotification').then(fn=lambda: None, _js='refresh_grid_delayed')
+ reset_button.click(lambda: [worker.AsyncTask(args=[]), False, gr.update(visible=True, interactive=True)] +
+ [gr.update(visible=False)] * 6 +
+ [gr.update(visible=True, value=[])],
+ outputs=[currentTask, state_is_generating, generate_button,
+ reset_button, stop_button, skip_button,
+ progress_html, progress_window, progress_gallery, gallery],
+ queue=False)
+
for notification_file in ['notification.ogg', 'notification.mp3']:
if os.path.exists(notification_file):
gr.Audio(interactive=False, value=notification_file, elem_id='audio_notification', visible=False)
@@ -579,8 +752,17 @@ with shared.gradio_root:
return mode, ["Fooocus V2"]
desc_btn.click(trigger_describe, inputs=[desc_method, desc_input_image],
- outputs=[prompt, style_selections], show_progress=True, queue=False)
+ outputs=[prompt, style_selections], show_progress=True, queue=True)
+ if args_manager.args.enable_describe_uov_image:
+ def trigger_uov_describe(mode, img, prompt):
+ # keep prompt if not empty
+ if prompt == '':
+ return trigger_describe(mode, img)
+ return gr.update(), gr.update()
+
+ uov_input_image.upload(trigger_uov_describe, inputs=[desc_method, uov_input_image, prompt],
+ outputs=[prompt, style_selections], show_progress=True, queue=True)
def dump_default_english_config():
from modules.localization import dump_english_config
@@ -594,6 +776,7 @@ shared.gradio_root.launch(
server_name=args_manager.args.listen,
server_port=args_manager.args.port,
share=args_manager.args.share,
- auth=check_auth if args_manager.args.share and auth_enabled else None,
+ auth=check_auth if (args_manager.args.share or args_manager.args.listen) and auth_enabled else None,
+ allowed_paths=[modules.config.path_outputs],
blocked_paths=[constants.AUTH_FILENAME]
)
diff --git a/wildcards/.gitignore b/wildcards/.gitignore
new file mode 100644
index 00000000..7e4ac188
--- /dev/null
+++ b/wildcards/.gitignore
@@ -0,0 +1,8 @@
+*.txt
+!animal.txt
+!artist.txt
+!color.txt
+!color_flower.txt
+!extended-color.txt
+!flower.txt
+!nationality.txt
\ No newline at end of file
diff --git a/wildcards/animal.txt b/wildcards/animal.txt
new file mode 100644
index 00000000..3c479daa
--- /dev/null
+++ b/wildcards/animal.txt
@@ -0,0 +1,100 @@
+Alligator
+Ant
+Antelope
+Armadillo
+Badger
+Bat
+Bear
+Beaver
+Bison
+Boar
+Bobcat
+Bull
+Camel
+Chameleon
+Cheetah
+Chicken
+Chihuahua
+Chimpanzee
+Chinchilla
+Chipmunk
+Komodo Dragon
+Cow
+Coyote
+Crocodile
+Crow
+Deer
+Dinosaur
+Dolphin
+Donkey
+Duck
+Eagle
+Eel
+Elephant
+Elk
+Emu
+Falcon
+Ferret
+Flamingo
+Flying Squirrel
+Giraffe
+Goose
+Guinea pig
+Hawk
+Hedgehog
+Hippopotamus
+Horse
+Hummingbird
+Hyena
+Jackal
+Jaguar
+Jellyfish
+Kangaroo
+King Cobra
+Koala bear
+Leopard
+Lion
+Lizard
+Magpie
+Marten
+Meerkat
+Mole
+Monkey
+Moose
+Mouse
+Octopus
+Okapi
+Orangutan
+Ostrich
+Otter
+Owl
+Panda
+Pangolin
+Panther
+Penguin
+Pig
+Porcupine
+Possum
+Puma
+Quokka
+Rabbit
+Raccoon
+Raven
+Reindeer
+Rhinoceros
+Seal
+Shark
+Sheep
+Snail
+Snake
+Sparrow
+Spider
+Squirrel
+Swallow
+Tiger
+Walrus
+Whale
+Wolf
+Wombat
+Yak
+Zebra