-    begin_part = f"Fooocus Log {date_string} (private)\nAll images are clean, without any hidden data/meta, and safe to share with others.\n\n"
+    begin_part = f"Fooocus Log {date_string} (private)\nMetadata is embedded if enabled in the config or developer debug mode. You can find the information for each image in line Metadata Scheme.\n\n"
     end_part = f'\n'
     middle_part = log_cache.get(html_name, "")
@@ -83,14 +106,14 @@ def log(img, dic):
     div_name = only_name.replace('.', '_')
-    item = f"{only_name} | "
+    item += f"{only_name} | "
     item += " | "
@@ -105,4 +128,4 @@ def log(img, dic):
     log_cache[html_name] = middle_part
-    return
+    return local_temp_filename
diff --git a/modules/sdxl_styles.py b/modules/sdxl_styles.py
index f5bb6276..77ad6b57 100644
--- a/modules/sdxl_styles.py
+++ b/modules/sdxl_styles.py
@@ -1,13 +1,13 @@
import os
import re
import json
+import math
+import modules.config
from modules.util import get_files_from_folder
-
# cannot use modules.config - validators causing circular imports
styles_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../sdxl_styles/'))
-wildcards_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../wildcards/'))
wildcards_max_bfs_depth = 64
@@ -59,7 +59,7 @@ def apply_style(style, positive):
return p.replace('{prompt}', positive).splitlines(), n.splitlines()
-def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
+def apply_wildcards(wildcard_text, rng, i, read_wildcards_in_order):
for _ in range(wildcards_max_bfs_depth):
placeholders = re.findall(r'__([\w-]+)__', wildcard_text)
if len(placeholders) == 0:
@@ -68,10 +68,14 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
print(f'[Wildcards] processing: {wildcard_text}')
for placeholder in placeholders:
try:
- words = open(os.path.join(directory, f'{placeholder}.txt'), encoding='utf-8').read().splitlines()
+ matches = [x for x in modules.config.wildcard_filenames if os.path.splitext(os.path.basename(x))[0] == placeholder]
+ words = open(os.path.join(modules.config.path_wildcards, matches[0]), encoding='utf-8').read().splitlines()
words = [x for x in words if x != '']
assert len(words) > 0
- wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1)
+ if read_wildcards_in_order:
+ wildcard_text = wildcard_text.replace(f'__{placeholder}__', words[i % len(words)], 1)
+ else:
+ wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1)
except:
print(f'[Wildcards] Warning: {placeholder}.txt missing or empty. '
f'Using "{placeholder}" as a normal word.')
@@ -80,3 +84,38 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
print(f'[Wildcards] BFS stack overflow. Current text: {wildcard_text}')
return wildcard_text
+
+
+def get_words(arrays, totalMult, index):
+ if len(arrays) == 1:
+ return [arrays[0].split(',')[index]]
+ else:
+ words = arrays[0].split(',')
+ word = words[index % len(words)]
+ index -= index % len(words)
+ index /= len(words)
+ index = math.floor(index)
+ return [word] + get_words(arrays[1:], math.floor(totalMult/len(words)), index)
+
+
+def apply_arrays(text, index):
+ arrays = re.findall(r'\[\[(.*?)\]\]', text)
+ if len(arrays) == 0:
+ return text
+
+ print(f'[Arrays] processing: {text}')
+ mult = 1
+ for arr in arrays:
+ words = arr.split(',')
+ mult *= len(words)
+
+ index %= mult
+ chosen_words = get_words(arrays, mult, index)
+
+ i = 0
+ for arr in arrays:
+ text = text.replace(f'[[{arr}]]', chosen_words[i], 1)
+ i = i+1
+
+ return text
+
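
The `get_words` recursion above decomposes a single flat image index into one choice per `[[...]]` group, mixed-radix style (the first group varies fastest), and `apply_wildcards` with `read_wildcards_in_order` likewise picks `words[i % len(words)]` deterministically instead of sampling. A minimal standalone sketch of the array logic, using a made-up prompt and no Fooocus imports:

```python
import math
import re

# Standalone copy of the [[...]] array logic from sdxl_styles.py above.
def get_words(arrays, total_mult, index):
    if len(arrays) == 1:
        return [arrays[0].split(',')[index]]
    words = arrays[0].split(',')
    word = words[index % len(words)]
    index = math.floor((index - index % len(words)) / len(words))
    return [word] + get_words(arrays[1:], math.floor(total_mult / len(words)), index)

def apply_arrays(text, index):
    arrays = re.findall(r'\[\[(.*?)\]\]', text)
    if not arrays:
        return text
    mult = 1
    for arr in arrays:
        mult *= len(arr.split(','))
    index %= mult  # wrap around once every combination has been used
    for arr, word in zip(arrays, get_words(arrays, mult, index)):
        text = text.replace(f'[[{arr}]]', word, 1)
    return text

# Image index i walks through all 3 * 2 = 6 combinations, then wraps:
for i in range(7):
    print(i, apply_arrays('[[red,green,blue]] [[cat,dog]]', i))
# 0 red cat | 1 green cat | 2 blue cat | 3 red dog | 4 green dog | 5 blue dog | 6 red cat
```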
diff --git a/modules/util.py b/modules/util.py
index 052b746b..9e0fb294 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -1,15 +1,20 @@
+import typing
+
import numpy as np
import datetime
import random
import math
import os
import cv2
+import json
+import hashlib
from PIL import Image
+import modules.sdxl_styles
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
-
+HASH_SHA256_LENGTH = 10
def erode_or_dilate(x, k):
k = int(k)
@@ -155,23 +160,235 @@ def generate_temp_filename(folder='./outputs/', extension='png'):
random_number = random.randint(1000, 9999)
filename = f"{time_string}_{random_number}.{extension}"
result = os.path.join(folder, date_string, filename)
- return date_string, os.path.abspath(os.path.realpath(result)), filename
+ return date_string, os.path.abspath(result), filename
-def get_files_from_folder(folder_path, exensions=None, name_filter=None):
+def get_files_from_folder(folder_path, extensions=None, name_filter=None):
if not os.path.isdir(folder_path):
raise ValueError("Folder path is not a valid directory.")
filenames = []
- for root, dirs, files in os.walk(folder_path):
+ for root, dirs, files in os.walk(folder_path, topdown=False):
relative_path = os.path.relpath(root, folder_path)
if relative_path == ".":
relative_path = ""
- for filename in files:
+ for filename in sorted(files, key=lambda s: s.casefold()):
_, file_extension = os.path.splitext(filename)
- if (exensions == None or file_extension.lower() in exensions) and (name_filter == None or name_filter in _):
+ if (extensions is None or file_extension.lower() in extensions) and (name_filter is None or name_filter in _):
path = os.path.join(relative_path, filename)
filenames.append(path)
- return sorted(filenames, key=lambda x: -1 if os.sep in x else 1)
+ return filenames
+
+
+def sha256(filename, use_addnet_hash=False, length=HASH_SHA256_LENGTH):
+ print(f"Calculating sha256 for {filename}: ", end='')
+ if use_addnet_hash:
+ with open(filename, "rb") as file:
+ sha256_value = addnet_hash_safetensors(file)
+ else:
+ sha256_value = calculate_sha256(filename)
+ print(f"{sha256_value}")
+
+ return sha256_value[:length] if length is not None else sha256_value
+
+
+def addnet_hash_safetensors(b):
+ """kohya-ss hash for safetensors from https://github.com/kohya-ss/sd-scripts/blob/main/library/train_util.py"""
+ hash_sha256 = hashlib.sha256()
+ blksize = 1024 * 1024
+
+ b.seek(0)
+ header = b.read(8)
+ n = int.from_bytes(header, "little")
+
+ offset = n + 8
+ b.seek(offset)
+ for chunk in iter(lambda: b.read(blksize), b""):
+ hash_sha256.update(chunk)
+
+ return hash_sha256.hexdigest()
+
+
+def calculate_sha256(filename) -> str:
+ hash_sha256 = hashlib.sha256()
+ blksize = 1024 * 1024
+
+ with open(filename, "rb") as f:
+ for chunk in iter(lambda: f.read(blksize), b""):
+ hash_sha256.update(chunk)
+
+ return hash_sha256.hexdigest()
+
+
+def quote(text):
+ if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
+ return text
+
+ return json.dumps(text, ensure_ascii=False)
+
+
+def unquote(text):
+ if len(text) == 0 or text[0] != '"' or text[-1] != '"':
+ return text
+
+ try:
+ return json.loads(text)
+ except Exception:
+ return text
+
+
+def unwrap_style_text_from_prompt(style_text, prompt):
+ """
+ Checks the prompt to see if the style text is wrapped around it. If so,
+ returns True plus the prompt text without the style text. Otherwise, returns
+ False with the original prompt.
+
+ Note that the "cleaned" version of the style text is only used for matching
+ purposes here. It isn't returned; the original style text is not modified.
+ """
+ stripped_prompt = prompt
+ stripped_style_text = style_text
+ if "{prompt}" in stripped_style_text:
+ # Work out whether the prompt is wrapped in the style text. If so, we
+ # return True and the "inner" prompt text that isn't part of the style.
+ try:
+ left, right = stripped_style_text.split("{prompt}", 2)
+ except ValueError as e:
+            # If the style text has multiple "{prompt}"s, we can't split it into
+ # two parts. This is an error, but we can't do anything about it.
+ print(f"Unable to compare style text to prompt:\n{style_text}")
+ print(f"Error: {e}")
+ return False, prompt, ''
+
+ left_pos = stripped_prompt.find(left)
+ right_pos = stripped_prompt.find(right)
+ if 0 <= left_pos < right_pos:
+ real_prompt = stripped_prompt[left_pos + len(left):right_pos]
+ prompt = stripped_prompt.replace(left + real_prompt + right, '', 1)
+ if prompt.startswith(", "):
+ prompt = prompt[2:]
+ if prompt.endswith(", "):
+ prompt = prompt[:-2]
+ return True, prompt, real_prompt
+ else:
+ # Work out whether the given prompt starts with the style text. If so, we
+ # return True and the prompt text up to where the style text starts.
+ if stripped_prompt.endswith(stripped_style_text):
+ prompt = stripped_prompt[: len(stripped_prompt) - len(stripped_style_text)]
+ if prompt.endswith(", "):
+ prompt = prompt[:-2]
+ return True, prompt, prompt
+
+ return False, prompt, ''
+
+
+def extract_original_prompts(style, prompt, negative_prompt):
+ """
+ Takes a style and compares it to the prompt and negative prompt. If the style
+ matches, returns True plus the prompt and negative prompt with the style text
+ removed. Otherwise, returns False with the original prompt and negative prompt.
+ """
+ if not style.prompt and not style.negative_prompt:
+ return False, prompt, negative_prompt
+
+ match_positive, extracted_positive, real_prompt = unwrap_style_text_from_prompt(
+ style.prompt, prompt
+ )
+ if not match_positive:
+ return False, prompt, negative_prompt, ''
+
+ match_negative, extracted_negative, _ = unwrap_style_text_from_prompt(
+ style.negative_prompt, negative_prompt
+ )
+ if not match_negative:
+ return False, prompt, negative_prompt, ''
+
+ return True, extracted_positive, extracted_negative, real_prompt
+
+
+def extract_styles_from_prompt(prompt, negative_prompt):
+ extracted = []
+ applicable_styles = []
+
+ for style_name, (style_prompt, style_negative_prompt) in modules.sdxl_styles.styles.items():
+ applicable_styles.append(PromptStyle(name=style_name, prompt=style_prompt, negative_prompt=style_negative_prompt))
+
+ real_prompt = ''
+
+ while True:
+ found_style = None
+
+ for style in applicable_styles:
+ is_match, new_prompt, new_neg_prompt, new_real_prompt = extract_original_prompts(
+ style, prompt, negative_prompt
+ )
+ if is_match:
+ found_style = style
+ prompt = new_prompt
+ negative_prompt = new_neg_prompt
+ if real_prompt == '' and new_real_prompt != '' and new_real_prompt != prompt:
+ real_prompt = new_real_prompt
+ break
+
+ if not found_style:
+ break
+
+ applicable_styles.remove(found_style)
+ extracted.append(found_style.name)
+
+ # add prompt expansion if not all styles could be resolved
+ if prompt != '':
+ if real_prompt != '':
+ extracted.append(modules.sdxl_styles.fooocus_expansion)
+ else:
+ # find real_prompt when only prompt expansion is selected
+ first_word = prompt.split(', ')[0]
+ first_word_positions = [i for i in range(len(prompt)) if prompt.startswith(first_word, i)]
+ if len(first_word_positions) > 1:
+ real_prompt = prompt[:first_word_positions[-1]]
+ extracted.append(modules.sdxl_styles.fooocus_expansion)
+ if real_prompt.endswith(', '):
+ real_prompt = real_prompt[:-2]
+
+ return list(reversed(extracted)), real_prompt, negative_prompt
+
+
+class PromptStyle(typing.NamedTuple):
+ name: str
+ prompt: str
+ negative_prompt: str
+
+
+def is_json(data: str) -> bool:
+ try:
+ loaded_json = json.loads(data)
+ assert isinstance(loaded_json, dict)
+ except (ValueError, AssertionError):
+ return False
+ return True
+
+
+def get_file_from_folder_list(name, folders):
+ for folder in folders:
+ filename = os.path.abspath(os.path.realpath(os.path.join(folder, name)))
+ if os.path.isfile(filename):
+ return filename
+
+ return os.path.abspath(os.path.realpath(os.path.join(folders[0], name)))
+
+
+def ordinal_suffix(number: int) -> str:
+ return 'th' if 10 <= number % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(number % 10, 'th')
+
+
+def makedirs_with_log(path):
+ try:
+ os.makedirs(path, exist_ok=True)
+ except OSError as error:
+ print(f'Directory {path} could not be created, reason: {error}')
+
+
+def get_enabled_loras(loras: list) -> list:
+ return [[lora[1], lora[2]] for lora in loras if lora[0]]
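
The preset files below all migrate `default_loras` entries from `[name, weight]` to `[enabled, name, weight]`, and `get_enabled_loras` above is the helper that strips the enable flag again before the pipeline consumes the list. A quick illustrative check with made-up entries:

```python
# Mirrors get_enabled_loras() from modules/util.py; the entries are made up.
def get_enabled_loras(loras: list) -> list:
    return [[lora[1], lora[2]] for lora in loras if lora[0]]

default_loras = [
    [True, "sd_xl_offset_example-lora_1.0.safetensors", 0.1],
    [False, "some_style_lora.safetensors", 0.8],  # disabled via new checkbox -> dropped
    [True, "None", 1.0],
]
print(get_enabled_loras(default_loras))
# [['sd_xl_offset_example-lora_1.0.safetensors', 0.1], ['None', 1.0]]
```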
diff --git a/presets/.gitignore b/presets/.gitignore
new file mode 100644
index 00000000..481930c5
--- /dev/null
+++ b/presets/.gitignore
@@ -0,0 +1,6 @@
+*.json
+!anime.json
+!default.json
+!lcm.json
+!realistic.json
+!sai.json
\ No newline at end of file
diff --git a/presets/anime.json b/presets/anime.json
index 8bd2813b..78607edb 100644
--- a/presets/anime.json
+++ b/presets/anime.json
@@ -1,46 +1,57 @@
{
- "default_model": "animaPencilXL_v100.safetensors",
+ "default_model": "animaPencilXL_v310.safetensors",
"default_refiner": "None",
"default_refiner_switch": 0.5,
"default_loras": [
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
]
],
- "default_cfg_scale": 7.0,
+ "default_cfg_scale": 6.0,
"default_sample_sharpness": 2.0,
"default_sampler": "dpmpp_2m_sde_gpu",
"default_scheduler": "karras",
"default_performance": "Speed",
- "default_prompt": "1girl, ",
+ "default_prompt": "",
"default_prompt_negative": "",
"default_styles": [
"Fooocus V2",
- "Fooocus Negative",
+ "Fooocus Semi Realistic",
"Fooocus Masterpiece"
],
"default_aspect_ratio": "896*1152",
"checkpoint_downloads": {
- "animaPencilXL_v100.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/animaPencilXL_v100.safetensors"
+ "animaPencilXL_v310.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/animaPencilXL_v310.safetensors"
},
"embeddings_downloads": {},
"lora_downloads": {},
- "previous_default_models": []
+ "previous_default_models": [
+ "animaPencilXL_v300.safetensors",
+ "animaPencilXL_v260.safetensors",
+ "animaPencilXL_v210.safetensors",
+ "animaPencilXL_v200.safetensors",
+ "animaPencilXL_v100.safetensors"
+ ]
}
\ No newline at end of file
diff --git a/presets/default.json b/presets/default.json
index 7930c92f..d02bb8a4 100644
--- a/presets/default.json
+++ b/presets/default.json
@@ -4,22 +4,27 @@
"default_refiner_switch": 0.5,
"default_loras": [
[
+ true,
"sd_xl_offset_example-lora_1.0.safetensors",
0.1
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
]
diff --git a/presets/lcm.json b/presets/lcm.json
index 3897f881..6713fdd5 100644
--- a/presets/lcm.json
+++ b/presets/lcm.json
@@ -4,22 +4,27 @@
"default_refiner_switch": 0.5,
"default_loras": [
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
]
diff --git a/presets/lightning.json b/presets/lightning.json
new file mode 100644
index 00000000..d1466c10
--- /dev/null
+++ b/presets/lightning.json
@@ -0,0 +1,57 @@
+{
+ "default_model": "juggernautXL_v8Rundiffusion.safetensors",
+ "default_refiner": "None",
+ "default_refiner_switch": 0.5,
+ "default_loras": [
+ [
+ true,
+ "None",
+ 1.0
+ ],
+ [
+ true,
+ "None",
+ 1.0
+ ],
+ [
+ true,
+ "None",
+ 1.0
+ ],
+ [
+ true,
+ "None",
+ 1.0
+ ],
+ [
+ true,
+ "None",
+ 1.0
+ ]
+ ],
+ "default_cfg_scale": 4.0,
+ "default_sample_sharpness": 2.0,
+ "default_sampler": "dpmpp_2m_sde_gpu",
+ "default_scheduler": "karras",
+ "default_performance": "Lightning",
+ "default_prompt": "",
+ "default_prompt_negative": "",
+ "default_styles": [
+ "Fooocus V2",
+ "Fooocus Enhance",
+ "Fooocus Sharp"
+ ],
+ "default_aspect_ratio": "1152*896",
+ "checkpoint_downloads": {
+ "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
+ },
+ "embeddings_downloads": {},
+ "lora_downloads": {},
+ "previous_default_models": [
+ "juggernautXL_version8Rundiffusion.safetensors",
+ "juggernautXL_version7Rundiffusion.safetensors",
+ "juggernautXL_v7Rundiffusion.safetensors",
+ "juggernautXL_version6Rundiffusion.safetensors",
+ "juggernautXL_v6Rundiffusion.safetensors"
+ ]
+}
\ No newline at end of file
diff --git a/presets/realistic.json b/presets/realistic.json
index 7799c96a..6db6d0b7 100644
--- a/presets/realistic.json
+++ b/presets/realistic.json
@@ -1,25 +1,30 @@
{
"default_model": "realisticStockPhoto_v20.safetensors",
- "default_refiner": "",
+ "default_refiner": "None",
"default_refiner_switch": 0.5,
"default_loras": [
[
+ true,
"SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors",
0.25
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
]
diff --git a/presets/sai.json b/presets/sai.json
index fecf047b..918028f3 100644
--- a/presets/sai.json
+++ b/presets/sai.json
@@ -4,22 +4,27 @@
"default_refiner_switch": 0.75,
"default_loras": [
[
+ true,
"sd_xl_offset_example-lora_1.0.safetensors",
0.5
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
],
[
+ true,
"None",
1.0
]
diff --git a/readme.md b/readme.md
index 77653816..5f66e02a 100644
--- a/readme.md
+++ b/readme.md
@@ -84,6 +84,10 @@ The first time you launch the software, it will automatically download models:
After Fooocus 2.1.60, you will also have `run_anime.bat` and `run_realistic.bat`. They are different model presets (and require different models, but they will be automatically downloaded). [Check here for more details](https://github.com/lllyasviel/Fooocus/discussions/679).
+After Fooocus 2.3.0 you can also switch presets directly in the browser. Add these arguments if you want to change the default behavior:
+* Use `--disable-preset-selection` to disable preset selection in the browser.
+* Use `--always-download-new-model` to download missing models on preset switch. By default, Fooocus falls back to the `previous_default_models` defined in the corresponding preset; see also the terminal output.
+

If you already have these files, you can copy them to the above locations to speed up installation.
@@ -115,17 +119,21 @@ See also the common problems and troubleshoots [here](troubleshoot.md).
### Colab
-(Last tested - 2023 Dec 12)
+(Last tested - 2024 Mar 18 by [mashb1t](https://github.com/mashb1t))
| Colab | Info
| --- | --- |
[](https://colab.research.google.com/github/lllyasviel/Fooocus/blob/main/fooocus_colab.ipynb) | Fooocus Official
-In Colab, you can modify the last line to `!python entry_with_update.py --share` or `!python entry_with_update.py --preset anime --share` or `!python entry_with_update.py --preset realistic --share` for Fooocus Default/Anime/Realistic Edition.
+In Colab, you can modify the last line to `!python entry_with_update.py --share --always-high-vram` or `!python entry_with_update.py --share --always-high-vram --preset anime` or `!python entry_with_update.py --share --always-high-vram --preset realistic` for Fooocus Default/Anime/Realistic Edition.
+
+You can also change the preset in the UI. Please be aware that this may lead to timeouts after 60 seconds. If this happens, please wait until the download has finished, then switch the preset to `initial` and back to the one you selected, or reload the page.
Note that this Colab will disable refiner by default because Colab free's resources are relatively limited (and some "big" features like image prompt may cause free-tier Colab to disconnect). We make sure that basic text-to-image is always working on free-tier Colab.
-Thanks to [camenduru](https://github.com/camenduru)!
+Using `--always-high-vram` shifts resource allocation from RAM to VRAM and achieves the overall best balance between performance, flexibility and stability on the default T4 instance. Please find more information [here](https://github.com/lllyasviel/Fooocus/pull/1710#issuecomment-1989185346).
+
+Thanks to [camenduru](https://github.com/camenduru) for the template!
### Linux (Using Anaconda)
@@ -202,7 +210,7 @@ AMD is not intensively tested, however. The AMD support is in beta.
Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition.
-### Windows(AMD GPUs)
+### Windows (AMD GPUs)
Note that the [minimal requirement](#minimal-requirement) for different platforms is different.
@@ -237,6 +245,10 @@ You can install Fooocus on Apple Mac silicon (M1 or M2) with macOS 'Catalina' or
Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition.
+### Docker
+
+See [docker.md](docker.md)
+
### Download Previous Version
See the guidelines [here](https://github.com/lllyasviel/Fooocus/discussions/1405).
@@ -281,14 +293,21 @@ Given different goals, the default models and configs of Fooocus are different:
Note that the download is **automatic** - you do not need to do anything if the internet connection is okay. However, you can download them manually if you (or move them from somewhere else) have your own preparation.
+## UI Access and Authentication
+In addition to running on localhost, Fooocus can also expose its UI in two ways:
+* Local UI listener: use `--listen` (specify port e.g. with `--port 8888`).
+* API access: use `--share` (registers an endpoint at `.gradio.live`).
+
+In both cases, access is unauthenticated by default. You can add basic authentication by creating a file called `auth.json` in the main directory, containing a list of JSON objects with the keys `user` and `pass` (see example in [auth-example.json](./auth-example.json)).
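
A hypothetical `auth.json` matching that shape (placeholder credentials; `auth-example.json` in the repo is the canonical reference):

```json
[
    {"user": "alice", "pass": "replace-me"},
    {"user": "bob", "pass": "replace-me-too"}
]
```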
+
## List of "Hidden" Tricks
The below things are already inside the software, and **users do not need to do anything about these**.
-1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processsing and "raw" mode, or the LeonardoAI's Prompt Magic).
+1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processing and "raw" mode, or the LeonardoAI's Prompt Magic).
2. Native refiner swap inside one single k-sampler. The advantage is that the refiner model can now reuse the base model's momentum (or ODE's history parameters) collected from k-sampling to achieve more coherent sampling. In Automatic1111's high-res fix and ComfyUI's node system, the base model and refiner use two independent k-samplers, which means the momentum is largely wasted, and the sampling continuity is broken. Fooocus uses its own advanced k-diffusion sampling that ensures seamless, native, and continuous swap in a refiner setup. (Update Aug 13: Actually, I discussed this with Automatic1111 several days ago, and it seems that the “native refiner swap inside one single k-sampler” is [merged]( https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371) into the dev branch of webui. Great!)
-3. Negative ADM guidance. Because the highest resolution level of XL Base does not have cross attentions, the positive and negative signals for XL's highest resolution level cannot receive enough contrasts during the CFG sampling, causing the results to look a bit plastic or overly smooth in certain cases. Fortunately, since the XL's highest resolution level is still conditioned on image aspect ratios (ADM), we can modify the adm on the positive/negative side to compensate for the lack of CFG contrast in the highest resolution level. (Update Aug 16, the IOS App [Drawing Things](https://apps.apple.com/us/app/draw-things-ai-generation/id6444050820) will support Negative ADM Guidance. Great!)
+3. Negative ADM guidance. Because the highest resolution level of XL Base does not have cross attentions, the positive and negative signals for XL's highest resolution level cannot receive enough contrasts during the CFG sampling, causing the results to look a bit plastic or overly smooth in certain cases. Fortunately, since the XL's highest resolution level is still conditioned on image aspect ratios (ADM), we can modify the adm on the positive/negative side to compensate for the lack of CFG contrast in the highest resolution level. (Update Aug 16, the IOS App [Draw Things](https://apps.apple.com/us/app/draw-things-ai-generation/id6444050820) will support Negative ADM Guidance. Great!)
4. We implemented a carefully tuned variation of Section 5.1 of ["Improving Sample Quality of Diffusion Models Using Self-Attention Guidance"](https://arxiv.org/pdf/2210.00939.pdf). The weight is set to very low, but this is Fooocus's final guarantee to make sure that the XL will never yield an overly smooth or plastic appearance (examples [here](https://github.com/lllyasviel/Fooocus/discussions/117#sharpness)). This can almost eliminate all cases for which XL still occasionally produces overly smooth results, even with negative ADM guidance. (Update 2023 Aug 18, the Gaussian kernel of SAG is changed to an anisotropic kernel for better structure preservation and fewer artifacts.)
5. We modified the style templates a bit and added the "cinematic-default".
6. We tested the "sd_xl_offset_example-lora_1.0.safetensors" and it seems that when the lora weight is below 0.5, the results are always better than XL without lora.
@@ -363,7 +382,7 @@ entry_with_update.py [-h] [--listen [IP]] [--port PORT]
[--attention-split | --attention-quad | --attention-pytorch]
[--disable-xformers]
[--always-gpu | --always-high-vram | --always-normal-vram |
- --always-low-vram | --always-no-vram | --always-cpu]
+ --always-low-vram | --always-no-vram | --always-cpu [CPU_NUM_THREADS]]
[--always-offload-from-vram] [--disable-server-log]
[--debug-mode] [--is-windows-embedded-python]
[--disable-server-info] [--share] [--preset PRESET]
diff --git a/requirements_docker.txt b/requirements_docker.txt
new file mode 100644
index 00000000..3cf4aa89
--- /dev/null
+++ b/requirements_docker.txt
@@ -0,0 +1,5 @@
+torch==2.0.1
+torchvision==0.15.2
+torchaudio==2.0.2
+torchtext==0.15.2
+torchdata==0.6.1
diff --git a/sdxl_styles/samples/fooocus_semi_realistic.jpg b/sdxl_styles/samples/fooocus_semi_realistic.jpg
new file mode 100644
index 00000000..b07555a7
Binary files /dev/null and b/sdxl_styles/samples/fooocus_semi_realistic.jpg differ
diff --git a/sdxl_styles/sdxl_styles_fooocus.json b/sdxl_styles/sdxl_styles_fooocus.json
index 81d6442e..cf64eab4 100644
--- a/sdxl_styles/sdxl_styles_fooocus.json
+++ b/sdxl_styles/sdxl_styles_fooocus.json
@@ -3,6 +3,10 @@
"name": "Fooocus Enhance",
"negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)"
},
+ {
+ "name": "Fooocus Semi Realistic",
+ "negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)"
+ },
{
"name": "Fooocus Sharp",
"prompt": "cinematic still {prompt} . emotional, harmonious, vignette, 4k epic detailed, shot on kodak, 35mm photo, sharp focus, high budget, cinemascope, moody, epic, gorgeous, film grain, grainy",
diff --git a/shared.py b/shared.py
index 269809e3..21a2a864 100644
--- a/shared.py
+++ b/shared.py
@@ -1,2 +1 @@
-gradio_root = None
-last_stop = None
+gradio_root = None
\ No newline at end of file
diff --git a/update_log.md b/update_log.md
index e052d24c..62c4882b 100644
--- a/update_log.md
+++ b/update_log.md
@@ -1,3 +1,40 @@
+# [2.3.1](https://github.com/lllyasviel/Fooocus/releases/tag/2.3.1)
+
+* Remove positive prompt from anime preset so the prompt is no longer reset after switching presets
+* Fix image number being reset to 1 when switching presets
+* Fix outpainting dimension calculation when extending left/right
+* Fix LoRA compatibility for LoRAs in a1111 metadata scheme
+
+# [2.3.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.3.0)
+
+* Add performance "lightning" (based on [SDXL-Lightning 4 step LoRA](https://huggingface.co/ByteDance/SDXL-Lightning/blob/main/sdxl_lightning_4step_lora.safetensors))
+* Add preset selection to UI, disable with argument `--disable-preset-selection`. Use `--always-download-new-model` to download missing models on preset switch.
+* Improve face swap consistency by switching later in the process to (synthetic) refiner
+* Add temp path cleanup on startup
+* Add support for wildcard subdirectories
+* Add scrollable 2 column layout for styles for better structure
+* Improve Colab resource needs for T4 instances (default), positively tested with all image prompt features
+* Improve anime preset, now uses style `Fooocus Semi Realistic` instead of `Fooocus Negative` (less wet look images)
+
+# [2.2.1](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.1)
+
+* Fix some small bugs (e.g. image grid, upscale fast 2x, LoRA weight width in Firefox)
+* Allow prompt weights in array syntax
+* Add steps override and metadata scheme to history log
+
+# [2.2.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.0)
+
+* Isolate every image generation to truly allow multi-user usage
+* Add array support; the main prompt changes as the image number increases. Syntax: `[[red, green, blue]] flower`
+* Add optional metadata to images, allowing you to regenerate and modify them later with the same parameters
+* Now supports native PNG, JPG and WEBP image generation
+* Add Docker support
+
+# [2.1.865](https://github.com/lllyasviel/Fooocus/releases/tag/2.1.865)
+
+* Various bugfixes
+* Add authentication to --listen
+
# 2.1.864
* New model list. See also discussions.
diff --git a/webui.py b/webui.py
index 493ff376..29eed606 100644
--- a/webui.py
+++ b/webui.py
@@ -11,28 +11,35 @@ import modules.async_worker as worker
import modules.constants as constants
import modules.flags as flags
import modules.gradio_hijack as grh
-import modules.advanced_parameters as advanced_parameters
import modules.style_sorter as style_sorter
import modules.meta_parser
import args_manager
import copy
+import launch
from modules.sdxl_styles import legal_style_names
from modules.private_logger import get_current_html_path
from modules.ui_gradio_extensions import reload_javascript
from modules.auth import auth_enabled, check_auth
+from modules.util import is_json
-def generate_clicked(*args):
+def get_task(*args):
+    args = list(args)
+    args.pop(0)
+
+    return worker.AsyncTask(args=args)
+
+
+def generate_clicked(task: worker.AsyncTask):
import ldm_patched.modules.model_management as model_management
with model_management.interrupt_processing_mutex:
model_management.interrupt_processing = False
-
# outputs=[progress_html, progress_window, progress_gallery, gallery]
+ if len(task.args) == 0:
+ return
+
execution_start_time = time.perf_counter()
- task = worker.AsyncTask(args=list(args))
finished = False
yield gr.update(visible=True, value=modules.html.make_progress_html(1, 'Waiting for task to start ...')), \
@@ -71,6 +78,12 @@ def generate_clicked(*args):
gr.update(visible=True, value=product)
finished = True
+ # delete Fooocus temp images, only keep gradio temp images
+ if args_manager.args.disable_image_log:
+ for filepath in product:
+ if isinstance(filepath, str) and os.path.exists(filepath):
+ os.remove(filepath)
+
execution_time = time.perf_counter() - execution_start_time
print(f'Total time: {execution_time:.2f} seconds')
return
@@ -83,11 +96,10 @@ title = f'Fooocus {fooocus_version.version}'
if isinstance(args_manager.args.preset, str):
title += ' ' + args_manager.args.preset
-shared.gradio_root = gr.Blocks(
- title=title,
- css=modules.html.css).queue()
+shared.gradio_root = gr.Blocks(title=title).queue()
with shared.gradio_root:
+ currentTask = gr.State(worker.AsyncTask(args=[]))
with gr.Row():
with gr.Column(scale=2):
with gr.Row():
@@ -115,21 +127,22 @@ with shared.gradio_root:
skip_button = gr.Button(label="Skip", value="Skip", elem_classes='type_row_half', visible=False)
stop_button = gr.Button(label="Stop", value="Stop", elem_classes='type_row_half', elem_id='stop_button', visible=False)
- def stop_clicked():
+ def stop_clicked(currentTask):
import ldm_patched.modules.model_management as model_management
- shared.last_stop = 'stop'
- model_management.interrupt_current_processing()
- return [gr.update(interactive=False)] * 2
+ currentTask.last_stop = 'stop'
+ if (currentTask.processing):
+ model_management.interrupt_current_processing()
+ return currentTask
- def skip_clicked():
+ def skip_clicked(currentTask):
import ldm_patched.modules.model_management as model_management
- shared.last_stop = 'skip'
- model_management.interrupt_current_processing()
- return
+ currentTask.last_stop = 'skip'
+ if (currentTask.processing):
+ model_management.interrupt_current_processing()
+ return currentTask
- stop_button.click(stop_clicked, outputs=[skip_button, stop_button],
- queue=False, show_progress=False, _js='cancelGenerateForever')
- skip_button.click(skip_clicked, queue=False, show_progress=False)
+ stop_button.click(stop_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False, _js='cancelGenerateForever')
+ skip_button.click(skip_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False)
with gr.Row(elem_classes='advanced_check_row'):
input_image_checkbox = gr.Checkbox(label='Input Image', value=False, container=False, elem_classes='min_check')
advanced_checkbox = gr.Checkbox(label='Advanced', value=modules.config.default_advanced_checkbox, container=False, elem_classes='min_check')
@@ -150,7 +163,7 @@ with shared.gradio_root:
ip_weights = []
ip_ctrls = []
ip_ad_cols = []
- for _ in range(4):
+ for _ in range(flags.controlnet_image_count):
with gr.Column():
ip_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False, height=300)
ip_images.append(ip_image)
@@ -208,6 +221,27 @@ with shared.gradio_root:
value=flags.desc_type_photo)
desc_btn = gr.Button(value='Describe this Image into Prompt')
gr.HTML('\U0001F4D4 Document')
+ with gr.TabItem(label='Metadata') as load_tab:
+ with gr.Column():
+ metadata_input_image = grh.Image(label='Drag any image generated by Fooocus here', source='upload', type='filepath')
+ metadata_json = gr.JSON(label='Metadata')
+ metadata_import_button = gr.Button(value='Apply Metadata')
+
+ def trigger_metadata_preview(filepath):
+ parameters, metadata_scheme = modules.meta_parser.read_info_from_image(filepath)
+
+ results = {}
+ if parameters is not None:
+ results['parameters'] = parameters
+
+ if isinstance(metadata_scheme, flags.MetadataScheme):
+ results['metadata_scheme'] = metadata_scheme.value
+
+ return results
+
+ metadata_input_image.upload(trigger_metadata_preview, inputs=metadata_input_image,
+ outputs=metadata_json, queue=False, show_progress=True)
+
switch_js = "(x) => {if(x){viewer_to_bottom(100);viewer_to_bottom(500);}else{viewer_to_top();} return x;}"
down_js = "() => {viewer_to_bottom();}"
@@ -223,13 +257,23 @@ with shared.gradio_root:
with gr.Column(scale=1, visible=modules.config.default_advanced_checkbox) as advanced_column:
with gr.Tab(label='Setting'):
+ if not args_manager.args.disable_preset_selection:
+ preset_selection = gr.Radio(label='Preset',
+ choices=modules.config.available_presets,
+ value=args_manager.args.preset if args_manager.args.preset else "initial",
+ interactive=True)
performance_selection = gr.Radio(label='Performance',
- choices=modules.flags.performance_selections,
+ choices=flags.Performance.list(),
value=modules.config.default_performance)
aspect_ratios_selection = gr.Radio(label='Aspect Ratios', choices=modules.config.available_aspect_ratios,
value=modules.config.default_aspect_ratio, info='width × height',
elem_classes='aspect_ratios')
image_number = gr.Slider(label='Image Number', minimum=1, maximum=modules.config.default_max_image_number, step=1, value=modules.config.default_image_number)
+
+ output_format = gr.Radio(label='Output Format',
+ choices=flags.OutputFormat.list(),
+ value=modules.config.default_output_format)
+
negative_prompt = gr.Textbox(label='Negative Prompt', show_label=True, placeholder="Type prompt here.",
info='Describing what you do not want to see.', lines=2,
elem_id='negative_prompt',
@@ -255,10 +299,16 @@ with shared.gradio_root:
seed_random.change(random_checked, inputs=[seed_random], outputs=[image_seed],
queue=False, show_progress=False)
- if not args_manager.args.disable_image_log:
- gr.HTML(f'\U0001F4DA History Log')
+ def update_history_link():
+ if args_manager.args.disable_image_log:
+ return gr.update(value='')
+
+ return gr.update(value=f'\U0001F4DA History Log')
- with gr.Tab(label='Style'):
+ history_link = gr.HTML()
+ shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False)
+
+ with gr.Tab(label='Style', elem_classes=['style_selections_tab']):
style_sorter.try_load_sorted_styles(
style_names=legal_style_names,
default_selected=modules.config.default_styles)
@@ -311,16 +361,20 @@ with shared.gradio_root:
with gr.Group():
lora_ctrls = []
- for i, (n, v) in enumerate(modules.config.default_loras):
+ for i, (enabled, filename, weight) in enumerate(modules.config.default_loras):
with gr.Row():
+ lora_enabled = gr.Checkbox(label='Enable', value=enabled,
+ elem_classes=['lora_enable', 'min_check'], scale=1)
lora_model = gr.Dropdown(label=f'LoRA {i + 1}',
- choices=['None'] + modules.config.lora_filenames, value=n)
- lora_weight = gr.Slider(label='Weight', minimum=-2, maximum=2, step=0.01, value=v,
- elem_classes='lora_weight')
- lora_ctrls += [lora_model, lora_weight]
+ choices=['None'] + modules.config.lora_filenames, value=filename,
+ elem_classes='lora_model', scale=5)
+ lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight,
+ maximum=modules.config.default_loras_max_weight, step=0.01, value=weight,
+ elem_classes='lora_weight', scale=5)
+ lora_ctrls += [lora_enabled, lora_model, lora_weight]
with gr.Row():
- model_refresh = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button')
+ refresh_files = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button')
with gr.Tab(label='Advanced'):
guidance_scale = gr.Slider(label='Guidance Scale', minimum=1.0, maximum=30.0, step=0.01,
value=modules.config.default_cfg_scale,
@@ -341,7 +395,7 @@ with shared.gradio_root:
step=0.001, value=0.3,
info='When to end the guidance from positive/negative ADM. ')
- refiner_swap_method = gr.Dropdown(label='Refiner swap method', value='joint',
+ refiner_swap_method = gr.Dropdown(label='Refiner swap method', value=flags.refiner_swap_method,
choices=['joint', 'separate', 'vae'])
adaptive_cfg = gr.Slider(label='CFG Mimicking from TSNR', minimum=1.0, maximum=30.0, step=0.01,
@@ -382,6 +436,24 @@ with shared.gradio_root:
disable_preview = gr.Checkbox(label='Disable Preview', value=modules.config.default_black_out_nsfw,
interactive=not modules.config.default_black_out_nsfw,
info='Disable preview during generation.')
+ disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
+ value=modules.config.default_performance == flags.Performance.EXTREME_SPEED.value,
+ interactive=modules.config.default_performance != flags.Performance.EXTREME_SPEED.value,
+ info='Disable intermediate results during generation, only show final gallery.')
+ disable_seed_increment = gr.Checkbox(label='Disable seed increment',
+ info='Disable automatic seed increment when image number is > 1.',
+ value=False)
+ read_wildcards_in_order = gr.Checkbox(label="Read wildcards in order", value=False)
+
+ if not args_manager.args.disable_metadata:
+ save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images,
+ info='Adds parameters to generated images allowing manual regeneration.')
+ metadata_scheme = gr.Radio(label='Metadata Scheme', choices=flags.metadata_scheme, value=modules.config.default_metadata_scheme,
+ info='Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.',
+ visible=modules.config.default_save_metadata_to_images)
+
+ save_metadata_to_images.change(lambda x: gr.update(visible=x), inputs=[save_metadata_to_images], outputs=[metadata_scheme],
+ queue=False, show_progress=False)
black_out_nsfw = gr.Checkbox(label='Black Out NSFW', value=modules.config.default_black_out_nsfw,
interactive=not modules.config.default_black_out_nsfw,
@@ -437,7 +509,7 @@ with shared.gradio_root:
'(default is 0, always process before any mask invert)')
inpaint_mask_upload_checkbox = gr.Checkbox(label='Enable Mask Upload', value=False)
invert_mask_checkbox = gr.Checkbox(label='Invert Mask', value=False)
-
+
inpaint_ctrls = [debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine,
inpaint_strength, inpaint_respective_field,
inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate]
@@ -454,42 +526,72 @@ with shared.gradio_root:
freeu_s2 = gr.Slider(label='S2', minimum=0, maximum=4, step=0.01, value=0.95)
freeu_ctrls = [freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2]
- adps = [disable_preview, black_out_nsfw, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name,
- scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height,
- overwrite_vary_strength, overwrite_upscale_strength,
- mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint,
- debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness,
- canny_low_threshold, canny_high_threshold, refiner_swap_method]
- adps += freeu_ctrls
- adps += inpaint_ctrls
-
def dev_mode_checked(r):
return gr.update(visible=r)
-
dev_mode.change(dev_mode_checked, inputs=[dev_mode], outputs=[dev_tools],
queue=False, show_progress=False)
- def model_refresh_clicked():
- modules.config.update_all_model_names()
- results = []
- results += [gr.update(choices=modules.config.model_filenames), gr.update(choices=['None'] + modules.config.model_filenames)]
- for i in range(5):
- results += [gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
+ def refresh_files_clicked():
+ modules.config.update_files()
+ results = [gr.update(choices=modules.config.model_filenames)]
+ results += [gr.update(choices=['None'] + modules.config.model_filenames)]
+ if not args_manager.args.disable_preset_selection:
+ results += [gr.update(choices=modules.config.available_presets)]
+ for i in range(modules.config.default_max_lora_number):
+ results += [gr.update(interactive=True),
+ gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
return results
- model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls,
+ refresh_files_output = [base_model, refiner_model]
+ if not args_manager.args.disable_preset_selection:
+ refresh_files_output += [preset_selection]
+ refresh_files.click(refresh_files_clicked, [], refresh_files_output + lora_ctrls,
queue=False, show_progress=False)
- performance_selection.change(lambda x: [gr.update(interactive=x != 'Extreme Speed')] * 11 +
- [gr.update(visible=x != 'Extreme Speed')] * 1,
+ state_is_generating = gr.State(False)
+
+ load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections,
+ performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection,
+ overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive,
+ adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, base_model,
+ refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, image_seed,
+ generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls
+
+ if not args_manager.args.disable_preset_selection:
+ def preset_selection_change(preset, is_generating):
+ preset_content = modules.config.try_get_preset_content(preset) if preset != 'initial' else {}
+ preset_prepared = modules.meta_parser.parse_meta_from_preset(preset_content)
+
+ default_model = preset_prepared.get('base_model')
+ previous_default_models = preset_prepared.get('previous_default_models', [])
+ checkpoint_downloads = preset_prepared.get('checkpoint_downloads', {})
+ embeddings_downloads = preset_prepared.get('embeddings_downloads', {})
+ lora_downloads = preset_prepared.get('lora_downloads', {})
+
+ preset_prepared['base_model'], preset_prepared['lora_downloads'] = launch.download_models(
+ default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads)
+
+ if 'prompt' in preset_prepared and preset_prepared.get('prompt') == '':
+ del preset_prepared['prompt']
+
+ return modules.meta_parser.load_parameter_button_click(json.dumps(preset_prepared), is_generating)
+
+ preset_selection.change(preset_selection_change, inputs=[preset_selection, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
+ .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
+
+ performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 +
+ [gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 +
+ [gr.update(interactive=not flags.Performance.has_restricted_features(x), value=flags.Performance.has_restricted_features(x))] * 1,
inputs=performance_selection,
outputs=[
guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive,
adm_scaler_negative, refiner_switch, refiner_model, sampler_name,
- scheduler_name, adaptive_cfg, refiner_swap_method, negative_prompt
+ scheduler_name, adaptive_cfg, refiner_swap_method, negative_prompt, disable_intermediate_results
], queue=False, show_progress=False)
-
+
+ output_format.input(lambda x: gr.update(output_format=x), inputs=output_format)
+
advanced_checkbox.change(lambda x: gr.update(visible=x), advanced_checkbox, advanced_column,
queue=False, show_progress=False) \
.then(fn=lambda: None, _js='refresh_grid_delayed', queue=False, show_progress=False)
@@ -527,29 +629,36 @@ with shared.gradio_root:
inpaint_strength, inpaint_respective_field
], show_progress=False, queue=False)
- ctrls = [
+ ctrls = [currentTask, generate_image_grid]
+ ctrls += [
prompt, negative_prompt, style_selections,
- performance_selection, aspect_ratios_selection, image_number, image_seed, sharpness, guidance_scale
+ performance_selection, aspect_ratios_selection, image_number, output_format, image_seed,
+ read_wildcards_in_order, sharpness, guidance_scale
]
ctrls += [base_model, refiner_model, refiner_switch] + lora_ctrls
ctrls += [input_image_checkbox, current_tab]
ctrls += [uov_method, uov_input_image]
ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt, inpaint_mask_image]
- ctrls += ip_ctrls
+ ctrls += [disable_preview, disable_intermediate_results, disable_seed_increment]
+ ctrls += [adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg]
+ ctrls += [sampler_name, scheduler_name]
+ ctrls += [overwrite_step, overwrite_switch, overwrite_width, overwrite_height, overwrite_vary_strength]
+ ctrls += [overwrite_upscale_strength, mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint]
+ ctrls += [debugging_cn_preprocessor, skipping_cn_preprocessor, canny_low_threshold, canny_high_threshold]
+ ctrls += [refiner_swap_method, controlnet_softness]
+ ctrls += freeu_ctrls
+ ctrls += inpaint_ctrls
- state_is_generating = gr.State(False)
+ if not args_manager.args.disable_metadata:
+ ctrls += [save_metadata_to_images, metadata_scheme]
+
+ ctrls += ip_ctrls
def parse_meta(raw_prompt_txt, is_generating):
loaded_json = None
- try:
- if '{' in raw_prompt_txt:
- if '}' in raw_prompt_txt:
- if ':' in raw_prompt_txt:
- loaded_json = json.loads(raw_prompt_txt)
- assert isinstance(loaded_json, dict)
- except:
- loaded_json = None
+ if is_json(raw_prompt_txt):
+ loaded_json = json.loads(raw_prompt_txt)
if loaded_json is None:
if is_generating:
@@ -561,39 +670,30 @@ with shared.gradio_root:
prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False)
- load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=[
- advanced_checkbox,
- image_number,
- prompt,
- negative_prompt,
- style_selections,
- performance_selection,
- aspect_ratios_selection,
- overwrite_width,
- overwrite_height,
- sharpness,
- guidance_scale,
- adm_scaler_positive,
- adm_scaler_negative,
- adm_scaler_end,
- base_model,
- refiner_model,
- refiner_switch,
- sampler_name,
- scheduler_name,
- seed_random,
- image_seed,
- generate_button,
- load_parameter_button
- ] + lora_ctrls, queue=False, show_progress=False)
+ load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False)
+
+ def trigger_metadata_import(filepath, state_is_generating):
+ parameters, metadata_scheme = modules.meta_parser.read_info_from_image(filepath)
+ if parameters is None:
+ print('Could not find metadata in the image!')
+ parsed_parameters = {}
+ else:
+ metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
+ parsed_parameters = metadata_parser.parse_json(parameters)
+
+ return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating)
+
+ metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
+ .then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)
generate_button.click(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), [], True),
outputs=[stop_button, skip_button, generate_button, gallery, state_is_generating]) \
.then(fn=refresh_seed, inputs=[seed_random, image_seed], outputs=image_seed) \
- .then(advanced_parameters.set_all_advanced_parameters, inputs=adps) \
- .then(fn=generate_clicked, inputs=ctrls, outputs=[progress_html, progress_window, progress_gallery, gallery]) \
+ .then(fn=get_task, inputs=ctrls, outputs=currentTask) \
+ .then(fn=generate_clicked, inputs=currentTask, outputs=[progress_html, progress_window, progress_gallery, gallery]) \
.then(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), gr.update(visible=False, interactive=False), False),
outputs=[generate_button, stop_button, skip_button, state_is_generating]) \
+ .then(fn=update_history_link, outputs=history_link) \
.then(fn=lambda: None, _js='playNotification').then(fn=lambda: None, _js='refresh_grid_delayed')
for notification_file in ['notification.ogg', 'notification.mp3']:
@@ -626,6 +726,7 @@ shared.gradio_root.launch(
server_name=args_manager.args.listen,
server_port=args_manager.args.port,
share=args_manager.args.share,
- auth=check_auth if args_manager.args.share and auth_enabled else None,
+ auth=check_auth if (args_manager.args.share or args_manager.args.listen) and auth_enabled else None,
+ allowed_paths=[modules.config.path_outputs],
blocked_paths=[constants.AUTH_FILENAME]
)
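
The new Metadata tab above delegates to `modules.meta_parser.read_info_from_image`; conceptually it just reads the text chunks Fooocus embeds into the PNG. A rough sketch with PIL only, where the chunk key names (`parameters`, `fooocus_scheme`) are assumptions rather than confirmed API:

```python
import json
from PIL import Image

def peek_metadata(filepath: str) -> dict:
    # PNG tEXt/iTXt chunks land in Image.info; the key names here are assumed.
    info = Image.open(filepath).info
    results = {}
    if 'parameters' in info:
        raw = info['parameters']
        try:
            results['parameters'] = json.loads(raw)  # fooocus scheme stores JSON
        except ValueError:
            results['parameters'] = raw              # a1111 scheme stores plain text
    if 'fooocus_scheme' in info:
        results['metadata_scheme'] = info['fooocus_scheme']
    return results

print(peek_metadata('outputs/2024-03-18/example.png'))  # hypothetical path
```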
diff --git a/wildcards/animal.txt b/wildcards/animal.txt
new file mode 100644
index 00000000..3c479daa
--- /dev/null
+++ b/wildcards/animal.txt
@@ -0,0 +1,100 @@
+Alligator
+Ant
+Antelope
+Armadillo
+Badger
+Bat
+Bear
+Beaver
+Bison
+Boar
+Bobcat
+Bull
+Camel
+Chameleon
+Cheetah
+Chicken
+Chihuahua
+Chimpanzee
+Chinchilla
+Chipmunk
+Komodo Dragon
+Cow
+Coyote
+Crocodile
+Crow
+Deer
+Dinosaur
+Dolphin
+Donkey
+Duck
+Eagle
+Eel
+Elephant
+Elk
+Emu
+Falcon
+Ferret
+Flamingo
+Flying Squirrel
+Giraffe
+Goose
+Guinea pig
+Hawk
+Hedgehog
+Hippopotamus
+Horse
+Hummingbird
+Hyena
+Jackal
+Jaguar
+Jellyfish
+Kangaroo
+King Cobra
+Koala bear
+Leopard
+Lion
+Lizard
+Magpie
+Marten
+Meerkat
+Mole
+Monkey
+Moose
+Mouse
+Octopus
+Okapi
+Orangutan
+Ostrich
+Otter
+Owl
+Panda
+Pangolin
+Panther
+Penguin
+Pig
+Porcupine
+Possum
+Puma
+Quokka
+Rabbit
+Raccoon
+Raven
+Reindeer
+Rhinoceros
+Seal
+Shark
+Sheep
+Snail
+Snake
+Sparrow
+Spider
+Squirrel
+Swallow
+Tiger
+Walrus
+Whale
+Wolf
+Wombat
+Yak
+Zebra