Fooocus Log {date_string} (private)
\nAll images are clean, without any hidden data/meta, and safe to share with others.
\n\n" + begin_part = f"Fooocus Log {date_string} (private)
\nAll images are clean, without any hidden data/meta, and safe to share with others.
\n\n" end_part = f'\n' middle_part = log_cache.get(html_name, "") @@ -83,14 +105,14 @@ def log(img, dic): div_name = only_name.replace('.', '_') item = f"{only_name} | "
+ item += f"{only_name} | "
item += "
| "
@@ -105,4 +127,4 @@ def log(img, dic):
log_cache[html_name] = middle_part
- return
+ return local_temp_filename
diff --git a/modules/sdxl_styles.py b/modules/sdxl_styles.py
index f5bb6276..71afc402 100644
--- a/modules/sdxl_styles.py
+++ b/modules/sdxl_styles.py
@@ -1,6 +1,7 @@
import os
import re
import json
+import math
from modules.util import get_files_from_folder
@@ -80,3 +81,38 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
print(f'[Wildcards] BFS stack overflow. Current text: {wildcard_text}')
return wildcard_text
+
+def get_words(arrays, totalMult, index):
+ if(len(arrays) == 1):
+ return [arrays[0].split(',')[index]]
+ else:
+ words = arrays[0].split(',')
+ word = words[index % len(words)]
+ index -= index % len(words)
+ index /= len(words)
+ index = math.floor(index)
+ return [word] + get_words(arrays[1:], math.floor(totalMult/len(words)), index)
+
+
+
+def apply_arrays(text, index):
+ arrays = re.findall(r'\[\[([\s,\w-]+)\]\]', text)
+ if len(arrays) == 0:
+ return text
+
+ print(f'[Arrays] processing: {text}')
+ mult = 1
+ for arr in arrays:
+ words = arr.split(',')
+ mult *= len(words)
+
+ index %= mult
+ chosen_words = get_words(arrays, mult, index)
+
+ i = 0
+ for arr in arrays:
+ text = text.replace(f'[[{arr}]]', chosen_words[i], 1)
+ i = i+1
+
+ return text
+
diff --git a/modules/util.py b/modules/util.py
index 052b746b..c7923ec8 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -1,15 +1,20 @@
+import typing
+
import numpy as np
import datetime
import random
import math
import os
import cv2
+import json
from PIL import Image
+from hashlib import sha256
+import modules.sdxl_styles
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
-
+HASH_SHA256_LENGTH = 10
def erode_or_dilate(x, k):
k = int(k)
@@ -155,7 +160,7 @@ def generate_temp_filename(folder='./outputs/', extension='png'):
random_number = random.randint(1000, 9999)
filename = f"{time_string}_{random_number}.{extension}"
result = os.path.join(folder, date_string, filename)
- return date_string, os.path.abspath(os.path.realpath(result)), filename
+ return date_string, os.path.abspath(result), filename
def get_files_from_folder(folder_path, exensions=None, name_filter=None):
@@ -164,14 +169,194 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None):
filenames = []
- for root, dirs, files in os.walk(folder_path):
+ for root, dirs, files in os.walk(folder_path, topdown=False):
relative_path = os.path.relpath(root, folder_path)
if relative_path == ".":
relative_path = ""
- for filename in files:
+ for filename in sorted(files, key=lambda s: s.casefold()):
_, file_extension = os.path.splitext(filename)
- if (exensions == None or file_extension.lower() in exensions) and (name_filter == None or name_filter in _):
+ if (exensions is None or file_extension.lower() in exensions) and (name_filter is None or name_filter in _):
path = os.path.join(relative_path, filename)
filenames.append(path)
- return sorted(filenames, key=lambda x: -1 if os.sep in x else 1)
+ return filenames
+
+
+def calculate_sha256(filename, length=HASH_SHA256_LENGTH) -> str:
+ hash_sha256 = sha256()
+ blksize = 1024 * 1024
+
+ with open(filename, "rb") as f:
+ for chunk in iter(lambda: f.read(blksize), b""):
+ hash_sha256.update(chunk)
+
+ res = hash_sha256.hexdigest()
+ return res[:length] if length else res
+
+
+def quote(text):
+ if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
+ return text
+
+ return json.dumps(text, ensure_ascii=False)
+
+
+def unquote(text):
+ if len(text) == 0 or text[0] != '"' or text[-1] != '"':
+ return text
+
+ try:
+ return json.loads(text)
+ except Exception:
+ return text
+
+
+def unwrap_style_text_from_prompt(style_text, prompt):
+ """
+ Checks the prompt to see if the style text is wrapped around it. If so,
+ returns True plus the prompt text without the style text. Otherwise, returns
+ False with the original prompt.
+
+ Note that the "cleaned" version of the style text is only used for matching
+ purposes here. It isn't returned; the original style text is not modified.
+ """
+ stripped_prompt = prompt
+ stripped_style_text = style_text
+ if "{prompt}" in stripped_style_text:
+ # Work out whether the prompt is wrapped in the style text. If so, we
+ # return True and the "inner" prompt text that isn't part of the style.
+ try:
+ left, right = stripped_style_text.split("{prompt}", 2)
+ except ValueError as e:
+            # If the style text has multiple "{prompt}"s, we can't split it into
+ # two parts. This is an error, but we can't do anything about it.
+ print(f"Unable to compare style text to prompt:\n{style_text}")
+ print(f"Error: {e}")
+ return False, prompt, ''
+
+ left_pos = stripped_prompt.find(left)
+ right_pos = stripped_prompt.find(right)
+ if 0 <= left_pos < right_pos:
+ real_prompt = stripped_prompt[left_pos + len(left):right_pos]
+ prompt = stripped_prompt.replace(left + real_prompt + right, '', 1)
+ if prompt.startswith(", "):
+ prompt = prompt[2:]
+ if prompt.endswith(", "):
+ prompt = prompt[:-2]
+ return True, prompt, real_prompt
+ else:
+ # Work out whether the given prompt starts with the style text. If so, we
+ # return True and the prompt text up to where the style text starts.
+ if stripped_prompt.endswith(stripped_style_text):
+ prompt = stripped_prompt[: len(stripped_prompt) - len(stripped_style_text)]
+ if prompt.endswith(", "):
+ prompt = prompt[:-2]
+ return True, prompt, prompt
+
+ return False, prompt, ''
+
+
+def extract_original_prompts(style, prompt, negative_prompt):
+ """
+ Takes a style and compares it to the prompt and negative prompt. If the style
+ matches, returns True plus the prompt and negative prompt with the style text
+ removed. Otherwise, returns False with the original prompt and negative prompt.
+ """
+ if not style.prompt and not style.negative_prompt:
+ return False, prompt, negative_prompt
+
+ match_positive, extracted_positive, real_prompt = unwrap_style_text_from_prompt(
+ style.prompt, prompt
+ )
+ if not match_positive:
+ return False, prompt, negative_prompt, ''
+
+ match_negative, extracted_negative, _ = unwrap_style_text_from_prompt(
+ style.negative_prompt, negative_prompt
+ )
+ if not match_negative:
+ return False, prompt, negative_prompt, ''
+
+ return True, extracted_positive, extracted_negative, real_prompt
+
+
+def extract_styles_from_prompt(prompt, negative_prompt):
+ extracted = []
+ applicable_styles = []
+
+ for style_name, (style_prompt, style_negative_prompt) in modules.sdxl_styles.styles.items():
+ applicable_styles.append(PromptStyle(name=style_name, prompt=style_prompt, negative_prompt=style_negative_prompt))
+
+ real_prompt = ''
+
+ while True:
+ found_style = None
+
+ for style in applicable_styles:
+ is_match, new_prompt, new_neg_prompt, new_real_prompt = extract_original_prompts(
+ style, prompt, negative_prompt
+ )
+ if is_match:
+ found_style = style
+ prompt = new_prompt
+ negative_prompt = new_neg_prompt
+ if real_prompt == '' and new_real_prompt != '' and new_real_prompt != prompt:
+ real_prompt = new_real_prompt
+ break
+
+ if not found_style:
+ break
+
+ applicable_styles.remove(found_style)
+ extracted.append(found_style.name)
+
+ # add prompt expansion if not all styles could be resolved
+ if prompt != '':
+ if real_prompt != '':
+ extracted.append(modules.sdxl_styles.fooocus_expansion)
+ else:
+ # find real_prompt when only prompt expansion is selected
+ first_word = prompt.split(', ')[0]
+ first_word_positions = [i for i in range(len(prompt)) if prompt.startswith(first_word, i)]
+ if len(first_word_positions) > 1:
+ real_prompt = prompt[:first_word_positions[-1]]
+ extracted.append(modules.sdxl_styles.fooocus_expansion)
+ if real_prompt.endswith(', '):
+ real_prompt = real_prompt[:-2]
+
+ return list(reversed(extracted)), real_prompt, negative_prompt
+
+
+class PromptStyle(typing.NamedTuple):
+ name: str
+ prompt: str
+ negative_prompt: str
+
+
+def is_json(data: str) -> bool:
+ try:
+ loaded_json = json.loads(data)
+ assert isinstance(loaded_json, dict)
+ except (ValueError, AssertionError):
+ return False
+ return True
+
+
+def get_file_from_folder_list(name, folders):
+ for folder in folders:
+ filename = os.path.abspath(os.path.realpath(os.path.join(folder, name)))
+ if os.path.isfile(filename):
+ return filename
+
+ return os.path.abspath(os.path.realpath(os.path.join(folders[0], name)))
+
+
+def ordinal_suffix(number: int) -> str:
+ return 'th' if 10 <= number % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(number % 10, 'th')
+
+
+def makedirs_with_log(path):
+ try:
+ os.makedirs(path, exist_ok=True)
+ except OSError as error:
+ print(f'Directory {path} could not be created, reason: {error}')
diff --git a/presets/anime.json b/presets/anime.json
index 32428a71..8bd2813b 100644
--- a/presets/anime.json
+++ b/presets/anime.json
@@ -1,11 +1,11 @@
{
- "default_model": "bluePencilXL_v050.safetensors",
- "default_refiner": "DreamShaper_8_pruned.safetensors",
- "default_refiner_switch": 0.667,
+ "default_model": "animaPencilXL_v100.safetensors",
+ "default_refiner": "None",
+ "default_refiner_switch": 0.5,
"default_loras": [
[
- "sd_xl_offset_example-lora_1.0.safetensors",
- 0.5
+ "None",
+ 1.0
],
[
"None",
@@ -30,24 +30,17 @@
"default_scheduler": "karras",
"default_performance": "Speed",
"default_prompt": "1girl, ",
- "default_prompt_negative": "(embedding:unaestheticXLv31:0.8), low quality, watermark",
+ "default_prompt_negative": "",
"default_styles": [
"Fooocus V2",
- "Fooocus Masterpiece",
- "SAI Anime",
- "SAI Digital Art",
- "SAI Enhance",
- "SAI Fantasy Art"
+ "Fooocus Negative",
+ "Fooocus Masterpiece"
],
"default_aspect_ratio": "896*1152",
"checkpoint_downloads": {
- "bluePencilXL_v050.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/bluePencilXL_v050.safetensors",
- "DreamShaper_8_pruned.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/DreamShaper_8_pruned.safetensors"
+ "animaPencilXL_v100.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/animaPencilXL_v100.safetensors"
},
- "embeddings_downloads": {
- "unaestheticXLv31.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/unaestheticXLv31.safetensors"
- },
- "lora_downloads": {
- "sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors"
- }
+ "embeddings_downloads": {},
+ "lora_downloads": {},
+ "previous_default_models": []
}
\ No newline at end of file
diff --git a/presets/default.json b/presets/default.json
index bc014035..7930c92f 100644
--- a/presets/default.json
+++ b/presets/default.json
@@ -1,5 +1,5 @@
{
- "default_model": "juggernautXL_version6Rundiffusion.safetensors",
+ "default_model": "juggernautXL_v8Rundiffusion.safetensors",
"default_refiner": "None",
"default_refiner_switch": 0.5,
"default_loras": [
@@ -38,10 +38,17 @@
],
"default_aspect_ratio": "1152*896",
"checkpoint_downloads": {
- "juggernautXL_version6Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors"
+ "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
},
"embeddings_downloads": {},
"lora_downloads": {
"sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors"
- }
+ },
+ "previous_default_models": [
+ "juggernautXL_version8Rundiffusion.safetensors",
+ "juggernautXL_version7Rundiffusion.safetensors",
+ "juggernautXL_v7Rundiffusion.safetensors",
+ "juggernautXL_version6Rundiffusion.safetensors",
+ "juggernautXL_v6Rundiffusion.safetensors"
+ ]
}
\ No newline at end of file
diff --git a/presets/lcm.json b/presets/lcm.json
index 8822172d..3897f881 100644
--- a/presets/lcm.json
+++ b/presets/lcm.json
@@ -1,5 +1,5 @@
{
- "default_model": "juggernautXL_version6Rundiffusion.safetensors",
+ "default_model": "juggernautXL_v8Rundiffusion.safetensors",
"default_refiner": "None",
"default_refiner_switch": 0.5,
"default_loras": [
@@ -38,8 +38,15 @@
],
"default_aspect_ratio": "1152*896",
"checkpoint_downloads": {
- "juggernautXL_version6Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors"
+ "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
},
"embeddings_downloads": {},
- "lora_downloads": {}
+ "lora_downloads": {},
+ "previous_default_models": [
+ "juggernautXL_version8Rundiffusion.safetensors",
+ "juggernautXL_version7Rundiffusion.safetensors",
+ "juggernautXL_v7Rundiffusion.safetensors",
+ "juggernautXL_version6Rundiffusion.safetensors",
+ "juggernautXL_v6Rundiffusion.safetensors"
+ ]
}
\ No newline at end of file
diff --git a/presets/realistic.json b/presets/realistic.json
index ed625d45..7799c96a 100644
--- a/presets/realistic.json
+++ b/presets/realistic.json
@@ -1,5 +1,5 @@
{
- "default_model": "realisticStockPhoto_v10.safetensors",
+ "default_model": "realisticStockPhoto_v20.safetensors",
"default_refiner": "",
"default_refiner_switch": 0.5,
"default_loras": [
@@ -38,10 +38,11 @@
],
"default_aspect_ratio": "896*1152",
"checkpoint_downloads": {
- "realisticStockPhoto_v10.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/realisticStockPhoto_v10.safetensors"
+ "realisticStockPhoto_v20.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/realisticStockPhoto_v20.safetensors"
},
"embeddings_downloads": {},
"lora_downloads": {
"SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors"
- }
+ },
+ "previous_default_models": ["realisticStockPhoto_v10.safetensors"]
}
\ No newline at end of file
diff --git a/presets/sai.json b/presets/sai.json
index ac9c17d1..fecf047b 100644
--- a/presets/sai.json
+++ b/presets/sai.json
@@ -43,5 +43,6 @@
"embeddings_downloads": {},
"lora_downloads": {
"sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors"
- }
+ },
+ "previous_default_models": []
}
\ No newline at end of file
diff --git a/readme.md b/readme.md
index 87c44b83..0bfee5b4 100644
--- a/readme.md
+++ b/readme.md
@@ -5,7 +5,7 @@
without any parameter tweaking, without any strange prompt tags.
-See also **non-cherry-picked** generalization and diversity tests [here](https://github.com/lllyasviel/Fooocus/discussions/808) and [here](https://github.com/lllyasviel/Fooocus/discussions/679) and [here](https://github.com/lllyasviel/Fooocus/discussions/679#realistic).
+See also **non-cherry-picked** generalization and diversity tests [here](https://github.com/lllyasviel/Fooocus/discussions/2067) and [here](https://github.com/lllyasviel/Fooocus/discussions/808) and [here](https://github.com/lllyasviel/Fooocus/discussions/679) and [here](https://github.com/lllyasviel/Fooocus/discussions/679#realistic).
In the entire open source community, only Fooocus can achieve this level of **non-cherry-picked** quality.
@@ -202,7 +202,7 @@ AMD is not intensively tested, however. The AMD support is in beta.
Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition.
-### Windows(AMD GPUs)
+### Windows (AMD GPUs)
Note that the [minimal requirement](#minimal-requirement) for different platforms is different.
@@ -237,6 +237,10 @@ You can install Fooocus on Apple Mac silicon (M1 or M2) with macOS 'Catalina' or
Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition.
+### Docker
+
+See [docker.md](docker.md)
+
### Download Previous Version
See the guidelines [here](https://github.com/lllyasviel/Fooocus/discussions/1405).
@@ -273,22 +277,29 @@ See the common problems [here](troubleshoot.md).
Given different goals, the default models and configs of Fooocus are different:
-| Task | Windows | Linux args | Main Model | Refiner | Config |
-| --- | --- | --- | --- | --- | --- |
-| General | run.bat | | [juggernautXL v6_RunDiffusion](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_version6Rundiffusion.safetensors) | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/modules/path.py) |
-| Realistic | run_realistic.bat | --preset realistic | [realistic_stock_photo](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/realisticStockPhoto_v10.safetensors) | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/realistic.json) |
-| Anime | run_anime.bat | --preset anime | [bluepencil_v50](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/bluePencilXL_v050.safetensors) | [dreamsharper_v8](https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/DreamShaper_8_pruned.safetensors) (SD1.5) | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/anime.json) |
+| Task | Windows | Linux args | Main Model | Refiner | Config |
+| --- | --- | --- | --- | --- |--------------------------------------------------------------------------------|
+| General | run.bat | | juggernautXL_v8Rundiffusion | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/default.json) |
+| Realistic | run_realistic.bat | --preset realistic | realisticStockPhoto_v20 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/realistic.json) |
+| Anime | run_anime.bat | --preset anime | animaPencilXL_v100 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/anime.json) |
Note that the download is **automatic** - you do not need to do anything if the internet connection is okay. However, you can download them manually if you (or move them from somewhere else) have your own preparation.
+## UI Access and Authentication
+In addition to running on localhost, Fooocus can also expose its UI in two ways:
+* Local UI listener: use `--listen` (specify port e.g. with `--port 8888`).
+* API access: use `--share` (registers an endpoint at `.gradio.live`).
+
+In both cases, access is unauthenticated by default. You can add basic authentication by creating a file called `auth.json` in the main directory, which contains a list of JSON objects with the keys `user` and `pass` (see example in [auth-example.json](./auth-example.json)).
+
## List of "Hidden" Tricks
The below things are already inside the software, and **users do not need to do anything about these**.
-1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processsing and "raw" mode, or the LeonardoAI's Prompt Magic).
+1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processing and "raw" mode, or the LeonardoAI's Prompt Magic).
2. Native refiner swap inside one single k-sampler. The advantage is that the refiner model can now reuse the base model's momentum (or ODE's history parameters) collected from k-sampling to achieve more coherent sampling. In Automatic1111's high-res fix and ComfyUI's node system, the base model and refiner use two independent k-samplers, which means the momentum is largely wasted, and the sampling continuity is broken. Fooocus uses its own advanced k-diffusion sampling that ensures seamless, native, and continuous swap in a refiner setup. (Update Aug 13: Actually, I discussed this with Automatic1111 several days ago, and it seems that the “native refiner swap inside one single k-sampler” is [merged]( https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371) into the dev branch of webui. Great!)
-3. Negative ADM guidance. Because the highest resolution level of XL Base does not have cross attentions, the positive and negative signals for XL's highest resolution level cannot receive enough contrasts during the CFG sampling, causing the results to look a bit plastic or overly smooth in certain cases. Fortunately, since the XL's highest resolution level is still conditioned on image aspect ratios (ADM), we can modify the adm on the positive/negative side to compensate for the lack of CFG contrast in the highest resolution level. (Update Aug 16, the IOS App [Drawing Things](https://apps.apple.com/us/app/draw-things-ai-generation/id6444050820) will support Negative ADM Guidance. Great!)
+3. Negative ADM guidance. Because the highest resolution level of XL Base does not have cross attentions, the positive and negative signals for XL's highest resolution level cannot receive enough contrasts during the CFG sampling, causing the results to look a bit plastic or overly smooth in certain cases. Fortunately, since the XL's highest resolution level is still conditioned on image aspect ratios (ADM), we can modify the adm on the positive/negative side to compensate for the lack of CFG contrast in the highest resolution level. (Update Aug 16, the IOS App [Draw Things](https://apps.apple.com/us/app/draw-things-ai-generation/id6444050820) will support Negative ADM Guidance. Great!)
4. We implemented a carefully tuned variation of Section 5.1 of ["Improving Sample Quality of Diffusion Models Using Self-Attention Guidance"](https://arxiv.org/pdf/2210.00939.pdf). The weight is set to very low, but this is Fooocus's final guarantee to make sure that the XL will never yield an overly smooth or plastic appearance (examples [here](https://github.com/lllyasviel/Fooocus/discussions/117#sharpness)). This can almost eliminate all cases for which XL still occasionally produces overly smooth results, even with negative ADM guidance. (Update 2023 Aug 18, the Gaussian kernel of SAG is changed to an anisotropic kernel for better structure preservation and fewer artifacts.)
5. We modified the style templates a bit and added the "cinematic-default".
6. We tested the "sd_xl_offset_example-lora_1.0.safetensors" and it seems that when the lora weight is below 0.5, the results are always better than XL without lora.
@@ -363,7 +374,7 @@ entry_with_update.py [-h] [--listen [IP]] [--port PORT]
[--attention-split | --attention-quad | --attention-pytorch]
[--disable-xformers]
[--always-gpu | --always-high-vram | --always-normal-vram |
- --always-low-vram | --always-no-vram | --always-cpu]
+ --always-low-vram | --always-no-vram | --always-cpu [CPU_NUM_THREADS]]
[--always-offload-from-vram] [--disable-server-log]
[--debug-mode] [--is-windows-embedded-python]
[--disable-server-info] [--share] [--preset PRESET]
diff --git a/requirements_docker.txt b/requirements_docker.txt
new file mode 100644
index 00000000..3cf4aa89
--- /dev/null
+++ b/requirements_docker.txt
@@ -0,0 +1,5 @@
+torch==2.0.1
+torchvision==0.15.2
+torchaudio==2.0.2
+torchtext==0.15.2
+torchdata==0.6.1
diff --git a/shared.py b/shared.py
index 269809e3..21a2a864 100644
--- a/shared.py
+++ b/shared.py
@@ -1,2 +1 @@
-gradio_root = None
-last_stop = None
+gradio_root = None
\ No newline at end of file
diff --git a/update_log.md b/update_log.md
index 1e8914d1..b0192d0d 100644
--- a/update_log.md
+++ b/update_log.md
@@ -1,7 +1,24 @@
-**(2023 Dec 21) Hi all, the feature updating of Fooocus will be paused for about two or three weeks because we have some other workloads. See you soon and we will come back in mid or late Jan. However, you may still see updates if other collaborators are fixing bugs or solving problems.**
+# [2.2.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.0)
+
+* Isolate every image generation to truly allow multi-user usage
+* Add array support, changes the main prompt when increasing the image number. Syntax: `[[red, green, blue]] flower`
+* Add optional metadata to images, allowing you to regenerate and modify them later with the same parameters
+* Now supports native PNG, JPG and WEBP image generation
+* Add Docker support
+
+# [2.1.865](https://github.com/lllyasviel/Fooocus/releases/tag/2.1.865)
+
+* Various bugfixes
+* Add authentication to --listen
+
+# 2.1.864
+
+* New model list. See also discussions.
# 2.1.861 (requested update)
+(2023 Dec 21) Hi all, the feature updating of Fooocus will be paused for about two or three weeks because we have some other workloads. See you soon and we will come back in mid or late Jan. However, you may still see updates if other collaborators are fixing bugs or solving problems.
+
* Show image preview in Style when mouse hover.
# 2.1.860 (requested update)
diff --git a/webui.py b/webui.py
index 36face51..08ed29da 100644
--- a/webui.py
+++ b/webui.py
@@ -11,7 +11,6 @@ import modules.async_worker as worker
import modules.constants as constants
import modules.flags as flags
import modules.gradio_hijack as grh
-import modules.advanced_parameters as advanced_parameters
import modules.style_sorter as style_sorter
import modules.meta_parser
import args_manager
@@ -21,18 +20,21 @@ from modules.sdxl_styles import legal_style_names
from modules.private_logger import get_current_html_path
from modules.ui_gradio_extensions import reload_javascript
from modules.auth import auth_enabled, check_auth
+from modules.util import is_json
+def get_task(*args):
+ args = list(args)
+ args.pop(0)
-def generate_clicked(*args):
+ return worker.AsyncTask(args=args)
+
+def generate_clicked(task):
import ldm_patched.modules.model_management as model_management
with model_management.interrupt_processing_mutex:
model_management.interrupt_processing = False
-
# outputs=[progress_html, progress_window, progress_gallery, gallery]
-
execution_start_time = time.perf_counter()
- task = worker.AsyncTask(args=list(args))
finished = False
yield gr.update(visible=True, value=modules.html.make_progress_html(1, 'Waiting for task to start ...')), \
@@ -71,6 +73,11 @@ def generate_clicked(*args):
gr.update(visible=True, value=product)
finished = True
+ # delete Fooocus temp images, only keep gradio temp images
+ if args_manager.args.disable_image_log:
+ for filepath in product:
+ os.remove(filepath)
+
execution_time = time.perf_counter() - execution_start_time
print(f'Total time: {execution_time:.2f} seconds')
return
@@ -88,6 +95,7 @@ shared.gradio_root = gr.Blocks(
css=modules.html.css).queue()
with shared.gradio_root:
+ currentTask = gr.State(worker.AsyncTask(args=[]))
with gr.Row():
with gr.Column(scale=2):
with gr.Row():
@@ -115,21 +123,22 @@ with shared.gradio_root:
skip_button = gr.Button(label="Skip", value="Skip", elem_classes='type_row_half', visible=False)
stop_button = gr.Button(label="Stop", value="Stop", elem_classes='type_row_half', elem_id='stop_button', visible=False)
- def stop_clicked():
+ def stop_clicked(currentTask):
import ldm_patched.modules.model_management as model_management
- shared.last_stop = 'stop'
- model_management.interrupt_current_processing()
- return [gr.update(interactive=False)] * 2
+ currentTask.last_stop = 'stop'
+ if (currentTask.processing):
+ model_management.interrupt_current_processing()
+ return currentTask
- def skip_clicked():
+ def skip_clicked(currentTask):
import ldm_patched.modules.model_management as model_management
- shared.last_stop = 'skip'
- model_management.interrupt_current_processing()
- return
+ currentTask.last_stop = 'skip'
+ if (currentTask.processing):
+ model_management.interrupt_current_processing()
+ return currentTask
- stop_button.click(stop_clicked, outputs=[skip_button, stop_button],
- queue=False, show_progress=False, _js='cancelGenerateForever')
- skip_button.click(skip_clicked, queue=False, show_progress=False)
+ stop_button.click(stop_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False, _js='cancelGenerateForever')
+ skip_button.click(skip_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False)
with gr.Row(elem_classes='advanced_check_row'):
input_image_checkbox = gr.Checkbox(label='Input Image', value=False, container=False, elem_classes='min_check')
advanced_checkbox = gr.Checkbox(label='Advanced', value=modules.config.default_advanced_checkbox, container=False, elem_classes='min_check')
@@ -150,7 +159,7 @@ with shared.gradio_root:
ip_weights = []
ip_ctrls = []
ip_ad_cols = []
- for _ in range(4):
+ for _ in range(flags.controlnet_image_count):
with gr.Column():
ip_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False, height=300)
ip_images.append(ip_image)
@@ -260,6 +269,27 @@ with shared.gradio_root:
value=flags.desc_type_photo)
desc_btn = gr.Button(value='Describe this Image into Prompt')
gr.HTML('\U0001F4D4 Document')
+ with gr.TabItem(label='Metadata') as load_tab:
+ with gr.Column():
+ metadata_input_image = grh.Image(label='Drag any image generated by Fooocus here', source='upload', type='filepath')
+ metadata_json = gr.JSON(label='Metadata')
+ metadata_import_button = gr.Button(value='Apply Metadata')
+
+ def trigger_metadata_preview(filepath):
+ parameters, metadata_scheme = modules.meta_parser.read_info_from_image(filepath)
+
+ results = {}
+ if parameters is not None:
+ results['parameters'] = parameters
+
+ if isinstance(metadata_scheme, flags.MetadataScheme):
+ results['metadata_scheme'] = metadata_scheme.value
+
+ return results
+
+ metadata_input_image.upload(trigger_metadata_preview, inputs=metadata_input_image,
+ outputs=metadata_json, queue=False, show_progress=True)
+
switch_js = "(x) => {if(x){viewer_to_bottom(100);viewer_to_bottom(500);}else{viewer_to_top();} return x;}"
down_js = "() => {viewer_to_bottom();}"
@@ -282,6 +312,11 @@ with shared.gradio_root:
value=modules.config.default_aspect_ratio, info='width × height',
elem_classes='aspect_ratios')
image_number = gr.Slider(label='Image Number', minimum=1, maximum=modules.config.default_max_image_number, step=1, value=modules.config.default_image_number)
+
+ output_format = gr.Radio(label='Output Format',
+ choices=modules.flags.output_formats,
+ value=modules.config.default_output_format)
+
negative_prompt = gr.Textbox(label='Negative Prompt', show_label=True, placeholder="Type prompt here.",
info='Describing what you do not want to see.', lines=2,
elem_id='negative_prompt',
@@ -307,8 +342,14 @@ with shared.gradio_root:
seed_random.change(random_checked, inputs=[seed_random], outputs=[image_seed],
queue=False, show_progress=False)
- if not args_manager.args.disable_image_log:
- gr.HTML(f'\U0001F4DA History Log')
+ def update_history_link():
+ if args_manager.args.disable_image_log:
+ return gr.update(value='')
+
+ return gr.update(value=f'\U0001F4DA History Log')
+
+ history_link = gr.HTML()
+ shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False)
with gr.Tab(label='Style'):
style_sorter.try_load_sorted_styles(
@@ -365,11 +406,15 @@ with shared.gradio_root:
for i, (n, v) in enumerate(modules.config.default_loras):
with gr.Row():
+ lora_enabled = gr.Checkbox(label='Enable', value=True,
+ elem_classes=['lora_enable', 'min_check'])
lora_model = gr.Dropdown(label=f'LoRA {i + 1}',
- choices=['None'] + modules.config.lora_filenames, value=n)
- lora_weight = gr.Slider(label='Weight', minimum=-2, maximum=2, step=0.01, value=v,
+ choices=['None'] + modules.config.lora_filenames, value=n,
+ elem_classes='lora_model')
+ lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight,
+ maximum=modules.config.default_loras_max_weight, step=0.01, value=v,
elem_classes='lora_weight')
- lora_ctrls += [lora_model, lora_weight]
+ lora_ctrls += [lora_enabled, lora_model, lora_weight]
with gr.Row():
model_refresh = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button')
@@ -393,7 +438,7 @@ with shared.gradio_root:
step=0.001, value=0.3,
info='When to end the guidance from positive/negative ADM. ')
- refiner_swap_method = gr.Dropdown(label='Refiner swap method', value='joint',
+ refiner_swap_method = gr.Dropdown(label='Refiner swap method', value=flags.refiner_swap_method,
choices=['joint', 'separate', 'vae'])
adaptive_cfg = gr.Slider(label='CFG Mimicking from TSNR', minimum=1.0, maximum=30.0, step=0.01,
@@ -433,6 +478,23 @@ with shared.gradio_root:
info='Set as negative number to disable. For developer debugging.')
disable_preview = gr.Checkbox(label='Disable Preview', value=False,
info='Disable preview during generation.')
+ disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
+ value=modules.config.default_performance == 'Extreme Speed',
+ interactive=modules.config.default_performance != 'Extreme Speed',
+ info='Disable intermediate results during generation, only show final gallery.')
+ disable_seed_increment = gr.Checkbox(label='Disable seed increment',
+ info='Disable automatic seed increment when image number is > 1.',
+ value=False)
+
+ if not args_manager.args.disable_metadata:
+ save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images,
+ info='Adds parameters to generated images allowing manual regeneration.')
+ metadata_scheme = gr.Radio(label='Metadata Scheme', choices=flags.metadata_scheme, value=modules.config.default_metadata_scheme,
+ info='Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.',
+ visible=modules.config.default_save_metadata_to_images)
+
+ save_metadata_to_images.change(lambda x: gr.update(visible=x), inputs=[save_metadata_to_images], outputs=[metadata_scheme],
+ queue=False, show_progress=False)
with gr.Tab(label='Control'):
debugging_cn_preprocessor = gr.Checkbox(label='Debug Preprocessors', value=False,
@@ -481,7 +543,7 @@ with shared.gradio_root:
'(default is 0, always process before any mask invert)')
inpaint_mask_upload_checkbox = gr.Checkbox(label='Enable Mask Upload', value=False)
invert_mask_checkbox = gr.Checkbox(label='Invert Mask', value=False)
-
+
inpaint_ctrls = [debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine,
inpaint_strength, inpaint_respective_field,
inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate]
@@ -499,15 +561,6 @@ with shared.gradio_root:
freeu_s2 = gr.Slider(label='S2', minimum=0, maximum=4, step=0.01, value=0.95)
freeu_ctrls = [freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2]
- adps = [disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name,
- scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height,
- overwrite_vary_strength, overwrite_upscale_strength,
- mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint,
- debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness,
- canny_low_threshold, canny_high_threshold, refiner_swap_method]
- adps += freeu_ctrls
- adps += inpaint_ctrls
-
def dev_mode_checked(r):
return gr.update(visible=r)
@@ -517,24 +570,27 @@ with shared.gradio_root:
def model_refresh_clicked():
modules.config.update_all_model_names()
- results = []
- results += [gr.update(choices=modules.config.model_filenames), gr.update(choices=['None'] + modules.config.model_filenames)]
- for i in range(5):
- results += [gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
+ results = [gr.update(choices=modules.config.model_filenames)]
+ results += [gr.update(choices=['None'] + modules.config.model_filenames)]
+ for i in range(modules.config.default_max_lora_number):
+ results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
return results
model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls,
queue=False, show_progress=False)
performance_selection.change(lambda x: [gr.update(interactive=x != 'Extreme Speed')] * 11 +
- [gr.update(visible=x != 'Extreme Speed')] * 1,
+ [gr.update(visible=x != 'Extreme Speed')] * 1 +
+ [gr.update(interactive=x != 'Extreme Speed', value=x == 'Extreme Speed', )] * 1,
inputs=performance_selection,
outputs=[
guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive,
adm_scaler_negative, refiner_switch, refiner_model, sampler_name,
- scheduler_name, adaptive_cfg, refiner_swap_method, negative_prompt
+ scheduler_name, adaptive_cfg, refiner_swap_method, negative_prompt, disable_intermediate_results
], queue=False, show_progress=False)
-
+
+ output_format.input(lambda x: gr.update(output_format=x), inputs=output_format)
+
advanced_checkbox.change(lambda x: gr.update(visible=x), advanced_checkbox, advanced_column,
queue=False, show_progress=False) \
.then(fn=lambda: None, _js='refresh_grid_delayed', queue=False, show_progress=False)
@@ -572,29 +628,37 @@ with shared.gradio_root:
inpaint_strength, inpaint_respective_field
], show_progress=False, queue=False)
- ctrls = [
+ ctrls = [currentTask, generate_image_grid]
+ ctrls += [
prompt, negative_prompt, style_selections,
- performance_selection, aspect_ratios_selection, image_number, image_seed, sharpness, guidance_scale
+ performance_selection, aspect_ratios_selection, image_number, output_format, image_seed, sharpness, guidance_scale
]
ctrls += [base_model, refiner_model, refiner_switch] + lora_ctrls
ctrls += [input_image_checkbox, current_tab]
ctrls += [uov_method, uov_input_image]
ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt, inpaint_mask_image]
+ ctrls += [disable_preview, disable_intermediate_results, disable_seed_increment]
+ ctrls += [adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg]
+ ctrls += [sampler_name, scheduler_name]
+ ctrls += [overwrite_step, overwrite_switch, overwrite_width, overwrite_height, overwrite_vary_strength]
+ ctrls += [overwrite_upscale_strength, mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint]
+ ctrls += [debugging_cn_preprocessor, skipping_cn_preprocessor, canny_low_threshold, canny_high_threshold]
+ ctrls += [refiner_swap_method, controlnet_softness]
+ ctrls += freeu_ctrls
+ ctrls += inpaint_ctrls
+
+ if not args_manager.args.disable_metadata:
+ ctrls += [save_metadata_to_images, metadata_scheme]
+
ctrls += ip_ctrls
state_is_generating = gr.State(False)
def parse_meta(raw_prompt_txt, is_generating):
loaded_json = None
- try:
- if '{' in raw_prompt_txt:
- if '}' in raw_prompt_txt:
- if ':' in raw_prompt_txt:
- loaded_json = json.loads(raw_prompt_txt)
- assert isinstance(loaded_json, dict)
- except:
- loaded_json = None
+ if is_json(raw_prompt_txt):
+ loaded_json = json.loads(raw_prompt_txt)
if loaded_json is None:
if is_generating:
@@ -606,39 +670,38 @@ with shared.gradio_root:
prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False)
- load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=[
- advanced_checkbox,
- image_number,
- prompt,
- negative_prompt,
- style_selections,
- performance_selection,
- aspect_ratios_selection,
- overwrite_width,
- overwrite_height,
- sharpness,
- guidance_scale,
- adm_scaler_positive,
- adm_scaler_negative,
- adm_scaler_end,
- base_model,
- refiner_model,
- refiner_switch,
- sampler_name,
- scheduler_name,
- seed_random,
- image_seed,
- generate_button,
- load_parameter_button
- ] + lora_ctrls, queue=False, show_progress=False)
+ load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections,
+ performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection,
+ overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive,
+ adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, base_model,
+ refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, image_seed,
+ generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls
+
+ load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False)
+
+ def trigger_metadata_import(filepath, state_is_generating):
+ parameters, metadata_scheme = modules.meta_parser.read_info_from_image(filepath)
+ if parameters is None:
+ print('Could not find metadata in the image!')
+ parsed_parameters = {}
+ else:
+ metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
+ parsed_parameters = metadata_parser.parse_json(parameters)
+
+ return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating)
+
+
+ metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
+ .then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)
generate_button.click(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), [], True),
outputs=[stop_button, skip_button, generate_button, gallery, state_is_generating]) \
.then(fn=refresh_seed, inputs=[seed_random, image_seed], outputs=image_seed) \
- .then(advanced_parameters.set_all_advanced_parameters, inputs=adps) \
- .then(fn=generate_clicked, inputs=ctrls, outputs=[progress_html, progress_window, progress_gallery, gallery]) \
+ .then(fn=get_task, inputs=ctrls, outputs=currentTask) \
+ .then(fn=generate_clicked, inputs=currentTask, outputs=[progress_html, progress_window, progress_gallery, gallery]) \
.then(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), gr.update(visible=False, interactive=False), False),
outputs=[generate_button, stop_button, skip_button, state_is_generating]) \
+ .then(fn=update_history_link, outputs=history_link) \
.then(fn=lambda: None, _js='playNotification').then(fn=lambda: None, _js='refresh_grid_delayed')
for notification_file in ['notification.ogg', 'notification.mp3']:
@@ -671,6 +734,7 @@ shared.gradio_root.launch(
server_name=args_manager.args.listen,
server_port=args_manager.args.port,
share=args_manager.args.share,
- auth=check_auth if args_manager.args.share and auth_enabled else None,
+ auth=check_auth if (args_manager.args.share or args_manager.args.listen) and auth_enabled else None,
+ allowed_paths=[modules.config.path_outputs],
blocked_paths=[constants.AUTH_FILENAME]
)
diff --git a/wildcards/animal.txt b/wildcards/animal.txt
new file mode 100644
index 00000000..9a6f09ba
--- /dev/null
+++ b/wildcards/animal.txt
@@ -0,0 +1,100 @@
+Alligator
+Ant
+Antelope
+Armadillo
+Badger
+Bat
+Bear
+Beaver
+Bison
+Boar
+Bobcat
+Bull
+Camel
+Chameleon
+Cheetah
+Chicken
+Chihuahua
+Chimpanzee
+Chinchilla
+Chipmunk
+Komodo Dragon
+Cow
+Coyote
+Crocodile
+Crow
+Deer
+Dinosaur
+Dolphin
+Donkey
+Duck
+Eagle
+Eel
+Elephant
+Elk
+Emu
+Falcon
+Ferret
+Flamingo
+Flying Squirrel
+Giraffe
+Goose
+Guinea pig
+Hawk
+Hedgehog
+Hippopotamus
+Horse
+Hummingbird
+Hyena
+Jackal
+Jaguar
+Jellyfish
+Kangaroo
+King Cobra
+Koala bear
+Leopard
+Lion
+Lizard
+Magpie
+Marten
+Meerkat
+Mole
+Monkey
+Moose
+Mouse
+Octopus
+Okapi
+Orangutan
+Ostrich
+Otter
+Owl
+Panda
+Pangolin
+Panther
+Penguin
+Pig
+Porcupine
+Possum
+Puma
+Quokka
+Rabbit
+Raccoon
+Raven
+Reindeer
+Rhinoceros
+Seal
+Shark
+Sheep
+Snail
+Snake
+Sparrow
+Spider
+Squirrel
+Swallow
+Tiger
+Walrus
+Whale
+Wolf
+Wombat
+Yak
+Zebra
\ No newline at end of file