diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 66e40dd3..b5b1dbe0 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -11,8 +11,8 @@ import fooocus_version
import modules.config
import modules.sdxl_styles
from modules.flags import MetadataScheme, Performance, Steps
-from modules.flags import lora_count, SAMPLERS, CIVITAI_NO_KARRAS
-from modules.util import quote, unquote, extract_styles_from_prompt, is_json, calculate_sha256
+from modules.flags import SAMPLERS, CIVITAI_NO_KARRAS
+from modules.util import quote, unquote, extract_styles_from_prompt, is_json, get_file_from_folder_list, calculate_sha256
re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)'
re_param = re.compile(re_param_code)
@@ -57,7 +57,7 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
get_freeu('freeu', 'FreeU', loaded_parameter_dict, results)
- for i in range(lora_count):
+ for i in range(modules.config.default_max_lora_number):
get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results)
return results
@@ -171,9 +171,11 @@ def get_lora(key: str, fallback: str | None, source_dict: dict, results: list):
try:
n, w = source_dict.get(key, source_dict.get(fallback)).split(' : ')
w = float(w)
+ results.append(True)
results.append(n)
results.append(w)
except:
+ results.append(True)
results.append('None')
results.append(1)
@@ -209,7 +211,7 @@ def parse_meta_from_preset(preset_content):
preset_prepared[meta_key] = (width, height)
else:
preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[settings_key] is not None else getattr(modules.config, settings_key)
-
+
if settings_key == "default_styles" or settings_key == "default_aspect_ratio":
preset_prepared[meta_key] = str(preset_prepared[meta_key])
@@ -241,7 +243,8 @@ class MetadataParser(ABC):
def parse_string(self, metadata: dict) -> str:
raise NotImplementedError
- def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name, refiner_model_name, loras):
+ def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name,
+ refiner_model_name, loras):
self.raw_prompt = raw_prompt
self.full_prompt = full_prompt
self.raw_negative_prompt = raw_negative_prompt
@@ -249,18 +252,18 @@ class MetadataParser(ABC):
self.steps = steps
self.base_model_name = Path(base_model_name).stem
- base_model_path = os.path.join(modules.config.path_checkpoints, base_model_name)
+ base_model_path = get_file_from_folder_list(base_model_name, modules.config.paths_checkpoints)
self.base_model_hash = get_sha256(base_model_path)
if refiner_model_name not in ['', 'None']:
self.refiner_model_name = Path(refiner_model_name).stem
- refiner_model_path = os.path.join(modules.config.path_checkpoints, refiner_model_name)
+ refiner_model_path = get_file_from_folder_list(refiner_model_name, modules.config.paths_checkpoints)
self.refiner_model_hash = get_sha256(refiner_model_path)
self.loras = []
for (lora_name, lora_weight) in loras:
if lora_name != 'None':
- lora_path = os.path.join(modules.config.path_loras, lora_name)
+ lora_path = get_file_from_folder_list(lora_name, modules.config.paths_loras)
lora_hash = get_sha256(lora_path)
self.loras.append((Path(lora_name).stem, lora_weight, lora_hash))
@@ -327,7 +330,7 @@ class A1111MetadataParser(MetadataParser):
for k, v in re_param.findall(lastline):
try:
- if v[0] == '"' and v[-1] == '"':
+ if v != '' and v[0] == '"' and v[-1] == '"':
v = unquote(v)
m = re_imagesize.match(v)
@@ -375,7 +378,8 @@ class A1111MetadataParser(MetadataParser):
if 'lora_hashes' in data:
lora_filenames = modules.config.lora_filenames.copy()
- lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora())
+ if modules.config.sdxl_lcm_lora in lora_filenames:
+ lora_filenames.remove(modules.config.sdxl_lcm_lora)
for li, lora in enumerate(data['lora_hashes'].split(', ')):
lora_name, lora_hash, lora_weight = lora.split(': ')
for filename in lora_filenames:
@@ -456,7 +460,8 @@ class FooocusMetadataParser(MetadataParser):
def parse_json(self, metadata: dict) -> dict:
model_filenames = modules.config.model_filenames.copy()
lora_filenames = modules.config.lora_filenames.copy()
- lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora())
+ if modules.config.sdxl_lcm_lora in lora_filenames:
+ lora_filenames.remove(modules.config.sdxl_lcm_lora)
for key, value in metadata.items():
if value in ['', 'None']:
diff --git a/modules/sdxl_styles.py b/modules/sdxl_styles.py
index f5bb6276..71afc402 100644
--- a/modules/sdxl_styles.py
+++ b/modules/sdxl_styles.py
@@ -1,6 +1,7 @@
import os
import re
import json
+import math
from modules.util import get_files_from_folder
@@ -80,3 +81,38 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
print(f'[Wildcards] BFS stack overflow. Current text: {wildcard_text}')
return wildcard_text
+
+def get_words(arrays, totalMult, index):
+ if(len(arrays) == 1):
+ return [arrays[0].split(',')[index]]
+ else:
+ words = arrays[0].split(',')
+ word = words[index % len(words)]
+ index -= index % len(words)
+ index /= len(words)
+ index = math.floor(index)
+ return [word] + get_words(arrays[1:], math.floor(totalMult/len(words)), index)
+
+
+
+def apply_arrays(text, index):
+ arrays = re.findall(r'\[\[([\s,\w-]+)\]\]', text)
+ if len(arrays) == 0:
+ return text
+
+ print(f'[Arrays] processing: {text}')
+ mult = 1
+ for arr in arrays:
+ words = arr.split(',')
+ mult *= len(words)
+
+ index %= mult
+ chosen_words = get_words(arrays, mult, index)
+
+ i = 0
+ for arr in arrays:
+ text = text.replace(f'[[{arr}]]', chosen_words[i], 1)
+ i = i+1
+
+ return text
+
diff --git a/modules/util.py b/modules/util.py
index 4a919a1a..c7923ec8 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -160,7 +160,7 @@ def generate_temp_filename(folder='./outputs/', extension='png'):
random_number = random.randint(1000, 9999)
filename = f"{time_string}_{random_number}.{extension}"
result = os.path.join(folder, date_string, filename)
- return date_string, os.path.abspath(os.path.realpath(result)), filename
+ return date_string, os.path.abspath(result), filename
def get_files_from_folder(folder_path, exensions=None, name_filter=None):
@@ -341,5 +341,22 @@ def is_json(data: str) -> bool:
return False
return True
+
+def get_file_from_folder_list(name, folders):
+ for folder in folders:
+ filename = os.path.abspath(os.path.realpath(os.path.join(folder, name)))
+ if os.path.isfile(filename):
+ return filename
+
+ return os.path.abspath(os.path.realpath(os.path.join(folders[0], name)))
+
+
def ordinal_suffix(number: int) -> str:
return 'th' if 10 <= number % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(number % 10, 'th')
+
+
+def makedirs_with_log(path):
+ try:
+ os.makedirs(path, exist_ok=True)
+ except OSError as error:
+ print(f'Directory {path} could not be created, reason: {error}')
diff --git a/readme.md b/readme.md
index 02a5231b..c61d1f7c 100644
--- a/readme.md
+++ b/readme.md
@@ -433,6 +433,10 @@ You can install Fooocus on Apple Mac silicon (M1 or M2) with macOS 'Catalina' or
Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition.
+### Docker
+
+See [docker.md](docker.md)
+
### Download Previous Version
See the guidelines [here](https://github.com/lllyasviel/Fooocus/discussions/1405).
@@ -489,7 +493,7 @@ In both ways the access is unauthenticated by default. You can add basic authent
The below things are already inside the software, and **users do not need to do anything about these**.
-1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processsing and "raw" mode, or the LeonardoAI's Prompt Magic).
+1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processing and "raw" mode, or the LeonardoAI's Prompt Magic).
2. Native refiner swap inside one single k-sampler. The advantage is that the refiner model can now reuse the base model's momentum (or ODE's history parameters) collected from k-sampling to achieve more coherent sampling. In Automatic1111's high-res fix and ComfyUI's node system, the base model and refiner use two independent k-samplers, which means the momentum is largely wasted, and the sampling continuity is broken. Fooocus uses its own advanced k-diffusion sampling that ensures seamless, native, and continuous swap in a refiner setup. (Update Aug 13: Actually, I discussed this with Automatic1111 several days ago, and it seems that the “native refiner swap inside one single k-sampler” is [merged]( https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371) into the dev branch of webui. Great!)
3. Negative ADM guidance. Because the highest resolution level of XL Base does not have cross attentions, the positive and negative signals for XL's highest resolution level cannot receive enough contrasts during the CFG sampling, causing the results to look a bit plastic or overly smooth in certain cases. Fortunately, since the XL's highest resolution level is still conditioned on image aspect ratios (ADM), we can modify the adm on the positive/negative side to compensate for the lack of CFG contrast in the highest resolution level. (Update Aug 16, the IOS App [Draw Things](https://apps.apple.com/us/app/draw-things-ai-generation/id6444050820) will support Negative ADM Guidance. Great!)
4. We implemented a carefully tuned variation of Section 5.1 of ["Improving Sample Quality of Diffusion Models Using Self-Attention Guidance"](https://arxiv.org/pdf/2210.00939.pdf). The weight is set to very low, but this is Fooocus's final guarantee to make sure that the XL will never yield an overly smooth or plastic appearance (examples [here](https://github.com/lllyasviel/Fooocus/discussions/117#sharpness)). This can almost eliminate all cases for which XL still occasionally produces overly smooth results, even with negative ADM guidance. (Update 2023 Aug 18, the Gaussian kernel of SAG is changed to an anisotropic kernel for better structure preservation and fewer artifacts.)
@@ -566,7 +570,7 @@ entry_with_update.py [-h] [--listen [IP]] [--port PORT]
[--attention-split | --attention-quad | --attention-pytorch]
[--disable-xformers]
[--always-gpu | --always-high-vram | --always-normal-vram |
- --always-low-vram | --always-no-vram | --always-cpu]
+ --always-low-vram | --always-no-vram | --always-cpu [CPU_NUM_THREADS]]
[--always-offload-from-vram] [--disable-server-log]
[--debug-mode] [--is-windows-embedded-python]
[--disable-server-info] [--share] [--preset PRESET]
diff --git a/requirements_docker.txt b/requirements_docker.txt
new file mode 100644
index 00000000..3cf4aa89
--- /dev/null
+++ b/requirements_docker.txt
@@ -0,0 +1,5 @@
+torch==2.0.1
+torchvision==0.15.2
+torchaudio==2.0.2
+torchtext==0.15.2
+torchdata==0.6.1
diff --git a/update_log.md b/update_log.md
index e052d24c..b0192d0d 100644
--- a/update_log.md
+++ b/update_log.md
@@ -1,3 +1,16 @@
+# [2.2.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.0)
+
+* Isolate every image generation to truly allow multi-user usage
+* Add array support, which changes the main prompt as the image number increases. Syntax: `[[red, green, blue]] flower`
+* Add optional metadata to images, allowing you to regenerate and modify them later with the same parameters
+* Now supports native PNG, JPG and WEBP image generation
+* Add Docker support
+
+# [2.1.865](https://github.com/lllyasviel/Fooocus/releases/tag/2.1.865)
+
+* Various bugfixes
+* Add authentication to --listen
+
# 2.1.864
* New model list. See also discussions.
diff --git a/webui.py b/webui.py
index 514a6a65..67d84650 100644
--- a/webui.py
+++ b/webui.py
@@ -357,7 +357,7 @@ with shared.gradio_root:
if args_manager.args.disable_image_log:
return gr.update(value='')
-    return gr.update(value=f'<a href="file={get_current_html_path()}" target="_blank">\U0001F4DA History Log</a>')
+    return gr.update(value=f'<a href="file={get_current_html_path(output_format)}" target="_blank">\U0001F4DA History Log</a>')
history_link = gr.HTML()
shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False)
@@ -417,11 +417,15 @@ with shared.gradio_root:
for i, (n, v) in enumerate(modules.config.default_loras):
with gr.Row():
+ lora_enabled = gr.Checkbox(label='Enable', value=True,
+ elem_classes=['lora_enable', 'min_check'])
lora_model = gr.Dropdown(label=f'LoRA {i + 1}',
- choices=['None'] + modules.config.lora_filenames, value=n)
- lora_weight = gr.Slider(label='Weight', minimum=-2, maximum=2, step=0.01, value=v,
+ choices=['None'] + modules.config.lora_filenames, value=n,
+ elem_classes='lora_model')
+ lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight,
+ maximum=modules.config.default_loras_max_weight, step=0.01, value=v,
elem_classes='lora_weight')
- lora_ctrls += [lora_model, lora_weight]
+ lora_ctrls += [lora_enabled, lora_model, lora_weight]
with gr.Row():
model_refresh = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button')
@@ -493,6 +497,10 @@ with shared.gradio_root:
interactive=modules.config.default_performance != 'Extreme Speed',
info='Disable intermediate results during generation, only show final gallery.')
+ disable_seed_increment = gr.Checkbox(label='Disable seed increment',
+ info='Disable automatic seed increment when image number is > 1.',
+ value=False)
+
black_out_nsfw = gr.Checkbox(label='Black Out NSFW', value=modules.config.default_black_out_nsfw,
interactive=not modules.config.default_black_out_nsfw,
info='Use black image if NSFW is detected.')
@@ -590,8 +598,8 @@ with shared.gradio_root:
gr.update(choices=['None'] + modules.config.model_filenames)]
if not args_manager.args.disable_preset_selection:
results += [gr.update(choices=modules.config.available_presets)]
- for i in range(flags.lora_count):
- results += [gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
+ for i in range(modules.config.default_max_lora_number):
+ results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
return results
model_refresh_output = [base_model, refiner_model]
@@ -710,7 +718,7 @@ with shared.gradio_root:
ctrls += [input_image_checkbox, current_tab]
ctrls += [uov_method, uov_input_image]
ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt, inpaint_mask_image]
- ctrls += [disable_preview, disable_intermediate_results, black_out_nsfw]
+ ctrls += [disable_preview, disable_intermediate_results, disable_seed_increment, black_out_nsfw]
ctrls += [adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg]
ctrls += [sampler_name, scheduler_name]
ctrls += [overwrite_step, overwrite_switch, overwrite_width, overwrite_height, overwrite_vary_strength]
@@ -720,6 +728,11 @@ with shared.gradio_root:
ctrls += freeu_ctrls
ctrls += inpaint_ctrls
+ if not args_manager.args.disable_metadata:
+ ctrls += [save_metadata_to_images, metadata_scheme]
+
+ ctrls += ip_ctrls
+
if not args_manager.args.disable_metadata:
ctrls += [save_metadata_to_images, metadata_scheme]