Merge branch 'main_upstream'

# Conflicts:
#	css/style.css
#	fooocus_colab.ipynb
#	fooocus_version.py
#	launch.py
#	modules/async_worker.py
#	modules/config.py
#	modules/flags.py
#	modules/meta_parser.py
#	webui.py
Manuel Schmid 2024-03-18 21:27:56 +01:00
commit 679c02a09f
33 changed files with 513 additions and 273 deletions

View File

@ -21,6 +21,7 @@ body:
 5. Try a fresh installation of Fooocus in a different directory - see if a clean installation solves the issue
Before making an issue report, please check that the issue hasn't been reported recently.
options:
- label: The issue has not been resolved by following the [troubleshooting guide](https://github.com/lllyasviel/Fooocus/blob/main/troubleshoot.md)
- label: The issue exists on a clean installation of Fooocus
- label: The issue exists in the current version of Fooocus
- label: The issue has not already been reported recently

View File

@ -10,7 +10,7 @@ RUN apt-get update -y && \
COPY requirements_docker.txt requirements_versions.txt /tmp/
RUN pip install --no-cache-dir -r /tmp/requirements_docker.txt -r /tmp/requirements_versions.txt && \
rm -f /tmp/requirements_docker.txt /tmp/requirements_versions.txt
RUN pip install --no-cache-dir xformers==0.0.22 --no-dependencies
RUN pip install --no-cache-dir xformers==0.0.23 --no-dependencies
RUN curl -fsL -o /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2 https://cdn-media.huggingface.co/frpc-gradio-0.2/frpc_linux_amd64 && \
chmod +x /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2

View File

@ -52,7 +52,4 @@ if args_parser.args.disable_analytics:
if args_parser.args.disable_in_browser:
args_parser.args.in_browser = False
if args_parser.args.temp_path is None:
args_parser.args.temp_path = os.path.join(gettempdir(), 'Fooocus')
args = args_parser.args

View File

@ -1,5 +1,136 @@
/* based on https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/v1.6.0/style.css */
.loader-container {
display: flex; /* Use flex to align items horizontally */
align-items: center; /* Center items vertically within the container */
white-space: nowrap; /* Prevent line breaks within the container */
}
.loader {
border: 8px solid #f3f3f3; /* Light grey */
border-top: 8px solid #3498db; /* Blue */
border-radius: 50%;
width: 30px;
height: 30px;
animation: spin 2s linear infinite;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
/* Style the progress bar */
progress {
appearance: none; /* Remove default styling */
height: 20px; /* Set the height of the progress bar */
border-radius: 5px; /* Round the corners of the progress bar */
background-color: #f3f3f3; /* Light grey background */
width: 100%;
}
/* Style the progress bar container */
.progress-container {
margin-left: 20px;
margin-right: 20px;
flex-grow: 1; /* Allow the progress container to take up remaining space */
}
/* Set the color of the progress bar fill */
progress::-webkit-progress-value {
background-color: #3498db; /* Blue color for the fill */
}
progress::-moz-progress-bar {
background-color: #3498db; /* Blue color for the fill in Firefox */
}
/* Style the text on the progress bar */
progress::after {
content: attr(value '%'); /* Display the progress value followed by '%' */
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
color: white; /* Set text color */
font-size: 14px; /* Set font size */
}
/* Style other texts */
.loader-container > span {
margin-left: 5px; /* Add spacing between the progress bar and the text */
}
.progress-bar > .generating {
display: none !important;
}
.progress-bar{
height: 30px !important;
}
.type_row{
height: 80px !important;
}
.type_row_half{
height: 32px !important;
}
.scroll-hide{
resize: none !important;
}
.refresh_button{
border: none !important;
background: none !important;
font-size: none !important;
box-shadow: none !important;
}
.advanced_check_row{
width: 250px !important;
}
.min_check{
min-width: min(1px, 100%) !important;
}
.resizable_area {
resize: vertical;
overflow: auto !important;
}
.aspect_ratios label {
width: 140px !important;
}
.aspect_ratios label span {
white-space: nowrap !important;
}
.aspect_ratios label input {
margin-left: -5px !important;
}
.lora_enable label {
height: 100%;
}
.lora_enable label input {
margin: auto;
}
.lora_enable label span {
display: none;
}
@-moz-document url-prefix() {
.lora_weight input[type=number] {
width: 80px;
}
}
#context-menu{
z-index:9999;
position:absolute;
@ -217,4 +348,49 @@
#stylePreviewOverlay.lower-half {
transform: translate(-140px, -140px);
}
/* scrollable box for style selections */
.contain .tabs {
height: 100%;
}
.contain .tabs .tabitem.style_selections_tab {
height: 100%;
}
.contain .tabs .tabitem.style_selections_tab > div:first-child {
height: 100%;
}
.contain .tabs .tabitem.style_selections_tab .style_selections {
min-height: 200px;
height: 100%;
}
.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] {
position: absolute; /* remove this to disable scrolling within the checkbox-group */
overflow: auto;
padding-right: 2px;
max-height: 100%;
}
.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] label {
/* max-width: calc(35% - 15px) !important; */ /* add this to enable 3 columns layout */
flex: calc(50% - 5px) !important;
}
.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] label span {
/* white-space:nowrap; */ /* add this to disable text wrapping (better choice for 3 columns layout) */
overflow: hidden;
text-overflow: ellipsis;
}
/* styles preview tooltip */
.preview-tooltip {
background-color: #fff8;
font-family: monospace;
text-align: center;
border-radius-top: 5px;
display: none; /* remove this to enable tooltip in preview image */
}

View File

@ -35,7 +35,7 @@
{
"cell_type": "code",
"source": [
"!python entry_with_update.py --share --attention-split --disable-offload-from-vram --always-high-vram"
"!python entry_with_update.py --share --always-high-vram"
],
"metadata": {
"id": "gPZEYpsfMDxQ"

View File

@ -1 +1 @@
version = '2.2.1 (mashb1t)'
version = '2.3.0 (mashb1t)'

View File

@ -150,9 +150,12 @@ function initStylePreviewOverlay() {
let overlayVisible = false;
const samplesPath = document.querySelector("meta[name='samples-path']").getAttribute("content")
const overlay = document.createElement('div');
const tooltip = document.createElement('div');
tooltip.className = 'preview-tooltip';
overlay.appendChild(tooltip);
overlay.id = 'stylePreviewOverlay';
document.body.appendChild(overlay);
document.addEventListener('mouseover', function(e) {
document.addEventListener('mouseover', function (e) {
const label = e.target.closest('.style_selections label');
if (!label) return;
label.removeEventListener("mouseout", onMouseLeave);
@ -162,9 +165,12 @@ function initStylePreviewOverlay() {
const originalText = label.querySelector("span").getAttribute("data-original-text");
const name = originalText || label.querySelector("span").textContent;
overlay.style.backgroundImage = `url("${samplesPath.replace(
"fooocus_v2",
name.toLowerCase().replaceAll(" ", "_")
"fooocus_v2",
name.toLowerCase().replaceAll(" ", "_")
).replaceAll("\\", "\\\\")}")`;
tooltip.textContent = name;
function onMouseLeave() {
overlayVisible = false;
overlay.style.opacity = "0";
@ -172,8 +178,8 @@ function initStylePreviewOverlay() {
label.removeEventListener("mouseout", onMouseLeave);
}
});
document.addEventListener('mousemove', function(e) {
if(!overlayVisible) return;
document.addEventListener('mousemove', function (e) {
if (!overlayVisible) return;
overlay.style.left = `${e.clientX}px`;
overlay.style.top = `${e.clientY}px`;
overlay.className = e.clientY > window.innerHeight / 2 ? "lower-half" : "upper-half";

View File

@ -47,9 +47,12 @@
"Generate mask from image": "Generate mask from image",
"Setting": "Setting",
"Style": "Style",
"Preset": "Preset",
"Performance": "Performance",
"Speed": "Speed",
"Quality": "Quality",
"Extreme Speed": "Extreme Speed",
"Lightning": "Lightning",
"Aspect Ratios": "Aspect Ratios",
"width \u00d7 height": "width \u00d7 height",
"Image Number": "Image Number",
@ -60,6 +63,7 @@
"Seed": "Seed",
"Disable seed increment": "Disable seed increment",
"Disable automatic seed increment when image number is > 1.": "Disable automatic seed increment when image number is > 1.",
"Read wildcards in order": "Read wildcards in order",
"\ud83d\udcda History Log": "\uD83D\uDCDA History Log",
"Image Style": "Image Style",
"Fooocus V2": "Fooocus V2",
@ -377,7 +381,6 @@
"B2": "B2",
"S1": "S1",
"S2": "S2",
"Extreme Speed": "Extreme Speed",
"\uD83D\uDD0E Type here to search styles ...": "\uD83D\uDD0E Type here to search styles ...",
"Type prompt here.": "Type prompt here.",
"Outpaint Expansion Direction:": "Outpaint Expansion Direction:",
@ -391,5 +394,6 @@
"Metadata Scheme": "Metadata Scheme",
"Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.",
"fooocus (json)": "fooocus (json)",
"a1111 (plain text)": "a1111 (plain text)"
"a1111 (plain text)": "a1111 (plain text)",
"Unsupported image type in input": "Unsupported image type in input"
}

View File

@ -1,6 +1,6 @@
import os
import sys
import ssl
import sys
print('[System ARGV] ' + str(sys.argv))
@ -15,12 +15,11 @@ if "GRADIO_SERVER_PORT" not in os.environ:
ssl._create_default_https_context = ssl._create_unverified_context
import platform
import fooocus_version
from build_launcher import build_launcher
from modules.launch_util import is_installed, run, python, run_pip, requirements_met
from modules.launch_util import is_installed, run, python, run_pip, requirements_met, delete_folder_content
from modules.model_loader import load_file_from_url
from modules import config
@ -42,7 +41,7 @@ def prepare_environment():
if TRY_INSTALL_XFORMERS:
if REINSTALL_ALL or not is_installed("xformers"):
xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.23')
if platform.system() == "Windows":
if platform.python_version().startswith("3.10"):
run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True)
@ -68,6 +67,7 @@ vae_approx_filenames = [
'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors')
]
def ini_args():
from args_manager import args
return args
@ -84,6 +84,15 @@ if args.gpu_device_id is not None:
from modules import config
os.environ["U2NET_HOME"] = config.path_inpaint
os.environ['GRADIO_TEMP_DIR'] = config.temp_path
if config.temp_path_cleanup_on_launch:
print(f'[Cleanup] Attempting to delete content of temp dir {config.temp_path}')
result = delete_folder_content(config.temp_path, '[Cleanup] ')
if result:
print("[Cleanup] Cleanup successful")
else:
print(f"[Cleanup] Failed to delete content of temp dir.")
def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads):
for file_name, url in vae_approx_filenames:

View File

@ -1,5 +1,6 @@
import threading
import os
import re
from modules.patch import PatchSettings, patch_settings, patch_all
patch_all()
@ -49,8 +50,8 @@ def worker():
from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion, apply_arrays
from modules.private_logger import log
from extras.expansion import safe_str
from modules.util import remove_empty_str, HWC3, resize_image, \
get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate
from modules.util import remove_empty_str, HWC3, resize_image, get_image_shape_ceil, set_image_shape_ceil, \
get_shape_ceil, resample_image, erode_or_dilate, get_enabled_loras
from modules.upscaler import perform_upscale
from modules.flags import Performance
from modules.meta_parser import get_metadata_parser, MetadataScheme
@ -131,14 +132,6 @@ def worker():
async_task.results = async_task.results + [wall]
return
def apply_enabled_loras(loras):
enabled_loras = []
for lora_enabled, lora_model, lora_weight in loras:
if lora_enabled:
enabled_loras.append([lora_model, lora_weight])
return enabled_loras
@torch.no_grad()
@torch.inference_mode()
def handler(async_task):
@ -157,12 +150,13 @@ def worker():
image_number = args.pop()
output_format = args.pop()
image_seed = args.pop()
read_wildcards_in_order = args.pop()
sharpness = args.pop()
guidance_scale = args.pop()
base_model_name = args.pop()
refiner_model_name = args.pop()
refiner_switch = args.pop()
loras = apply_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop()), ] for _ in range(modules.config.default_max_lora_number)])
loras = get_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop())] for _ in range(modules.config.default_max_lora_number)])
input_image_checkbox = args.pop()
current_tab = args.pop()
uov_method = args.pop()
@ -385,7 +379,7 @@ def worker():
print(f'[Inpaint] Current inpaint model is {inpaint_patch_model_path}')
if refiner_model_name == 'None':
use_synthetic_refiner = True
refiner_switch = 0.5
refiner_switch = 0.8
else:
inpaint_head_model_path, inpaint_patch_model_path = None, None
print(f'[Inpaint] Parameterized inpaint is disabled.')
@ -460,16 +454,16 @@ def worker():
for i in range(image_number):
if disable_seed_increment:
task_seed = seed
task_seed = seed % (constants.MAX_SEED + 1)
else:
task_seed = (seed + i) % (constants.MAX_SEED + 1) # randint is inclusive, % is not
task_rng = random.Random(task_seed) # may bind to inpaint noise in the future
task_prompt = apply_wildcards(prompt, task_rng)
task_prompt = apply_wildcards(prompt, task_rng, i, read_wildcards_in_order)
task_prompt = apply_arrays(task_prompt, i)
task_negative_prompt = apply_wildcards(negative_prompt, task_rng)
task_extra_positive_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_positive_prompts]
task_extra_negative_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_negative_prompts]
task_negative_prompt = apply_wildcards(negative_prompt, task_rng, i, read_wildcards_in_order)
task_extra_positive_prompts = [apply_wildcards(pmt, task_rng, i, read_wildcards_in_order) for pmt in extra_positive_prompts]
task_extra_negative_prompts = [apply_wildcards(pmt, task_rng, i, read_wildcards_in_order) for pmt in extra_negative_prompts]
positive_basic_workloads = []
negative_basic_workloads = []
@ -827,7 +821,7 @@ def worker():
try:
if async_task.last_stop is not False:
ldm_patched.model_management.interrupt_current_processing()
ldm_patched.modules.model_management.interrupt_current_processing()
positive_cond, negative_cond = task['c'], task['uc']
if 'cn' in goals:
@ -902,7 +896,7 @@ def worker():
d.append(('Sampler', 'sampler', sampler_name))
d.append(('Scheduler', 'scheduler', scheduler_name))
d.append(('Seed', 'seed', task['task_seed']))
d.append(('Seed', 'seed', str(task['task_seed'])))
if freeu_enabled:
d.append(('FreeU', 'freeu', str((freeu_b1, freeu_b2, freeu_s1, freeu_s2))))

View File

@ -3,12 +3,14 @@ import json
import math
import numbers
import args_manager
import tempfile
import modules.flags
import modules.sdxl_styles
from modules.model_loader import load_file_from_url
from modules.util import get_files_from_folder, makedirs_with_log
from modules.flags import Performance, MetadataScheme
from modules.flags import OutputFormat, Performance, MetadataScheme
def get_config_path(key, default_value):
env = os.getenv(key)
@ -18,6 +20,7 @@ def get_config_path(key, default_value):
else:
return os.path.abspath(default_value)
config_path = get_config_path('config_path', "./config.txt")
config_example_path = get_config_path('config_example_path', "config_modification_tutorial.txt")
config_dict = {}
@ -94,7 +97,7 @@ def try_load_deprecated_user_path_config():
try_load_deprecated_user_path_config()
def list_presets():
def get_presets():
preset_folder = 'presets'
presets = ['initial']
if not os.path.exists(preset_folder):
@ -103,11 +106,11 @@ def list_presets():
return presets + [f[:f.index(".json")] for f in os.listdir(preset_folder) if f.endswith('.json')]
available_presets = list_presets()
available_presets = get_presets()
def update_presets():
global available_presets
available_presets = list_presets()
available_presets = get_presets()
def try_get_preset_content(preset):
if isinstance(preset, str):
@ -125,26 +128,16 @@ def try_get_preset_content(preset):
print(e)
return {}
def try_load_preset_global(preset):
global config_dict
if isinstance(preset, str):
preset_path = os.path.abspath(f'./presets/{preset}.json')
try:
if os.path.exists(preset_path):
with open(preset_path, "r", encoding="utf-8") as json_file:
config_dict.update(json.load(json_file))
print(f'Loaded preset: {preset_path}')
else:
raise FileNotFoundError
except Exception as e:
print(f'Load preset [{preset_path}] failed')
print(e)
try:
with open(os.path.abspath(f'./presets/default.json'), "r", encoding="utf-8") as json_file:
config_dict.update(json.load(json_file))
except Exception as e:
print(f'Load default preset failed.')
print(e)
available_presets = get_presets()
preset = args_manager.args.preset
try_load_preset_global(preset)
config_dict.update(try_get_preset_content(preset))
def get_path_output() -> str:
"""
@ -153,7 +146,7 @@ def get_path_output() -> str:
global config_dict
path_output = get_dir_or_set_default('path_outputs', '../outputs/', make_directory=True)
if args_manager.args.output_path:
print(f'[CONFIG] Overriding config value path_outputs with {args_manager.args.output_path}')
print(f'Overriding config value path_outputs with {args_manager.args.output_path}')
config_dict['path_outputs'] = path_output = args_manager.args.output_path
return path_output
@ -213,6 +206,7 @@ path_controlnet = get_dir_or_set_default('path_controlnet', '../models/controlne
path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vision/')
path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion')
path_safety_checker_models = get_dir_or_set_default('path_safety_checker_models', '../models/safety_checker_models/')
path_wildcards = get_dir_or_set_default('path_wildcards', '../wildcards/')
path_outputs = get_path_output()
def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False):
@ -243,6 +237,36 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_
return default_value
def init_temp_path(path: str | None, default_path: str) -> str:
if args_manager.args.temp_path:
path = args_manager.args.temp_path
if path != '' and path != default_path:
try:
if not os.path.isabs(path):
path = os.path.abspath(path)
os.makedirs(path, exist_ok=True)
print(f'Using temp path {path}')
return path
except Exception as e:
print(f'Could not create temp path {path}. Reason: {e}')
print(f'Using default temp path {default_path} instead.')
os.makedirs(default_path, exist_ok=True)
return default_path
default_temp_path = os.path.join(tempfile.gettempdir(), 'fooocus')
temp_path = init_temp_path(get_config_item_or_set_default(
key='temp_path',
default_value=default_temp_path,
validator=lambda x: isinstance(x, str),
), default_temp_path)
temp_path_cleanup_on_launch = get_config_item_or_set_default(
key='temp_path_cleanup_on_launch',
default_value=True,
validator=lambda x: isinstance(x, bool)
)
default_base_model_name = default_model = get_config_item_or_set_default(
key='default_model',
default_value='model.safetensors',
@ -277,28 +301,37 @@ default_loras = get_config_item_or_set_default(
key='default_loras',
default_value=[
[
True,
"None",
1.0
],
[
True,
"None",
1.0
],
[
True,
"None",
1.0
],
[
True,
"None",
1.0
],
[
True,
"None",
1.0
]
],
validator=lambda x: isinstance(x, list) and all(len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number) for y in x)
validator=lambda x: isinstance(x, list) and all(
len(y) == 3 and isinstance(y[0], bool) and isinstance(y[1], str) and isinstance(y[2], numbers.Number)
or len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number)
for y in x)
)
default_loras = [(y[0], y[1], y[2]) if len(y) == 3 else (True, y[0], y[1]) for y in default_loras]
default_max_lora_number = get_config_item_or_set_default(
key='default_max_lora_number',
default_value=len(default_loras) if isinstance(default_loras, list) and len(default_loras) > 0 else 5,
@ -363,7 +396,7 @@ default_max_image_number = get_config_item_or_set_default(
default_output_format = get_config_item_or_set_default(
key='default_output_format',
default_value='png',
validator=lambda x: x in modules.flags.output_formats
validator=lambda x: x in OutputFormat.list()
)
default_image_number = get_config_item_or_set_default(
key='default_image_number',
@ -540,22 +573,27 @@ with open(config_example_path, "w", encoding="utf-8") as json_file:
model_filenames = []
lora_filenames = []
wildcard_filenames = []
sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors'
sdxl_lightning_lora = 'sdxl_lightning_4step_lora.safetensors'
def get_model_filenames(folder_paths, name_filter=None):
extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch']
def get_model_filenames(folder_paths, extensions=None, name_filter=None):
if extensions is None:
extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch']
files = []
for folder in folder_paths:
files += get_files_from_folder(folder, extensions, name_filter)
return files
def update_all_model_names():
global model_filenames, lora_filenames
def update_files():
global model_filenames, lora_filenames, wildcard_filenames, available_presets
model_filenames = get_model_filenames(paths_checkpoints)
lora_filenames = get_model_filenames(paths_loras)
wildcard_filenames = get_files_from_folder(path_wildcards, ['.txt'])
available_presets = get_presets()
return
@ -679,4 +717,4 @@ def downloading_upscale_model():
return os.path.join(path_upscale_models, 'fooocus_upscaler_s409985e5.bin')
update_all_model_names()
update_files()
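
The LoRA-related config change above is worth a small illustration: the validator now accepts both the old two-element `[name, weight]` entries and the new three-element `[enabled, name, weight]` entries, and the follow-up list comprehension normalizes everything to `(enabled, name, weight)`. A minimal standalone sketch of that normalization step (entry values here are made up):

# Sketch of the default_loras normalization shown in the diff above; entries are illustrative.
old_style = [["None", 1.0], ["sd_xl_offset_example-lora_1.0.safetensors", 0.5]]
new_style = [[True, "None", 1.0], [False, "some_lora.safetensors", 0.6]]

def normalize(loras):
    # old entries get enabled=True; new entries pass through unchanged
    return [(y[0], y[1], y[2]) if len(y) == 3 else (True, y[0], y[1]) for y in loras]

print(normalize(old_style))  # [(True, 'None', 1.0), (True, 'sd_xl_offset_example-lora_1.0.safetensors', 0.5)]
print(normalize(new_style))  # [(True, 'None', 1.0), (False, 'some_lora.safetensors', 0.6)]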

View File

@ -73,14 +73,14 @@ class StableDiffusionModel:
loras_to_load = []
for name, weight in loras:
if name == 'None':
for filename, weight in loras:
if filename == 'None':
continue
if os.path.exists(name):
lora_filename = name
if os.path.exists(filename):
lora_filename = filename
else:
lora_filename = get_file_from_folder_list(name, modules.config.paths_loras)
lora_filename = get_file_from_folder_list(filename, modules.config.paths_loras)
if not os.path.exists(lora_filename):
print(f'Lora file not found: {lora_filename}')

View File

@ -11,7 +11,7 @@ from extras.expansion import FooocusExpansion
from ldm_patched.modules.model_base import SDXL, SDXLRefiner
from modules.sample_hijack import clip_separate
from modules.util import get_file_from_folder_list
from modules.util import get_file_from_folder_list, get_enabled_loras
model_base = core.StableDiffusionModel()
@ -254,7 +254,7 @@ def refresh_everything(refiner_model_name, base_model_name, loras,
refresh_everything(
refiner_model_name=modules.config.default_refiner_model_name,
base_model_name=modules.config.default_base_model_name,
loras=modules.config.default_loras
loras=get_enabled_loras(modules.config.default_loras)
)

View File

@ -67,7 +67,7 @@ default_parameters = {
cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0)
} # stop, weight
output_formats = ['png', 'jpg', 'webp']
output_formats = ['png', 'jpeg', 'webp']
inpaint_mask_models = ['u2net', 'u2netp', 'u2net_human_seg', 'u2net_cloth_seg', 'silueta', 'isnet-general-use', 'isnet-anime', 'sam']
inpaint_mask_cloth_category = ['full', 'upper', 'lower']
@ -96,6 +96,16 @@ metadata_scheme = [
controlnet_image_count = 4
class OutputFormat(Enum):
PNG = 'png'
JPEG = 'jpeg'
WEBP = 'webp'
@classmethod
def list(cls) -> list:
return list(map(lambda c: c.value, cls))
class Steps(IntEnum):
QUALITY = 60
SPEED = 30
@ -120,6 +130,12 @@ class Performance(Enum):
def list(cls) -> list:
return list(map(lambda c: c.value, cls))
@classmethod
def has_restricted_features(cls, x) -> bool:
if isinstance(x, Performance):
x = x.value
return x in [cls.EXTREME_SPEED.value, cls.LIGHTNING.value]
def steps(self) -> int | None:
return Steps[self.name].value if Steps[self.name] else None
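
A standalone sketch of how the new `OutputFormat` enum and `Performance.has_restricted_features` behave, based on the code shown above (only `EXTREME_SPEED` and `LIGHTNING` are visible in this hunk; the `QUALITY` and `SPEED` members are assumed):

from enum import Enum

class OutputFormat(Enum):
    PNG = 'png'
    JPEG = 'jpeg'
    WEBP = 'webp'

    @classmethod
    def list(cls) -> list:
        return [c.value for c in cls]

class Performance(Enum):
    QUALITY = 'Quality'              # assumed member
    SPEED = 'Speed'                  # assumed member
    EXTREME_SPEED = 'Extreme Speed'
    LIGHTNING = 'Lightning'

    @classmethod
    def has_restricted_features(cls, x) -> bool:
        # accepts either an enum member or its raw string value
        if isinstance(x, Performance):
            x = x.value
        return x in [cls.EXTREME_SPEED.value, cls.LIGHTNING.value]

print(OutputFormat.list())                                      # ['png', 'jpeg', 'webp']
print(Performance.has_restricted_features('Lightning'))         # True
print(Performance.has_restricted_features(Performance.SPEED))   # False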

View File

@ -17,7 +17,7 @@ from gradio_client.documentation import document, set_documentation_group
from gradio_client.serializing import ImgSerializable
from PIL import Image as _Image # using _ to minimize namespace pollution
from gradio import processing_utils, utils
from gradio import processing_utils, utils, Error
from gradio.components.base import IOComponent, _Keywords, Block
from gradio.deprecation import warn_style_method_deprecation
from gradio.events import (
@ -275,7 +275,10 @@ class Image(
x, mask = x["image"], x["mask"]
assert isinstance(x, str)
im = processing_utils.decode_base64_to_image(x)
try:
im = processing_utils.decode_base64_to_image(x)
except PIL.UnidentifiedImageError:
raise Error("Unsupported image type in input")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
im = im.convert(self.image_mode)

View File

@ -1,136 +1,3 @@
css = '''
.loader-container {
display: flex; /* Use flex to align items horizontally */
align-items: center; /* Center items vertically within the container */
white-space: nowrap; /* Prevent line breaks within the container */
}
.loader {
border: 8px solid #f3f3f3; /* Light grey */
border-top: 8px solid #3498db; /* Blue */
border-radius: 50%;
width: 30px;
height: 30px;
animation: spin 2s linear infinite;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
/* Style the progress bar */
progress {
appearance: none; /* Remove default styling */
height: 20px; /* Set the height of the progress bar */
border-radius: 5px; /* Round the corners of the progress bar */
background-color: #f3f3f3; /* Light grey background */
width: 100%;
}
/* Style the progress bar container */
.progress-container {
margin-left: 20px;
margin-right: 20px;
flex-grow: 1; /* Allow the progress container to take up remaining space */
}
/* Set the color of the progress bar fill */
progress::-webkit-progress-value {
background-color: #3498db; /* Blue color for the fill */
}
progress::-moz-progress-bar {
background-color: #3498db; /* Blue color for the fill in Firefox */
}
/* Style the text on the progress bar */
progress::after {
content: attr(value '%'); /* Display the progress value followed by '%' */
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
color: white; /* Set text color */
font-size: 14px; /* Set font size */
}
/* Style other texts */
.loader-container > span {
margin-left: 5px; /* Add spacing between the progress bar and the text */
}
.progress-bar > .generating {
display: none !important;
}
.progress-bar{
height: 30px !important;
}
.type_row{
height: 80px !important;
}
.type_row_half{
height: 32px !important;
}
.scroll-hide{
resize: none !important;
}
.refresh_button{
border: none !important;
background: none !important;
font-size: none !important;
box-shadow: none !important;
}
.advanced_check_row{
width: 250px !important;
}
.min_check{
min-width: min(1px, 100%) !important;
}
.resizable_area {
resize: vertical;
overflow: auto !important;
}
.aspect_ratios label {
width: 140px !important;
}
.aspect_ratios label span {
white-space: nowrap !important;
}
.aspect_ratios label input {
margin-left: -5px !important;
}
.lora_enable label {
height: 100%;
}
.lora_enable label input {
margin: auto;
}
.lora_enable label span {
display: none;
}
@-moz-document url-prefix() {
.lora_weight input[type=number] {
width: 80px;
}
}
'''
progress_html = '''
<div class="loader-container">
<div class="loader"></div>

View File

@ -1,6 +1,7 @@
import os
import importlib
import importlib.util
import shutil
import subprocess
import sys
import re
@ -9,9 +10,6 @@ import importlib.metadata
import packaging.version
from packaging.requirements import Requirement
logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh...
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
@ -101,3 +99,19 @@ def requirements_met(requirements_file):
return True
def delete_folder_content(folder, prefix=None):
result = True
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print(f'{prefix}Failed to delete {file_path}. Reason: {e}')
result = False
return result
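
The new `delete_folder_content` helper can be exercised on its own. A minimal sketch, mirroring the function from the diff and using a scratch directory (paths and file names here are made up):

import os
import shutil
import tempfile

# build a scratch directory with a file and a sub-directory to clean up
scratch = tempfile.mkdtemp(prefix='fooocus_cleanup_demo_')
open(os.path.join(scratch, 'leftover.png'), 'w').close()
os.makedirs(os.path.join(scratch, 'subdir'), exist_ok=True)

def delete_folder_content(folder, prefix=None):
    result = True
    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)        # plain files and symlinks
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)    # nested directories
        except Exception as e:
            print(f'{prefix}Failed to delete {file_path}. Reason: {e}')
            result = False
    return result

print(delete_folder_content(scratch, '[Cleanup] '))  # True; the folder itself is kept, only its content is removed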

View File

@ -376,7 +376,7 @@ class A1111MetadataParser(MetadataParser):
data[key] = filename
break
if 'lora_hashes' in data:
if 'lora_hashes' in data and data['lora_hashes'] != '':
lora_filenames = modules.config.lora_filenames.copy()
if modules.config.sdxl_lcm_lora in lora_filenames:
lora_filenames.remove(modules.config.sdxl_lcm_lora)
@ -430,16 +430,15 @@ class A1111MetadataParser(MetadataParser):
if key in data:
generation_params[self.fooocus_to_a1111[key]] = data[key]
lora_hashes = []
for index, (lora_name, lora_weight, lora_hash) in enumerate(self.loras):
# workaround for Fooocus not knowing LoRA name in LoRA metadata
lora_hashes.append(f'{lora_name}: {lora_hash}: {lora_weight}')
lora_hashes_string = ', '.join(lora_hashes)
if len(self.loras) > 0:
lora_hashes = []
for index, (lora_name, lora_weight, lora_hash) in enumerate(self.loras):
# workaround for Fooocus not knowing LoRA name in LoRA metadata
lora_hashes.append(f'{lora_name}: {lora_hash}: {lora_weight}')
lora_hashes_string = ', '.join(lora_hashes)
generation_params[self.fooocus_to_a1111['lora_hashes']] = lora_hashes_string
generation_params |= {
self.fooocus_to_a1111['lora_hashes']: lora_hashes_string,
self.fooocus_to_a1111['version']: data['version']
}
generation_params[self.fooocus_to_a1111['version']] = data['version']
if modules.config.metadata_created_by != '':
generation_params[self.fooocus_to_a1111['created_by']] = modules.config.metadata_created_by

View File

@ -6,8 +6,9 @@ import urllib.parse
from PIL import Image
from PIL.PngImagePlugin import PngInfo
from modules.util import generate_temp_filename
from modules.flags import OutputFormat
from modules.meta_parser import MetadataParser, get_exif
from modules.util import generate_temp_filename
log_cache = {}
@ -21,7 +22,7 @@ def get_current_html_path(output_format=None):
def log(img, metadata, metadata_parser: MetadataParser | None = None, output_format=None) -> str:
path_outputs = args_manager.args.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs
path_outputs = modules.config.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs
output_format = output_format if output_format else modules.config.default_output_format
date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format)
os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True)
@ -29,7 +30,7 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for
parsed_parameters = metadata_parser.parse_string(metadata.copy()) if metadata_parser is not None else ''
image = Image.fromarray(img)
if output_format == 'png':
if output_format == OutputFormat.PNG.value:
if parsed_parameters != '':
pnginfo = PngInfo()
pnginfo.add_text('parameters', parsed_parameters)
@ -37,9 +38,9 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for
else:
pnginfo = None
image.save(local_temp_filename, pnginfo=pnginfo)
elif output_format == 'jpg':
elif output_format == OutputFormat.JPEG.value:
image.save(local_temp_filename, quality=95, optimize=True, progressive=True, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif())
elif output_format == 'webp':
elif output_format == OutputFormat.WEBP.value:
image.save(local_temp_filename, quality=95, lossless=False, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif())
else:
image.save(local_temp_filename)

View File

@ -2,13 +2,12 @@ import os
import re
import json
import math
import modules.config
from modules.util import get_files_from_folder
# cannot use modules.config - validators causing circular imports
styles_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../sdxl_styles/'))
wildcards_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../wildcards/'))
wildcards_max_bfs_depth = 64
@ -60,7 +59,7 @@ def apply_style(style, positive):
return p.replace('{prompt}', positive).splitlines(), n.splitlines()
def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
def apply_wildcards(wildcard_text, rng, i, read_wildcards_in_order):
for _ in range(wildcards_max_bfs_depth):
placeholders = re.findall(r'__([\w-]+)__', wildcard_text)
if len(placeholders) == 0:
@ -69,10 +68,14 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
print(f'[Wildcards] processing: {wildcard_text}')
for placeholder in placeholders:
try:
words = open(os.path.join(directory, f'{placeholder}.txt'), encoding='utf-8').read().splitlines()
matches = [x for x in modules.config.wildcard_filenames if os.path.splitext(os.path.basename(x))[0] == placeholder]
words = open(os.path.join(modules.config.path_wildcards, matches[0]), encoding='utf-8').read().splitlines()
words = [x for x in words if x != '']
assert len(words) > 0
wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1)
if read_wildcards_in_order:
wildcard_text = wildcard_text.replace(f'__{placeholder}__', words[i % len(words)], 1)
else:
wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1)
except:
print(f'[Wildcards] Warning: {placeholder}.txt missing or empty. '
f'Using "{placeholder}" as a normal word.')
@ -82,8 +85,9 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path):
print(f'[Wildcards] BFS stack overflow. Current text: {wildcard_text}')
return wildcard_text
def get_words(arrays, totalMult, index):
if(len(arrays) == 1):
if len(arrays) == 1:
return [arrays[0].split(',')[index]]
else:
words = arrays[0].split(',')
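
The behavioral difference introduced by `read_wildcards_in_order` can be shown in isolation: with it enabled, image `i` takes line `i % len(words)` from the wildcard file instead of a seeded random choice. A small sketch of just that selection branch (the word list is made up):

import random

words = ['red', 'green', 'blue']   # pretend content of a wildcard .txt file
rng = random.Random(42)

def pick(i, read_wildcards_in_order):
    # mirrors the selection branch in apply_wildcards above
    return words[i % len(words)] if read_wildcards_in_order else rng.choice(words)

print([pick(i, True) for i in range(5)])   # ['red', 'green', 'blue', 'red', 'green'] — deterministic, in order
print([pick(i, False) for i in range(5)])  # seeded random choices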

View File

@ -163,7 +163,7 @@ def generate_temp_filename(folder='./outputs/', extension='png'):
return date_string, os.path.abspath(result), filename
def get_files_from_folder(folder_path, exensions=None, name_filter=None):
def get_files_from_folder(folder_path, extensions=None, name_filter=None):
if not os.path.isdir(folder_path):
raise ValueError("Folder path is not a valid directory.")
@ -175,7 +175,7 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None):
relative_path = ""
for filename in sorted(files, key=lambda s: s.casefold()):
_, file_extension = os.path.splitext(filename)
if (exensions is None or file_extension.lower() in exensions) and (name_filter is None or name_filter in _):
if (extensions is None or file_extension.lower() in extensions) and (name_filter is None or name_filter in _):
path = os.path.join(relative_path, filename)
filenames.append(path)
@ -360,3 +360,7 @@ def makedirs_with_log(path):
os.makedirs(path, exist_ok=True)
except OSError as error:
print(f'Directory {path} could not be created, reason: {error}')
def get_enabled_loras(loras: list) -> list:
return [[lora[1], lora[2]] for lora in loras if lora[0]]
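
For clarity, a tiny example of what `get_enabled_loras` produces (input rows are illustrative): it drops disabled rows and strips the enable flag from the result, but keeps 'None' placeholders, which are filtered later in core.

def get_enabled_loras(loras: list) -> list:
    return [[lora[1], lora[2]] for lora in loras if lora[0]]

ui_rows = [
    [True,  'sd_xl_offset_example-lora_1.0.safetensors', 0.5],
    [False, 'some_disabled_lora.safetensors', 1.0],   # hypothetical, disabled
    [True,  'None', 1.0],
]
print(get_enabled_loras(ui_rows))
# [['sd_xl_offset_example-lora_1.0.safetensors', 0.5], ['None', 1.0]]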

presets/.gitignore (new file, 6 lines)
View File

@ -0,0 +1,6 @@
*.json
!anime.json
!default.json
!lcm.json
!realistic.json
!sai.json

View File

@ -4,22 +4,27 @@
"default_refiner_switch": 0.5,
"default_loras": [
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
]
@ -33,7 +38,7 @@
"default_prompt_negative": "",
"default_styles": [
"Fooocus V2",
"Fooocus Negative",
"Fooocus Semi Realistic",
"Fooocus Masterpiece"
],
"default_aspect_ratio": "896*1152",

View File

@ -4,22 +4,27 @@
"default_refiner_switch": 0.5,
"default_loras": [
[
true,
"sd_xl_offset_example-lora_1.0.safetensors",
0.1
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
]

View File

@ -4,22 +4,27 @@
"default_refiner_switch": 0.5,
"default_loras": [
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
]

presets/lightning.json (new file, 57 lines)
View File

@ -0,0 +1,57 @@
{
"default_model": "juggernautXL_v8Rundiffusion.safetensors",
"default_refiner": "None",
"default_refiner_switch": 0.5,
"default_loras": [
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
]
],
"default_cfg_scale": 4.0,
"default_sample_sharpness": 2.0,
"default_sampler": "dpmpp_2m_sde_gpu",
"default_scheduler": "karras",
"default_performance": "Lightning",
"default_prompt": "",
"default_prompt_negative": "",
"default_styles": [
"Fooocus V2",
"Fooocus Enhance",
"Fooocus Sharp"
],
"default_aspect_ratio": "1152*896",
"checkpoint_downloads": {
"juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
},
"embeddings_downloads": {},
"lora_downloads": {},
"previous_default_models": [
"juggernautXL_version8Rundiffusion.safetensors",
"juggernautXL_version7Rundiffusion.safetensors",
"juggernautXL_v7Rundiffusion.safetensors",
"juggernautXL_version6Rundiffusion.safetensors",
"juggernautXL_v6Rundiffusion.safetensors"
]
}

View File

@ -4,22 +4,27 @@
"default_refiner_switch": 0.5,
"default_loras": [
[
true,
"SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors",
0.25
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
]

View File

@ -4,22 +4,27 @@
"default_refiner_switch": 0.75,
"default_loras": [
[
true,
"sd_xl_offset_example-lora_1.0.safetensors",
0.5
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
]

View File

@ -256,6 +256,10 @@ The first time you launch the software, it will automatically download models:
After Fooocus 2.1.60, you will also have `run_anime.bat` and `run_realistic.bat`. They are different model presets (and require different models, but they will be automatically downloaded). [Check here for more details](https://github.com/lllyasviel/Fooocus/discussions/679).
After Fooocus 2.3.0 you can also switch presets directly in the browser. Keep in mind that you need to add these arguments if you want to change the default behavior:
* Use `--disable-preset-selection` to disable preset selection in the browser.
* Use `--always-download-new-model` to download missing models when switching presets. The default is to fall back to the `previous_default_models` defined in the corresponding preset; also see the terminal output.
![image](https://github.com/lllyasviel/Fooocus/assets/19834515/d386f817-4bd7-490c-ad89-c1e228c23447)
If you already have these files, you can copy them to the above locations to speed up installation.
@ -312,17 +316,21 @@ Windows: download the [7z file](#download), extract it and run `run.bat`. You ma
### Colab
(Last tested - 2023 Dec 12)
(Last tested - 2024 Mar 18 by [mashb1t](https://github.com/mashb1t))
| Colab | Info
| --- | --- |
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mashb1t/Fooocus/blob/main/fooocus_colab.ipynb) | Fooocus Official
In Colab, you can modify the last line to `!python entry_with_update.py --share` or `!python entry_with_update.py --preset anime --share` or `!python entry_with_update.py --preset realistic --share` for Fooocus Default/Anime/Realistic Edition.
In Colab, you can modify the last line to `!python entry_with_update.py --share --always-high-vram` or `!python entry_with_update.py --share --always-high-vram --preset anime` or `!python entry_with_update.py --share --always-high-vram --preset realistic` for Fooocus Default/Anime/Realistic Edition.
You can also change the preset in the UI. Please be aware that this may lead to timeouts after 60 seconds. If this happens, wait until the download has finished, switch the preset to `initial` and back to the one you selected, or reload the page.
Note that this Colab will disable the refiner by default because free-tier Colab resources are relatively limited (and some "big" features like Image Prompt may cause free-tier Colab to disconnect). We make sure that basic text-to-image always works on free-tier Colab.
Thanks to [camenduru](https://github.com/camenduru)!
Using `--always-high-vram` shifts resource allocation from RAM to VRAM and achieves the best overall balance between performance, flexibility and stability on the default T4 instance. Please find more information [here](https://github.com/lllyasviel/Fooocus/pull/1710#issuecomment-1989185346).
Thanks to [camenduru](https://github.com/camenduru) for the template!
### Linux (Using Anaconda)

Binary image file changed; new version is 8.4 KiB (preview not shown).

View File

@ -3,6 +3,10 @@
"name": "Fooocus Enhance",
"negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)"
},
{
"name": "Fooocus Semi Realistic",
"negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)"
},
{
"name": "Fooocus Sharp",
"prompt": "cinematic still {prompt} . emotional, harmonious, vignette, 4k epic detailed, shot on kodak, 35mm photo, sharp focus, high budget, cinemascope, moody, epic, gorgeous, film grain, grainy",

View File

@ -1,3 +1,14 @@
# [2.3.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.3.0)
* Add performance "Lightning" (based on [SDXL-Lightning 4 step LoRA](https://huggingface.co/ByteDance/SDXL-Lightning/blob/main/sdxl_lightning_4step_lora.safetensors))
* Add preset selection to the UI; disable with argument `--disable-preset-selection`. Use `--always-download-new-model` to download missing models on preset switch.
* Improve face swap consistency by switching to the (synthetic) refiner later in the process
* Add temp path cleanup on startup
* Add support for wildcard subdirectories
* Add scrollable two-column layout for styles for better structure
* Reduce Colab resource needs for T4 instances (default); positively tested with all Image Prompt features
* Improve anime preset, which now uses the style `Fooocus Semi Realistic` instead of `Fooocus Negative` (fewer wet-look images)
# [2.2.1](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.1)
* Fix some small bugs (e.g. image grid, upscale fast 2x, LoRA weight width in Firefox)

View File

@ -29,12 +29,16 @@ def get_task(*args):
return worker.AsyncTask(args=args)
def generate_clicked(task):
def generate_clicked(task: worker.AsyncTask):
import ldm_patched.modules.model_management as model_management
with model_management.interrupt_processing_mutex:
model_management.interrupt_processing = False
# outputs=[progress_html, progress_window, progress_gallery, gallery]
if len(task.args) == 0:
return
execution_start_time = time.perf_counter()
finished = False
@ -92,9 +96,7 @@ title = f'Fooocus {fooocus_version.version}'
if isinstance(args_manager.args.preset, str):
title += ' ' + args_manager.args.preset
shared.gradio_root = gr.Blocks(
title=title,
css=modules.html.css).queue()
shared.gradio_root = gr.Blocks(title=title).queue()
with shared.gradio_root:
currentTask = gr.State(worker.AsyncTask(args=[]))
@ -322,7 +324,7 @@ with shared.gradio_root:
image_number = gr.Slider(label='Image Number', minimum=1, maximum=modules.config.default_max_image_number, step=1, value=modules.config.default_image_number)
output_format = gr.Radio(label='Output Format',
choices=modules.flags.output_formats,
choices=flags.OutputFormat.list(),
value=modules.config.default_output_format)
negative_prompt = gr.Textbox(label='Negative Prompt', show_label=True, placeholder="Type prompt here.",
@ -362,7 +364,7 @@ with shared.gradio_root:
history_link = gr.HTML()
shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False)
with gr.Tab(label='Styles'):
with gr.Tab(label='Styles', elem_classes=['style_selections_tab']):
style_sorter.try_load_sorted_styles(
style_names=legal_style_names,
default_selected=modules.config.default_styles)
@ -415,20 +417,20 @@ with shared.gradio_root:
with gr.Group():
lora_ctrls = []
for i, (n, v) in enumerate(modules.config.default_loras):
for i, (enabled, filename, weight) in enumerate(modules.config.default_loras):
with gr.Row():
lora_enabled = gr.Checkbox(label='Enable', value=True,
lora_enabled = gr.Checkbox(label='Enable', value=enabled,
elem_classes=['lora_enable', 'min_check'], scale=1)
lora_model = gr.Dropdown(label=f'LoRA {i + 1}',
choices=['None'] + modules.config.lora_filenames, value=n,
choices=['None'] + modules.config.lora_filenames, value=filename,
elem_classes='lora_model', scale=5)
lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight,
maximum=modules.config.default_loras_max_weight, step=0.01, value=v,
maximum=modules.config.default_loras_max_weight, step=0.01, value=weight,
elem_classes='lora_weight', scale=5)
lora_ctrls += [lora_enabled, lora_model, lora_weight]
with gr.Row():
model_refresh = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button')
refresh_files = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button')
with gr.Tab(label='Advanced'):
guidance_scale = gr.Slider(label='Guidance Scale', minimum=1.0, maximum=30.0, step=0.01,
value=modules.config.default_cfg_scale,
@ -493,13 +495,14 @@ with shared.gradio_root:
interactive=not modules.config.default_black_out_nsfw,
info='Disable preview during generation.')
disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
value=modules.config.default_performance == 'Extreme Speed',
interactive=modules.config.default_performance != 'Extreme Speed',
value=modules.config.default_performance == flags.Performance.EXTREME_SPEED.value,
interactive=modules.config.default_performance != flags.Performance.EXTREME_SPEED.value,
info='Disable intermediate results during generation, only show final gallery.')
disable_seed_increment = gr.Checkbox(label='Disable seed increment',
info='Disable automatic seed increment when image number is > 1.',
value=False)
read_wildcards_in_order = gr.Checkbox(label="Read wildcards in order", value=False)
black_out_nsfw = gr.Checkbox(label='Black Out NSFW', value=modules.config.default_black_out_nsfw,
interactive=not modules.config.default_black_out_nsfw,
@ -586,26 +589,24 @@ with shared.gradio_root:
def dev_mode_checked(r):
return gr.update(visible=r)
dev_mode.change(dev_mode_checked, inputs=[dev_mode], outputs=[dev_tools],
queue=False, show_progress=False)
def model_refresh_clicked():
modules.config.update_all_model_names()
modules.config.update_presets()
results = []
results += [gr.update(choices=modules.config.model_filenames),
gr.update(choices=['None'] + modules.config.model_filenames)]
def refresh_files_clicked():
modules.config.update_files()
results = [gr.update(choices=modules.config.model_filenames)]
results += [gr.update(choices=['None'] + modules.config.model_filenames)]
if not args_manager.args.disable_preset_selection:
results += [gr.update(choices=modules.config.available_presets)]
for i in range(modules.config.default_max_lora_number):
results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
results += [gr.update(interactive=True),
gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
return results
model_refresh_output = [base_model, refiner_model]
refresh_files_output = [base_model, refiner_model]
if not args_manager.args.disable_preset_selection:
model_refresh_output += [preset_selection]
model_refresh.click(model_refresh_clicked, [], model_refresh_output + lora_ctrls,
refresh_files_output += [preset_selection]
refresh_files.click(refresh_files_clicked, [], refresh_files_output + lora_ctrls,
queue=False, show_progress=False)
with gr.Tab(label='Audio'):
@ -658,10 +659,9 @@ with shared.gradio_root:
.then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
.then(lambda: None, _js='()=>{refresh_style_localization();}')
performance_selection.change(lambda x: [gr.update(interactive=x != 'Extreme Speed')] * 11 +
[gr.update(visible=x != 'Extreme Speed')] * 1 +
[gr.update(interactive=x != 'Extreme Speed', value=x == 'Extreme Speed', )] * 1,
performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 +
[gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 +
[gr.update(interactive=not flags.Performance.has_restricted_features(x), value=flags.Performance.has_restricted_features(x))] * 1,
inputs=performance_selection,
outputs=[
guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive,
@ -711,7 +711,8 @@ with shared.gradio_root:
ctrls = [currentTask, generate_image_grid]
ctrls += [
prompt, negative_prompt, translate_prompts, style_selections,
performance_selection, aspect_ratios_selection, image_number, output_format, image_seed, sharpness, guidance_scale
performance_selection, aspect_ratios_selection, image_number, output_format, image_seed,
read_wildcards_in_order, sharpness, guidance_scale
]
ctrls += [base_model, refiner_model, refiner_switch] + lora_ctrls
@ -733,11 +734,6 @@ with shared.gradio_root:
ctrls += ip_ctrls
if not args_manager.args.disable_metadata:
ctrls += [save_metadata_to_images, metadata_scheme]
ctrls += ip_ctrls
def parse_meta(raw_prompt_txt, is_generating):
loaded_json = None
if is_json(raw_prompt_txt):