Merge branch 'develop' into lora-reference-parsing

commit eebfcac83f
@@ -31,6 +31,9 @@ args_parser.parser.add_argument("--disable-metadata", action='store_true',
args_parser.parser.add_argument("--disable-preset-download", action='store_true',
                                help="Disables downloading models for presets", default=False)
args_parser.parser.add_argument("--enable-describe-uov-image", action='store_true',
                                help="Enables automatic description of uov images when prompt is empty", default=False)
args_parser.parser.add_argument("--always-download-new-model", action='store_true',
                                help="Always download newer models", default=False)
@@ -391,6 +391,6 @@ progress::after {
background-color: #fff8;
font-family: monospace;
text-align: center;
border-radius-top: 5px;
border-radius: 5px 5px 0px 0px;
display: none; /* remove this to enable tooltip in preview image */
}
@@ -54,6 +54,7 @@ Docker specified environments are there. They are used by 'entrypoint.sh'
|CMDARGS|Arguments for [entry_with_update.py](entry_with_update.py) which is called by [entrypoint.sh](entrypoint.sh)|
|config_path|'config.txt' location|
|config_example_path|'config_modification_tutorial.txt' location|
|HF_MIRROR|Hugging Face mirror site domain|

You can also use the same JSON key names and values explained in 'config_modification_tutorial.txt' as environment variables.
See examples in [docker-compose.yml](docker-compose.yml).
@@ -0,0 +1,56 @@
# modified version of https://github.com/AUTOMATIC1111/stable-diffusion-webui-nsfw-censor/blob/master/scripts/censor.py
import numpy as np
import os

from extras.safety_checker.models.safety_checker import StableDiffusionSafetyChecker
from transformers import CLIPFeatureExtractor, CLIPConfig
from PIL import Image
import modules.config

safety_checker_repo_root = os.path.join(os.path.dirname(__file__), 'safety_checker')
config_path = os.path.join(safety_checker_repo_root, "configs", "config.json")
preprocessor_config_path = os.path.join(safety_checker_repo_root, "configs", "preprocessor_config.json")

safety_feature_extractor = None
safety_checker = None


def numpy_to_pil(image):
    image = (image * 255).round().astype("uint8")
    pil_image = Image.fromarray(image)

    return pil_image


# check and replace nsfw content
def check_safety(x_image):
    global safety_feature_extractor, safety_checker

    if safety_feature_extractor is None or safety_checker is None:
        safety_checker_model = modules.config.downloading_safety_checker_model()
        safety_feature_extractor = CLIPFeatureExtractor.from_json_file(preprocessor_config_path)
        clip_config = CLIPConfig.from_json_file(config_path)
        safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_checker_model, config=clip_config)

    safety_checker_input = safety_feature_extractor(numpy_to_pil(x_image), return_tensors="pt")
    x_checked_image, has_nsfw_concept = safety_checker(images=x_image, clip_input=safety_checker_input.pixel_values)

    return x_checked_image, has_nsfw_concept


def censor_single(x):
    x_checked_image, has_nsfw_concept = check_safety(x)

    # replace image with black pixels, keep dimensions
    # workaround due to different numpy / pytorch image matrix format
    if has_nsfw_concept[0]:
        imageshape = x_checked_image.shape
        x_checked_image = np.zeros((imageshape[0], imageshape[1], 3), dtype=np.uint8)

    return x_checked_image


def censor_batch(images):
    images = [censor_single(image) for image in images]

    return images
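A minimal usage sketch of the new censor module, assuming float images in [0, 1] as implied by numpy_to_pil above and that the safety-checker weights can be fetched via modules.config; the random input is purely illustrative:

import numpy as np
from extras.censor import censor_batch, censor_single

# Stand-in for a decoded render; the worker passes real pipeline outputs.
image = np.random.rand(512, 512, 3).astype(np.float32)

checked = censor_single(image)        # returned as a black image if NSFW is detected
checked_list = censor_batch([image])  # same check, applied per image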
@@ -0,0 +1,171 @@
{
  "_name_or_path": "clip-vit-large-patch14/",
  "architectures": [
    "SafetyChecker"
  ],
  "initializer_factor": 1.0,
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "projection_dim": 768,
  "text_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": null,
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "bos_token_id": 0,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 2,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "quick_gelu",
    "hidden_size": 768,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 3072,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-05,
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 77,
    "min_length": 0,
    "model_type": "clip_text_model",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 12,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 12,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": 1,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.21.0.dev0",
    "typical_p": 1.0,
    "use_bfloat16": false,
    "vocab_size": 49408
  },
  "text_config_dict": {
    "hidden_size": 768,
    "intermediate_size": 3072,
    "num_attention_heads": 12,
    "num_hidden_layers": 12
  },
  "torch_dtype": "float32",
  "transformers_version": null,
  "vision_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": null,
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "quick_gelu",
    "hidden_size": 1024,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "image_size": 224,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "intermediate_size": 4096,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-05,
    "length_penalty": 1.0,
    "max_length": 20,
    "min_length": 0,
    "model_type": "clip_vision_model",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 16,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 24,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "patch_size": 14,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.21.0.dev0",
    "typical_p": 1.0,
    "use_bfloat16": false
  },
  "vision_config_dict": {
    "hidden_size": 1024,
    "intermediate_size": 4096,
    "num_attention_heads": 16,
    "num_hidden_layers": 24,
    "patch_size": 14
  }
}
@@ -0,0 +1,20 @@
{
  "crop_size": 224,
  "do_center_crop": true,
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_resize": true,
  "feature_extractor_type": "CLIPFeatureExtractor",
  "image_mean": [
    0.48145466,
    0.4578275,
    0.40821073
  ],
  "image_std": [
    0.26862954,
    0.26130258,
    0.27577711
  ],
  "resample": 3,
  "size": 224
}
@@ -0,0 +1,126 @@
# from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py

# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from transformers.utils import logging

logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        for idx, has_nsfw_concept in enumerate(has_nsfw_concepts):
            if has_nsfw_concept:
                if torch.is_tensor(images) or torch.is_tensor(images[0]):
                    images[idx] = torch.zeros_like(images[idx])  # black image
                else:
                    images[idx] = np.zeros(images[idx].shape)  # black image

        if any(has_nsfw_concepts):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.Tensor, images: torch.Tensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        images[has_nsfw_concepts] = 0.0  # black image

        return images, has_nsfw_concepts
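The vectorized forward_onnx above makes the scoring rule easy to see in isolation: an image is flagged when any cosine similarity exceeds its per-concept threshold (plus the special-care adjustment). A small sketch with made-up numbers:

import torch

cos_dist = torch.tensor([[0.21, 0.05]])  # hypothetical image/concept similarities
thresholds = torch.tensor([0.18, 0.30])  # stand-ins for concept_embeds_weights
adjustment = 0.0                         # raised to 0.01 when special care triggers

concept_scores = cos_dist - thresholds + adjustment
print(torch.any(concept_scores > 0, dim=1))  # tensor([True]) -> image gets blacked out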
@@ -1,69 +1,85 @@
# https://github.com/city96/SD-Latent-Interposer/blob/main/interposer.py

import os
import torch
import safetensors.torch as sf
import torch.nn as nn
import ldm_patched.modules.model_management

import safetensors.torch as sf
import torch
import torch.nn as nn

import ldm_patched.modules.model_management
from ldm_patched.modules.model_patcher import ModelPatcher
from modules.config import path_vae_approx


class Block(nn.Module):
    def __init__(self, size):
class ResBlock(nn.Module):
    """Block with residuals"""

    def __init__(self, ch):
        super().__init__()
        self.join = nn.ReLU()
        self.norm = nn.BatchNorm2d(ch)
        self.long = nn.Sequential(
            nn.Conv2d(size, size, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.1),
            nn.Conv2d(size, size, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.1),
            nn.Conv2d(size, size, kernel_size=3, stride=1, padding=1),
            nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1),
            nn.SiLU(),
            nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1),
            nn.SiLU(),
            nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1),
            nn.Dropout(0.1)
        )

    def forward(self, x):
        y = self.long(x)
        z = self.join(y + x)
        return z
        x = self.norm(x)
        return self.join(self.long(x) + x)


class Interposer(nn.Module):
    def __init__(self):
class ExtractBlock(nn.Module):
    """Increase no. of channels by [out/in]"""

    def __init__(self, ch_in, ch_out):
        super().__init__()
        self.chan = 4
        self.hid = 128

        self.head_join = nn.ReLU()
        self.head_short = nn.Conv2d(self.chan, self.hid, kernel_size=3, stride=1, padding=1)
        self.head_long = nn.Sequential(
            nn.Conv2d(self.chan, self.hid, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.1),
            nn.Conv2d(self.hid, self.hid, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.1),
            nn.Conv2d(self.hid, self.hid, kernel_size=3, stride=1, padding=1),
        )
        self.core = nn.Sequential(
            Block(self.hid),
            Block(self.hid),
            Block(self.hid),
        )
        self.tail = nn.Sequential(
            nn.ReLU(),
            nn.Conv2d(self.hid, self.chan, kernel_size=3, stride=1, padding=1)
        self.join = nn.ReLU()
        self.short = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1)
        self.long = nn.Sequential(
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1),
            nn.SiLU(),
            nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1),
            nn.SiLU(),
            nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1),
            nn.Dropout(0.1)
        )

    def forward(self, x):
        y = self.head_join(
            self.head_long(x) +
            self.head_short(x)
        return self.join(self.long(x) + self.short(x))


class InterposerModel(nn.Module):
    """Main neural network"""

    def __init__(self, ch_in=4, ch_out=4, ch_mid=64, scale=1.0, blocks=12):
        super().__init__()
        self.ch_in = ch_in
        self.ch_out = ch_out
        self.ch_mid = ch_mid
        self.blocks = blocks
        self.scale = scale

        self.head = ExtractBlock(self.ch_in, self.ch_mid)
        self.core = nn.Sequential(
            nn.Upsample(scale_factor=self.scale, mode="nearest"),
            *[ResBlock(self.ch_mid) for _ in range(blocks)],
            nn.BatchNorm2d(self.ch_mid),
            nn.SiLU(),
        )
        self.tail = nn.Conv2d(self.ch_mid, self.ch_out, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        y = self.head(x)
        z = self.core(y)
        return self.tail(z)


vae_approx_model = None
vae_approx_filename = os.path.join(path_vae_approx, 'xl-to-v1_interposer-v3.1.safetensors')
vae_approx_filename = os.path.join(path_vae_approx, 'xl-to-v1_interposer-v4.0.safetensors')


def parse(x):
@@ -72,7 +88,7 @@ def parse(x):
    x_origin = x.clone()

    if vae_approx_model is None:
        model = Interposer()
        model = InterposerModel()
        model.eval()
        sd = sf.load_file(vae_approx_filename)
        model.load_state_dict(sd)
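As a quick sanity check, the new interposer can be run on a random latent; this sketch assumes InterposerModel as defined in the hunk above, and the 4-channel, 1/8-resolution latent shape is an assumption based on the default ch_in/ch_out of 4:

import torch

model = InterposerModel()  # defaults: ch_in=4, ch_out=4, ch_mid=64, scale=1.0, blocks=12
model.eval()

latent = torch.randn(1, 4, 128, 128)  # e.g. a 1024x1024 SDXL latent at 1/8 scale
with torch.no_grad():
    print(model(latent).shape)  # torch.Size([1, 4, 128, 128])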
@@ -122,6 +122,43 @@ document.addEventListener("DOMContentLoaded", function() {
    initStylePreviewOverlay();
});

var onAppend = function(elem, f) {
    var observer = new MutationObserver(function(mutations) {
        mutations.forEach(function(m) {
            if (m.addedNodes.length) {
                f(m.addedNodes);
            }
        });
    });
    observer.observe(elem, {childList: true});
}

function addObserverIfDesiredNodeAvailable(querySelector, callback) {
    var elem = document.querySelector(querySelector);
    if (!elem) {
        window.setTimeout(() => addObserverIfDesiredNodeAvailable(querySelector, callback), 1000);
        return;
    }

    onAppend(elem, callback);
}

/**
 * Show reset button on toast "Connection errored out."
 */
addObserverIfDesiredNodeAvailable(".toast-wrap", function(added) {
    added.forEach(function(element) {
        if (element.innerText.includes("Connection errored out.")) {
            window.setTimeout(function() {
                document.getElementById("reset_button").classList.remove("hidden");
                document.getElementById("generate_button").classList.add("hidden");
                document.getElementById("skip_button").classList.add("hidden");
                document.getElementById("stop_button").classList.add("hidden");
            });
        }
    });
});

/**
 * Add a ctrl+enter as a shortcut to start a generation
 */
@@ -4,6 +4,7 @@
  "Generate": "Generate",
  "Skip": "Skip",
  "Stop": "Stop",
  "Reconnect": "Reconnect",
  "Input Image": "Input Image",
  "Advanced": "Advanced",
  "Upscale or Variation": "Upscale or Variation",
@@ -54,9 +55,12 @@
  "Disable seed increment": "Disable seed increment",
  "Disable automatic seed increment when image number is > 1.": "Disable automatic seed increment when image number is > 1.",
  "Read wildcards in order": "Read wildcards in order",
  "Black Out NSFW": "Black Out NSFW",
  "Use black image if NSFW is detected.": "Use black image if NSFW is detected.",
  "\ud83d\udcda History Log": "\uD83D\uDCDA History Log",
  "Image Style": "Image Style",
  "Fooocus V2": "Fooocus V2",
  "Random Style": "Random Style",
  "Default (Slightly Cinematic)": "Default (Slightly Cinematic)",
  "Fooocus Masterpiece": "Fooocus Masterpiece",
  "Fooocus Photograph": "Fooocus Photograph",
@@ -339,6 +343,8 @@
  "sgm_uniform": "sgm_uniform",
  "simple": "simple",
  "ddim_uniform": "ddim_uniform",
  "VAE": "VAE",
  "Default (model)": "Default (model)",
  "Forced Overwrite of Sampling Step": "Forced Overwrite of Sampling Step",
  "Set as -1 to disable. For developer debugging.": "Set as -1 to disable. For developer debugging.",
  "Forced Overwrite of Refiner Switch Step": "Forced Overwrite of Refiner Switch Step",
@@ -62,8 +62,8 @@ def prepare_environment():
vae_approx_filenames = [
    ('xlvaeapp.pth', 'https://huggingface.co/lllyasviel/misc/resolve/main/xlvaeapp.pth'),
    ('vaeapp_sd15.pth', 'https://huggingface.co/lllyasviel/misc/resolve/main/vaeapp_sd15.pt'),
    ('xl-to-v1_interposer-v3.1.safetensors',
     'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors')
    ('xl-to-v1_interposer-v4.0.safetensors',
     'https://huggingface.co/mashb1t/misc/resolve/main/xl-to-v1_interposer-v4.0.safetensors')
]
@@ -80,6 +80,10 @@ if args.gpu_device_id is not None:
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id)
    print("Set device to:", args.gpu_device_id)

if args.hf_mirror is not None:
    os.environ['HF_MIRROR'] = str(args.hf_mirror)
    print("Set hf_mirror to:", args.hf_mirror)

from modules import config

os.environ['GRADIO_TEMP_DIR'] = config.temp_path
@@ -0,0 +1,55 @@
# https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py

# from: https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html
import numpy as np
import torch


def loglinear_interp(t_steps, num_steps):
    """
    Performs log-linear interpolation of a given array of decreasing numbers.
    """
    xs = np.linspace(0, 1, len(t_steps))
    ys = np.log(t_steps[::-1])

    new_xs = np.linspace(0, 1, num_steps)
    new_ys = np.interp(new_xs, xs, ys)

    interped_ys = np.exp(new_ys)[::-1].copy()
    return interped_ys


NOISE_LEVELS = {"SD1": [14.6146412293, 6.4745760956, 3.8636745985, 2.6946151520, 1.8841921177, 1.3943805092, 0.9642583904, 0.6523686016, 0.3977456272, 0.1515232662, 0.0291671582],
                "SDXL": [14.6146412293, 6.3184485287, 3.7681790315, 2.1811480769, 1.3405244945, 0.8620721141, 0.5550693289, 0.3798540708, 0.2332364134, 0.1114188177, 0.0291671582],
                "SVD": [700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002]}


class AlignYourStepsScheduler:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model_type": (["SD1", "SDXL", "SVD"], ),
                     "steps": ("INT", {"default": 10, "min": 10, "max": 10000}),
                     "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                     }
                }

    RETURN_TYPES = ("SIGMAS",)
    CATEGORY = "sampling/custom_sampling/schedulers"

    FUNCTION = "get_sigmas"

    def get_sigmas(self, model_type, steps, denoise):
        total_steps = steps
        if denoise < 1.0:
            if denoise <= 0.0:
                return (torch.FloatTensor([]),)
            total_steps = round(steps * denoise)

        sigmas = NOISE_LEVELS[model_type][:]
        if (steps + 1) != len(sigmas):
            sigmas = loglinear_interp(sigmas, steps + 1)

        sigmas = sigmas[-(total_steps + 1):]
        sigmas[-1] = 0
        return (torch.FloatTensor(sigmas), )


NODE_CLASS_MAPPINGS = {
    "AlignYourStepsScheduler": AlignYourStepsScheduler,
}
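A short sketch of how get_sigmas stretches the 10-step tables: loglinear_interp resamples the 11 sigma boundaries onto steps + 1 points, preserving the endpoints (the import path is taken from the sample_hijack hunk later in this commit):

from ldm_patched.contrib.external_align_your_steps import NOISE_LEVELS, loglinear_interp

sigmas = NOISE_LEVELS["SDXL"]             # 11 boundaries for the 10-step schedule
sigmas_30 = loglinear_interp(sigmas, 31)  # resampled for a 30-step run
print(len(sigmas_30), round(sigmas_30[0], 4), round(sigmas_30[-1], 4))  # 31, endpoints unchanged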
@@ -37,6 +37,7 @@ parser.add_argument("--listen", type=str, default="127.0.0.1", metavar="IP", nar
parser.add_argument("--port", type=int, default=8188)
parser.add_argument("--disable-header-check", type=str, default=None, metavar="ORIGIN", nargs="?", const="*")
parser.add_argument("--web-upload-size", type=float, default=100)
parser.add_argument("--hf-mirror", type=str, default=None)

parser.add_argument("--external-working-path", type=str, default=None, metavar="PATH", nargs='+', action='append')
parser.add_argument("--output-path", type=str, default=None)
@@ -427,12 +427,13 @@ def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_cl
    return (ldm_patched.modules.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae)

def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True):
def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True, vae_filename_param=None):
    sd = ldm_patched.modules.utils.load_torch_file(ckpt_path)
    sd_keys = sd.keys()
    clip = None
    clipvision = None
    vae = None
    vae_filename = None
    model = None
    model_patcher = None
    clip_target = None
@@ -462,8 +463,12 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
        model.load_model_weights(sd, "model.diffusion_model.")

    if output_vae:
        vae_sd = ldm_patched.modules.utils.state_dict_prefix_replace(sd, {"first_stage_model.": ""}, filter_keys=True)
        vae_sd = model_config.process_vae_state_dict(vae_sd)
        if vae_filename_param is None:
            vae_sd = ldm_patched.modules.utils.state_dict_prefix_replace(sd, {"first_stage_model.": ""}, filter_keys=True)
            vae_sd = model_config.process_vae_state_dict(vae_sd)
        else:
            vae_sd = ldm_patched.modules.utils.load_torch_file(vae_filename_param)
            vae_filename = vae_filename_param
        vae = VAE(sd=vae_sd)

    if output_clip:
@@ -485,7 +490,7 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
            print("loaded straight to GPU")
            model_management.load_model_gpu(model_patcher)

    return (model_patcher, clip, vae, clipvision)
    return model_patcher, clip, vae, vae_filename, clipvision


def load_unet_state_dict(sd): #load unet in diffusers format
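Callers now receive a 5-tuple; a hedged sketch of loading a checkpoint with a separate VAE file (the module path and both file names are assumptions for illustration):

from ldm_patched.modules.sd import load_checkpoint_guess_config  # assumed module path

unet, clip, vae, vae_filename, clip_vision = load_checkpoint_guess_config(
    'models/checkpoints/model.safetensors',                # hypothetical checkpoint
    vae_filename_param='models/vae/sdxl_vae.safetensors',  # overrides the baked-in VAE
)
print(vae_filename)  # None whenever the baked-in VAE was used instead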
@@ -43,7 +43,8 @@ def worker():
    import fooocus_version
    import args_manager

    from modules.sdxl_styles import apply_style, fooocus_expansion, apply_arrays
    from extras.censor import censor_batch, censor_single
    from modules.sdxl_styles import apply_style, get_random_style, apply_wildcards, fooocus_expansion, apply_arrays, random_style_name
    from modules.private_logger import log
    from extras.expansion import safe_str
    from modules.util import (remove_empty_str, HWC3, resize_image, get_image_shape_ceil, set_image_shape_ceil,
@@ -69,10 +70,14 @@ def worker():
        print(f'[Fooocus] {text}')
        async_task.yields.append(['preview', (number, text, None)])

    def yield_result(async_task, imgs, do_not_show_finished_images=False):
    def yield_result(async_task, imgs, black_out_nsfw, censor=True, do_not_show_finished_images=False, progressbar_index=13):
        if not isinstance(imgs, list):
            imgs = [imgs]

        if censor and (modules.config.default_black_out_nsfw or black_out_nsfw):
            progressbar(async_task, progressbar_index, 'Checking for NSFW content ...')
            imgs = censor_batch(imgs)

        async_task.results = async_task.results + imgs

        if do_not_show_finished_images:
@@ -161,12 +166,14 @@ def worker():
        disable_preview = args.pop()
        disable_intermediate_results = args.pop()
        disable_seed_increment = args.pop()
        black_out_nsfw = args.pop()
        adm_scaler_positive = args.pop()
        adm_scaler_negative = args.pop()
        adm_scaler_end = args.pop()
        adaptive_cfg = args.pop()
        sampler_name = args.pop()
        scheduler_name = args.pop()
        vae_name = args.pop()
        overwrite_step = args.pop()
        overwrite_switch = args.pop()
        overwrite_width = args.pop()
@@ -432,7 +439,7 @@ def worker():

        pipeline.refresh_everything(refiner_model_name=refiner_model_name, base_model_name=base_model_name,
                                    loras=loras, base_model_additional_loras=base_model_additional_loras,
                                    use_synthetic_refiner=use_synthetic_refiner)
                                    use_synthetic_refiner=use_synthetic_refiner, vae_name=vae_name)

        progressbar(async_task, 3, 'Processing prompts ...')
        tasks = []
@@ -453,8 +460,12 @@ def worker():
            positive_basic_workloads = []
            negative_basic_workloads = []

            task_styles = style_selections.copy()
            if use_style:
                for s in style_selections:
                for i, s in enumerate(task_styles):
                    if s == random_style_name:
                        s = get_random_style(task_rng)
                        task_styles[i] = s
                    p, n = apply_style(s, positive=task_prompt)
                    positive_basic_workloads = positive_basic_workloads + p
                    negative_basic_workloads = negative_basic_workloads + n
@@ -482,6 +493,7 @@ def worker():
                negative_top_k=len(negative_basic_workloads),
                log_positive_prompt='\n'.join([task_prompt] + task_extra_positive_prompts),
                log_negative_prompt='\n'.join([task_negative_prompt] + task_extra_negative_prompts),
                styles=task_styles
            ))

        if use_expansion:
@@ -576,8 +588,11 @@ def worker():

                if direct_return:
                    d = [('Upscale (Fast)', 'upscale_fast', '2x')]
                    if modules.config.default_black_out_nsfw or black_out_nsfw:
                        progressbar(async_task, 100, 'Checking for NSFW content ...')
                        uov_input_image = censor_single(uov_input_image)
                    uov_input_image_path = log(uov_input_image, d, output_format=output_format)
                    yield_result(async_task, uov_input_image_path, do_not_show_finished_images=True)
                    yield_result(async_task, uov_input_image_path, black_out_nsfw, False, do_not_show_finished_images=True)
                    return

                tiled = True
@@ -641,8 +656,7 @@ def worker():
                )

                if debugging_inpaint_preprocessor:
                    yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing(),
                                 do_not_show_finished_images=True)
                    yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing(), black_out_nsfw, do_not_show_finished_images=True)
                    return

                progressbar(async_task, 13, 'VAE Inpaint encoding ...')
@@ -705,7 +719,7 @@ def worker():
                cn_img = HWC3(cn_img)
                task[0] = core.numpy_to_pytorch(cn_img)
                if debugging_cn_preprocessor:
                    yield_result(async_task, cn_img, do_not_show_finished_images=True)
                    yield_result(async_task, cn_img, black_out_nsfw, do_not_show_finished_images=True)
                    return
            for task in cn_tasks[flags.cn_cpds]:
                cn_img, cn_stop, cn_weight = task
@@ -717,7 +731,7 @@ def worker():
                cn_img = HWC3(cn_img)
                task[0] = core.numpy_to_pytorch(cn_img)
                if debugging_cn_preprocessor:
                    yield_result(async_task, cn_img, do_not_show_finished_images=True)
                    yield_result(async_task, cn_img, black_out_nsfw, do_not_show_finished_images=True)
                    return
            for task in cn_tasks[flags.cn_ip]:
                cn_img, cn_stop, cn_weight = task
@@ -728,7 +742,7 @@ def worker():

                task[0] = ip_adapter.preprocess(cn_img, ip_adapter_path=ip_adapter_path)
                if debugging_cn_preprocessor:
                    yield_result(async_task, cn_img, do_not_show_finished_images=True)
                    yield_result(async_task, cn_img, black_out_nsfw, do_not_show_finished_images=True)
                    return
            for task in cn_tasks[flags.cn_ip_face]:
                cn_img, cn_stop, cn_weight = task
@@ -742,7 +756,7 @@ def worker():

                task[0] = ip_adapter.preprocess(cn_img, ip_adapter_path=ip_adapter_face_path)
                if debugging_cn_preprocessor:
                    yield_result(async_task, cn_img, do_not_show_finished_images=True)
                    yield_result(async_task, cn_img, black_out_nsfw, do_not_show_finished_images=True)
                    return

            all_ip_tasks = cn_tasks[flags.cn_ip] + cn_tasks[flags.cn_ip_face]
@@ -842,11 +856,17 @@ def worker():
                imgs = [inpaint_worker.current_task.post_process(x) for x in imgs]

            img_paths = []

            if modules.config.default_black_out_nsfw or black_out_nsfw:
                progressbar(async_task, int(15.0 + 85.0 * float((current_task_id + 1) * steps) / float(all_steps)),
                            'Checking for NSFW content ...')
                imgs = censor_batch(imgs)

            for x in imgs:
                d = [('Prompt', 'prompt', task['log_positive_prompt']),
                     ('Negative Prompt', 'negative_prompt', task['log_negative_prompt']),
                     ('Fooocus V2 Expansion', 'prompt_expansion', task['expansion']),
                     ('Styles', 'styles', str(raw_style_selections)),
                     ('Styles', 'styles', str(task['styles'] if not use_expansion else [fooocus_expansion] + task['styles'])),
                     ('Performance', 'performance', performance_selection.value)]

                if performance_selection.steps() != steps:
@@ -873,6 +893,7 @@ def worker():

                d.append(('Sampler', 'sampler', sampler_name))
                d.append(('Scheduler', 'scheduler', scheduler_name))
                d.append(('VAE', 'vae', vae_name))
                d.append(('Seed', 'seed', str(task['task_seed'])))

                if freeu_enabled:
@@ -887,12 +908,12 @@ def worker():
                    metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
                    metadata_parser.set_data(task['log_positive_prompt'], task['positive'],
                                             task['log_negative_prompt'], task['negative'],
                                             steps, base_model_name, refiner_model_name, loras)
                                             steps, base_model_name, refiner_model_name, loras, vae_name)
                    d.append(('Metadata Scheme', 'metadata_scheme', metadata_scheme.value if save_metadata_to_images else save_metadata_to_images))
                d.append(('Version', 'version', 'Fooocus v' + fooocus_version.version))
                img_paths.append(log(x, d, metadata_parser, output_format))
                img_paths.append(log(x, d, metadata_parser, output_format, task))

            yield_result(async_task, img_paths, do_not_show_finished_images=len(tasks) == 1 or disable_intermediate_results)
            yield_result(async_task, img_paths, black_out_nsfw, False, do_not_show_finished_images=len(tasks) == 1 or disable_intermediate_results)
        except ldm_patched.modules.model_management.InterruptProcessingException as e:
            if async_task.last_stop == 'skip':
                print('User skipped')
@@ -189,12 +189,14 @@ paths_checkpoints = get_dir_or_set_default('path_checkpoints', ['../models/check
paths_loras = get_dir_or_set_default('path_loras', ['../models/loras/'], True)
path_embeddings = get_dir_or_set_default('path_embeddings', '../models/embeddings/')
path_vae_approx = get_dir_or_set_default('path_vae_approx', '../models/vae_approx/')
path_vae = get_dir_or_set_default('path_vae', '../models/vae/')
path_upscale_models = get_dir_or_set_default('path_upscale_models', '../models/upscale_models/')
path_inpaint = get_dir_or_set_default('path_inpaint', '../models/inpaint/')
path_controlnet = get_dir_or_set_default('path_controlnet', '../models/controlnet/')
path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vision/')
path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion')
path_wildcards = get_dir_or_set_default('path_wildcards', '../wildcards/')
path_safety_checker = get_dir_or_set_default('path_safety_checker', '../models/safety_checker/')
path_outputs = get_path_output()
@@ -346,6 +348,11 @@ default_scheduler = get_config_item_or_set_default(
    default_value='karras',
    validator=lambda x: x in modules.flags.scheduler_list
)
default_vae = get_config_item_or_set_default(
    key='default_vae',
    default_value=modules.flags.default_vae,
    validator=lambda x: isinstance(x, str)
)
default_styles = get_config_item_or_set_default(
    key='default_styles',
    default_value=[
@@ -450,6 +457,11 @@ example_inpaint_prompts = get_config_item_or_set_default(
    ],
    validator=lambda x: isinstance(x, list) and all(isinstance(v, str) for v in x)
)
default_black_out_nsfw = get_config_item_or_set_default(
    key='default_black_out_nsfw',
    default_value=False,
    validator=lambda x: isinstance(x, bool)
)
default_save_metadata_to_images = get_config_item_or_set_default(
    key='default_save_metadata_to_images',
    default_value=False,
@@ -535,6 +547,7 @@ with open(config_example_path, "w", encoding="utf-8") as json_file:

model_filenames = []
lora_filenames = []
vae_filenames = []
wildcard_filenames = []

sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors'
@@ -546,15 +559,20 @@ def get_model_filenames(folder_paths, extensions=None, name_filter=None):
    if extensions is None:
        extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch']
    files = []

    if not isinstance(folder_paths, list):
        folder_paths = [folder_paths]
    for folder in folder_paths:
        files += get_files_from_folder(folder, extensions, name_filter)

    return files


def update_files():
    global model_filenames, lora_filenames, wildcard_filenames, available_presets
    global model_filenames, lora_filenames, vae_filenames, wildcard_filenames, available_presets
    model_filenames = get_model_filenames(paths_checkpoints)
    lora_filenames = get_model_filenames(paths_loras)
    vae_filenames = get_model_filenames(path_vae)
    wildcard_filenames = get_files_from_folder(path_wildcards, ['.txt'])
    available_presets = get_presets()
    return
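Since path_vae is a single folder rather than a list, get_model_filenames now wraps non-list inputs; a small sketch assuming these helpers live in modules.config as the surrounding hunks suggest (the extra folder path is hypothetical):

from modules.config import get_model_filenames

vaes = get_model_filenames('../models/vae/')  # a single folder now works
checkpoints = get_model_filenames(['../models/checkpoints/', '/extra/ckpts/'])  # and so does a list
print(vaes, checkpoints)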
@@ -679,5 +697,13 @@ def downloading_upscale_model():
    )
    return os.path.join(path_upscale_models, 'fooocus_upscaler_s409985e5.bin')

def downloading_safety_checker_model():
    load_file_from_url(
        url='https://huggingface.co/mashb1t/misc/resolve/main/stable-diffusion-safety-checker.bin',
        model_dir=path_safety_checker,
        file_name='stable-diffusion-safety-checker.bin'
    )
    return os.path.join(path_safety_checker, 'stable-diffusion-safety-checker.bin')


update_files()
@@ -35,12 +35,13 @@ opModelSamplingDiscrete = ModelSamplingDiscrete()


class StableDiffusionModel:
    def __init__(self, unet=None, vae=None, clip=None, clip_vision=None, filename=None):
    def __init__(self, unet=None, vae=None, clip=None, clip_vision=None, filename=None, vae_filename=None):
        self.unet = unet
        self.vae = vae
        self.clip = clip
        self.clip_vision = clip_vision
        self.filename = filename
        self.vae_filename = vae_filename
        self.unet_with_lora = unet
        self.clip_with_lora = clip
        self.visited_loras = ''
@@ -142,9 +143,10 @@ def apply_controlnet(positive, negative, control_net, image, strength, start_per

@torch.no_grad()
@torch.inference_mode()
def load_model(ckpt_filename):
    unet, clip, vae, clip_vision = load_checkpoint_guess_config(ckpt_filename, embedding_directory=path_embeddings)
    return StableDiffusionModel(unet=unet, clip=clip, vae=vae, clip_vision=clip_vision, filename=ckpt_filename)
def load_model(ckpt_filename, vae_filename=None):
    unet, clip, vae, vae_filename, clip_vision = load_checkpoint_guess_config(ckpt_filename, embedding_directory=path_embeddings,
                                                                              vae_filename_param=vae_filename)
    return StableDiffusionModel(unet=unet, clip=clip, vae=vae, clip_vision=clip_vision, filename=ckpt_filename, vae_filename=vae_filename)


@torch.no_grad()
@@ -3,6 +3,7 @@ import os
import torch
import modules.patch
import modules.config
import modules.flags
import ldm_patched.modules.model_management
import ldm_patched.modules.latent_formats
import modules.inpaint_worker
@@ -58,17 +59,21 @@ def assert_model_integrity():

@torch.no_grad()
@torch.inference_mode()
def refresh_base_model(name):
def refresh_base_model(name, vae_name=None):
    global model_base

    filename = get_file_from_folder_list(name, modules.config.paths_checkpoints)

    if model_base.filename == filename:
    vae_filename = None
    if vae_name is not None and vae_name != modules.flags.default_vae:
        vae_filename = get_file_from_folder_list(vae_name, modules.config.path_vae)

    if model_base.filename == filename and model_base.vae_filename == vae_filename:
        return

    model_base = core.StableDiffusionModel()
    model_base = core.load_model(filename)
    model_base = core.load_model(filename, vae_filename)
    print(f'Base model loaded: {model_base.filename}')
    print(f'VAE loaded: {model_base.vae_filename}')
    return
@@ -216,7 +221,7 @@ def prepare_text_encoder(async_call=True):
@torch.no_grad()
@torch.inference_mode()
def refresh_everything(refiner_model_name, base_model_name, loras,
                       base_model_additional_loras=None, use_synthetic_refiner=False):
                       base_model_additional_loras=None, use_synthetic_refiner=False, vae_name=None):
    global final_unet, final_clip, final_vae, final_refiner_unet, final_refiner_vae, final_expansion

    final_unet = None
@@ -227,11 +232,11 @@ def refresh_everything(refiner_model_name, base_model_name, loras,

    if use_synthetic_refiner and refiner_model_name == 'None':
        print('Synthetic Refiner Activated')
        refresh_base_model(base_model_name)
        refresh_base_model(base_model_name, vae_name)
        synthesize_refiner_model()
    else:
        refresh_refiner_model(refiner_model_name)
        refresh_base_model(base_model_name)
        refresh_base_model(base_model_name, vae_name)

    refresh_loras(loras, base_model_additional_loras=base_model_additional_loras)
    assert_model_integrity()
@@ -254,7 +259,8 @@ def refresh_everything(refiner_model_name, base_model_name, loras,
refresh_everything(
    refiner_model_name=modules.config.default_refiner_model_name,
    base_model_name=modules.config.default_base_model_name,
    loras=get_enabled_loras(modules.config.default_loras)
    loras=get_enabled_loras(modules.config.default_loras),
    vae_name=modules.config.default_vae,
)
@@ -47,12 +47,14 @@ SAMPLERS = KSAMPLER | SAMPLER_EXTRA

KSAMPLER_NAMES = list(KSAMPLER.keys())

SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo"]
SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo", "align_your_steps"]
SAMPLER_NAMES = KSAMPLER_NAMES + list(SAMPLER_EXTRA.keys())

sampler_list = SAMPLER_NAMES
scheduler_list = SCHEDULER_NAMES

default_vae = 'Default (model)'

refiner_swap_method = 'joint'

cn_ip = "ImagePrompt"
@@ -46,6 +46,7 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
    get_float('refiner_switch', 'Refiner Switch', loaded_parameter_dict, results)
    get_str('sampler', 'Sampler', loaded_parameter_dict, results)
    get_str('scheduler', 'Scheduler', loaded_parameter_dict, results)
    get_str('vae', 'VAE', loaded_parameter_dict, results)
    get_seed('seed', 'Seed', loaded_parameter_dict, results)

    if is_generating:
@@ -253,6 +254,7 @@ class MetadataParser(ABC):
        self.refiner_model_name: str = ''
        self.refiner_model_hash: str = ''
        self.loras: list = []
        self.vae_name: str = ''

    @abstractmethod
    def get_scheme(self) -> MetadataScheme:
@@ -267,7 +269,7 @@ class MetadataParser(ABC):
        raise NotImplementedError

    def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name,
                 refiner_model_name, loras):
                 refiner_model_name, loras, vae_name):
        self.raw_prompt = raw_prompt
        self.full_prompt = full_prompt
        self.raw_negative_prompt = raw_negative_prompt
@@ -289,6 +291,7 @@ class MetadataParser(ABC):
            lora_path = get_file_from_folder_list(lora_name, modules.config.paths_loras)
            lora_hash = get_sha256(lora_path)
            self.loras.append((Path(lora_name).stem, lora_weight, lora_hash))
        self.vae_name = Path(vae_name).stem

    @staticmethod
    def remove_special_loras(lora_filenames):
@@ -310,6 +313,7 @@ class A1111MetadataParser(MetadataParser):
        'steps': 'Steps',
        'sampler': 'Sampler',
        'scheduler': 'Scheduler',
        'vae': 'VAE',
        'guidance_scale': 'CFG scale',
        'seed': 'Seed',
        'resolution': 'Size',
@@ -397,13 +401,12 @@ class A1111MetadataParser(MetadataParser):
                    data['sampler'] = k
                    break

        for key in ['base_model', 'refiner_model']:
        for key in ['base_model', 'refiner_model', 'vae']:
            if key in data:
                for filename in modules.config.model_filenames:
                    path = Path(filename)
                    if data[key] == path.stem:
                        data[key] = filename
                        break
                if key == 'vae':
                    self.add_extension_to_filename(data, modules.config.vae_filenames, 'vae')
                else:
                    self.add_extension_to_filename(data, modules.config.model_filenames, key)

        lora_data = ''
        if 'lora_weights' in data and data['lora_weights'] != '':
@@ -433,6 +436,7 @@ class A1111MetadataParser(MetadataParser):

        sampler = data['sampler']
        scheduler = data['scheduler']

        if sampler in SAMPLERS and SAMPLERS[sampler] != '':
            sampler = SAMPLERS[sampler]
            if sampler not in CIVITAI_NO_KARRAS and scheduler == 'karras':
@@ -451,6 +455,7 @@ class A1111MetadataParser(MetadataParser):

            self.fooocus_to_a1111['performance']: data['performance'],
            self.fooocus_to_a1111['scheduler']: scheduler,
            self.fooocus_to_a1111['vae']: Path(data['vae']).stem,
            # workaround for multiline prompts
            self.fooocus_to_a1111['raw_prompt']: self.raw_prompt,
            self.fooocus_to_a1111['raw_negative_prompt']: self.raw_negative_prompt,
@@ -491,6 +496,14 @@ class A1111MetadataParser(MetadataParser):
        negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else ""
        return f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip()

    @staticmethod
    def add_extension_to_filename(data, filenames, key):
        for filename in filenames:
            path = Path(filename)
            if data[key] == path.stem:
                data[key] = filename
                break


class FooocusMetadataParser(MetadataParser):
    def get_scheme(self) -> MetadataScheme:
@@ -499,6 +512,7 @@ class FooocusMetadataParser(MetadataParser):
    def parse_json(self, metadata: dict) -> dict:
        model_filenames = modules.config.model_filenames.copy()
        lora_filenames = modules.config.lora_filenames.copy()
        vae_filenames = modules.config.vae_filenames.copy()
        self.remove_special_loras(lora_filenames)
        for key, value in metadata.items():
            if value in ['', 'None']:
@@ -507,6 +521,8 @@ class FooocusMetadataParser(MetadataParser):
                metadata[key] = self.replace_value_with_filename(key, value, model_filenames)
            elif key.startswith('lora_combined_'):
                metadata[key] = self.replace_value_with_filename(key, value, lora_filenames)
            elif key == 'vae':
                metadata[key] = self.replace_value_with_filename(key, value, vae_filenames)
            else:
                continue
@@ -533,6 +549,7 @@ class FooocusMetadataParser(MetadataParser):
        res['refiner_model'] = self.refiner_model_name
        res['refiner_model_hash'] = self.refiner_model_hash

        res['vae'] = self.vae_name
        res['loras'] = self.loras

        if modules.config.metadata_created_by != '':
@@ -14,6 +14,8 @@ def load_file_from_url(

    Returns the path to the downloaded file.
    """
    domain = os.environ.get("HF_MIRROR", "https://huggingface.co").rstrip('/')
    url = str.replace(url, "https://huggingface.co", domain, 1)
    os.makedirs(model_dir, exist_ok=True)
    if not file_name:
        parts = urlparse(url)
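The rewrite above only touches the huggingface.co prefix; a standalone sketch of the same logic (the mirror domain is hypothetical):

import os

os.environ['HF_MIRROR'] = 'https://hf-mirror.example.com/'  # hypothetical mirror

url = 'https://huggingface.co/lllyasviel/misc/resolve/main/xlvaeapp.pth'
domain = os.environ.get('HF_MIRROR', 'https://huggingface.co').rstrip('/')
print(url.replace('https://huggingface.co', domain, 1))
# -> https://hf-mirror.example.com/lllyasviel/misc/resolve/main/xlvaeapp.pth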
@@ -21,7 +21,7 @@ def get_current_html_path(output_format=None):
    return html_name


def log(img, metadata, metadata_parser: MetadataParser | None = None, output_format=None) -> str:
def log(img, metadata, metadata_parser: MetadataParser | None = None, output_format=None, task=None) -> str:
    path_outputs = modules.config.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs
    output_format = output_format if output_format else modules.config.default_output_format
    date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format)
@@ -111,9 +111,15 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for
    for label, key, value in metadata:
        value_txt = str(value).replace('\n', ' </br> ')
        item += f"<tr><td class='label'>{label}</td><td class='value'>{value_txt}</td></tr>\n"

    if task is not None and 'positive' in task and 'negative' in task:
        full_prompt_details = f"""<details><summary>Positive</summary>{', '.join(task['positive'])}</details>
        <details><summary>Negative</summary>{', '.join(task['negative'])}</details>"""
        item += f"<tr><td class='label'>Full raw prompt</td><td class='value'>{full_prompt_details}</td></tr>\n"

    item += "</table>"

    js_txt = urllib.parse.quote(json.dumps({k: v for _, k, v in metadata}, indent=0), safe='')
    js_txt = urllib.parse.quote(json.dumps({k: v for _, k, v, in metadata}, indent=0), safe='')
    item += f"</br><button onclick=\"to_clipboard('{js_txt}')\">Copy to Clipboard</button>"

    item += "</td>"
@@ -3,6 +3,7 @@ import ldm_patched.modules.samplers
import ldm_patched.modules.model_management

from collections import namedtuple
from ldm_patched.contrib.external_align_your_steps import AlignYourStepsScheduler
from ldm_patched.contrib.external_custom_sampler import SDTurboScheduler
from ldm_patched.k_diffusion import sampling as k_diffusion_sampling
from ldm_patched.modules.samplers import normal_scheduler, simple_scheduler, ddim_scheduler
@@ -175,6 +176,9 @@ def calculate_sigmas_scheduler_hacked(model, scheduler_name, steps):
        sigmas = normal_scheduler(model, steps, sgm=True)
    elif scheduler_name == "turbo":
        sigmas = SDTurboScheduler().get_sigmas(namedtuple('Patcher', ['model'])(model=model), steps=steps, denoise=1.0)[0]
    elif scheduler_name == "align_your_steps":
        model_type = 'SDXL' if isinstance(model.latent_format, ldm_patched.modules.latent_formats.SDXL) else 'SD1'
        sigmas = AlignYourStepsScheduler().get_sigmas(model_type=model_type, steps=steps, denoise=1.0)[0]
    else:
        raise TypeError("error invalid scheduler")
    return sigmas
@@ -4,6 +4,7 @@ import json
import math

from modules.extra_utils import get_files_from_folder
from random import Random

# cannot use modules.config - validators causing circular imports
styles_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../sdxl_styles/'))
@@ -47,8 +48,13 @@ for styles_file in styles_files:
        print(f'Failed to load style file {styles_file}')

style_keys = list(styles.keys())
fooocus_expansion = "Fooocus V2"
legal_style_names = [fooocus_expansion] + style_keys
fooocus_expansion = 'Fooocus V2'
random_style_name = 'Random Style'
legal_style_names = [fooocus_expansion, random_style_name] + style_keys


def get_random_style(rng: Random) -> str:
    return rng.choice(list(styles.items()))[0]


def apply_style(style, positive):
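Because get_random_style draws from the per-task RNG, 'Random Style' resolves reproducibly for a fixed seed; a sketch of the same selection logic with a stand-in style table (the real one is loaded from ../sdxl_styles/*.json):

from random import Random

styles = {'Fooocus Masterpiece': ('{prompt}, masterpiece', ''),
          'Fooocus Photograph': ('photograph, {prompt}', '')}  # stand-in entries

rng = Random(12345)  # the worker seeds this per task
print(rng.choice(list(styles.items()))[0])  # same pick for the same seed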
@@ -39,7 +39,7 @@ def javascript_html():
    head += f'<script type="text/javascript" src="{edit_attention_js_path}"></script>\n'
    head += f'<script type="text/javascript" src="{viewer_js_path}"></script>\n'
    head += f'<script type="text/javascript" src="{image_viewer_js_path}"></script>\n'
    head += f'<meta name="samples-path" content="{samples_path}"></meta>\n'
    head += f'<meta name="samples-path" content="{samples_path}">\n'

    if args_manager.args.theme:
        head += f'<script type="text/javascript">set_theme(\"{args_manager.args.theme}\");</script>\n'
@@ -362,6 +362,9 @@ def is_json(data: str) -> bool:


def get_file_from_folder_list(name, folders):
    if not isinstance(folders, list):
        folders = [folders]

    for folder in folders:
        filename = os.path.abspath(os.path.realpath(os.path.join(folder, name)))
        if os.path.isfile(filename):
@@ -1,5 +1,5 @@
{
  "default_model": "animaPencilXL_v100.safetensors",
  "default_model": "animaPencilXL_v310.safetensors",
  "default_refiner": "None",
  "default_refiner_switch": 0.5,
  "default_loras": [
@@ -29,7 +29,7 @@
             1.0
         ]
     ],
-    "default_cfg_scale": 7.0,
+    "default_cfg_scale": 6.0,
     "default_sample_sharpness": 2.0,
     "default_sampler": "dpmpp_2m_sde_gpu",
     "default_scheduler": "karras",
@@ -43,9 +43,15 @@
     ],
     "default_aspect_ratio": "896*1152",
     "checkpoint_downloads": {
-        "animaPencilXL_v100.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/animaPencilXL_v100.safetensors"
+        "animaPencilXL_v310.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/animaPencilXL_v310.safetensors"
     },
     "embeddings_downloads": {},
     "lora_downloads": {},
-    "previous_default_models": []
+    "previous_default_models": [
+        "animaPencilXL_v300.safetensors",
+        "animaPencilXL_v260.safetensors",
+        "animaPencilXL_v210.safetensors",
+        "animaPencilXL_v200.safetensors",
+        "animaPencilXL_v100.safetensors"
+    ]
 }
@@ -368,6 +368,7 @@ A safer way is just to try "run_anime.bat" or "run_realistic.bat" - they should
 entry_with_update.py [-h] [--listen [IP]] [--port PORT]
                      [--disable-header-check [ORIGIN]]
                      [--web-upload-size WEB_UPLOAD_SIZE]
+                     [--hf-mirror HF_MIRROR]
                      [--external-working-path PATH [PATH ...]]
                      [--output-path OUTPUT_PATH] [--temp-path TEMP_PATH]
                      [--cache-path CACHE_PATH] [--in-browser]
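A hedged invocation example; the mirror domain below is an illustrative value, not one prescribed by the project:

    python entry_with_update.py --hf-mirror https://hf-mirror.com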
|
|||
Binary file not shown.
|
After Width: | Height: | Size: 1.4 KiB |
45
webui.py
45
webui.py
|
|
@@ -123,8 +123,9 @@ with shared.gradio_root:
             with gr.Column(scale=3, min_width=0):
                 generate_button = gr.Button(label="Generate", value="Generate", elem_classes='type_row', elem_id='generate_button', visible=True)
                 reset_button = gr.Button(label="Reconnect", value="Reconnect", elem_classes='type_row', elem_id='reset_button', visible=False)
                 load_parameter_button = gr.Button(label="Load Parameters", value="Load Parameters", elem_classes='type_row', elem_id='load_parameter_button', visible=False)
-                skip_button = gr.Button(label="Skip", value="Skip", elem_classes='type_row_half', visible=False)
+                skip_button = gr.Button(label="Skip", value="Skip", elem_classes='type_row_half', elem_id='skip_button', visible=False)
                 stop_button = gr.Button(label="Stop", value="Stop", elem_classes='type_row_half', elem_id='stop_button', visible=False)

         def stop_clicked(currentTask):
@@ -406,6 +407,8 @@ with shared.gradio_root:
                                            value=modules.config.default_sampler)
                 scheduler_name = gr.Dropdown(label='Scheduler', choices=flags.scheduler_list,
                                              value=modules.config.default_scheduler)
+                vae_name = gr.Dropdown(label='VAE', choices=[modules.flags.default_vae] + modules.config.vae_filenames,
+                                       value=modules.config.default_vae, show_label=True)

                 generate_image_grid = gr.Checkbox(label='Generate Image Grid for Each Batch',
                                                   info='(Experimental) This may cause performance problems on some computers and certain internet conditions.',
@@ -433,7 +436,8 @@ with shared.gradio_root:
                 overwrite_upscale_strength = gr.Slider(label='Forced Overwrite of Denoising Strength of "Upscale"',
                                                        minimum=-1, maximum=1.0, step=0.001, value=-1,
                                                        info='Set as negative number to disable. For developer debugging.')
-                disable_preview = gr.Checkbox(label='Disable Preview', value=False,
+                disable_preview = gr.Checkbox(label='Disable Preview', value=modules.config.default_black_out_nsfw,
+                                              interactive=not modules.config.default_black_out_nsfw,
                                               info='Disable preview during generation.')
                 disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
                                                            value=modules.config.default_performance == flags.Performance.EXTREME_SPEED.value,
@@ -444,6 +448,15 @@ with shared.gradio_root:
                                                      value=False)
                 read_wildcards_in_order = gr.Checkbox(label="Read wildcards in order", value=False)

+                black_out_nsfw = gr.Checkbox(label='Black Out NSFW',
+                                             value=modules.config.default_black_out_nsfw,
+                                             interactive=not modules.config.default_black_out_nsfw,
+                                             info='Use black image if NSFW is detected.')
+
+                black_out_nsfw.change(lambda x: gr.update(value=x, interactive=not x),
+                                      inputs=black_out_nsfw, outputs=disable_preview, queue=False,
+                                      show_progress=False)
+
                 if not args_manager.args.disable_metadata:
                     save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images,
                                                           info='Adds parameters to generated images allowing manual regeneration.')
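The change handler creates a one-way coupling: ticking Black Out NSFW also ticks Disable Preview and locks it, and unticking releases it. A minimal standalone sketch of the same pattern in plain Gradio (illustrative, not the repo's code):

    import gradio as gr

    with gr.Blocks() as demo:
        nsfw = gr.Checkbox(label='Black Out NSFW')
        preview = gr.Checkbox(label='Disable Preview')
        # checking nsfw forces preview on and makes it read-only; unchecking frees it
        nsfw.change(lambda x: gr.update(value=x, interactive=not x),
                    inputs=nsfw, outputs=preview, queue=False)

    demo.launch()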
@@ -528,6 +541,7 @@ with shared.gradio_root:
             modules.config.update_files()
             results = [gr.update(choices=modules.config.model_filenames)]
             results += [gr.update(choices=['None'] + modules.config.model_filenames)]
+            results += [gr.update(choices=['None'] + modules.config.vae_filenames)]
             if not args_manager.args.disable_preset_selection:
                 results += [gr.update(choices=modules.config.available_presets)]
             for i in range(modules.config.default_max_lora_number):
@@ -535,7 +549,7 @@ with shared.gradio_root:
                 gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
             return results

-        refresh_files_output = [base_model, refiner_model]
+        refresh_files_output = [base_model, refiner_model, vae_name]
         if not args_manager.args.disable_preset_selection:
             refresh_files_output += [preset_selection]
         refresh_files.click(refresh_files_clicked, [], refresh_files_output + lora_ctrls,
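Note the positional contract: the VAE update is appended third in results (previous hunk), matching vae_name as the third component of refresh_files_output here; Gradio maps returned updates to outputs by position, so the two orderings must stay in lockstep.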
@@ -547,8 +561,8 @@ with shared.gradio_root:
             performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection,
             overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive,
             adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, base_model,
-            refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, image_seed,
-            generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls
+            refiner_model, refiner_switch, sampler_name, scheduler_name, vae_name, seed_random,
+            image_seed, generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls

         if not args_manager.args.disable_preset_selection:
             def preset_selection_change(preset, is_generating):
@@ -632,9 +646,9 @@ with shared.gradio_root:
         ctrls += [input_image_checkbox, current_tab]
         ctrls += [uov_method, uov_input_image]
         ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt, inpaint_mask_image]
-        ctrls += [disable_preview, disable_intermediate_results, disable_seed_increment]
+        ctrls += [disable_preview, disable_intermediate_results, disable_seed_increment, black_out_nsfw]
         ctrls += [adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg]
-        ctrls += [sampler_name, scheduler_name]
+        ctrls += [sampler_name, scheduler_name, vae_name]
         ctrls += [overwrite_step, overwrite_switch, overwrite_width, overwrite_height, overwrite_vary_strength]
         ctrls += [overwrite_upscale_strength, mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint]
         ctrls += [debugging_cn_preprocessor, skipping_cn_preprocessor, canny_low_threshold, canny_high_threshold]
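Since ctrls feeds the generation task positionally, the two insertions (black_out_nsfw and vae_name) must be mirrored at the matching offsets where the worker unpacks its arguments; a mismatch would silently shift every later parameter. In Fooocus that unpacking plausibly lives in modules/async_worker.py, though its hunk is not shown here.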
@@ -688,6 +702,14 @@ with shared.gradio_root:
             .then(fn=update_history_link, outputs=history_link) \
             .then(fn=lambda: None, _js='playNotification').then(fn=lambda: None, _js='refresh_grid_delayed')

+        reset_button.click(lambda: [worker.AsyncTask(args=[]), False, gr.update(visible=True, interactive=True)] +
+                                   [gr.update(visible=False)] * 6 +
+                                   [gr.update(visible=True, value=[])],
+                           outputs=[currentTask, state_is_generating, generate_button,
+                                    reset_button, stop_button, skip_button,
+                                    progress_html, progress_window, progress_gallery, gallery],
+                           queue=False)
+
         for notification_file in ['notification.ogg', 'notification.mp3']:
             if os.path.exists(notification_file):
                 gr.Audio(interactive=False, value=notification_file, elem_id='audio_notification', visible=False)
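The lambda's return list lines up with the ten outputs by position: a fresh empty AsyncTask for currentTask, False for state_is_generating, a visible re-enabled Generate button, six hidden components (the Reconnect, Stop and Skip buttons plus the three progress views), and finally a visible, emptied gallery.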
@@ -705,6 +727,15 @@ with shared.gradio_root:
         desc_btn.click(trigger_describe, inputs=[desc_method, desc_input_image],
                        outputs=[prompt, style_selections], show_progress=True, queue=True)

+        if args_manager.args.enable_describe_uov_image:
+            def trigger_uov_describe(mode, img, prompt):
+                # keep prompt if not empty
+                if prompt == '':
+                    return trigger_describe(mode, img)
+                return gr.update(), gr.update()
+
+            uov_input_image.upload(trigger_uov_describe, inputs=[desc_method, uov_input_image, prompt],
+                                   outputs=[prompt, style_selections], show_progress=True, queue=True)
+
     def dump_default_english_config():
         from modules.localization import dump_english_config
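A hedged usage sketch: launched with the opt-in flag below, dropping an image onto the Upscale or Variation input auto-fills an empty prompt through the existing describe pipeline, while a non-empty prompt is left untouched (the pair of bare gr.update() calls is a no-op for both outputs):

    python entry_with_update.py --enable-describe-uov-image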