Sync branch 'mashb1t_main' with develop_upstream

Manuel Schmid 2024-07-14 20:14:10 +02:00
commit f2a21900c6
No known key found for this signature in database
GPG Key ID: 32C4F7569B40B84B
40 changed files with 2815 additions and 904 deletions

.github/ISSUE_TEMPLATE/bug_report.yaml vendored Normal file

@ -0,0 +1,77 @@
name: Bug Report
description: Describe a problem
title: "[Bug]: "
labels: ["bug", "triage"]
body:
- type: markdown
attributes:
value: |
Thank you for taking the time to fill out this bug report form!
- type: checkboxes
id: prerequisites
attributes:
label: Prerequisites
description: Please make sure to troubleshoot yourself before continuing.
options:
- label: I have read the [Troubleshooting Guide](https://github.com/lllyasviel/Fooocus/blob/main/troubleshoot.md)
required: true
- label: I have checked that this is not a duplicate of an already existing [issue](https://github.com/lllyasviel/Fooocus/issues)
required: true
- type: textarea
id: description
attributes:
label: Describe the problem
description: Also tell us, what did you expect to happen?
placeholder: "A clear and concise description of what the bug is."
validations:
required: true
- type: textarea
id: logs
attributes:
label: Full console log output
description: Please copy and paste the **full** console log here. You will make our job easier if you give a **full** log. This will be automatically formatted into code, so no need for backticks.
render: shell
validations:
required: true
- type: textarea
id: version
attributes:
label: Version
description: What version of Fooocus are you using? (see browser tab title or console log)
placeholder: "Example: Fooocus 2.1.855"
validations:
required: true
- type: dropdown
id: hosting
attributes:
label: Where are you running Fooocus?
multiple: false
options:
- Locally
- Locally with virtualisation (e.g. Docker)
- Cloud (Gradio)
- Cloud (other)
validations:
required: true
- type: input
id: operating-system
attributes:
label: Operating System
description: What operating system are you using?
placeholder: "Example: Windows 10"
- type: dropdown
id: browsers
attributes:
label: What browsers are you seeing the problem on?
multiple: true
options:
- Chrome
- Firefox
- Microsoft Edge
- Safari
- other
validations:
required: true
- type: markdown
attributes:
value: "Thank you for completing our form!"


@ -0,0 +1,34 @@
name: Feature request
description: Suggest an idea for this project
title: "[Feature]: "
labels: ["enhancement"]
body:
- type: markdown
attributes:
value: |
Thank you for taking the time to fill out this feature request form!
- type: checkboxes
id: prerequisites
attributes:
label: Prerequisites
options:
- label: I have checked that this is not a duplicate of an already existing [feature request](https://github.com/lllyasviel/Fooocus/issues)
required: true
- type: textarea
id: relation-to-problem
attributes:
label: Is your feature request related to a problem? Please describe.
placeholder: "A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]."
validations:
required: true
- type: textarea
id: description
attributes:
label: Describe the idea you'd like
placeholder: "A clear and concise description of what you want to happen."
validations:
required: true
- type: markdown
attributes:
value: "Thank you for completing our form!"


@ -38,7 +38,7 @@ jobs:
type=edge,branch=main
- name: Build and push Docker image
uses: docker/build-push-action@v5
uses: docker/build-push-action@v6
with:
context: .
file: ./Dockerfile

.gitignore vendored

@ -10,6 +10,7 @@ __pycache__
*.partial
*.onnx
sorted_styles.json
hash_cache.txt
/input
/cache
/language/default.json


@ -28,11 +28,14 @@ args_parser.parser.add_argument("--disable-metadata", action='store_true',
args_parser.parser.add_argument("--disable-preset-download", action='store_true',
help="Disables downloading models for presets", default=False)
args_parser.parser.add_argument("--enable-describe-uov-image", action='store_true',
help="Disables automatic description of uov images when prompt is empty", default=False)
args_parser.parser.add_argument("--enable-auto-describe-image", action='store_true',
help="Enables automatic description of uov and enhance image when prompt is empty", default=False)
args_parser.parser.add_argument("--always-download-new-model", action='store_true',
help="Always download newer models ", default=False)
help="Always download newer models", default=False)
args_parser.parser.add_argument("--rebuild-hash-cache", help="Generates missing model and LoRA hashes.",
type=int, nargs="?", metavar="CPU_NUM_THREADS", const=-1)
args_parser.parser.set_defaults(
disable_cuda_malloc=True,

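As an aside on the new flag above: with nargs="?" and const=-1, --rebuild-hash-cache may be passed bare or with a thread count. A standalone sketch of the resulting argparse behavior:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--rebuild-hash-cache", type=int, nargs="?",
                    metavar="CPU_NUM_THREADS", const=-1)

print(parser.parse_args([]).rebuild_hash_cache)                             # None -> rebuild disabled
print(parser.parse_args(["--rebuild-hash-cache"]).rebuild_hash_cache)       # -1   -> use all CPU cores
print(parser.parse_args(["--rebuild-hash-cache", "4"]).rebuild_hash_cache)  # 4    -> four worker threads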

@ -99,7 +99,7 @@ div:has(> #positive_prompt) {
}
.advanced_check_row {
width: 250px !important;
width: 330px !important;
}
.min_check {


@ -0,0 +1,24 @@
# https://github.com/sail-sg/EditAnything/blob/main/sam2groundingdino_edit.py
import numpy as np
from PIL import Image
from extras.inpaint_mask import SAMOptions, generate_mask_from_image
original_image = Image.open('cat.webp')
image = np.array(original_image, dtype=np.uint8)
sam_options = SAMOptions(
dino_prompt='eye',
dino_box_threshold=0.3,
dino_text_threshold=0.25,
dino_erode_or_dilate=0,
dino_debug=False,
max_detections=2,
model_type='vit_b'
)
mask_image, _, _, _ = generate_mask_from_image(image, sam_options=sam_options)
merged_masks_img = Image.fromarray(mask_image)
merged_masks_img.show()


@ -0,0 +1,43 @@
batch_size = 1
modelname = "groundingdino"
backbone = "swin_T_224_1k"
position_embedding = "sine"
pe_temperatureH = 20
pe_temperatureW = 20
return_interm_indices = [1, 2, 3]
backbone_freeze_keywords = None
enc_layers = 6
dec_layers = 6
pre_norm = False
dim_feedforward = 2048
hidden_dim = 256
dropout = 0.0
nheads = 8
num_queries = 900
query_dim = 4
num_patterns = 0
num_feature_levels = 4
enc_n_points = 4
dec_n_points = 4
two_stage_type = "standard"
two_stage_bbox_embed_share = False
two_stage_class_embed_share = False
transformer_activation = "relu"
dec_pred_bbox_embed_share = True
dn_box_noise_scale = 1.0
dn_label_noise_ratio = 0.5
dn_label_coef = 1.0
dn_bbox_coef = 1.0
embed_init_tgt = True
dn_labelbook_size = 2000
max_text_len = 256
text_encoder_type = "bert-base-uncased"
use_text_enhancer = True
use_fusion_layer = True
use_checkpoint = True
use_transformer_ckpt = True
use_text_cross_attention = True
text_dropout = 0.0
fusion_dropout = 0.0
fusion_droppath = 0.1
sub_sentence_present = True
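For orientation, GroundingDINO reads this config file through its SLConfig helper (the same loader used by load_model in the inference wrapper below); a minimal sketch:

from groundingdino.util.slconfig import SLConfig

cfg = SLConfig.fromfile('extras/GroundingDINO/config/GroundingDINO_SwinT_OGC.py')
print(cfg.modelname, cfg.backbone, cfg.hidden_dim)  # groundingdino swin_T_224_1k 256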


@ -0,0 +1,100 @@
from typing import Tuple, List
import ldm_patched.modules.model_management as model_management
from ldm_patched.modules.model_patcher import ModelPatcher
from modules.config import path_inpaint
from modules.model_loader import load_file_from_url
import numpy as np
import supervision as sv
import torch
from groundingdino.util.inference import Model
from groundingdino.util.inference import load_model, preprocess_caption, get_phrases_from_posmap
class GroundingDinoModel(Model):
def __init__(self):
self.config_file = 'extras/GroundingDINO/config/GroundingDINO_SwinT_OGC.py'
self.model = None
self.load_device = torch.device('cpu')
self.offload_device = torch.device('cpu')
@torch.no_grad()
@torch.inference_mode()
def predict_with_caption(
self,
image: np.ndarray,
caption: str,
box_threshold: float = 0.35,
text_threshold: float = 0.25
) -> Tuple[sv.Detections, torch.Tensor, torch.Tensor, List[str]]:
if self.model is None:
filename = load_file_from_url(
url="https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth",
file_name='groundingdino_swint_ogc.pth',
model_dir=path_inpaint)
model = load_model(model_config_path=self.config_file, model_checkpoint_path=filename)
self.load_device = model_management.text_encoder_device()
self.offload_device = model_management.text_encoder_offload_device()
model.to(self.offload_device)
self.model = ModelPatcher(model, load_device=self.load_device, offload_device=self.offload_device)
model_management.load_model_gpu(self.model)
processed_image = GroundingDinoModel.preprocess_image(image_bgr=image).to(self.load_device)
boxes, logits, phrases = predict(
model=self.model,
image=processed_image,
caption=caption,
box_threshold=box_threshold,
text_threshold=text_threshold,
device=self.load_device)
source_h, source_w, _ = image.shape
detections = GroundingDinoModel.post_process_result(
source_h=source_h,
source_w=source_w,
boxes=boxes,
logits=logits)
return detections, boxes, logits, phrases
def predict(
model,
image: torch.Tensor,
caption: str,
box_threshold: float,
text_threshold: float,
device: str = "cuda"
) -> Tuple[torch.Tensor, torch.Tensor, List[str]]:
caption = preprocess_caption(caption=caption)
# override to use model wrapped by patcher
model = model.model.to(device)
image = image.to(device)
with torch.no_grad():
outputs = model(image[None], captions=[caption])
prediction_logits = outputs["pred_logits"].cpu().sigmoid()[0] # prediction_logits.shape = (nq, 256)
prediction_boxes = outputs["pred_boxes"].cpu()[0] # prediction_boxes.shape = (nq, 4)
mask = prediction_logits.max(dim=1)[0] > box_threshold
logits = prediction_logits[mask] # logits.shape = (n, 256)
boxes = prediction_boxes[mask] # boxes.shape = (n, 4)
tokenizer = model.tokenizer
tokenized = tokenizer(caption)
phrases = [
get_phrases_from_posmap(logit > text_threshold, tokenized, tokenizer).replace('.', '')
for logit
in logits
]
return boxes, logits.max(dim=1)[0], phrases
default_groundingdino = GroundingDinoModel().predict_with_caption
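A hedged usage sketch of the exported default_groundingdino entry point (the image path is a placeholder):

import numpy as np
from PIL import Image
from extras.GroundingDINO.util.inference import default_groundingdino

image = np.array(Image.open('photo.jpg'), dtype=np.uint8)  # hypothetical input image
detections, boxes, logits, phrases = default_groundingdino(
    image=image,
    caption='face',
    box_threshold=0.35,
    text_threshold=0.25
)
print(phrases)      # e.g. ['face']
print(boxes.shape)  # (n, 4) normalized cxcywh boxes; callers rescale them to pixels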


@ -41,7 +41,7 @@ class Censor:
model_management.load_model_gpu(self.safety_checker_model)
single = False
if not isinstance(images, list) or isinstance(images, np.ndarray):
if not isinstance(images, (list, np.ndarray)):
images = [images]
single = True

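The predicate change matters for numpy batches; a tiny sketch of old versus new behavior:

import numpy as np

batch = np.zeros((2, 64, 64, 3), dtype=np.uint8)
old = not isinstance(batch, list) or isinstance(batch, np.ndarray)  # True  -> the ndarray was wrapped in a list again
new = not isinstance(batch, (list, np.ndarray))                     # False -> the ndarray is treated as a batch
print(old, new)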
extras/inpaint_mask.py Normal file

@ -0,0 +1,130 @@
import sys
import modules.config
import numpy as np
import torch
from extras.GroundingDINO.util.inference import default_groundingdino
from extras.sam.predictor import SamPredictor
from rembg import remove, new_session
from segment_anything import sam_model_registry
from segment_anything.utils.amg import remove_small_regions
class SAMOptions:
def __init__(self,
# GroundingDINO
dino_prompt: str = '',
dino_box_threshold=0.3,
dino_text_threshold=0.25,
dino_erode_or_dilate=0,
dino_debug=False,
# SAM
max_detections=2,
model_type='vit_b'
):
self.dino_prompt = dino_prompt
self.dino_box_threshold = dino_box_threshold
self.dino_text_threshold = dino_text_threshold
self.dino_erode_or_dilate = dino_erode_or_dilate
self.dino_debug = dino_debug
self.max_detections = max_detections
self.model_type = model_type
def optimize_masks(masks: torch.Tensor) -> torch.Tensor:
"""
removes small disconnected regions and holes
"""
fine_masks = []
for mask in masks.to('cpu').numpy(): # masks: [num_masks, 1, h, w]
fine_masks.append(remove_small_regions(mask[0], 400, mode="holes")[0])
masks = np.stack(fine_masks, axis=0)[:, np.newaxis]
return torch.from_numpy(masks)
def generate_mask_from_image(image: np.ndarray, mask_model: str = 'sam', extras=None,
sam_options: SAMOptions | None = SAMOptions()) -> tuple[np.ndarray | None, int | None, int | None, int | None]:
dino_detection_count = 0
sam_detection_count = 0
sam_detection_on_mask_count = 0
if image is None:
return None, dino_detection_count, sam_detection_count, sam_detection_on_mask_count
if extras is None:
extras = {}
if 'image' in image:
image = image['image']
if mask_model != 'sam' or sam_options is None:
result = remove(
image,
session=new_session(mask_model, **extras),
only_mask=True,
**extras
)
return result, dino_detection_count, sam_detection_count, sam_detection_on_mask_count
detections, boxes, logits, phrases = default_groundingdino(
image=image,
caption=sam_options.dino_prompt,
box_threshold=sam_options.dino_box_threshold,
text_threshold=sam_options.dino_text_threshold
)
H, W = image.shape[0], image.shape[1]
boxes = boxes * torch.Tensor([W, H, W, H])
boxes[:, :2] = boxes[:, :2] - boxes[:, 2:] / 2
boxes[:, 2:] = boxes[:, 2:] + boxes[:, :2]
sam_checkpoint = modules.config.download_sam_model(sam_options.model_type)
sam = sam_model_registry[sam_options.model_type](checkpoint=sam_checkpoint)
sam_predictor = SamPredictor(sam)
final_mask_tensor = torch.zeros((image.shape[0], image.shape[1]))
dino_detection_count = boxes.size(0)
if dino_detection_count > 0:
sam_predictor.set_image(image)
if sam_options.dino_erode_or_dilate != 0:
for index in range(boxes.size(0)):
assert boxes.size(1) == 4
boxes[index][0] -= sam_options.dino_erode_or_dilate
boxes[index][1] -= sam_options.dino_erode_or_dilate
boxes[index][2] += sam_options.dino_erode_or_dilate
boxes[index][3] += sam_options.dino_erode_or_dilate
if sam_options.dino_debug:
from PIL import ImageDraw, Image
debug_dino_image = Image.new("RGB", (image.shape[1], image.shape[0]), color="black")
draw = ImageDraw.Draw(debug_dino_image)
for box in boxes.numpy():
draw.rectangle(box.tolist(), fill="white")
return np.array(debug_dino_image), dino_detection_count, sam_detection_count, sam_detection_on_mask_count
transformed_boxes = sam_predictor.transform.apply_boxes_torch(boxes, image.shape[:2])
masks, _, _ = sam_predictor.predict_torch(
point_coords=None,
point_labels=None,
boxes=transformed_boxes,
multimask_output=False,
)
masks = optimize_masks(masks)
sam_detection_count = len(masks)
if sam_options.max_detections == 0:
sam_options.max_detections = sys.maxsize
sam_objects = min(len(logits), sam_options.max_detections)
for obj_ind in range(sam_objects):
mask_tensor = masks[obj_ind][0]
final_mask_tensor += mask_tensor
sam_detection_on_mask_count += 1
final_mask_tensor = (final_mask_tensor > 0).to('cpu').numpy()
mask_image = np.dstack((final_mask_tensor, final_mask_tensor, final_mask_tensor)) * 255
mask_image = np.array(mask_image, dtype=np.uint8)
return mask_image, dino_detection_count, sam_detection_count, sam_detection_on_mask_count
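Complementing the SAM sample earlier in this commit, a sketch of the non-SAM branch, which hands the work to rembg (any model from inpaint_mask_models except 'sam'):

import numpy as np
from PIL import Image
from extras.inpaint_mask import generate_mask_from_image

image = np.array(Image.open('cat.webp'), dtype=np.uint8)  # same test image as the sample above
mask, dino_count, sam_count, sam_on_mask_count = generate_mask_from_image(
    image, mask_model='u2net', sam_options=None)
Image.fromarray(mask).show()  # rembg returns the mask directly; all three counters stay 0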

extras/sam/predictor.py Normal file

@ -0,0 +1,288 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from ldm_patched.modules import model_management
from ldm_patched.modules.model_patcher import ModelPatcher
from segment_anything.modeling import Sam
from typing import Optional, Tuple
from segment_anything.utils.transforms import ResizeLongestSide
class SamPredictor:
def __init__(
self,
model: Sam,
load_device=model_management.text_encoder_device(),
offload_device=model_management.text_encoder_offload_device()
) -> None:
"""
Uses SAM to calculate the image embedding for an image, and then
allow repeated, efficient mask prediction given prompts.
Arguments:
model (Sam): The model to use for mask prediction.
"""
super().__init__()
self.load_device = load_device
self.offload_device = offload_device
# can't use model.half() here as slow_conv2d_cpu is not implemented for half
model.to(self.offload_device)
self.patcher = ModelPatcher(model, load_device=self.load_device, offload_device=self.offload_device)
self.transform = ResizeLongestSide(model.image_encoder.img_size)
self.reset_image()
def set_image(
self,
image: np.ndarray,
image_format: str = "RGB",
) -> None:
"""
Calculates the image embeddings for the provided image, allowing
masks to be predicted with the 'predict' method.
Arguments:
image (np.ndarray): The image for calculating masks. Expects an
image in HWC uint8 format, with pixel values in [0, 255].
image_format (str): The color format of the image, in ['RGB', 'BGR'].
"""
assert image_format in [
"RGB",
"BGR",
], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
if image_format != self.patcher.model.image_format:
image = image[..., ::-1]
# Transform the image to the form expected by the model
input_image = self.transform.apply_image(image)
input_image_torch = torch.as_tensor(input_image, device=self.load_device)
input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
self.set_torch_image(input_image_torch, image.shape[:2])
@torch.no_grad()
def set_torch_image(
self,
transformed_image: torch.Tensor,
original_image_size: Tuple[int, ...],
) -> None:
"""
Calculates the image embeddings for the provided image, allowing
masks to be predicted with the 'predict' method. Expects the input
image to be already transformed to the format expected by the model.
Arguments:
transformed_image (torch.Tensor): The input image, with shape
1x3xHxW, which has been transformed with ResizeLongestSide.
original_image_size (tuple(int, int)): The size of the image
before transformation, in (H, W) format.
"""
assert (
len(transformed_image.shape) == 4
and transformed_image.shape[1] == 3
and max(*transformed_image.shape[2:]) == self.patcher.model.image_encoder.img_size
), f"set_torch_image input must be BCHW with long side {self.patcher.model.image_encoder.img_size}."
self.reset_image()
self.original_size = original_image_size
self.input_size = tuple(transformed_image.shape[-2:])
model_management.load_model_gpu(self.patcher)
input_image = self.patcher.model.preprocess(transformed_image.to(self.load_device))
self.features = self.patcher.model.image_encoder(input_image)
self.is_image_set = True
def predict(
self,
point_coords: Optional[np.ndarray] = None,
point_labels: Optional[np.ndarray] = None,
box: Optional[np.ndarray] = None,
mask_input: Optional[np.ndarray] = None,
multimask_output: bool = True,
return_logits: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Predict masks for the given input prompts, using the currently set image.
Arguments:
point_coords (np.ndarray or None): A Nx2 array of point prompts to the
model. Each point is in (X,Y) in pixels.
point_labels (np.ndarray or None): A length N array of labels for the
point prompts. 1 indicates a foreground point and 0 indicates a
background point.
box (np.ndarray or None): A length 4 array given a box prompt to the
model, in XYXY format.
mask_input (np.ndarray): A low resolution mask input to the model, typically
coming from a previous prediction iteration. Has form 1xHxW, where
for SAM, H=W=256.
multimask_output (bool): If true, the model will return three masks.
For ambiguous input prompts (such as a single click), this will often
produce better masks than a single prediction. If only a single
mask is needed, the model's predicted quality score can be used
to select the best mask. For non-ambiguous prompts, such as multiple
input prompts, multimask_output=False can give better results.
return_logits (bool): If true, returns un-thresholded masks logits
instead of a binary mask.
Returns:
(np.ndarray): The output masks in CxHxW format, where C is the
number of masks, and (H, W) is the original image size.
(np.ndarray): An array of length C containing the model's
predictions for the quality of each mask.
(np.ndarray): An array of shape CxHxW, where C is the number
of masks and H=W=256. These low resolution logits can be passed to
a subsequent iteration as mask input.
"""
if not self.is_image_set:
raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
# Transform input prompts
coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None
if point_coords is not None:
assert (
point_labels is not None
), "point_labels must be supplied if point_coords is supplied."
point_coords = self.transform.apply_coords(point_coords, self.original_size)
coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.load_device)
labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.load_device)
coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]
if box is not None:
box = self.transform.apply_boxes(box, self.original_size)
box_torch = torch.as_tensor(box, dtype=torch.float, device=self.load_device)
box_torch = box_torch[None, :]
if mask_input is not None:
mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.load_device)
mask_input_torch = mask_input_torch[None, :, :, :]
masks, iou_predictions, low_res_masks = self.predict_torch(
coords_torch,
labels_torch,
box_torch,
mask_input_torch,
multimask_output,
return_logits=return_logits,
)
masks = masks[0].detach().cpu().numpy()
iou_predictions = iou_predictions[0].detach().cpu().numpy()
low_res_masks = low_res_masks[0].detach().cpu().numpy()
return masks, iou_predictions, low_res_masks
@torch.no_grad()
def predict_torch(
self,
point_coords: Optional[torch.Tensor],
point_labels: Optional[torch.Tensor],
boxes: Optional[torch.Tensor] = None,
mask_input: Optional[torch.Tensor] = None,
multimask_output: bool = True,
return_logits: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Predict masks for the given input prompts, using the currently set image.
Input prompts are batched torch tensors and are expected to already be
transformed to the input frame using ResizeLongestSide.
Arguments:
point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
model. Each point is in (X,Y) in pixels.
point_labels (torch.Tensor or None): A BxN array of labels for the
point prompts. 1 indicates a foreground point and 0 indicates a
background point.
box (np.ndarray or None): A Bx4 array given a box prompt to the
model, in XYXY format.
mask_input (np.ndarray): A low resolution mask input to the model, typically
coming from a previous prediction iteration. Has form Bx1xHxW, where
for SAM, H=W=256. Masks returned by a previous iteration of the
predict method do not need further transformation.
multimask_output (bool): If true, the model will return three masks.
For ambiguous input prompts (such as a single click), this will often
produce better masks than a single prediction. If only a single
mask is needed, the model's predicted quality score can be used
to select the best mask. For non-ambiguous prompts, such as multiple
input prompts, multimask_output=False can give better results.
return_logits (bool): If true, returns un-thresholded masks logits
instead of a binary mask.
Returns:
(torch.Tensor): The output masks in BxCxHxW format, where C is the
number of masks, and (H, W) is the original image size.
(torch.Tensor): An array of shape BxC containing the model's
predictions for the quality of each mask.
(torch.Tensor): An array of shape BxCxHxW, where C is the number
of masks and H=W=256. These low res logits can be passed to
a subsequent iteration as mask input.
"""
if not self.is_image_set:
raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
if point_coords is not None:
points = (point_coords.to(self.load_device), point_labels.to(self.load_device))
else:
points = None
# load
if boxes is not None:
boxes = boxes.to(self.load_device)
if mask_input is not None:
mask_input = mask_input.to(self.load_device)
model_management.load_model_gpu(self.patcher)
# Embed prompts
sparse_embeddings, dense_embeddings = self.patcher.model.prompt_encoder(
points=points,
boxes=boxes,
masks=mask_input,
)
# Predict masks
low_res_masks, iou_predictions = self.patcher.model.mask_decoder(
image_embeddings=self.features,
image_pe=self.patcher.model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
)
# Upscale the masks to the original image resolution
masks = self.patcher.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)
if not return_logits:
masks = masks > self.patcher.model.mask_threshold
return masks, iou_predictions, low_res_masks
def get_image_embedding(self) -> torch.Tensor:
"""
Returns the image embeddings for the currently set image, with
shape 1xCxHxW, where C is the embedding dimension and (H,W) are
the embedding spatial dimension of SAM (typically C=256, H=W=64).
"""
if not self.is_image_set:
raise RuntimeError(
"An image must be set with .set_image(...) to generate an embedding."
)
assert self.features is not None, "Features must exist if an image has been set."
return self.features
@property
def device(self) -> torch.device:
return self.patcher.model.device
def reset_image(self) -> None:
"""Resets the currently set image."""
self.is_image_set = False
self.features = None
self.orig_h = None
self.orig_w = None
self.input_h = None
self.input_w = None
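A minimal box-prompt sketch against this patched predictor (the checkpoint path, dummy image, and box coordinates are assumptions; download_sam_model in modules/config.py below provides the real path handling):

import numpy as np
from extras.sam.predictor import SamPredictor
from segment_anything import sam_model_registry

sam = sam_model_registry['vit_b'](checkpoint='models/sam/sam_vit_b_01ec64.pth')  # assumed local path
predictor = SamPredictor(sam)
predictor.set_image(np.zeros((512, 512, 3), dtype=np.uint8))  # HWC uint8 RGB image
masks, scores, low_res = predictor.predict(
    box=np.array([100, 100, 300, 300]),  # XYXY in pixels
    multimask_output=False)
print(masks.shape, scores.shape)  # e.g. (1, 512, 512) and (1,)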


@ -1 +1 @@
version = '2.4.3'
version = '2.5.0-rc1'


@ -66,7 +66,7 @@ function on_style_selection_blur() {
}
onUiLoaded(async () => {
let spans = document.querySelectorAll('.aspect_ratios span');
let spans = document.querySelectorAll('.aspect_ratios span, .performance_selections span');
spans.forEach(function (span) {
span.innerHTML = span.innerHTML.replace(/&lt;/g, '<').replace(/&gt;/g, '>');


@ -642,4 +642,5 @@ onUiLoaded(async() => {
}
applyZoomAndPan("#inpaint_canvas");
applyZoomAndPan("#inpaint_mask_canvas");
});


@ -11,6 +11,7 @@
"Image Prompt": "Image Prompt",
"Inpaint or Outpaint": "Inpaint or Outpaint",
"Outpaint Direction": "Outpaint Direction",
"Enable Advanced Masking Features": "Enable Advanced Masking Features",
"Method": "Method",
"Describe": "Describe",
"Content Type": "Content Type",
@ -44,6 +45,8 @@
"Top": "Top",
"Bottom": "Bottom",
"* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)": "* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)",
"Advanced options": "Advanced options",
"Generate mask from image": "Generate mask from image",
"Setting": "Setting",
"Style": "Style",
"Preset": "Preset",
@ -57,6 +60,7 @@
"Image Number": "Image Number",
"Negative Prompt": "Negative Prompt",
"Describing what you do not want to see.": "Describing what you do not want to see.",
"Uses the internet to translate prompts to English.": "Uses the internet to translate prompts to English.",
"Random": "Random",
"Seed": "Seed",
"Disable seed increment": "Disable seed increment",
@ -367,10 +371,14 @@
"Disable preview during generation.": "Disable preview during generation.",
"Disable Intermediate Results": "Disable Intermediate Results",
"Disable intermediate results during generation, only show final gallery.": "Disable intermediate results during generation, only show final gallery.",
"Debug Inpaint Preprocessing": "Debug Inpaint Preprocessing",
"Debug GroundingDINO": "Debug GroundingDINO",
"Used for SAM object detection and box generation": "Used for SAM object detection and box generation",
"GroundingDINO Box Erode or Dilate": "GroundingDINO Box Erode or Dilate",
"Inpaint Engine": "Inpaint Engine",
"v1": "v1",
"Version of Fooocus inpaint model": "Version of Fooocus inpaint model",
"v2.5": "v2.5",
"v2.6": "v2.6",
"Control Debug": "Control Debug",
"Debug Preprocessors": "Debug Preprocessors",
"Mixing Image Prompt and Vary/Upscale": "Mixing Image Prompt and Vary/Upscale",
@ -400,5 +408,74 @@
"Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.",
"fooocus (json)": "fooocus (json)",
"a1111 (plain text)": "a1111 (plain text)",
"Unsupported image type in input": "Unsupported image type in input"
"Unsupported image type in input": "Unsupported image type in input",
"Enhance": "Enhance",
"Detection prompt": "Detection prompt",
"Detection Prompt Quick List": "Detection Prompt Quick List",
"Maximum number of detections": "Maximum number of detections",
"Use with Enhance, skips image generation": "Use with Enhance, skips image generation",
"Order of Processing": "Order of Processing",
"Use before to enhance small details and after to enhance large areas.": "Use before to enhance small details and after to enhance large areas.",
"Before First Enhancement": "Before First Enhancement",
"After Last Enhancement": "After Last Enhancement",
"Prompt Type": "Prompt Type",
"Choose which prompt to use for Upscale or Variation.": "Choose which prompt to use for Upscale or Variation.",
"Original Prompts": "Original Prompts",
"Last Filled Enhancement Prompts": "Last Filled Enhancement Prompts",
"Enable": "Enable",
"Describe what you want to detect.": "Describe what you want to detect.",
"Enhancement positive prompt": "Enhancement positive prompt",
"Uses original prompt instead if empty.": "Uses original prompt instead if empty.",
"Enhancement negative prompt": "Enhancement negative prompt",
"Uses original negative prompt instead if empty.": "Uses original negative prompt instead if empty.",
"Detection": "Detection",
"u2net": "u2net",
"u2netp": "u2netp",
"u2net_human_seg": "u2net_human_seg",
"u2net_cloth_seg": "u2net_cloth_seg",
"silueta": "silueta",
"isnet-general-use": "isnet-general-use",
"isnet-anime": "isnet-anime",
"sam": "sam",
"Mask generation model": "Mask generation model",
"Cloth category": "Cloth category",
"Use singular whenever possible": "Use singular whenever possible",
"full": "full",
"upper": "upper",
"lower": "lower",
"SAM Options": "SAM Options",
"SAM model": "SAM model",
"vit_b": "vit_b",
"vit_l": "vit_l",
"vit_h": "vit_h",
"Box Threshold": "Box Threshold",
"Text Threshold": "Text Threshold",
"Set to 0 to detect all": "Set to 0 to detect all",
"Inpaint": "Inpaint",
"Inpaint or Outpaint (default)": "Inpaint or Outpaint (default)",
"Improve Detail (face, hand, eyes, etc.)": "Improve Detail (face, hand, eyes, etc.)",
"Modify Content (add objects, change background, etc.)": "Modify Content (add objects, change background, etc.)",
"Disable initial latent in inpaint": "Disable initial latent in inpaint",
"Version of Fooocus inpaint model. If set, use performance Quality or Speed (no performance LoRAs) for best results.": "Version of Fooocus inpaint model. If set, use performance Quality or Speed (no performance LoRAs) for best results.",
"Inpaint Denoising Strength": "Inpaint Denoising Strength",
"Same as the denoising strength in A1111 inpaint. Only used in inpaint, not used in outpaint. (Outpaint always use 1.0)": "Same as the denoising strength in A1111 inpaint. Only used in inpaint, not used in outpaint. (Outpaint always use 1.0)",
"Inpaint Respective Field": "Inpaint Respective Field",
"The area to inpaint. Value 0 is same as \"Only Masked\" in A1111. Value 1 is same as \"Whole Image\" in A1111. Only used in inpaint, not used in outpaint. (Outpaint always use 1.0)": "The area to inpaint. Value 0 is same as \"Only Masked\" in A1111. Value 1 is same as \"Whole Image\" in A1111. Only used in inpaint, not used in outpaint. (Outpaint always use 1.0)",
"Mask Erode or Dilate": "Mask Erode or Dilate",
"Positive value will make white area in the mask larger, negative value will make white area smaller. (default is 0, always processed before any mask invert)": "Positive value will make white area in the mask larger, negative value will make white area smaller. (default is 0, always processed before any mask invert)",
"Invert Mask When Generating": "Invert Mask When Generating",
"Debug Enhance Masks": "Debug Enhance Masks",
"Show enhance masks in preview and final results": "Show enhance masks in preview and final results",
"Use GroundingDINO boxes instead of more detailed SAM masks": "Use GroundingDINO boxes instead of more detailed SAM masks",
"highly detailed face": "highly detailed face",
"detailed girl face": "detailed girl face",
"detailed man face": "detailed man face",
"detailed hand": "detailed hand",
"beautiful eyes": "beautiful eyes",
"face": "face",
"eye": "eye",
"mouth": "mouth",
"hair": "hair",
"hand": "hand",
"body": "body"
}


@ -21,6 +21,7 @@ import fooocus_version
from build_launcher import build_launcher
from modules.launch_util import is_installed, run, python, run_pip, requirements_met, delete_folder_content
from modules.model_loader import load_file_from_url
from modules import config
REINSTALL_ALL = False
TRY_INSTALL_XFORMERS = False
@ -85,6 +86,7 @@ if args.hf_mirror is not None :
print("Set hf_mirror to:", args.hf_mirror)
from modules import config
os.environ["U2NET_HOME"] = config.path_inpaint
os.environ['GRADIO_TEMP_DIR'] = config.temp_path
@ -97,7 +99,7 @@ if config.temp_path_cleanup_on_launch:
print(f"[Cleanup] Failed to delete content of temp dir.")
def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads):
def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads, vae_downloads):
for file_name, url in vae_approx_filenames:
load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name)
@ -129,12 +131,14 @@ def download_models(default_model, previous_default_models, checkpoint_downloads
load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name)
for file_name, url in lora_downloads.items():
load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name)
for file_name, url in vae_downloads.items():
load_file_from_url(url=url, model_dir=config.path_vae, file_name=file_name)
return default_model, checkpoint_downloads
config.default_base_model_name, config.checkpoint_downloads = download_models(
config.default_base_model_name, config.previous_default_models, config.checkpoint_downloads,
config.embeddings_downloads, config.lora_downloads)
config.embeddings_downloads, config.lora_downloads, config.vae_downloads)
from webui import *

File diff suppressed because it is too large


@ -7,6 +7,7 @@ import args_manager
import tempfile
import modules.flags
import modules.sdxl_styles
from modules.hash_cache import init_cache
from modules.model_loader import load_file_from_url
from modules.extra_utils import makedirs_with_log, get_files_from_folder, try_eval_env_var
@ -98,7 +99,6 @@ def try_load_deprecated_user_path_config():
try_load_deprecated_user_path_config()
def get_presets():
preset_folder = 'presets'
presets = ['initial']
@ -106,8 +106,11 @@ def get_presets():
print('No presets found.')
return presets
return presets + [f[:f.index('.json')] for f in os.listdir(preset_folder) if f.endswith('.json')]
return presets + [f[:f.index(".json")] for f in os.listdir(preset_folder) if f.endswith('.json')]
def update_presets():
global available_presets
available_presets = get_presets()
def try_get_preset_content(preset):
if isinstance(preset, str):
@ -198,6 +201,7 @@ path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vi
path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion')
path_wildcards = get_dir_or_set_default('path_wildcards', '../wildcards/')
path_safety_checker = get_dir_or_set_default('path_safety_checker', '../models/safety_checker/')
path_sam = get_dir_or_set_default('path_sam', '../models/sam/')
path_outputs = get_path_output()
@ -397,7 +401,7 @@ default_prompt = get_config_item_or_set_default(
default_performance = get_config_item_or_set_default(
key='default_performance',
default_value=Performance.SPEED.value,
validator=lambda x: x in Performance.list(),
validator=lambda x: x in Performance.values(),
expected_type=str
)
default_advanced_checkbox = get_config_item_or_set_default(
@ -442,6 +446,12 @@ embeddings_downloads = get_config_item_or_set_default(
validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()),
expected_type=dict
)
vae_downloads = get_config_item_or_set_default(
key='vae_downloads',
default_value={},
validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()),
expected_type=dict
)
available_aspect_ratios = get_config_item_or_set_default(
key='available_aspect_ratios',
default_value=modules.flags.sdxl_aspect_ratios,
@ -460,6 +470,12 @@ default_inpaint_engine_version = get_config_item_or_set_default(
validator=lambda x: x in modules.flags.inpaint_engine_versions,
expected_type=str
)
default_inpaint_method = get_config_item_or_set_default(
key='default_inpaint_method',
default_value=modules.flags.inpaint_option_default,
validator=lambda x: x in modules.flags.inpaint_options,
expected_type=str
)
default_cfg_tsnr = get_config_item_or_set_default(
key='default_cfg_tsnr',
default_value=7.0,
@ -484,6 +500,11 @@ default_overwrite_switch = get_config_item_or_set_default(
validator=lambda x: isinstance(x, int),
expected_type=int
)
default_overwrite_upscale = get_config_item_or_set_default(
key='default_overwrite_upscale',
default_value=-1,
validator=lambda x: isinstance(x, numbers.Number)
)
example_inpaint_prompts = get_config_item_or_set_default(
key='example_inpaint_prompts',
default_value=[
@ -492,6 +513,50 @@ example_inpaint_prompts = get_config_item_or_set_default(
validator=lambda x: isinstance(x, list) and all(isinstance(v, str) for v in x),
expected_type=list
)
example_enhance_detection_prompts = get_config_item_or_set_default(
key='example_enhance_detection_prompts',
default_value=[
'face', 'eye', 'mouth', 'hair', 'hand', 'body'
],
validator=lambda x: isinstance(x, list) and all(isinstance(v, str) for v in x),
expected_type=list
)
default_enhance_tabs = get_config_item_or_set_default(
key='default_enhance_tabs',
default_value=3,
validator=lambda x: isinstance(x, int) and 1 <= x <= 5,
expected_type=int
)
default_enhance_checkbox = get_config_item_or_set_default(
key='default_enhance_checkbox',
default_value=False,
validator=lambda x: isinstance(x, bool),
expected_type=bool
)
default_enhance_uov_method = get_config_item_or_set_default(
key='default_enhance_uov_method',
default_value=modules.flags.disabled,
validator=lambda x: x in modules.flags.uov_list,
expected_type=str
)
default_enhance_uov_processing_order = get_config_item_or_set_default(
key='default_enhance_uov_processing_order',
default_value=modules.flags.enhancement_uov_before,
validator=lambda x: x in modules.flags.enhancement_uov_processing_order,
expected_type=str
)
default_enhance_uov_prompt_type = get_config_item_or_set_default(
key='default_enhance_uov_prompt_type',
default_value=modules.flags.enhancement_uov_prompt_type_original,
validator=lambda x: x in modules.flags.enhancement_uov_prompt_types,
expected_type=str
)
default_sam_max_detections = get_config_item_or_set_default(
key='default_sam_max_detections',
default_value=0,
validator=lambda x: isinstance(x, int) and 0 <= x <= 10,
expected_type=int
)
default_black_out_nsfw = get_config_item_or_set_default(
key='default_black_out_nsfw',
default_value=False,
@ -518,6 +583,35 @@ metadata_created_by = get_config_item_or_set_default(
)
example_inpaint_prompts = [[x] for x in example_inpaint_prompts]
example_enhance_detection_prompts = [[x] for x in example_enhance_detection_prompts]
default_inpaint_mask_model = get_config_item_or_set_default(
key='default_inpaint_mask_model',
default_value='isnet-general-use',
validator=lambda x: x in modules.flags.inpaint_mask_models,
expected_type=str
)
default_enhance_inpaint_mask_model = get_config_item_or_set_default(
key='default_enhance_inpaint_mask_model',
default_value='sam',
validator=lambda x: x in modules.flags.inpaint_mask_models,
expected_type=str
)
default_inpaint_mask_cloth_category = get_config_item_or_set_default(
key='default_inpaint_mask_cloth_category',
default_value='full',
validator=lambda x: x in modules.flags.inpaint_mask_cloth_category,
expected_type=str
)
default_inpaint_mask_sam_model = get_config_item_or_set_default(
key='default_inpaint_mask_sam_model',
default_value='vit_b',
validator=lambda x: x in [y[1] for y in modules.flags.inpaint_mask_sam_model if y[1] == x],
expected_type=str
)
config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [[True, 'None', 1.0] for _ in range(default_max_lora_number - len(default_loras))]
@ -537,6 +631,7 @@ possible_preset_keys = {
"default_sampler": "sampler",
"default_scheduler": "scheduler",
"default_overwrite_step": "steps",
"default_overwrite_switch": "overwrite_switch",
"default_performance": "performance",
"default_image_number": "image_number",
"default_prompt": "prompt",
@ -547,7 +642,10 @@ possible_preset_keys = {
"checkpoint_downloads": "checkpoint_downloads",
"embeddings_downloads": "embeddings_downloads",
"lora_downloads": "lora_downloads",
"default_vae": "vae"
"vae_downloads": "vae_downloads",
"default_vae": "vae",
# "default_inpaint_method": "inpaint_method", # disabled so inpaint mode doesn't refresh after every preset change
"default_inpaint_engine_version": "inpaint_engine_version",
}
REWRITE_PRESET = False
@ -754,4 +852,44 @@ def downloading_safety_checker_model():
return os.path.join(path_safety_checker, 'stable-diffusion-safety-checker.bin')
def download_sam_model(sam_model: str) -> str:
match sam_model:
case 'vit_b':
return downloading_sam_vit_b()
case 'vit_l':
return downloading_sam_vit_l()
case 'vit_h':
return downloading_sam_vit_h()
case _:
raise ValueError(f"sam model {sam_model} does not exist.")
def downloading_sam_vit_b():
load_file_from_url(
url='https://huggingface.co/mashb1t/misc/resolve/main/sam_vit_b_01ec64.pth',
model_dir=path_sam,
file_name='sam_vit_b_01ec64.pth'
)
return os.path.join(path_sam, 'sam_vit_b_01ec64.pth')
def downloading_sam_vit_l():
load_file_from_url(
url='https://huggingface.co/mashb1t/misc/resolve/main/sam_vit_l_0b3195.pth',
model_dir=path_sam,
file_name='sam_vit_l_0b3195.pth'
)
return os.path.join(path_sam, 'sam_vit_l_0b3195.pth')
def downloading_sam_vit_h():
load_file_from_url(
url='https://huggingface.co/mashb1t/misc/resolve/main/sam_vit_h_4b8939.pth',
model_dir=path_sam,
file_name='sam_vit_h_4b8939.pth'
)
return os.path.join(path_sam, 'sam_vit_h_4b8939.pth')
update_files()
init_cache(model_filenames, paths_checkpoints, lora_filenames, paths_loras)
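A sketch of how a few of the new config entries surface at runtime (values assume an untouched config.txt):

import modules.config as config

print(config.default_enhance_tabs)            # 3 enhance tabs by default
print(config.default_inpaint_mask_sam_model)  # 'vit_b'
print(config.download_sam_model('vit_b'))     # downloads on first call, returns the local .pth path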


@ -8,9 +8,15 @@ upscale_15 = 'Upscale (1.5x)'
upscale_2 = 'Upscale (2x)'
upscale_fast = 'Upscale (Fast 2x)'
uov_list = [
disabled, subtle_variation, strong_variation, upscale_15, upscale_2, upscale_fast
]
uov_list = [disabled, subtle_variation, strong_variation, upscale_15, upscale_2, upscale_fast]
enhancement_uov_before = "Before First Enhancement"
enhancement_uov_after = "After Last Enhancement"
enhancement_uov_processing_order = [enhancement_uov_before, enhancement_uov_after]
enhancement_uov_prompt_type_original = 'Original Prompts'
enhancement_uov_prompt_type_last_filled = 'Last Filled Enhancement Prompts'
enhancement_uov_prompt_types = [enhancement_uov_prompt_type_original, enhancement_uov_prompt_type_last_filled]
CIVITAI_NO_KARRAS = ["euler", "euler_ancestral", "heun", "dpm_fast", "dpm_adaptive", "ddim", "uni_pc"]
@ -75,6 +81,10 @@ default_parameters = {
output_formats = ['png', 'jpeg', 'webp']
inpaint_mask_models = ['u2net', 'u2netp', 'u2net_human_seg', 'u2net_cloth_seg', 'silueta', 'isnet-general-use', 'isnet-anime', 'sam']
inpaint_mask_cloth_category = ['full', 'upper', 'lower']
inpaint_mask_sam_model = ['vit_b', 'vit_l', 'vit_h']
inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6']
inpaint_option_default = 'Inpaint or Outpaint (default)'
inpaint_option_detail = 'Improve Detail (face, hand, eyes, etc.)'
@ -104,7 +114,6 @@ metadata_scheme = [
]
controlnet_image_count = 4
preparation_step_count = 13
class OutputFormat(Enum):
@ -154,7 +163,7 @@ class Performance(Enum):
@classmethod
def list(cls) -> list:
return list(map(lambda c: c.value, cls))
return list(map(lambda c: (c.name, c.value), cls))
@classmethod
def values(cls) -> list:
@ -178,3 +187,12 @@ class Performance(Enum):
def lora_filename(self) -> str | None:
return PerformanceLoRA[self.name].value if self.name in PerformanceLoRA.__members__ else None
performance_selections = []
for name, value in Performance.list():
restricted_text = ''
if Performance.has_restricted_features(value):
restricted_text = '*'
performance_selections.append((f'{value} <span style="color: grey;"> \U00002223 {Steps[name].value} steps {restricted_text}</span>', Performance[name].value))
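For reference, a sketch of one generated dropdown entry (steps value taken from the Steps enum; markup exactly as built above):

from modules.flags import performance_selections

print(performance_selections[0])
# e.g. ('Quality <span style="color: grey;"> ∣ 60 steps </span>', 'Quality')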

modules/hash_cache.py Normal file

@ -0,0 +1,84 @@
import json
import os
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count
import args_manager
from modules.util import get_file_from_folder_list
from modules.util import sha256, HASH_SHA256_LENGTH
hash_cache_filename = 'hash_cache.txt'
hash_cache = {}
def sha256_from_cache(filepath):
global hash_cache
if filepath not in hash_cache:
print(f"[Cache] Calculating sha256 for {filepath}")
hash_value = sha256(filepath)
print(f"[Cache] sha256 for {filepath}: {hash_value}")
hash_cache[filepath] = hash_value
save_cache_to_file(filepath, hash_value)
return hash_cache[filepath]
def load_cache_from_file():
global hash_cache
try:
if os.path.exists(hash_cache_filename):
with open(hash_cache_filename, 'rt', encoding='utf-8') as fp:
for line in fp:
entry = json.loads(line)
for filepath, hash_value in entry.items():
if not os.path.exists(filepath) or not isinstance(hash_value, str) or len(hash_value) != HASH_SHA256_LENGTH:
print(f'[Cache] Skipping invalid cache entry: {filepath}')
continue
hash_cache[filepath] = hash_value
except Exception as e:
print(f'[Cache] Loading failed: {e}')
def save_cache_to_file(filename=None, hash_value=None):
global hash_cache
if filename is not None and hash_value is not None:
items = [(filename, hash_value)]
mode = 'at'
else:
items = sorted(hash_cache.items())
mode = 'wt'
try:
with open(hash_cache_filename, mode, encoding='utf-8') as fp:
for filepath, hash_value in items:
json.dump({filepath: hash_value}, fp)
fp.write('\n')
except Exception as e:
print(f'[Cache] Saving failed: {e}')
def init_cache(model_filenames, paths_checkpoints, lora_filenames, paths_loras):
load_cache_from_file()
if args_manager.args.rebuild_hash_cache:
max_workers = args_manager.args.rebuild_hash_cache if args_manager.args.rebuild_hash_cache > 0 else cpu_count()
rebuild_cache(lora_filenames, model_filenames, paths_checkpoints, paths_loras, max_workers)
# write cache to file again for sorting and cleanup of invalid cache entries
save_cache_to_file()
def rebuild_cache(lora_filenames, model_filenames, paths_checkpoints, paths_loras, max_workers=cpu_count()):
def thread(filename, paths):
filepath = get_file_from_folder_list(filename, paths)
sha256_from_cache(filepath)
print('[Cache] Rebuilding hash cache')
with ThreadPoolExecutor(max_workers=max_workers) as executor:
for model_filename in model_filenames:
executor.submit(thread, model_filename, paths_checkpoints)
for lora_filename in lora_filenames:
executor.submit(thread, lora_filename, paths_loras)
print('[Cache] Done')
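The cache file is line-delimited JSON, one {filepath: sha256} object per line. A hedged sketch of reading a hash through the cache (the model path is an assumption):

from modules.hash_cache import load_cache_from_file, sha256_from_cache

load_cache_from_file()  # also done by init_cache() at startup
digest = sha256_from_cache('models/checkpoints/juggernautXL_v8Rundiffusion.safetensors')
print(digest)  # computed once, appended to hash_cache.txt, then reused on later runs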


@ -9,18 +9,18 @@ from PIL import Image
import fooocus_version
import modules.config
import modules.sdxl_styles
from modules import hash_cache
from modules.flags import MetadataScheme, Performance, Steps
from modules.flags import SAMPLERS, CIVITAI_NO_KARRAS
from modules.util import quote, unquote, extract_styles_from_prompt, is_json, get_file_from_folder_list, sha256
from modules.hash_cache import sha256_from_cache
from modules.util import quote, unquote, extract_styles_from_prompt, is_json, get_file_from_folder_list
re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)'
re_param = re.compile(re_param_code)
re_imagesize = re.compile(r"^(\d+)x(\d+)$")
hash_cache = {}
def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool, inpaint_mode: str):
loaded_parameter_dict = raw_metadata
if isinstance(raw_metadata, str):
loaded_parameter_dict = json.loads(raw_metadata)
@ -49,6 +49,8 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
get_str('scheduler', 'Scheduler', loaded_parameter_dict, results)
get_str('vae', 'VAE', loaded_parameter_dict, results)
get_seed('seed', 'Seed', loaded_parameter_dict, results)
get_inpaint_engine_version('inpaint_engine_version', 'Inpaint Engine Version', loaded_parameter_dict, results, inpaint_mode)
get_inpaint_method('inpaint_method', 'Inpaint Mode', loaded_parameter_dict, results)
if is_generating:
results.append(gr.update())
@ -160,6 +162,36 @@ def get_seed(key: str, fallback: str | None, source_dict: dict, results: list, d
results.append(gr.update())
def get_inpaint_engine_version(key: str, fallback: str | None, source_dict: dict, results: list, inpaint_mode: str, default=None) -> str | None:
try:
h = source_dict.get(key, source_dict.get(fallback, default))
assert isinstance(h, str) and h in modules.flags.inpaint_engine_versions
if inpaint_mode != modules.flags.inpaint_option_detail:
results.append(h)
else:
results.append(gr.update())
results.append(h)
return h
except:
results.append(gr.update())
results.append('empty')
return None
def get_inpaint_method(key: str, fallback: str | None, source_dict: dict, results: list, default=None) -> str | None:
try:
h = source_dict.get(key, source_dict.get(fallback, default))
assert isinstance(h, str) and h in modules.flags.inpaint_options
results.append(h)
for i in range(modules.config.default_enhance_tabs):
results.append(h)
return h
except:
results.append(gr.update())
for i in range(modules.config.default_enhance_tabs):
results.append(gr.update())
def get_adm_guidance(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
try:
h = source_dict.get(key, source_dict.get(fallback, default))
@ -215,14 +247,6 @@ def get_lora(key: str, fallback: str | None, source_dict: dict, results: list, p
results.append(1)
def get_sha256(filepath):
global hash_cache
if filepath not in hash_cache:
hash_cache[filepath] = sha256(filepath)
return hash_cache[filepath]
def parse_meta_from_preset(preset_content):
assert isinstance(preset_content, dict)
preset_prepared = {}
@ -245,8 +269,7 @@ def parse_meta_from_preset(preset_content):
height = height[:height.index(" ")]
preset_prepared[meta_key] = (width, height)
else:
preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[
settings_key] is not None else getattr(modules.config, settings_key)
preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[settings_key] is not None else getattr(modules.config, settings_key)
if settings_key == "default_styles" or settings_key == "default_aspect_ratio":
preset_prepared[meta_key] = str(preset_prepared[meta_key])
@ -290,18 +313,18 @@ class MetadataParser(ABC):
self.base_model_name = Path(base_model_name).stem
base_model_path = get_file_from_folder_list(base_model_name, modules.config.paths_checkpoints)
self.base_model_hash = get_sha256(base_model_path)
self.base_model_hash = sha256_from_cache(base_model_path)
if refiner_model_name not in ['', 'None']:
self.refiner_model_name = Path(refiner_model_name).stem
refiner_model_path = get_file_from_folder_list(refiner_model_name, modules.config.paths_checkpoints)
self.refiner_model_hash = get_sha256(refiner_model_path)
self.refiner_model_hash = sha256_from_cache(refiner_model_path)
self.loras = []
for (lora_name, lora_weight) in loras:
if lora_name != 'None':
lora_path = get_file_from_folder_list(lora_name, modules.config.paths_loras)
lora_hash = get_sha256(lora_path)
lora_hash = sha256_from_cache(lora_path)
self.loras.append((Path(lora_name).stem, lora_weight, lora_hash))
self.vae_name = Path(vae_name).stem


@ -37,6 +37,7 @@ def sort_styles(selected):
global all_styles
unselected = [y for y in all_styles if y not in selected]
sorted_styles = selected + unselected
"""
try:
with open('sorted_styles.json', 'wt', encoding='utf-8') as fp:
json.dump(sorted_styles, fp, indent=4)
@ -44,6 +45,7 @@ def sort_styles(selected):
print('Write style sorting failed.')
print(e)
all_styles = sorted_styles
"""
return gr.CheckboxGroup.update(choices=sorted_styles)

modules/translator.py Normal file

@ -0,0 +1,15 @@
import translators
from functools import lru_cache
@lru_cache(maxsize=32, typed=False)
def translate2en(text, element):
if not text:
return text
try:
result = translators.translate_text(text, to_language='en')
print(f'[Parameters] Translated {element}: {result}')
return result
except Exception as e:
print(f'[Parameters] Error during translation of {element}: {e}')
return text
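A usage sketch, assuming the third-party translators package is installed and the machine is online:

from modules.translator import translate2en

print(translate2en('eine Katze, die auf einem Sofa schläft', 'prompt'))
# -> 'a cat sleeping on a sofa' (network-dependent; returns the input text unchanged on error)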


@ -1,13 +1,11 @@
import os
import torch
import modules.core as core
from ldm_patched.pfn.architecture.RRDB import RRDBNet as ESRGAN
from ldm_patched.contrib.external_upscale_model import ImageUpscaleWithModel
from collections import OrderedDict
from modules.config import path_upscale_models
model_filename = os.path.join(path_upscale_models, 'fooocus_upscaler_s409985e5.bin')
import modules.core as core
import torch
from ldm_patched.contrib.external_upscale_model import ImageUpscaleWithModel
from ldm_patched.pfn.architecture.RRDB import RRDBNet as ESRGAN
from modules.config import downloading_upscale_model
opImageUpscaleWithModel = ImageUpscaleWithModel()
model = None
@ -18,6 +16,7 @@ def perform_upscale(img):
print(f'Upscaling image with shape {str(img.shape)} ...')
if model is None:
model_filename = downloading_upscale_model()
sd = torch.load(model_filename)
sdo = OrderedDict()
for k, v in sd.items():


@ -176,13 +176,11 @@ def generate_temp_filename(folder='./outputs/', extension='png'):
def sha256(filename, use_addnet_hash=False, length=HASH_SHA256_LENGTH):
print(f"Calculating sha256 for {filename}: ", end='')
if use_addnet_hash:
with open(filename, "rb") as file:
sha256_value = addnet_hash_safetensors(file)
else:
sha256_value = calculate_sha256(filename)
print(f"{sha256_value}")
return sha256_value[:length] if length is not None else sha256_value
@ -383,13 +381,6 @@ def get_file_from_folder_list(name, folders):
return os.path.abspath(os.path.realpath(os.path.join(folders[0], name)))
def makedirs_with_log(path):
try:
os.makedirs(path, exist_ok=True)
except OSError as error:
print(f'Directory {path} could not be created, reason: {error}')
def get_enabled_loras(loras: list, remove_none=True) -> list:
return [(lora[1], lora[2]) for lora in loras if lora[0] and (lora[1] != 'None' if remove_none else True)]
@ -397,6 +388,9 @@ def get_enabled_loras(loras: list, remove_none=True) -> list:
def parse_lora_references_from_prompt(prompt: str, loras: List[Tuple[AnyStr, float]], loras_limit: int = 5,
skip_file_check=False, prompt_cleanup=True, deduplicate_loras=True,
lora_filenames=None) -> tuple[List[Tuple[AnyStr, float]], str]:
# prevent unintended side effects when returning without detection
loras = loras.copy()
if lora_filenames is None:
lora_filenames = []

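A generic illustration of the aliasing hazard the added copy() guards against (simplified stand-in, not the real parser):

def parse(loras):
    loras = loras.copy()  # without this line, the caller's default list is mutated
    loras.append(('detected.safetensors', 0.8))
    return loras

defaults = [('style.safetensors', 1.0)]
merged = parse(defaults)
assert defaults == [('style.safetensors', 1.0)]  # defaults stay untouched across calls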
presets/.gitignore vendored

@ -3,5 +3,6 @@
!default.json
!lcm.json
!playground_v2.5.json
!pony_v6.json
!realistic.json
!sai.json


@ -1,5 +1,5 @@
{
"default_model": "animaPencilXL_v310.safetensors",
"default_model": "animaPencilXL_v500.safetensors",
"default_refiner": "None",
"default_refiner_switch": 0.5,
"default_loras": [
@ -42,12 +42,15 @@
"Fooocus Masterpiece"
],
"default_aspect_ratio": "896*1152",
"default_overwrite_step": -1,
"checkpoint_downloads": {
"animaPencilXL_v310.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/animaPencilXL_v310.safetensors"
"animaPencilXL_v500.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/animaPencilXL_v500.safetensors"
},
"embeddings_downloads": {},
"lora_downloads": {},
"previous_default_models": [
"animaPencilXL_v400.safetensors",
"animaPencilXL_v310.safetensors",
"animaPencilXL_v300.safetensors",
"animaPencilXL_v260.safetensors",
"animaPencilXL_v210.safetensors",


@ -42,6 +42,7 @@
"Fooocus Sharp"
],
"default_aspect_ratio": "1152*896",
"default_overwrite_step": -1,
"checkpoint_downloads": {
"juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
},


@ -42,6 +42,7 @@
"Fooocus Sharp"
],
"default_aspect_ratio": "1152*896",
"default_overwrite_step": -1,
"checkpoint_downloads": {
"juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
},


@ -40,6 +40,8 @@
"Fooocus V2"
],
"default_aspect_ratio": "1024*1024",
"default_overwrite_step": -1,
"default_inpaint_engine_version": "None",
"checkpoint_downloads": {
"playground-v2.5-1024px-aesthetic.fp16.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/playground-v2.5-1024px-aesthetic.fp16.safetensors"
},

presets/pony_v6.json Normal file

@ -0,0 +1,54 @@
{
"default_model": "ponyDiffusionV6XL.safetensors",
"default_refiner": "None",
"default_refiner_switch": 0.5,
"default_vae": "ponyDiffusionV6XL_vae.safetensors",
"default_loras": [
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
],
[
true,
"None",
1.0
]
],
"default_cfg_scale": 7.0,
"default_sample_sharpness": 2.0,
"default_sampler": "dpmpp_2m_sde_gpu",
"default_scheduler": "karras",
"default_performance": "Speed",
"default_prompt": "",
"default_prompt_negative": "",
"default_styles": [
"Fooocus Pony"
],
"default_aspect_ratio": "896*1152",
"default_overwrite_step": -1,
"default_inpaint_engine_version": "None",
"checkpoint_downloads": {
"ponyDiffusionV6XL.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/ponyDiffusionV6XL.safetensors"
},
"embeddings_downloads": {},
"lora_downloads": {},
"vae_downloads": {
"ponyDiffusionV6XL_vae.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/ponyDiffusionV6XL_vae.safetensors"
}
}
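Presets such as this one are plain JSON overlays on the built-in defaults; a minimal sketch of how such a file could be loaded and merged (load_preset is a hypothetical helper, not the actual Fooocus loader):

import json

def load_preset(path: str, defaults: dict) -> dict:
    # keys present in the preset override the defaults; everything else passes through
    with open(path, 'r', encoding='utf-8') as f:
        preset = json.load(f)
    merged = dict(defaults)
    merged.update(preset)
    return merged

# config = load_preset('presets/pony_v6.json', {'default_refiner_switch': 0.5})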

View File

@ -5,7 +5,7 @@
"default_loras": [
[
true,
"SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors",
"SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors",
0.25
],
[
@ -42,12 +42,13 @@
"Fooocus Negative"
],
"default_aspect_ratio": "896*1152",
"default_overwrite_step": -1,
"checkpoint_downloads": {
"realisticStockPhoto_v20.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/realisticStockPhoto_v20.safetensors"
},
"embeddings_downloads": {},
"lora_downloads": {
"SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors"
"SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors"
},
"previous_default_models": ["realisticStockPhoto_v10.safetensors"]
}

View File

@ -41,6 +41,7 @@
"Fooocus Cinematic"
],
"default_aspect_ratio": "1152*896",
"default_overwrite_step": -1,
"checkpoint_downloads": {
"sd_xl_base_1.0_0.9vae.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0_0.9vae.safetensors",
"sd_xl_refiner_1.0_0.9vae.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0_0.9vae.safetensors"

View File

@ -285,11 +285,11 @@ See the common problems [here](troubleshoot.md).
Given different goals, the default models and configs of Fooocus are different:
| Task | Windows | Linux args | Main Model | Refiner | Config |
| --- | --- | --- | --- | --- |--------------------------------------------------------------------------------|
| General | run.bat | | juggernautXL_v8Rundiffusion | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/default.json) |
| Realistic | run_realistic.bat | --preset realistic | realisticStockPhoto_v20 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/realistic.json) |
| Anime | run_anime.bat | --preset anime | animaPencilXL_v100 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/anime.json) |
| Task | Windows | Linux args | Main Model | Refiner | Config |
|-----------| --- | --- |-----------------------------| --- |--------------------------------------------------------------------------------|
| General | run.bat | | juggernautXL_v8Rundiffusion | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/default.json) |
| Realistic | run_realistic.bat | --preset realistic | realisticStockPhoto_v20 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/realistic.json) |
| Anime | run_anime.bat | --preset anime | animaPencilXL_v500 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/anime.json) |
Note that the download is **automatic** - you do not need to do anything if the internet connection is okay. However, you can also download the files manually (or move them from somewhere else) if you have your own preparation.

View File

@ -6,7 +6,7 @@ accelerate==0.21.0
pyyaml==6.0
Pillow==9.2.0
scipy==1.9.3
tqdm==4.64.1
tqdm==4.65.0
psutil==5.9.5
pytorch_lightning==1.9.4
omegaconf==2.2.3
@ -16,3 +16,7 @@ opencv-contrib-python==4.8.0.74
httpx==0.24.1
onnxruntime==1.16.3
timm==0.9.2
translators==5.9.2
rembg==2.0.57
groundingdino-py==0.4.0
segment_anything==1.0
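The four new packages back the prompt translation and image masking features introduced in this release. As a rough illustration of how segment_anything is typically driven (the checkpoint filename, the 'vit_b' variant, and the box coordinates are assumptions for the sketch, not values taken from Fooocus):

import numpy as np
from segment_anything import sam_model_registry, SamPredictor

sam = sam_model_registry['vit_b'](checkpoint='sam_vit_b_01ec64.pth')
predictor = SamPredictor(sam)

image = np.zeros((512, 512, 3), dtype=np.uint8)  # stand-in for a real RGB image
predictor.set_image(image)
masks, scores, _ = predictor.predict(
    box=np.array([100, 100, 400, 400]),  # an XYXY box, e.g. from GroundingDINO
    multimask_output=False,
)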

Binary file not shown (new image, 5.1 KiB).

View File

@ -14,7 +14,7 @@
},
{
"name": "Fooocus Masterpiece",
"prompt": "(masterpiece), (best quality), (ultra-detailed), {prompt}, illustration, disheveled hair, detailed eyes, perfect composition, moist skin, intricate details, earrings, by wlop",
"prompt": "(masterpiece), (best quality), (ultra-detailed), {prompt}, illustration, disheveled hair, detailed eyes, perfect composition, moist skin, intricate details, earrings",
"negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, pubic hair,extra digit, fewer digits, cropped, worst quality, low quality"
},
{
@ -30,5 +30,10 @@
"name": "Fooocus Cinematic",
"prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
"negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured"
},
{
"name": "Fooocus Pony",
"prompt": "score_9, score_8_up, score_7_up, {prompt}",
"negative_prompt": "score_6, score_5, score_4"
}
]
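Each style entry is a prompt template: `{prompt}` marks where the user's text is spliced in, so the new `Fooocus Pony` style prepends the score tags Pony Diffusion expects. A minimal sketch of the substitution (apply_style_sketch is illustrative, not the exact Fooocus helper):

def apply_style_sketch(template: str, user_prompt: str) -> str:
    # templates with the placeholder embed the prompt; others are simply appended
    if '{prompt}' in template:
        return template.replace('{prompt}', user_prompt)
    return f'{user_prompt}, {template}'

# apply_style_sketch('score_9, score_8_up, score_7_up, {prompt}', 'a knight')
# -> 'score_9, score_8_up, score_7_up, a knight'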

View File

@ -1,3 +1,25 @@
# [2.5.0-rc1](https://github.com/lllyasviel/Fooocus/releases/tag/v2.5.0-rc1)
* Add enhance feature, which offers easy image refinement steps (similar to adetailer, but based on dynamic image detection instead of specific mask detection models). See [documentation](https://github.com/mashb1t/Fooocus/discussions/42).
* Rewrite async worker code, making it much more reusable and allowing iterative processing
* Improve GroundingDINO and SAM image masking
* Fix inference tensor version counter tracking issue for GroundingDINO after using Enhance (see [discussion](https://github.com/lllyasviel/Fooocus/discussions/3213))
* Update Python dependencies, add segment_anything
* Move checkboxes Enable Mask Upload and Invert Mask When Generating from Developer Debug Mode to Inpaint Or Outpaint
* Add persistent model cache for metadata. Use `--rebuild-hash-cache X` (X = int, number of CPU cores, default all) to manually rebuild the cache for all non-cached hashes (see the sketch after this changelog)
* Rename `--enable-describe-uov-image` to `--enable-auto-describe-image`, now also works for enhance image upload
* Rename checkbox `Enable Mask Upload` to `Enable Advanced Masking Features` to better hint at the mask auto-generation feature
* Get upscale model filepath by calling downloading_upscale_model() to ensure the model exists
* Update default models to latest versions
* animaPencilXL_v400 => animaPencilXL_v500
* DreamShaperXL_Turbo_dpmppSdeKarras => DreamShaperXL_Turbo_v2_1
* SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4 => SDXL_FILM_PHOTOGRAPHY_STYLE_V1
* Add preset for pony_v6 (using ponyDiffusionV6XL)
* Add style `Fooocus Pony`
* Add restart sampler ([paper](https://arxiv.org/abs/2306.14878))
* Add config option `default_inpaint_engine_version`; set the inpaint engine for pony_v6 and playground_v2.5 to None for improved results, as these models are incompatible with the inpaint engine
* Add image editor functionality to mask upload (same as for inpaint, now correctly resizes and allows more detailed mask creation)
# [2.4.3](https://github.com/lllyasviel/Fooocus/releases/tag/v2.4.3)
* Fix alphas_cumprod setter for TCD sampler
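The persistent hash cache mentioned above maps model files to their sha256 digests so metadata lookups skip re-hashing. A hypothetical sketch of such a cache (file name, layout, and helper names are assumptions, not the Fooocus implementation):

import hashlib
import json
import os

CACHE_PATH = 'hash_cache.json'  # assumed location

def load_hash_cache() -> dict:
    # absolute file path -> cached sha256 hex digest, persisted between runs
    if os.path.exists(CACHE_PATH):
        with open(CACHE_PATH, 'r', encoding='utf-8') as f:
            return json.load(f)
    return {}

def cached_sha256(filename: str, cache: dict) -> str:
    key = os.path.abspath(filename)
    if key not in cache:
        h = hashlib.sha256()
        with open(filename, 'rb') as f:
            for chunk in iter(lambda: f.read(1024 * 1024), b''):
                h.update(chunk)
        cache[key] = h.hexdigest()
        with open(CACHE_PATH, 'w', encoding='utf-8') as f:
            json.dump(cache, f)
    return cache[key]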

458
webui.py
View File

@ -16,6 +16,7 @@ import modules.meta_parser
import args_manager
import copy
import launch
from extras.inpaint_mask import SAMOptions
from modules.sdxl_styles import legal_style_names
from modules.private_logger import get_current_html_path
@ -89,6 +90,37 @@ def generate_clicked(task: worker.AsyncTask):
return
def inpaint_mode_change(mode, inpaint_engine_version):
assert mode in modules.flags.inpaint_options
# inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts,
# inpaint_disable_initial_latent, inpaint_engine,
# inpaint_strength, inpaint_respective_field
if mode == modules.flags.inpaint_option_detail:
return [
gr.update(visible=True), gr.update(visible=False, value=[]),
gr.Dataset.update(visible=True, samples=modules.config.example_inpaint_prompts),
False, 'None', 0.5, 0.0
]
if inpaint_engine_version == 'empty':
inpaint_engine_version = modules.config.default_inpaint_engine_version
if mode == modules.flags.inpaint_option_modify:
return [
gr.update(visible=True), gr.update(visible=False, value=[]),
gr.Dataset.update(visible=False, samples=modules.config.example_inpaint_prompts),
True, inpaint_engine_version, 1.0, 0.0
]
return [
gr.update(visible=False, value=''), gr.update(visible=True),
gr.Dataset.update(visible=False, samples=modules.config.example_inpaint_prompts),
False, inpaint_engine_version, 1.0, 0.618
]
reload_javascript()
title = f'Fooocus {fooocus_version.version}'
@ -100,6 +132,7 @@ shared.gradio_root = gr.Blocks(title=title).queue()
with shared.gradio_root:
currentTask = gr.State(worker.AsyncTask(args=[]))
inpaint_engine_state = gr.State('empty')
with gr.Row():
with gr.Column(scale=2):
with gr.Row():
@ -146,6 +179,7 @@ with shared.gradio_root:
skip_button.click(skip_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False)
with gr.Row(elem_classes='advanced_check_row'):
input_image_checkbox = gr.Checkbox(label='Input Image', value=False, container=False, elem_classes='min_check')
enhance_checkbox = gr.Checkbox(label='Enhance', value=modules.config.default_enhance_checkbox, container=False, elem_classes='min_check')
advanced_checkbox = gr.Checkbox(label='Advanced', value=modules.config.default_advanced_checkbox, container=False, elem_classes='min_check')
with gr.Row(visible=False) as image_input_panel:
with gr.Tabs():
@ -199,18 +233,84 @@ with shared.gradio_root:
ip_advanced.change(ip_advance_checked, inputs=ip_advanced,
outputs=ip_ad_cols + ip_types + ip_stops + ip_weights,
queue=False, show_progress=False)
with gr.TabItem(label='Inpaint or Outpaint') as inpaint_tab:
with gr.Row():
inpaint_input_image = grh.Image(label='Image', source='upload', type='numpy', tool='sketch', height=500, brush_color="#FFFFFF", elem_id='inpaint_canvas', show_label=False)
inpaint_mask_image = grh.Image(label='Mask Upload', source='upload', type='numpy', height=500, visible=False)
with gr.Column():
inpaint_input_image = grh.Image(label='Image', source='upload', type='numpy', tool='sketch', height=500, brush_color="#FFFFFF", elem_id='inpaint_canvas', show_label=False)
inpaint_advanced_masking_checkbox = gr.Checkbox(label='Enable Advanced Masking Features', value=False)
inpaint_mode = gr.Dropdown(choices=modules.flags.inpaint_options, value=modules.config.default_inpaint_method, label='Method')
inpaint_additional_prompt = gr.Textbox(placeholder="Describe what you want to inpaint.", elem_id='inpaint_additional_prompt', label='Inpaint Additional Prompt', visible=False)
outpaint_selections = gr.CheckboxGroup(choices=['Left', 'Right', 'Top', 'Bottom'], value=[], label='Outpaint Direction')
example_inpaint_prompts = gr.Dataset(samples=modules.config.example_inpaint_prompts,
label='Additional Prompt Quick List',
components=[inpaint_additional_prompt],
visible=False)
gr.HTML('* Powered by Fooocus Inpaint Engine <a href="https://github.com/lllyasviel/Fooocus/discussions/414" target="_blank">\U0001F4D4 Document</a>')
example_inpaint_prompts.click(lambda x: x[0], inputs=example_inpaint_prompts, outputs=inpaint_additional_prompt, show_progress=False, queue=False)
with gr.Column(visible=False) as inpaint_mask_generation_col:
inpaint_mask_image = grh.Image(label='Mask Upload', source='upload', type='numpy', tool='sketch', height=500, brush_color="#FFFFFF", mask_opacity=1, elem_id='inpaint_mask_canvas')
invert_mask_checkbox = gr.Checkbox(label='Invert Mask When Generating', value=False)
inpaint_mask_model = gr.Dropdown(label='Mask generation model',
choices=flags.inpaint_mask_models,
value=modules.config.default_inpaint_mask_model)
inpaint_mask_cloth_category = gr.Dropdown(label='Cloth category',
choices=flags.inpaint_mask_cloth_category,
value=modules.config.default_inpaint_mask_cloth_category,
visible=False)
inpaint_mask_dino_prompt_text = gr.Textbox(label='Detection prompt', value='', visible=False, info='Use singular whenever possible', placeholder='Describe what you want to detect.')
example_inpaint_mask_dino_prompt_text = gr.Dataset(
samples=modules.config.example_enhance_detection_prompts,
label='Detection Prompt Quick List',
components=[inpaint_mask_dino_prompt_text],
visible=modules.config.default_inpaint_mask_model == 'sam')
example_inpaint_mask_dino_prompt_text.click(lambda x: x[0],
inputs=example_inpaint_mask_dino_prompt_text,
outputs=inpaint_mask_dino_prompt_text,
show_progress=False, queue=False)
with gr.Accordion("Advanced options", visible=False, open=False) as inpaint_mask_advanced_options:
inpaint_mask_sam_model = gr.Dropdown(label='SAM model', choices=flags.inpaint_mask_sam_model, value=modules.config.default_inpaint_mask_sam_model)
inpaint_mask_box_threshold = gr.Slider(label="Box Threshold", minimum=0.0, maximum=1.0, value=0.3, step=0.05)
inpaint_mask_text_threshold = gr.Slider(label="Text Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.05)
inpaint_mask_sam_max_detections = gr.Slider(label="Maximum number of detections", info="Set to 0 to detect all", minimum=0, maximum=10, value=modules.config.default_sam_max_detections, step=1, interactive=True)
generate_mask_button = gr.Button(value='Generate mask from image')
def generate_mask(image, mask_model, cloth_category, dino_prompt_text, sam_model, box_threshold, text_threshold, sam_max_detections, dino_erode_or_dilate, dino_debug):
from extras.inpaint_mask import generate_mask_from_image
extras = {}
sam_options = None
if mask_model == 'u2net_cloth_seg':
extras['cloth_category'] = cloth_category
elif mask_model == 'sam':
sam_options = SAMOptions(
dino_prompt=dino_prompt_text,
dino_box_threshold=box_threshold,
dino_text_threshold=text_threshold,
dino_erode_or_dilate=dino_erode_or_dilate,
dino_debug=dino_debug,
max_detections=sam_max_detections,
model_type=sam_model
)
mask, _, _, _ = generate_mask_from_image(image, mask_model, extras, sam_options)
return mask
inpaint_mask_model.change(lambda x: [gr.update(visible=x == 'u2net_cloth_seg')] +
[gr.update(visible=x == 'sam')] * 2 +
[gr.Dataset.update(visible=x == 'sam',
samples=modules.config.example_enhance_detection_prompts)],
inputs=inpaint_mask_model,
outputs=[inpaint_mask_cloth_category,
inpaint_mask_dino_prompt_text,
inpaint_mask_advanced_options,
example_inpaint_mask_dino_prompt_text],
queue=False, show_progress=False)
with gr.Row():
inpaint_additional_prompt = gr.Textbox(placeholder="Describe what you want to inpaint.", elem_id='inpaint_additional_prompt', label='Inpaint Additional Prompt', visible=False)
outpaint_selections = gr.CheckboxGroup(choices=['Left', 'Right', 'Top', 'Bottom'], value=[], label='Outpaint Direction')
inpaint_mode = gr.Dropdown(choices=modules.flags.inpaint_options, value=modules.flags.inpaint_option_default, label='Method')
example_inpaint_prompts = gr.Dataset(samples=modules.config.example_inpaint_prompts, label='Additional Prompt Quick List', components=[inpaint_additional_prompt], visible=False)
gr.HTML('* Powered by Fooocus Inpaint Engine <a href="https://github.com/lllyasviel/Fooocus/discussions/414" target="_blank">\U0001F4D4 Document</a>')
example_inpaint_prompts.click(lambda x: x[0], inputs=example_inpaint_prompts, outputs=inpaint_additional_prompt, show_progress=False, queue=False)
with gr.TabItem(label='Describe') as desc_tab:
with gr.Row():
with gr.Column():
@ -231,6 +331,12 @@ with shared.gradio_root:
desc_input_image.upload(trigger_show_image_properties, inputs=desc_input_image,
outputs=desc_image_size, show_progress=False, queue=False)
with gr.TabItem(label='Enhance') as enhance_tab:
with gr.Row():
with gr.Column():
enhance_input_image = grh.Image(label='Use with Enhance, skips image generation', source='upload', type='numpy')
gr.HTML('<a href="https://github.com/mashb1t/Fooocus/discussions/42" target="_blank">\U0001F4D4 Document</a>')
with gr.TabItem(label='Metadata') as metadata_tab:
with gr.Column():
metadata_input_image = grh.Image(label='For images created by Fooocus', source='upload', type='filepath')
@ -252,6 +358,164 @@ with shared.gradio_root:
metadata_input_image.upload(trigger_metadata_preview, inputs=metadata_input_image,
outputs=metadata_json, queue=False, show_progress=True)
with gr.Row(visible=modules.config.default_enhance_checkbox) as enhance_input_panel:
with gr.Tabs():
with gr.TabItem(label='Upscale or Variation'):
with gr.Row():
with gr.Column():
enhance_uov_method = gr.Radio(label='Upscale or Variation:', choices=flags.uov_list,
value=modules.config.default_enhance_uov_method)
enhance_uov_processing_order = gr.Radio(label='Order of Processing',
info='Use before to enhance small details and after to enhance large areas.',
choices=flags.enhancement_uov_processing_order,
value=modules.config.default_enhance_uov_processing_order)
enhance_uov_prompt_type = gr.Radio(label='Prompt',
info='Choose which prompt to use for Upscale or Variation.',
choices=flags.enhancement_uov_prompt_types,
value=modules.config.default_enhance_uov_prompt_type,
visible=modules.config.default_enhance_uov_processing_order == flags.enhancement_uov_after)
enhance_uov_processing_order.change(lambda x: gr.update(visible=x == flags.enhancement_uov_after),
inputs=enhance_uov_processing_order,
outputs=enhance_uov_prompt_type,
queue=False, show_progress=False)
gr.HTML('<a href="https://github.com/mashb1t/Fooocus/discussions/42" target="_blank">\U0001F4D4 Document</a>')
enhance_ctrls = []
enhance_inpaint_mode_ctrls = []
enhance_inpaint_engine_ctrls = []
enhance_inpaint_update_ctrls = []
for index in range(modules.config.default_enhance_tabs):
with gr.TabItem(label=f'#{index + 1}') as enhance_tab_item:
enhance_enabled = gr.Checkbox(label='Enable', value=False, elem_classes='min_check',
container=False)
enhance_mask_dino_prompt_text = gr.Textbox(label='Detection prompt',
info='Use singular whenever possible',
placeholder='Describe what you want to detect.',
interactive=True,
visible=modules.config.default_enhance_inpaint_mask_model == 'sam')
example_enhance_mask_dino_prompt_text = gr.Dataset(
samples=modules.config.example_enhance_detection_prompts,
label='Detection Prompt Quick List',
components=[enhance_mask_dino_prompt_text],
visible=modules.config.default_enhance_inpaint_mask_model == 'sam')
example_enhance_mask_dino_prompt_text.click(lambda x: x[0],
inputs=example_enhance_mask_dino_prompt_text,
outputs=enhance_mask_dino_prompt_text,
show_progress=False, queue=False)
enhance_prompt = gr.Textbox(label="Enhancement positive prompt",
placeholder="Uses original prompt instead if empty.",
elem_id='enhance_prompt')
enhance_negative_prompt = gr.Textbox(label="Enhancement negative prompt",
placeholder="Uses original negative prompt instead if empty.",
elem_id='enhance_negative_prompt')
with gr.Accordion("Detection", open=False):
enhance_mask_model = gr.Dropdown(label='Mask generation model',
choices=flags.inpaint_mask_models,
value=modules.config.default_enhance_inpaint_mask_model)
enhance_mask_cloth_category = gr.Dropdown(label='Cloth category',
choices=flags.inpaint_mask_cloth_category,
value=modules.config.default_inpaint_mask_cloth_category,
visible=modules.config.default_enhance_inpaint_mask_model == 'u2net_cloth_seg',
interactive=True)
with gr.Accordion("SAM Options",
visible=modules.config.default_enhance_inpaint_mask_model == 'sam',
open=False) as sam_options:
enhance_mask_sam_model = gr.Dropdown(label='SAM model',
choices=flags.inpaint_mask_sam_model,
value=modules.config.default_inpaint_mask_sam_model,
interactive=True)
enhance_mask_box_threshold = gr.Slider(label="Box Threshold", minimum=0.0,
maximum=1.0, value=0.3, step=0.05,
interactive=True)
enhance_mask_text_threshold = gr.Slider(label="Text Threshold", minimum=0.0,
maximum=1.0, value=0.25, step=0.05,
interactive=True)
enhance_mask_sam_max_detections = gr.Slider(label="Maximum number of detections",
info="Set to 0 to detect all",
minimum=0, maximum=10,
value=modules.config.default_sam_max_detections,
step=1, interactive=True)
with gr.Accordion("Inpaint", visible=True, open=False):
enhance_inpaint_mode = gr.Dropdown(choices=modules.flags.inpaint_options,
value=modules.config.default_inpaint_method,
label='Method', interactive=True)
enhance_inpaint_disable_initial_latent = gr.Checkbox(
label='Disable initial latent in inpaint', value=False)
enhance_inpaint_engine = gr.Dropdown(label='Inpaint Engine',
value=modules.config.default_inpaint_engine_version,
choices=flags.inpaint_engine_versions,
info='Version of Fooocus inpaint model. If set, use performance Quality or Speed (no performance LoRAs) for best results.')
enhance_inpaint_strength = gr.Slider(label='Inpaint Denoising Strength',
minimum=0.0, maximum=1.0, step=0.001,
value=1.0,
info='Same as the denoising strength in A1111 inpaint. '
'Only used in inpaint, not used in outpaint. '
'(Outpaint always uses 1.0)')
enhance_inpaint_respective_field = gr.Slider(label='Inpaint Respective Field',
minimum=0.0, maximum=1.0, step=0.001,
value=0.618,
info='The area to inpaint. '
'Value 0 is same as "Only Masked" in A1111. '
'Value 1 is same as "Whole Image" in A1111. '
'Only used in inpaint, not used in outpaint. '
'(Outpaint always uses 1.0)')
enhance_inpaint_erode_or_dilate = gr.Slider(label='Mask Erode or Dilate',
minimum=-64, maximum=64, step=1, value=0,
info='Positive value will make white area in the mask larger, '
'negative value will make white area smaller. '
'(default is 0, always processed before any mask invert)')
enhance_mask_invert = gr.Checkbox(label='Invert Mask', value=False)
gr.HTML('<a href="https://github.com/mashb1t/Fooocus/discussions/42" target="_blank">\U0001F4D4 Document</a>')
enhance_ctrls += [
enhance_enabled,
enhance_mask_dino_prompt_text,
enhance_prompt,
enhance_negative_prompt,
enhance_mask_model,
enhance_mask_cloth_category,
enhance_mask_sam_model,
enhance_mask_text_threshold,
enhance_mask_box_threshold,
enhance_mask_sam_max_detections,
enhance_inpaint_disable_initial_latent,
enhance_inpaint_engine,
enhance_inpaint_strength,
enhance_inpaint_respective_field,
enhance_inpaint_erode_or_dilate,
enhance_mask_invert
]
enhance_inpaint_mode_ctrls += [enhance_inpaint_mode]
enhance_inpaint_engine_ctrls += [enhance_inpaint_engine]
enhance_inpaint_update_ctrls += [[
enhance_inpaint_mode, enhance_inpaint_disable_initial_latent, enhance_inpaint_engine,
enhance_inpaint_strength, enhance_inpaint_respective_field
]]
enhance_inpaint_mode.change(inpaint_mode_change, inputs=[enhance_inpaint_mode, inpaint_engine_state], outputs=[
inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts,
enhance_inpaint_disable_initial_latent, enhance_inpaint_engine,
enhance_inpaint_strength, enhance_inpaint_respective_field
], show_progress=False, queue=False)
enhance_mask_model.change(
lambda x: [gr.update(visible=x == 'u2net_cloth_seg')] +
[gr.update(visible=x == 'sam')] * 2 +
[gr.Dataset.update(visible=x == 'sam',
samples=modules.config.example_enhance_detection_prompts)],
inputs=enhance_mask_model,
outputs=[enhance_mask_cloth_category, enhance_mask_dino_prompt_text, sam_options,
example_enhance_mask_dino_prompt_text],
queue=False, show_progress=False)
switch_js = "(x) => {if(x){viewer_to_bottom(100);viewer_to_bottom(500);}else{viewer_to_top();} return x;}"
down_js = "() => {viewer_to_bottom();}"
@ -264,19 +528,25 @@ with shared.gradio_root:
inpaint_tab.select(lambda: 'inpaint', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
ip_tab.select(lambda: 'ip', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
desc_tab.select(lambda: 'desc', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
enhance_tab.select(lambda: 'enhance', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
metadata_tab.select(lambda: 'metadata', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
enhance_checkbox.change(lambda x: gr.update(visible=x), inputs=enhance_checkbox,
outputs=enhance_input_panel, queue=False, show_progress=False, _js=switch_js)
with gr.Column(scale=1, visible=modules.config.default_advanced_checkbox) as advanced_column:
with gr.Tab(label='Setting'):
with gr.Tab(label='Settings'):
if not args_manager.args.disable_preset_selection:
preset_selection = gr.Dropdown(label='Preset',
choices=modules.config.available_presets,
value=args_manager.args.preset if args_manager.args.preset else "initial",
interactive=True)
performance_selection = gr.Radio(label='Performance',
choices=flags.Performance.list(),
info='* = restricted feature set, intermediate results disabled',
choices=modules.flags.performance_selections,
value=modules.config.default_performance,
elem_classes=['performance_selection'])
elem_classes='performance_selections')
with gr.Accordion(label='Aspect Ratios', open=False, elem_id='aspect_ratios_accordion') as aspect_ratios_accordion:
aspect_ratios_selection = gr.Radio(label='Aspect Ratios', show_label=False,
choices=modules.config.available_aspect_ratios_labels,
@ -297,6 +567,9 @@ with shared.gradio_root:
info='Describing what you do not want to see.', lines=2,
elem_id='negative_prompt',
value=modules.config.default_prompt_negative)
translate_prompts = gr.Checkbox(label='Translate Prompts',
info='Uses the internet to translate prompts to English.',
value=False)
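# Aside for context: the new Translate Prompts option presumably routes through the
# 'translators' package added to the requirements, roughly like this (the backend
# and call site are assumptions, not taken from this diff):
#
#     import translators as ts
#     english_prompt = ts.translate_text(prompt_text, to_language='en')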
seed_random = gr.Checkbox(label='Random', value=True)
image_seed = gr.Textbox(label='Seed', value=0, max_lines=1, visible=False) # workaround for https://github.com/gradio-app/gradio/issues/5354
@ -327,7 +600,7 @@ with shared.gradio_root:
history_link = gr.HTML()
shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False)
with gr.Tab(label='Style', elem_classes=['style_selections_tab']):
with gr.Tab(label='Styles', elem_classes=['style_selections_tab']):
style_sorter.try_load_sorted_styles(
style_names=legal_style_names,
default_selected=modules.config.default_styles)
@ -360,7 +633,7 @@ with shared.gradio_root:
show_progress=False).then(
lambda: None, _js='()=>{refresh_style_localization();}')
with gr.Tab(label='Model'):
with gr.Tab(label='Models'):
with gr.Group():
with gr.Row():
base_model = gr.Dropdown(label='Base Model (SDXL only)', choices=modules.config.model_filenames, value=modules.config.default_base_model_name, show_label=True)
@ -455,21 +728,23 @@ with shared.gradio_root:
minimum=-1, maximum=1.0, step=0.001, value=-1,
info='Set as negative number to disable. For developer debugging.')
overwrite_upscale_strength = gr.Slider(label='Forced Overwrite of Denoising Strength of "Upscale"',
minimum=-1, maximum=1.0, step=0.001, value=-1,
minimum=-1, maximum=1.0, step=0.001,
value=modules.config.default_overwrite_upscale,
info='Set as negative number to disable. For developer debugging.')
disable_preview = gr.Checkbox(label='Disable Preview', value=modules.config.default_black_out_nsfw,
interactive=not modules.config.default_black_out_nsfw,
info='Disable preview during generation.')
disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
value=flags.Performance.has_restricted_features(modules.config.default_performance),
info='Disable intermediate results during generation, only show final gallery.')
disable_seed_increment = gr.Checkbox(label='Disable seed increment',
info='Disable automatic seed increment when image number is > 1.',
value=False)
read_wildcards_in_order = gr.Checkbox(label="Read wildcards in order", value=False)
black_out_nsfw = gr.Checkbox(label='Black Out NSFW',
value=modules.config.default_black_out_nsfw,
black_out_nsfw = gr.Checkbox(label='Black Out NSFW', value=modules.config.default_black_out_nsfw,
interactive=not modules.config.default_black_out_nsfw,
info='Use black image if NSFW is detected.')
@ -510,11 +785,15 @@ with shared.gradio_root:
with gr.Tab(label='Inpaint'):
debugging_inpaint_preprocessor = gr.Checkbox(label='Debug Inpaint Preprocessing', value=False)
debugging_enhance_masks_checkbox = gr.Checkbox(label='Debug Enhance Masks', value=False,
info='Show enhance masks in preview and final results')
debugging_dino = gr.Checkbox(label='Debug GroundingDINO', value=False,
info='Use GroundingDINO boxes instead of more detailed SAM masks')
inpaint_disable_initial_latent = gr.Checkbox(label='Disable initial latent in inpaint', value=False)
inpaint_engine = gr.Dropdown(label='Inpaint Engine',
value=modules.config.default_inpaint_engine_version,
choices=flags.inpaint_engine_versions,
info='Version of Fooocus inpaint model')
info='Version of Fooocus inpaint model. If set, use performance Quality or Speed (no performance LoRAs) for best results.')
inpaint_strength = gr.Slider(label='Inpaint Denoising Strength',
minimum=0.0, maximum=1.0, step=0.001, value=1.0,
info='Same as the denoising strength in A1111 inpaint. '
@ -530,21 +809,24 @@ with shared.gradio_root:
inpaint_erode_or_dilate = gr.Slider(label='Mask Erode or Dilate',
minimum=-64, maximum=64, step=1, value=0,
info='Positive value will make white area in the mask larger, '
'negative value will make white area smaller.'
'(default is 0, always process before any mask invert)')
inpaint_mask_upload_checkbox = gr.Checkbox(label='Enable Mask Upload', value=False)
invert_mask_checkbox = gr.Checkbox(label='Invert Mask', value=False)
'negative value will make white area smaller. '
'(default is 0, always processed before any mask invert)')
dino_erode_or_dilate = gr.Slider(label='GroundingDINO Box Erode or Dilate',
minimum=-64, maximum=64, step=1, value=0,
info='Positive value will make white area in the mask larger, '
'negative value will make white area smaller. '
'(default is 0, processed before SAM)')
inpaint_mask_color = gr.ColorPicker(label='Inpaint brush color', value='#FFFFFF', elem_id='inpaint_brush_color')
inpaint_ctrls = [debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine,
inpaint_strength, inpaint_respective_field,
inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate]
inpaint_advanced_masking_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate]
inpaint_mask_upload_checkbox.change(lambda x: gr.update(visible=x),
inputs=inpaint_mask_upload_checkbox,
outputs=inpaint_mask_image, queue=False,
show_progress=False)
inpaint_advanced_masking_checkbox.change(lambda x: [gr.update(visible=x)] * 2,
inputs=inpaint_advanced_masking_checkbox,
outputs=[inpaint_mask_image, inpaint_mask_generation_col],
queue=False, show_progress=False)
inpaint_mask_color.change(lambda x: gr.update(brush_color=x), inputs=inpaint_mask_color,
outputs=inpaint_input_image,
@ -582,6 +864,24 @@ with shared.gradio_root:
refresh_files.click(refresh_files_clicked, [], refresh_files_output + lora_ctrls,
queue=False, show_progress=False)
with gr.Tab(label='Audio'):
play_notification = gr.Checkbox(label='Play notification after rendering', value=False)
notification_file = 'notification.mp3'
if os.path.exists(notification_file):
notification = gr.State(value=notification_file)
notification_input = gr.Audio(label='Notification', interactive=True, elem_id='audio_notification', visible=False, show_edit_button=False)
def play_notification_checked(r, notification):
    # show the hidden audio element and load the configured file only while enabled
    return gr.update(visible=r, value=notification if r else None)
def notification_input_changed(notification_input, notification):
    # keep the previous notification sound unless a new file was uploaded
    if notification_input:
        notification = notification_input
    return notification
play_notification.change(fn=play_notification_checked, inputs=[play_notification, notification], outputs=[notification_input], queue=False)
notification_input.change(fn=notification_input_changed, inputs=[notification_input, notification], outputs=[notification], queue=False)
state_is_generating = gr.State(False)
load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections,
@ -589,10 +889,12 @@ with shared.gradio_root:
overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive,
adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, clip_skip,
base_model, refiner_model, refiner_switch, sampler_name, scheduler_name, vae_name,
seed_random, image_seed, generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls
seed_random, image_seed, inpaint_engine, inpaint_engine_state,
inpaint_mode] + enhance_inpaint_mode_ctrls + [generate_button,
load_parameter_button] + freeu_ctrls + lora_ctrls
if not args_manager.args.disable_preset_selection:
def preset_selection_change(preset, is_generating):
def preset_selection_change(preset, is_generating, inpaint_mode):
preset_content = modules.config.try_get_preset_content(preset) if preset != 'initial' else {}
preset_prepared = modules.meta_parser.parse_meta_from_preset(preset_content)
@ -601,17 +903,35 @@ with shared.gradio_root:
checkpoint_downloads = preset_prepared.get('checkpoint_downloads', {})
embeddings_downloads = preset_prepared.get('embeddings_downloads', {})
lora_downloads = preset_prepared.get('lora_downloads', {})
vae_downloads = preset_prepared.get('vae_downloads', {})
preset_prepared['base_model'], preset_prepared['lora_downloads'] = launch.download_models(
default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads)
preset_prepared['base_model'], preset_prepared['checkpoint_downloads'] = launch.download_models(
default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads,
vae_downloads)
if 'prompt' in preset_prepared and preset_prepared.get('prompt') == '':
del preset_prepared['prompt']
return modules.meta_parser.load_parameter_button_click(json.dumps(preset_prepared), is_generating)
return modules.meta_parser.load_parameter_button_click(json.dumps(preset_prepared), is_generating, inpaint_mode)
preset_selection.change(preset_selection_change, inputs=[preset_selection, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
.then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)
def inpaint_engine_state_change(inpaint_engine_version, *args):
if inpaint_engine_version == 'empty':
inpaint_engine_version = modules.config.default_inpaint_engine_version
result = []
for inpaint_mode in args:
if inpaint_mode != modules.flags.inpaint_option_detail:
result.append(gr.update(value=inpaint_engine_version))
else:
result.append(gr.update())
return result
preset_selection.change(preset_selection_change, inputs=[preset_selection, state_is_generating, inpaint_mode], outputs=load_data_outputs, queue=False, show_progress=True) \
.then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
.then(lambda: None, _js='()=>{refresh_style_localization();}') \
.then(inpaint_engine_state_change, inputs=[inpaint_engine_state] + enhance_inpaint_mode_ctrls, outputs=enhance_inpaint_engine_ctrls, queue=False, show_progress=False)
performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 +
[gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 +
@ -629,42 +949,30 @@ with shared.gradio_root:
queue=False, show_progress=False) \
.then(fn=lambda: None, _js='refresh_grid_delayed', queue=False, show_progress=False)
def inpaint_mode_change(mode):
assert mode in modules.flags.inpaint_options
# inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts,
# inpaint_disable_initial_latent, inpaint_engine,
# inpaint_strength, inpaint_respective_field
if mode == modules.flags.inpaint_option_detail:
return [
gr.update(visible=True), gr.update(visible=False, value=[]),
gr.Dataset.update(visible=True, samples=modules.config.example_inpaint_prompts),
False, 'None', 0.5, 0.0
]
if mode == modules.flags.inpaint_option_modify:
return [
gr.update(visible=True), gr.update(visible=False, value=[]),
gr.Dataset.update(visible=False, samples=modules.config.example_inpaint_prompts),
True, modules.config.default_inpaint_engine_version, 1.0, 0.0
]
return [
gr.update(visible=False, value=''), gr.update(visible=True),
gr.Dataset.update(visible=False, samples=modules.config.example_inpaint_prompts),
False, modules.config.default_inpaint_engine_version, 1.0, 0.618
]
inpaint_mode.input(inpaint_mode_change, inputs=inpaint_mode, outputs=[
inpaint_mode.change(inpaint_mode_change, inputs=[inpaint_mode, inpaint_engine_state], outputs=[
inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts,
inpaint_disable_initial_latent, inpaint_engine,
inpaint_strength, inpaint_respective_field
], show_progress=False, queue=False)
# load configured default_inpaint_method
default_inpaint_ctrls = [inpaint_mode, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field]
for mode, disable_initial_latent, engine, strength, respective_field in [default_inpaint_ctrls] + enhance_inpaint_update_ctrls:
shared.gradio_root.load(inpaint_mode_change, inputs=[mode, inpaint_engine_state], outputs=[
inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts, disable_initial_latent,
engine, strength, respective_field
], show_progress=False, queue=False)
generate_mask_button.click(fn=generate_mask,
inputs=[inpaint_input_image, inpaint_mask_model, inpaint_mask_cloth_category,
inpaint_mask_dino_prompt_text, inpaint_mask_sam_model,
inpaint_mask_box_threshold, inpaint_mask_text_threshold,
inpaint_mask_sam_max_detections, dino_erode_or_dilate, debugging_dino],
outputs=inpaint_mask_image, show_progress=True, queue=True)
ctrls = [currentTask, generate_image_grid]
ctrls += [
prompt, negative_prompt, style_selections,
prompt, negative_prompt, translate_prompts, style_selections,
performance_selection, aspect_ratios_selection, image_number, output_format, image_seed,
read_wildcards_in_order, sharpness, guidance_scale
]
@ -687,6 +995,10 @@ with shared.gradio_root:
ctrls += [save_metadata_to_images, metadata_scheme]
ctrls += ip_ctrls
ctrls += [debugging_dino, dino_erode_or_dilate, debugging_enhance_masks_checkbox,
enhance_input_image, enhance_checkbox, enhance_uov_method, enhance_uov_processing_order,
enhance_uov_prompt_type]
ctrls += enhance_ctrls
def parse_meta(raw_prompt_txt, is_generating):
loaded_json = None
@ -703,7 +1015,7 @@ with shared.gradio_root:
prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False)
load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False)
load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating, inpaint_mode], outputs=load_data_outputs, queue=False, show_progress=False)
def trigger_metadata_import(filepath, state_is_generating):
parameters, metadata_scheme = modules.meta_parser.read_info_from_image(filepath)
@ -714,7 +1026,7 @@ with shared.gradio_root:
metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
parsed_parameters = metadata_parser.to_json(parameters)
return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating)
return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating, inpaint_mode)
metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
.then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)
@ -737,11 +1049,6 @@ with shared.gradio_root:
progress_html, progress_window, progress_gallery, gallery],
queue=False)
for notification_file in ['notification.ogg', 'notification.mp3']:
if os.path.exists(notification_file):
gr.Audio(interactive=False, value=notification_file, elem_id='audio_notification', visible=False)
break
def trigger_describe(mode, img):
if mode == flags.desc_type_photo:
from extras.interrogate import default_interrogator as default_interrogator_photo
@ -754,15 +1061,18 @@ with shared.gradio_root:
desc_btn.click(trigger_describe, inputs=[desc_method, desc_input_image],
outputs=[prompt, style_selections], show_progress=True, queue=True)
if args_manager.args.enable_describe_uov_image:
def trigger_uov_describe(mode, img, prompt):
if args_manager.args.enable_auto_describe_image:
def trigger_auto_describe(mode, img, prompt):
# keep prompt if not empty
if prompt == '':
return trigger_describe(mode, img)
return gr.update(), gr.update()
uov_input_image.upload(trigger_uov_describe, inputs=[desc_method, uov_input_image, prompt],
outputs=[prompt, style_selections], show_progress=True, queue=True)
uov_input_image.upload(trigger_auto_describe, inputs=[desc_method, uov_input_image, prompt],
outputs=[prompt, style_selections], show_progress=True, queue=True)
enhance_input_image.upload(lambda: gr.update(value=True), outputs=enhance_checkbox, queue=False, show_progress=False) \
.then(trigger_auto_describe, inputs=[desc_method, enhance_input_image, prompt], outputs=[prompt, style_selections], show_progress=True, queue=True)
def dump_default_english_config():
from modules.localization import dump_english_config