Compare commits
87 Commits
| Author | SHA1 | Date |
|---|---|---|
| | ae05379cc9 | |
| | 59f183ab9b | |
| | 4b5021f8f6 | |
| | d7439b2d60 | |
| | 670d798332 | |
| | 8da1d3ff68 | |
| | 710a9fa2c5 | |
| | 251a130f06 | |
| | 0a87da7dc1 | |
| | 1d98d1c760 | |
| | 1068d3fde4 | |
| | 082a5262b0 | |
| | 14895ebb13 | |
| | b0d16a3aa7 | |
| | fd74b57f56 | |
| | 8bd9ea1dbf | |
| | ee12d114c1 | |
| | 2c78cec01d | |
| | ef0acca9f9 | |
| | 60af8d2d84 | |
| | 39d07bf0f3 | |
| | f0dcf5a911 | |
| | c4d5b160be | |
| | 2f08cb4360 | |
| | da3d4d006f | |
| | c2dc17e883 | |
| | 1a53e0676a | |
| | a5040f6218 | |
| | 3f25b885a7 | |
| | e36fa0b5f7 | |
| | 1be3c504ed | |
| | c4ce2ce600 | |
| | 03655fa5ea | |
| | a9248c8e46 | |
| | 37360e95fe | |
| | 54985596e8 | |
| | 3a20e14ca0 | |
| | 2262061145 | |
| | 56928b769b | |
| | 2e8cff296e | |
| | f597bf1ab6 | |
| | f97adafc09 | |
| | 97a8475a62 | |
| | 033cb90e6e | |
| | aed3240ccd | |
| | 4f12bbb02b | |
| | 9f93cf6110 | |
| | 1f429ffeda | |
| | 8d67166dd1 | |
| | 3a86fa2f0d | |
| | ef8dd27f91 | |
| | d46e47ab3d | |
| | 069bea534b | |
| | e0d3325894 | |
| | 5a1003a726 | |
| | 5e8110e430 | |
| | ee02643020 | |
| | e1f4b65fc9 | |
| | f2a21900c6 | |
| | 5a71495822 | |
| | 34f67c01a8 | |
| | 9178aa8ebb | |
| | 7c1a101c0f | |
| | 9d41c9521b | |
| | 3e453501f7 | |
| | 55ef7608ea | |
| | ba77e7f706 | |
| | 5abae220c5 | |
| | 04d764820e | |
| | 350fdd9021 | |
| | 85a8deecee | |
| | b58bc7774e | |
| | 2d55a5f257 | |
| | cb24c686b0 | |
| | ab01104d42 | |
| | 3d43976e8e | |
| | 07c6c89edf | |
| | 7899261755 | |
| | 64c29a8c43 | |
| | 4e658bb63a | |
| | 3ef663c5b7 | |
| | bf70815a66 | |
| | 725bf05c31 | |
| | 4a070a9d61 | |
| | 0e621ae34e | |
| | dfff9b7dcf | |
| | 989a1ad52b | |
```diff
@@ -1 +1 @@
-* @mashb1t
+* @lllyasviel
```
```diff
@@ -16,9 +16,9 @@ body:
       description: |
         Please perform basic debugging to see if your configuration is the cause of the issue.
         Basic debug procedure
-        2. Update Fooocus - sometimes things just need to be updated
-        3. Backup and remove your config.txt - check if the issue is caused by bad configuration
-        5. Try a fresh installation of Fooocus in a different directory - see if a clean installation solves the issue
+        1. Update Fooocus - sometimes things just need to be updated
+        2. Backup and remove your config.txt - check if the issue is caused by bad configuration
+        3. Try a fresh installation of Fooocus in a different directory - see if a clean installation solves the issue
         Before making a issue report please, check that the issue hasn't been reported recently.
       options:
         - label: The issue has not been resolved by following the [troubleshooting guide](https://github.com/lllyasviel/Fooocus/blob/main/troubleshoot.md)
```
```diff
@@ -1,9 +1,11 @@
-name: Create and publish a container image
+name: Docker image build

 on:
   push:
+    branches:
+      - main
     tags:
-      - 'v*'
+      - v*

 jobs:
   build-and-push-image:
@@ -15,7 +17,7 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Log in to the Container registry
         uses: docker/login-action@v3
@@ -33,9 +35,10 @@ jobs:
             type=semver,pattern={{version}}
             type=semver,pattern={{major}}.{{minor}}
             type=semver,pattern={{major}}
+            type=edge,branch=main

       - name: Build and push Docker image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           file: ./Dockerfile
```
```diff
@@ -10,6 +10,7 @@ __pycache__
 *.partial
+*.onnx
 sorted_styles.json
 hash_cache.txt
 /input
 /cache
 /language/default.json
```
```diff
@@ -23,7 +23,7 @@ RUN chown -R user:user /content
 WORKDIR /content
 USER user

-COPY . /content/app
+COPY --chown=user:user . /content/app
 RUN mv /content/app/models /content/app/models.org

 CMD [ "sh", "-c", "/content/entrypoint.sh ${CMDARGS}" ]
```
```diff
@@ -1,7 +1,4 @@
 import ldm_patched.modules.args_parser as args_parser
-import os
-
-from tempfile import gettempdir

 args_parser.parser.add_argument("--share", action='store_true', help="Set whether to share on Gradio.")

@@ -20,7 +17,7 @@ args_parser.parser.add_argument("--disable-offload-from-vram", action="store_tru

 args_parser.parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
 args_parser.parser.add_argument("--disable-image-log", action='store_true',
-                                help="Prevent writing images and logs to hard drive.")
+                                help="Prevent writing images and logs to the outputs folder.")

 args_parser.parser.add_argument("--disable-analytics", action='store_true',
                                 help="Disables analytics for Gradio.")
@@ -31,11 +28,17 @@ args_parser.parser.add_argument("--disable-metadata", action='store_true',
 args_parser.parser.add_argument("--disable-preset-download", action='store_true',
                                 help="Disables downloading models for presets", default=False)

-args_parser.parser.add_argument("--enable-describe-uov-image", action='store_true',
-                                help="Disables automatic description of uov images when prompt is empty", default=False)
+args_parser.parser.add_argument("--disable-enhance-output-sorting", action='store_true',
+                                help="Disables enhance output sorting for final image gallery.")
+
+args_parser.parser.add_argument("--enable-auto-describe-image", action='store_true',
+                                help="Enables automatic description of uov and enhance image when prompt is empty", default=False)

 args_parser.parser.add_argument("--always-download-new-model", action='store_true',
-                                help="Always download newer models ", default=False)
+                                help="Always download newer models", default=False)
+
+args_parser.parser.add_argument("--rebuild-hash-cache", help="Generates missing model and LoRA hashes.",
+                                type=int, nargs="?", metavar="CPU_NUM_THREADS", const=-1)

 args_parser.parser.set_defaults(
     disable_cuda_malloc=True,
```
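A note on the new `--rebuild-hash-cache` flag: the `nargs="?"`/`const` combination gives it three states. A minimal standalone sketch of that argparse pattern (illustrative, not Fooocus code):

```python
import argparse

# Same pattern as --rebuild-hash-cache above: flag absent -> None,
# bare flag -> const (-1 as a sentinel), flag with a value -> that int.
parser = argparse.ArgumentParser()
parser.add_argument("--rebuild-hash-cache", type=int, nargs="?",
                    metavar="CPU_NUM_THREADS", const=-1)

print(parser.parse_args([]).rebuild_hash_cache)                             # None
print(parser.parse_args(["--rebuild-hash-cache"]).rebuild_hash_cache)       # -1
print(parser.parse_args(["--rebuild-hash-cache", "8"]).rebuild_hash_cache)  # 8
```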
```diff
@@ -74,31 +74,35 @@ progress::after {
     text-align: right;
     width: 215px;
 }

-.type_row{
-    height: 80px !important;
+div:has(> #positive_prompt) {
+    border: none;
 }

-.type_row_half{
-    height: 32px !important;
+#positive_prompt {
+    padding: 1px;
+    background: var(--background-fill-primary);
 }

-.scroll-hide{
-    resize: none !important;
+.type_row {
+    height: 84px !important;
 }

-.refresh_button{
+.type_row_half {
+    height: 34px !important;
+}
+
+.refresh_button {
     border: none !important;
     background: none !important;
     font-size: none !important;
     box-shadow: none !important;
 }

-.advanced_check_row{
-    width: 250px !important;
+.advanced_check_row {
+    width: 330px !important;
 }

-.min_check{
+.min_check {
     min-width: min(1px, 100%) !important;
 }
```
```diff
@@ -0,0 +1,24 @@
+# https://github.com/sail-sg/EditAnything/blob/main/sam2groundingdino_edit.py
+
+import numpy as np
+from PIL import Image
+
+from extras.inpaint_mask import SAMOptions, generate_mask_from_image
+
+original_image = Image.open('cat.webp')
+image = np.array(original_image, dtype=np.uint8)
+
+sam_options = SAMOptions(
+    dino_prompt='eye',
+    dino_box_threshold=0.3,
+    dino_text_threshold=0.25,
+    dino_erode_or_dilate=0,
+    dino_debug=False,
+    max_detections=2,
+    model_type='vit_b'
+)
+
+mask_image, _, _, _ = generate_mask_from_image(image, sam_options=sam_options)
+
+merged_masks_img = Image.fromarray(mask_image)
+merged_masks_img.show()
```
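The new file above doubles as a smoke test for the masking pipeline. A minimal variation (hypothetical usage of the same API) that saves the merged mask instead of displaying it:

```python
import numpy as np
from PIL import Image

from extras.inpaint_mask import SAMOptions, generate_mask_from_image

# 'cat.webp' and the output name are illustrative placeholders.
image = np.array(Image.open('cat.webp'), dtype=np.uint8)
options = SAMOptions(dino_prompt='eye', max_detections=2, model_type='vit_b')
mask, dino_count, sam_count, on_mask_count = generate_mask_from_image(image, sam_options=options)
Image.fromarray(mask).save('cat_eye_mask.png')
print(f'DINO boxes: {dino_count}, SAM masks: {sam_count}, merged: {on_mask_count}')
```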
```diff
@@ -216,9 +216,9 @@ def is_url(url_or_filename):
 def load_checkpoint(model,url_or_filename):
     if is_url(url_or_filename):
         cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
-        checkpoint = torch.load(cached_file, map_location='cpu')
+        checkpoint = torch.load(cached_file, map_location='cpu', weights_only=True)
     elif os.path.isfile(url_or_filename):
-        checkpoint = torch.load(url_or_filename, map_location='cpu')
+        checkpoint = torch.load(url_or_filename, map_location='cpu', weights_only=True)
     else:
         raise RuntimeError('checkpoint url or path is invalid')
```
```diff
@@ -78,9 +78,9 @@ def blip_nlvr(pretrained='',**kwargs):
 def load_checkpoint(model,url_or_filename):
     if is_url(url_or_filename):
         cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
-        checkpoint = torch.load(cached_file, map_location='cpu')
+        checkpoint = torch.load(cached_file, map_location='cpu', weights_only=True)
     elif os.path.isfile(url_or_filename):
-        checkpoint = torch.load(url_or_filename, map_location='cpu')
+        checkpoint = torch.load(url_or_filename, map_location='cpu', weights_only=True)
     else:
         raise RuntimeError('checkpoint url or path is invalid')
     state_dict = checkpoint['model']
```
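Both BLIP checkpoint loaders (and several loaders below) gain `weights_only=True`. With that flag, `torch.load` restricts unpickling to tensors and plain containers, so a tampered checkpoint can no longer execute arbitrary code at load time; it raises instead if the file contains other Python objects. A standalone sketch (the file name is a placeholder):

```python
import torch

# Safe-by-default loading: only tensors/containers are unpickled.
# Raises (rather than executing code) if the pickle holds anything else.
checkpoint = torch.load('model_base.pth', map_location='cpu', weights_only=True)
state_dict = checkpoint['model']
```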
```diff
@@ -0,0 +1,43 @@
+batch_size = 1
+modelname = "groundingdino"
+backbone = "swin_T_224_1k"
+position_embedding = "sine"
+pe_temperatureH = 20
+pe_temperatureW = 20
+return_interm_indices = [1, 2, 3]
+backbone_freeze_keywords = None
+enc_layers = 6
+dec_layers = 6
+pre_norm = False
+dim_feedforward = 2048
+hidden_dim = 256
+dropout = 0.0
+nheads = 8
+num_queries = 900
+query_dim = 4
+num_patterns = 0
+num_feature_levels = 4
+enc_n_points = 4
+dec_n_points = 4
+two_stage_type = "standard"
+two_stage_bbox_embed_share = False
+two_stage_class_embed_share = False
+transformer_activation = "relu"
+dec_pred_bbox_embed_share = True
+dn_box_noise_scale = 1.0
+dn_label_noise_ratio = 0.5
+dn_label_coef = 1.0
+dn_bbox_coef = 1.0
+embed_init_tgt = True
+dn_labelbook_size = 2000
+max_text_len = 256
+text_encoder_type = "bert-base-uncased"
+use_text_enhancer = True
+use_fusion_layer = True
+use_checkpoint = True
+use_transformer_ckpt = True
+use_text_cross_attention = True
+text_dropout = 0.0
+fusion_dropout = 0.0
+fusion_droppath = 0.1
+sub_sentence_present = True
```
```diff
@@ -0,0 +1,100 @@
+from typing import Tuple, List
+
+import ldm_patched.modules.model_management as model_management
+from ldm_patched.modules.model_patcher import ModelPatcher
+from modules.config import path_inpaint
+from modules.model_loader import load_file_from_url
+
+import numpy as np
+import supervision as sv
+import torch
+from groundingdino.util.inference import Model
+from groundingdino.util.inference import load_model, preprocess_caption, get_phrases_from_posmap
+
+
+class GroundingDinoModel(Model):
+    def __init__(self):
+        self.config_file = 'extras/GroundingDINO/config/GroundingDINO_SwinT_OGC.py'
+        self.model = None
+        self.load_device = torch.device('cpu')
+        self.offload_device = torch.device('cpu')
+
+    @torch.no_grad()
+    @torch.inference_mode()
+    def predict_with_caption(
+            self,
+            image: np.ndarray,
+            caption: str,
+            box_threshold: float = 0.35,
+            text_threshold: float = 0.25
+    ) -> Tuple[sv.Detections, torch.Tensor, torch.Tensor, List[str]]:
+        if self.model is None:
+            filename = load_file_from_url(
+                url="https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth",
+                file_name='groundingdino_swint_ogc.pth',
+                model_dir=path_inpaint)
+            model = load_model(model_config_path=self.config_file, model_checkpoint_path=filename)
+
+            self.load_device = model_management.text_encoder_device()
+            self.offload_device = model_management.text_encoder_offload_device()
+
+            model.to(self.offload_device)
+
+            self.model = ModelPatcher(model, load_device=self.load_device, offload_device=self.offload_device)
+
+        model_management.load_model_gpu(self.model)
+
+        processed_image = GroundingDinoModel.preprocess_image(image_bgr=image).to(self.load_device)
+        boxes, logits, phrases = predict(
+            model=self.model,
+            image=processed_image,
+            caption=caption,
+            box_threshold=box_threshold,
+            text_threshold=text_threshold,
+            device=self.load_device)
+        source_h, source_w, _ = image.shape
+        detections = GroundingDinoModel.post_process_result(
+            source_h=source_h,
+            source_w=source_w,
+            boxes=boxes,
+            logits=logits)
+        return detections, boxes, logits, phrases
+
+
+def predict(
+        model,
+        image: torch.Tensor,
+        caption: str,
+        box_threshold: float,
+        text_threshold: float,
+        device: str = "cuda"
+) -> Tuple[torch.Tensor, torch.Tensor, List[str]]:
+    caption = preprocess_caption(caption=caption)
+
+    # override to use model wrapped by patcher
+    model = model.model.to(device)
+    image = image.to(device)
+
+    with torch.no_grad():
+        outputs = model(image[None], captions=[caption])
+
+    prediction_logits = outputs["pred_logits"].cpu().sigmoid()[0]  # prediction_logits.shape = (nq, 256)
+    prediction_boxes = outputs["pred_boxes"].cpu()[0]  # prediction_boxes.shape = (nq, 4)
+
+    mask = prediction_logits.max(dim=1)[0] > box_threshold
+    logits = prediction_logits[mask]  # logits.shape = (n, 256)
+    boxes = prediction_boxes[mask]  # boxes.shape = (n, 4)
+
+    tokenizer = model.tokenizer
+    tokenized = tokenizer(caption)
+
+    phrases = [
+        get_phrases_from_posmap(logit > text_threshold, tokenized, tokenizer).replace('.', '')
+        for logit
+        in logits
+    ]
+
+    return boxes, logits.max(dim=1)[0], phrases
+
+
+default_groundingdino = GroundingDinoModel().predict_with_caption
```
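`default_groundingdino` is a bound method on a module-level singleton, so the SwinT checkpoint is downloaded and wrapped in a `ModelPatcher` once and then reused across calls. A minimal call sketch (the zero image is a placeholder):

```python
import numpy as np

from extras.GroundingDINO.util.inference import default_groundingdino

image = np.zeros((512, 512, 3), dtype=np.uint8)  # placeholder HWC uint8 image
detections, boxes, logits, phrases = default_groundingdino(
    image=image, caption='eye', box_threshold=0.3, text_threshold=0.25)
print(len(phrases), boxes.shape)
```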
```diff
@@ -41,7 +41,7 @@ class Censor:
         model_management.load_model_gpu(self.safety_checker_model)

         single = False
-        if not isinstance(images, list) or isinstance(images, np.ndarray):
+        if not isinstance(images, (list, np.ndarray)):
             images = [images]
             single = True
```
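The behavioral change in the Censor fix is how `np.ndarray` input is classified: the old expression wrapped an array in another list (any non-list satisfied the first clause), while the tuple form treats arrays like lists, i.e. as already-batched input. A quick check of the two predicates:

```python
import numpy as np

old = lambda images: not isinstance(images, list) or isinstance(images, np.ndarray)
new = lambda images: not isinstance(images, (list, np.ndarray))

batch = np.zeros((2, 8, 8, 3), dtype=np.uint8)
print(old(batch), new(batch))      # True False -> the array is no longer re-wrapped
print(old([batch]), new([batch]))  # False False -> lists behave as before
```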
```diff
@@ -19,7 +19,7 @@ def init_detection_model(model_name, half=False, device='cuda', model_rootpath=N
         url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)

     # TODO: clean pretrained model
-    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
+    load_net = torch.load(model_path, map_location=lambda storage, loc: storage, weights_only=True)
     # remove unnecessary 'module.'
     for k, v in deepcopy(load_net).items():
         if k.startswith('module.'):
```
```diff
@@ -17,7 +17,7 @@ def init_parsing_model(model_name='bisenet', half=False, device='cuda', model_ro

     model_path = load_file_from_url(
         url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)
-    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
+    load_net = torch.load(model_path, map_location=lambda storage, loc: storage, weights_only=True)
     model.load_state_dict(load_net, strict=True)
     model.eval()
     model = model.to(device)
```
```diff
@@ -0,0 +1,130 @@
+import sys
+
+import modules.config
+import numpy as np
+import torch
+from extras.GroundingDINO.util.inference import default_groundingdino
+from extras.sam.predictor import SamPredictor
+from rembg import remove, new_session
+from segment_anything import sam_model_registry
+from segment_anything.utils.amg import remove_small_regions
+
+
+class SAMOptions:
+    def __init__(self,
+                 # GroundingDINO
+                 dino_prompt: str = '',
+                 dino_box_threshold=0.3,
+                 dino_text_threshold=0.25,
+                 dino_erode_or_dilate=0,
+                 dino_debug=False,
+
+                 # SAM
+                 max_detections=2,
+                 model_type='vit_b'
+                 ):
+        self.dino_prompt = dino_prompt
+        self.dino_box_threshold = dino_box_threshold
+        self.dino_text_threshold = dino_text_threshold
+        self.dino_erode_or_dilate = dino_erode_or_dilate
+        self.dino_debug = dino_debug
+        self.max_detections = max_detections
+        self.model_type = model_type
+
+
+def optimize_masks(masks: torch.Tensor) -> torch.Tensor:
+    """
+    removes small disconnected regions and holes
+    """
+    fine_masks = []
+    for mask in masks.to('cpu').numpy():  # masks: [num_masks, 1, h, w]
+        fine_masks.append(remove_small_regions(mask[0], 400, mode="holes")[0])
+    masks = np.stack(fine_masks, axis=0)[:, np.newaxis]
+    return torch.from_numpy(masks)
+
+
+def generate_mask_from_image(image: np.ndarray, mask_model: str = 'sam', extras=None,
+                             sam_options: SAMOptions | None = SAMOptions) -> tuple[np.ndarray | None, int | None, int | None, int | None]:
+    dino_detection_count = 0
+    sam_detection_count = 0
+    sam_detection_on_mask_count = 0
+
+    if image is None:
+        return None, dino_detection_count, sam_detection_count, sam_detection_on_mask_count
+
+    if extras is None:
+        extras = {}
+
+    if 'image' in image:
+        image = image['image']
+
+    if mask_model != 'sam' or sam_options is None:
+        result = remove(
+            image,
+            session=new_session(mask_model, **extras),
+            only_mask=True,
+            **extras
+        )
+
+        return result, dino_detection_count, sam_detection_count, sam_detection_on_mask_count
+
+    detections, boxes, logits, phrases = default_groundingdino(
+        image=image,
+        caption=sam_options.dino_prompt,
+        box_threshold=sam_options.dino_box_threshold,
+        text_threshold=sam_options.dino_text_threshold
+    )
+
+    H, W = image.shape[0], image.shape[1]
+    boxes = boxes * torch.Tensor([W, H, W, H])
+    boxes[:, :2] = boxes[:, :2] - boxes[:, 2:] / 2
+    boxes[:, 2:] = boxes[:, 2:] + boxes[:, :2]
+
+    sam_checkpoint = modules.config.download_sam_model(sam_options.model_type)
+    sam = sam_model_registry[sam_options.model_type](checkpoint=sam_checkpoint)
+
+    sam_predictor = SamPredictor(sam)
+    final_mask_tensor = torch.zeros((image.shape[0], image.shape[1]))
+    dino_detection_count = boxes.size(0)
+
+    if dino_detection_count > 0:
+        sam_predictor.set_image(image)
+
+        if sam_options.dino_erode_or_dilate != 0:
+            for index in range(boxes.size(0)):
+                assert boxes.size(1) == 4
+                boxes[index][0] -= sam_options.dino_erode_or_dilate
+                boxes[index][1] -= sam_options.dino_erode_or_dilate
+                boxes[index][2] += sam_options.dino_erode_or_dilate
+                boxes[index][3] += sam_options.dino_erode_or_dilate
+
+        if sam_options.dino_debug:
+            from PIL import ImageDraw, Image
+            debug_dino_image = Image.new("RGB", (image.shape[1], image.shape[0]), color="black")
+            draw = ImageDraw.Draw(debug_dino_image)
+            for box in boxes.numpy():
+                draw.rectangle(box.tolist(), fill="white")
+            return np.array(debug_dino_image), dino_detection_count, sam_detection_count, sam_detection_on_mask_count
+
+        transformed_boxes = sam_predictor.transform.apply_boxes_torch(boxes, image.shape[:2])
+        masks, _, _ = sam_predictor.predict_torch(
+            point_coords=None,
+            point_labels=None,
+            boxes=transformed_boxes,
+            multimask_output=False,
+        )
+
+        masks = optimize_masks(masks)
+        sam_detection_count = len(masks)
+        if sam_options.max_detections == 0:
+            sam_options.max_detections = sys.maxsize
+        sam_objects = min(len(logits), sam_options.max_detections)
+        for obj_ind in range(sam_objects):
+            mask_tensor = masks[obj_ind][0]
+            final_mask_tensor += mask_tensor
+            sam_detection_on_mask_count += 1
+
+    final_mask_tensor = (final_mask_tensor > 0).to('cpu').numpy()
+    mask_image = np.dstack((final_mask_tensor, final_mask_tensor, final_mask_tensor)) * 255
+    mask_image = np.array(mask_image, dtype=np.uint8)
+    return mask_image, dino_detection_count, sam_detection_count, sam_detection_on_mask_count
```
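The three box lines in `generate_mask_from_image` convert GroundingDINO's normalized `(cx, cy, w, h)` output into the absolute `(x1, y1, x2, y2)` corners SAM expects. A worked numeric check:

```python
import torch

W, H = 200, 100
boxes = torch.tensor([[0.5, 0.5, 0.2, 0.4]])    # centered box, 20% wide, 40% tall
boxes = boxes * torch.Tensor([W, H, W, H])      # scale to pixel units
boxes[:, :2] = boxes[:, :2] - boxes[:, 2:] / 2  # (cx, cy) -> top-left corner
boxes[:, 2:] = boxes[:, 2:] + boxes[:, :2]      # (w, h) -> bottom-right corner
print(boxes)  # tensor([[ 80.,  30., 120.,  70.]])
```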
```diff
@@ -104,7 +104,7 @@ def load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_path):
     offload_device = torch.device('cpu')

     use_fp16 = model_management.should_use_fp16(device=load_device)
-    ip_state_dict = torch.load(ip_adapter_path, map_location="cpu")
+    ip_state_dict = torch.load(ip_adapter_path, map_location="cpu", weights_only=True)
     plus = "latents" in ip_state_dict["image_proj"]
     cross_attention_dim = ip_state_dict["ip_adapter"]["1.to_k_ip.weight"].shape[1]
     sdxl = cross_attention_dim == 2048
```
```diff
@@ -0,0 +1,288 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import numpy as np
+import torch
+from ldm_patched.modules import model_management
+from ldm_patched.modules.model_patcher import ModelPatcher
+
+from segment_anything.modeling import Sam
+
+from typing import Optional, Tuple
+
+from segment_anything.utils.transforms import ResizeLongestSide
+
+
+class SamPredictor:
+    def __init__(
+            self,
+            model: Sam,
+            load_device=model_management.text_encoder_device(),
+            offload_device=model_management.text_encoder_offload_device()
+    ) -> None:
+        """
+        Uses SAM to calculate the image embedding for an image, and then
+        allow repeated, efficient mask prediction given prompts.
+
+        Arguments:
+          model (Sam): The model to use for mask prediction.
+        """
+        super().__init__()
+
+        self.load_device = load_device
+        self.offload_device = offload_device
+        # can't use model.half() here as slow_conv2d_cpu is not implemented for half
+        model.to(self.offload_device)
+
+        self.patcher = ModelPatcher(model, load_device=self.load_device, offload_device=self.offload_device)
+
+        self.transform = ResizeLongestSide(model.image_encoder.img_size)
+        self.reset_image()
+
+    def set_image(
+            self,
+            image: np.ndarray,
+            image_format: str = "RGB",
+    ) -> None:
+        """
+        Calculates the image embeddings for the provided image, allowing
+        masks to be predicted with the 'predict' method.
+
+        Arguments:
+          image (np.ndarray): The image for calculating masks. Expects an
+            image in HWC uint8 format, with pixel values in [0, 255].
+          image_format (str): The color format of the image, in ['RGB', 'BGR'].
+        """
+        assert image_format in [
+            "RGB",
+            "BGR",
+        ], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
+        if image_format != self.patcher.model.image_format:
+            image = image[..., ::-1]
+
+        # Transform the image to the form expected by the model
+        input_image = self.transform.apply_image(image)
+        input_image_torch = torch.as_tensor(input_image, device=self.load_device)
+        input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
+
+        self.set_torch_image(input_image_torch, image.shape[:2])
+
+    @torch.no_grad()
+    def set_torch_image(
+            self,
+            transformed_image: torch.Tensor,
+            original_image_size: Tuple[int, ...],
+    ) -> None:
+        """
+        Calculates the image embeddings for the provided image, allowing
+        masks to be predicted with the 'predict' method. Expects the input
+        image to be already transformed to the format expected by the model.
+
+        Arguments:
+          transformed_image (torch.Tensor): The input image, with shape
+            1x3xHxW, which has been transformed with ResizeLongestSide.
+          original_image_size (tuple(int, int)): The size of the image
+            before transformation, in (H, W) format.
+        """
+        assert (
+                len(transformed_image.shape) == 4
+                and transformed_image.shape[1] == 3
+                and max(*transformed_image.shape[2:]) == self.patcher.model.image_encoder.img_size
+        ), f"set_torch_image input must be BCHW with long side {self.patcher.model.image_encoder.img_size}."
+        self.reset_image()
+
+        self.original_size = original_image_size
+        self.input_size = tuple(transformed_image.shape[-2:])
+        model_management.load_model_gpu(self.patcher)
+        input_image = self.patcher.model.preprocess(transformed_image.to(self.load_device))
+        self.features = self.patcher.model.image_encoder(input_image)
+        self.is_image_set = True
+
+    def predict(
+            self,
+            point_coords: Optional[np.ndarray] = None,
+            point_labels: Optional[np.ndarray] = None,
+            box: Optional[np.ndarray] = None,
+            mask_input: Optional[np.ndarray] = None,
+            multimask_output: bool = True,
+            return_logits: bool = False,
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """
+        Predict masks for the given input prompts, using the currently set image.
+
+        Arguments:
+          point_coords (np.ndarray or None): A Nx2 array of point prompts to the
+            model. Each point is in (X,Y) in pixels.
+          point_labels (np.ndarray or None): A length N array of labels for the
+            point prompts. 1 indicates a foreground point and 0 indicates a
+            background point.
+          box (np.ndarray or None): A length 4 array given a box prompt to the
+            model, in XYXY format.
+          mask_input (np.ndarray): A low resolution mask input to the model, typically
+            coming from a previous prediction iteration. Has form 1xHxW, where
+            for SAM, H=W=256.
+          multimask_output (bool): If true, the model will return three masks.
+            For ambiguous input prompts (such as a single click), this will often
+            produce better masks than a single prediction. If only a single
+            mask is needed, the model's predicted quality score can be used
+            to select the best mask. For non-ambiguous prompts, such as multiple
+            input prompts, multimask_output=False can give better results.
+          return_logits (bool): If true, returns un-thresholded masks logits
+            instead of a binary mask.
+
+        Returns:
+          (np.ndarray): The output masks in CxHxW format, where C is the
+            number of masks, and (H, W) is the original image size.
+          (np.ndarray): An array of length C containing the model's
+            predictions for the quality of each mask.
+          (np.ndarray): An array of shape CxHxW, where C is the number
+            of masks and H=W=256. These low resolution logits can be passed to
+            a subsequent iteration as mask input.
+        """
+        if not self.is_image_set:
+            raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
+
+        # Transform input prompts
+        coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None
+        if point_coords is not None:
+            assert (
+                    point_labels is not None
+            ), "point_labels must be supplied if point_coords is supplied."
+            point_coords = self.transform.apply_coords(point_coords, self.original_size)
+            coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.load_device)
+            labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.load_device)
+            coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]
+        if box is not None:
+            box = self.transform.apply_boxes(box, self.original_size)
+            box_torch = torch.as_tensor(box, dtype=torch.float, device=self.load_device)
+            box_torch = box_torch[None, :]
+        if mask_input is not None:
+            mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.load_device)
+            mask_input_torch = mask_input_torch[None, :, :, :]
+
+        masks, iou_predictions, low_res_masks = self.predict_torch(
+            coords_torch,
+            labels_torch,
+            box_torch,
+            mask_input_torch,
+            multimask_output,
+            return_logits=return_logits,
+        )
+
+        masks = masks[0].detach().cpu().numpy()
+        iou_predictions = iou_predictions[0].detach().cpu().numpy()
+        low_res_masks = low_res_masks[0].detach().cpu().numpy()
+        return masks, iou_predictions, low_res_masks
+
+    @torch.no_grad()
+    def predict_torch(
+            self,
+            point_coords: Optional[torch.Tensor],
+            point_labels: Optional[torch.Tensor],
+            boxes: Optional[torch.Tensor] = None,
+            mask_input: Optional[torch.Tensor] = None,
+            multimask_output: bool = True,
+            return_logits: bool = False,
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """
+        Predict masks for the given input prompts, using the currently set image.
+        Input prompts are batched torch tensors and are expected to already be
+        transformed to the input frame using ResizeLongestSide.
+
+        Arguments:
+          point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
+            model. Each point is in (X,Y) in pixels.
+          point_labels (torch.Tensor or None): A BxN array of labels for the
+            point prompts. 1 indicates a foreground point and 0 indicates a
+            background point.
+          box (np.ndarray or None): A Bx4 array given a box prompt to the
+            model, in XYXY format.
+          mask_input (np.ndarray): A low resolution mask input to the model, typically
+            coming from a previous prediction iteration. Has form Bx1xHxW, where
+            for SAM, H=W=256. Masks returned by a previous iteration of the
+            predict method do not need further transformation.
+          multimask_output (bool): If true, the model will return three masks.
+            For ambiguous input prompts (such as a single click), this will often
+            produce better masks than a single prediction. If only a single
+            mask is needed, the model's predicted quality score can be used
+            to select the best mask. For non-ambiguous prompts, such as multiple
+            input prompts, multimask_output=False can give better results.
+          return_logits (bool): If true, returns un-thresholded masks logits
+            instead of a binary mask.
+
+        Returns:
+          (torch.Tensor): The output masks in BxCxHxW format, where C is the
+            number of masks, and (H, W) is the original image size.
+          (torch.Tensor): An array of shape BxC containing the model's
+            predictions for the quality of each mask.
+          (torch.Tensor): An array of shape BxCxHxW, where C is the number
+            of masks and H=W=256. These low res logits can be passed to
+            a subsequent iteration as mask input.
+        """
+        if not self.is_image_set:
+            raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
+
+        if point_coords is not None:
+            points = (point_coords.to(self.load_device), point_labels.to(self.load_device))
+        else:
+            points = None
+
+        # load
+        if boxes is not None:
+            boxes = boxes.to(self.load_device)
+        if mask_input is not None:
+            mask_input = mask_input.to(self.load_device)
+        model_management.load_model_gpu(self.patcher)
+
+        # Embed prompts
+        sparse_embeddings, dense_embeddings = self.patcher.model.prompt_encoder(
+            points=points,
+            boxes=boxes,
+            masks=mask_input,
+        )
+
+        # Predict masks
+        low_res_masks, iou_predictions = self.patcher.model.mask_decoder(
+            image_embeddings=self.features,
+            image_pe=self.patcher.model.prompt_encoder.get_dense_pe(),
+            sparse_prompt_embeddings=sparse_embeddings,
+            dense_prompt_embeddings=dense_embeddings,
+            multimask_output=multimask_output,
+        )
+
+        # Upscale the masks to the original image resolution
+        masks = self.patcher.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)
+
+        if not return_logits:
+            masks = masks > self.patcher.model.mask_threshold
+
+        return masks, iou_predictions, low_res_masks
+
+    def get_image_embedding(self) -> torch.Tensor:
+        """
+        Returns the image embeddings for the currently set image, with
+        shape 1xCxHxW, where C is the embedding dimension and (H,W) are
+        the embedding spatial dimension of SAM (typically C=256, H=W=64).
+        """
+        if not self.is_image_set:
+            raise RuntimeError(
+                "An image must be set with .set_image(...) to generate an embedding."
+            )
+        assert self.features is not None, "Features must exist if an image has been set."
+        return self.features
+
+    @property
+    def device(self) -> torch.device:
+        return self.patcher.model.device
+
+    def reset_image(self) -> None:
+        """Resets the currently set image."""
+        self.is_image_set = False
+        self.features = None
+        self.orig_h = None
+        self.orig_w = None
+        self.input_h = None
+        self.input_w = None
```
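Compared with upstream SAM, this predictor routes the model through `ModelPatcher` so `model_management` controls device placement. A minimal usage sketch, assuming a `vit_b` checkpoint is already on disk (the path is a placeholder):

```python
import numpy as np
import torch

from extras.sam.predictor import SamPredictor
from segment_anything import sam_model_registry

sam = sam_model_registry['vit_b'](checkpoint='sam_vit_b.pth')  # placeholder path
predictor = SamPredictor(sam)
predictor.set_image(np.zeros((512, 512, 3), dtype=np.uint8))   # embed the image once
boxes = predictor.transform.apply_boxes_torch(
    torch.tensor([[80., 30., 120., 70.]]), (512, 512))
masks, scores, low_res = predictor.predict_torch(              # reuse the embedding
    point_coords=None, point_labels=None, boxes=boxes, multimask_output=False)
```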
```diff
@@ -8,7 +8,7 @@
    },
    "outputs": [],
    "source": [
-    "!pip install pygit2==1.12.2\n",
+    "!pip install pygit2==1.15.1\n",
     "%cd /content\n",
     "!git clone https://github.com/lllyasviel/Fooocus.git\n",
     "%cd /content/Fooocus\n",
```
```diff
@@ -1 +1 @@
-version = '2.4.0'
+version = '2.5.5'
```
```diff
@@ -81,9 +81,12 @@ function refresh_style_localization() {
 }

 function refresh_aspect_ratios_label(value) {
-    label = document.querySelector('#aspect_ratios_accordion div span[data-original-text="Aspect Ratios"]')
-    translation = getTranslation("Aspect Ratios")
-    label.textContent = translation + " " + htmlDecode(value)
+    label = document.querySelector('#aspect_ratios_accordion div span');
+    translation = getTranslation("Aspect Ratios");
+    if (typeof translation == "undefined") {
+        translation = "Aspect Ratios";
+    }
+    label.textContent = translation + " " + htmlDecode(value);
 }

 function localizeWholePage() {
```
```diff
@@ -642,4 +642,5 @@ onUiLoaded(async() => {
     }

     applyZoomAndPan("#inpaint_canvas");
+    applyZoomAndPan("#inpaint_mask_canvas");
});
```
```diff
@@ -11,11 +11,13 @@
     "Image Prompt": "Image Prompt",
     "Inpaint or Outpaint": "Inpaint or Outpaint",
     "Outpaint Direction": "Outpaint Direction",
+    "Enable Advanced Masking Features": "Enable Advanced Masking Features",
     "Method": "Method",
     "Describe": "Describe",
     "Content Type": "Content Type",
     "Photograph": "Photograph",
     "Art/Anime": "Art/Anime",
+    "Apply Styles": "Apply Styles",
     "Describe this Image into Prompt": "Describe this Image into Prompt",
     "Image Size and Recommended Size": "Image Size and Recommended Size",
     "Upscale or Variation:": "Upscale or Variation:",
@@ -25,7 +27,7 @@
     "Upscale (1.5x)": "Upscale (1.5x)",
     "Upscale (2x)": "Upscale (2x)",
     "Upscale (Fast 2x)": "Upscale (Fast 2x)",
-    "\ud83d\udcd4 Document": "\uD83D\uDCD4 Document",
+    "\ud83d\udcd4 Documentation": "\uD83D\uDCD4 Documentation",
     "Image": "Image",
     "Stop At": "Stop At",
     "Weight": "Weight",
@@ -44,8 +46,11 @@
     "Top": "Top",
     "Bottom": "Bottom",
     "* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)": "* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)",
-    "Setting": "Setting",
+    "Advanced options": "Advanced options",
+    "Generate mask from image": "Generate mask from image",
+    "Settings": "Settings",
     "Style": "Style",
+    "Styles": "Styles",
     "Preset": "Preset",
     "Performance": "Performance",
     "Speed": "Speed",
@@ -64,6 +69,9 @@
     "Read wildcards in order": "Read wildcards in order",
+    "Black Out NSFW": "Black Out NSFW",
+    "Use black image if NSFW is detected.": "Use black image if NSFW is detected.",
+    "Save only final enhanced image": "Save only final enhanced image",
     "Save Metadata to Images": "Save Metadata to Images",
     "Adds parameters to generated images allowing manual regeneration.": "Adds parameters to generated images allowing manual regeneration.",
     "\ud83d\udcda History Log": "\uD83D\uDCDA History Log",
     "Image Style": "Image Style",
     "Fooocus V2": "Fooocus V2",
@@ -279,7 +287,7 @@
     "Volumetric Lighting": "Volumetric Lighting",
     "Watercolor 2": "Watercolor 2",
     "Whimsical And Playful": "Whimsical And Playful",
-    "Model": "Model",
+    "Models": "Models",
     "Base Model (SDXL only)": "Base Model (SDXL only)",
     "sd_xl_base_1.0_0.9vae.safetensors": "sd_xl_base_1.0_0.9vae.safetensors",
     "bluePencilXL_v009.safetensors": "bluePencilXL_v009.safetensors",
@@ -321,6 +329,7 @@
     "CFG Mimicking from TSNR": "CFG Mimicking from TSNR",
     "Enabling Fooocus's implementation of CFG mimicking for TSNR (effective when real CFG > mimicked CFG).": "Enabling Fooocus's implementation of CFG mimicking for TSNR (effective when real CFG > mimicked CFG).",
     "CLIP Skip": "CLIP Skip",
+    "Bypass CLIP layers to avoid overfitting (use 1 to not skip any layers, 2 is recommended).": "Bypass CLIP layers to avoid overfitting (use 1 to not skip any layers, 2 is recommended).",
     "Sampler": "Sampler",
     "dpmpp_2m_sde_gpu": "dpmpp_2m_sde_gpu",
     "Only effective in non-inpaint mode.": "Only effective in non-inpaint mode.",
@@ -366,10 +375,14 @@
     "Disable preview during generation.": "Disable preview during generation.",
     "Disable Intermediate Results": "Disable Intermediate Results",
     "Disable intermediate results during generation, only show final gallery.": "Disable intermediate results during generation, only show final gallery.",
     "Debug Inpaint Preprocessing": "Debug Inpaint Preprocessing",
+    "Debug GroundingDINO": "Debug GroundingDINO",
+    "Used for SAM object detection and box generation": "Used for SAM object detection and box generation",
+    "GroundingDINO Box Erode or Dilate": "GroundingDINO Box Erode or Dilate",
     "Inpaint Engine": "Inpaint Engine",
     "v1": "v1",
-    "Version of Fooocus inpaint model": "Version of Fooocus inpaint model",
+    "v2.5": "v2.5",
+    "v2.6": "v2.6",
     "Control Debug": "Control Debug",
     "Debug Preprocessors": "Debug Preprocessors",
     "Mixing Image Prompt and Vary/Upscale": "Mixing Image Prompt and Vary/Upscale",
@@ -399,5 +412,74 @@
     "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.",
     "fooocus (json)": "fooocus (json)",
     "a1111 (plain text)": "a1111 (plain text)",
-    "Unsupported image type in input": "Unsupported image type in input"
+    "Unsupported image type in input": "Unsupported image type in input",
+    "Enhance": "Enhance",
+    "Detection prompt": "Detection prompt",
+    "Detection Prompt Quick List": "Detection Prompt Quick List",
+    "Maximum number of detections": "Maximum number of detections",
+    "Use with Enhance, skips image generation": "Use with Enhance, skips image generation",
+    "Order of Processing": "Order of Processing",
+    "Use before to enhance small details and after to enhance large areas.": "Use before to enhance small details and after to enhance large areas.",
+    "Before First Enhancement": "Before First Enhancement",
+    "After Last Enhancement": "After Last Enhancement",
+    "Prompt Type": "Prompt Type",
+    "Choose which prompt to use for Upscale or Variation.": "Choose which prompt to use for Upscale or Variation.",
+    "Original Prompts": "Original Prompts",
+    "Last Filled Enhancement Prompts": "Last Filled Enhancement Prompts",
+    "Enable": "Enable",
+    "Describe what you want to detect.": "Describe what you want to detect.",
+    "Enhancement positive prompt": "Enhancement positive prompt",
+    "Uses original prompt instead if empty.": "Uses original prompt instead if empty.",
+    "Enhancement negative prompt": "Enhancement negative prompt",
+    "Uses original negative prompt instead if empty.": "Uses original negative prompt instead if empty.",
+    "Detection": "Detection",
+    "u2net": "u2net",
+    "u2netp": "u2netp",
+    "u2net_human_seg": "u2net_human_seg",
+    "u2net_cloth_seg": "u2net_cloth_seg",
+    "silueta": "silueta",
+    "isnet-general-use": "isnet-general-use",
+    "isnet-anime": "isnet-anime",
+    "sam": "sam",
+    "Mask generation model": "Mask generation model",
+    "Cloth category": "Cloth category",
+    "Use singular whenever possible": "Use singular whenever possible",
+    "full": "full",
+    "upper": "upper",
+    "lower": "lower",
+    "SAM Options": "SAM Options",
+    "SAM model": "SAM model",
+    "vit_b": "vit_b",
+    "vit_l": "vit_l",
+    "vit_h": "vit_h",
+    "Box Threshold": "Box Threshold",
+    "Text Threshold": "Text Threshold",
+    "Set to 0 to detect all": "Set to 0 to detect all",
+    "Inpaint": "Inpaint",
+    "Inpaint or Outpaint (default)": "Inpaint or Outpaint (default)",
+    "Improve Detail (face, hand, eyes, etc.)": "Improve Detail (face, hand, eyes, etc.)",
+    "Modify Content (add objects, change background, etc.)": "Modify Content (add objects, change background, etc.)",
+    "Disable initial latent in inpaint": "Disable initial latent in inpaint",
+    "Version of Fooocus inpaint model. If set, use performance Quality or Speed (no performance LoRAs) for best results.": "Version of Fooocus inpaint model. If set, use performance Quality or Speed (no performance LoRAs) for best results.",
+    "Inpaint Denoising Strength": "Inpaint Denoising Strength",
+    "Same as the denoising strength in A1111 inpaint. Only used in inpaint, not used in outpaint. (Outpaint always use 1.0)": "Same as the denoising strength in A1111 inpaint. Only used in inpaint, not used in outpaint. (Outpaint always use 1.0)",
+    "Inpaint Respective Field": "Inpaint Respective Field",
+    "The area to inpaint. Value 0 is same as \"Only Masked\" in A1111. Value 1 is same as \"Whole Image\" in A1111. Only used in inpaint, not used in outpaint. (Outpaint always use 1.0)": "The area to inpaint. Value 0 is same as \"Only Masked\" in A1111. Value 1 is same as \"Whole Image\" in A1111. Only used in inpaint, not used in outpaint. (Outpaint always use 1.0)",
+    "Mask Erode or Dilate": "Mask Erode or Dilate",
+    "Positive value will make white area in the mask larger, negative value will make white area smaller. (default is 0, always processed before any mask invert)": "Positive value will make white area in the mask larger, negative value will make white area smaller. (default is 0, always processed before any mask invert)",
+    "Invert Mask When Generating": "Invert Mask When Generating",
+    "Debug Enhance Masks": "Debug Enhance Masks",
+    "Show enhance masks in preview and final results": "Show enhance masks in preview and final results",
+    "Use GroundingDINO boxes instead of more detailed SAM masks": "Use GroundingDINO boxes instead of more detailed SAM masks",
+    "highly detailed face": "highly detailed face",
+    "detailed girl face": "detailed girl face",
+    "detailed man face": "detailed man face",
+    "detailed hand": "detailed hand",
+    "beautiful eyes": "beautiful eyes",
+    "face": "face",
+    "eye": "eye",
+    "mouth": "mouth",
+    "hair": "hair",
+    "hand": "hand",
+    "body": "body"
 }
```
launch.py (26 changed lines)
```diff
@@ -80,11 +80,14 @@ if args.gpu_device_id is not None:
     os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id)
     print("Set device to:", args.gpu_device_id)

-if args.hf_mirror is not None :
+if args.hf_mirror is not None:
     os.environ['HF_MIRROR'] = str(args.hf_mirror)
     print("Set hf_mirror to:", args.hf_mirror)

 from modules import config
+from modules.hash_cache import init_cache

+os.environ["U2NET_HOME"] = config.path_inpaint
+
 os.environ['GRADIO_TEMP_DIR'] = config.temp_path

@@ -97,7 +100,9 @@ if config.temp_path_cleanup_on_launch:
         print(f"[Cleanup] Failed to delete content of temp dir.")


-def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads):
+def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads, vae_downloads):
+    from modules.util import get_file_from_folder_list
+
     for file_name, url in vae_approx_filenames:
         load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name)

@@ -112,9 +117,9 @@ def download_models(default_model, previous_default_models, checkpoint_downloads
         return default_model, checkpoint_downloads

     if not args.always_download_new_model:
-        if not os.path.exists(os.path.join(config.paths_checkpoints[0], default_model)):
+        if not os.path.isfile(get_file_from_folder_list(default_model, config.paths_checkpoints)):
             for alternative_model_name in previous_default_models:
-                if os.path.exists(os.path.join(config.paths_checkpoints[0], alternative_model_name)):
+                if os.path.isfile(get_file_from_folder_list(alternative_model_name, config.paths_checkpoints)):
                     print(f'You do not have [{default_model}] but you have [{alternative_model_name}].')
                     print(f'Fooocus will use [{alternative_model_name}] to avoid downloading new models, '
                           f'but you are not using the latest models.')
@@ -124,17 +129,24 @@ def download_models(default_model, previous_default_models, checkpoint_downloads
                     break

     for file_name, url in checkpoint_downloads.items():
-        load_file_from_url(url=url, model_dir=config.paths_checkpoints[0], file_name=file_name)
+        model_dir = os.path.dirname(get_file_from_folder_list(file_name, config.paths_checkpoints))
+        load_file_from_url(url=url, model_dir=model_dir, file_name=file_name)
     for file_name, url in embeddings_downloads.items():
         load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name)
     for file_name, url in lora_downloads.items():
-        load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name)
+        model_dir = os.path.dirname(get_file_from_folder_list(file_name, config.paths_loras))
+        load_file_from_url(url=url, model_dir=model_dir, file_name=file_name)
+    for file_name, url in vae_downloads.items():
+        load_file_from_url(url=url, model_dir=config.path_vae, file_name=file_name)

     return default_model, checkpoint_downloads


 config.default_base_model_name, config.checkpoint_downloads = download_models(
     config.default_base_model_name, config.previous_default_models, config.checkpoint_downloads,
-    config.embeddings_downloads, config.lora_downloads)
+    config.embeddings_downloads, config.lora_downloads, config.vae_downloads)

+config.update_files()
+init_cache(config.model_filenames, config.paths_checkpoints, config.lora_filenames, config.paths_loras)

 from webui import *
```
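The download logic now resolves each file against every configured checkpoint/LoRA folder instead of hard-coding folder 0. A simplified re-implementation of what `get_file_from_folder_list` is relied on to do here (illustrative sketch, not the actual helper from `modules.util`):

```python
import os

def get_file_from_folder_list(name, folders):
    # Return the first existing copy of `name` across the ordered folders;
    # fall back to a path in the first folder if none exists yet.
    for folder in folders:
        candidate = os.path.join(folder, name)
        if os.path.isfile(candidate):
            return os.path.abspath(candidate)
    return os.path.abspath(os.path.join(folders[0], name))
```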
```diff
@@ -107,8 +107,7 @@ class SDTurboScheduler:
     def get_sigmas(self, model, steps, denoise):
         start_step = 10 - int(10 * denoise)
         timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[start_step:start_step + steps]
-        ldm_patched.modules.model_management.load_models_gpu([model])
-        sigmas = model.model.model_sampling.sigma(timesteps)
+        sigmas = model.model_sampling.sigma(timesteps)
         sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])
         return (sigmas, )
```
```diff
@@ -108,7 +108,7 @@ class ModelSamplingContinuousEDM:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "model": ("MODEL",),
-                              "sampling": (["v_prediction", "eps"],),
+                              "sampling": (["v_prediction", "edm_playground_v2.5", "eps"],),
                               "sigma_max": ("FLOAT", {"default": 120.0, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
                               "sigma_min": ("FLOAT", {"default": 0.002, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
                               }}
@@ -121,17 +121,25 @@ class ModelSamplingContinuousEDM:
     def patch(self, model, sampling, sigma_max, sigma_min):
         m = model.clone()

+        latent_format = None
+        sigma_data = 1.0
         if sampling == "eps":
             sampling_type = ldm_patched.modules.model_sampling.EPS
         elif sampling == "v_prediction":
             sampling_type = ldm_patched.modules.model_sampling.V_PREDICTION
+        elif sampling == "edm_playground_v2.5":
+            sampling_type = ldm_patched.modules.model_sampling.EDM
+            sigma_data = 0.5
+            latent_format = ldm_patched.modules.latent_formats.SDXL_Playground_2_5()

         class ModelSamplingAdvanced(ldm_patched.modules.model_sampling.ModelSamplingContinuousEDM, sampling_type):
             pass

         model_sampling = ModelSamplingAdvanced(model.model.model_config)
-        model_sampling.set_sigma_range(sigma_min, sigma_max)
+        model_sampling.set_parameters(sigma_min, sigma_max, sigma_data)
         m.add_object_patch("model_sampling", model_sampling)
+        if latent_format is not None:
+            m.add_object_patch("latent_format", latent_format)
         return (m, )

 class RescaleCFG:
```
```diff
@@ -832,5 +832,77 @@ def sample_tcd(model, x, sigmas, extra_args=None, callback=None, disable=None, n
         if eta > 0 and sigmas[i + 1] > 0:
             noise = noise_sampler(sigmas[i], sigmas[i + 1])
             x = x / alpha_prod_s[i+1].sqrt() + noise * (sigmas[i+1]**2 + 1 - 1/alpha_prod_s[i+1]).sqrt()
         else:
             x *= torch.sqrt(1.0 + sigmas[i + 1] ** 2)

     return x


+@torch.no_grad()
+def sample_restart(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., restart_list=None):
+    """Implements restart sampling in Restart Sampling for Improving Generative Processes (2023)
+    Restart_list format: {min_sigma: [ restart_steps, restart_times, max_sigma]}
+    If restart_list is None: will choose restart_list automatically, otherwise will use the given restart_list
+    """
+    extra_args = {} if extra_args is None else extra_args
+    s_in = x.new_ones([x.shape[0]])
+    step_id = 0
+
+    def heun_step(x, old_sigma, new_sigma, second_order=True):
+        nonlocal step_id
+        denoised = model(x, old_sigma * s_in, **extra_args)
+        d = to_d(x, old_sigma, denoised)
+        if callback is not None:
+            callback({'x': x, 'i': step_id, 'sigma': new_sigma, 'sigma_hat': old_sigma, 'denoised': denoised})
+        dt = new_sigma - old_sigma
+        if new_sigma == 0 or not second_order:
+            # Euler method
+            x = x + d * dt
+        else:
+            # Heun's method
+            x_2 = x + d * dt
+            denoised_2 = model(x_2, new_sigma * s_in, **extra_args)
+            d_2 = to_d(x_2, new_sigma, denoised_2)
+            d_prime = (d + d_2) / 2
+            x = x + d_prime * dt
+        step_id += 1
+        return x
+
+    steps = sigmas.shape[0] - 1
+    if restart_list is None:
+        if steps >= 20:
+            restart_steps = 9
+            restart_times = 1
+            if steps >= 36:
+                restart_steps = steps // 4
+                restart_times = 2
+            sigmas = get_sigmas_karras(steps - restart_steps * restart_times, sigmas[-2].item(), sigmas[0].item(), device=sigmas.device)
+            restart_list = {0.1: [restart_steps + 1, restart_times, 2]}
+        else:
+            restart_list = {}
+
+    restart_list = {int(torch.argmin(abs(sigmas - key), dim=0)): value for key, value in restart_list.items()}
+
+    step_list = []
+    for i in range(len(sigmas) - 1):
+        step_list.append((sigmas[i], sigmas[i + 1]))
+        if i + 1 in restart_list:
+            restart_steps, restart_times, restart_max = restart_list[i + 1]
+            min_idx = i + 1
+            max_idx = int(torch.argmin(abs(sigmas - restart_max), dim=0))
+            if max_idx < min_idx:
+                sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1]
+                while restart_times > 0:
+                    restart_times -= 1
+                    step_list.extend(zip(sigma_restart[:-1], sigma_restart[1:]))
+
+    last_sigma = None
+    for old_sigma, new_sigma in tqdm(step_list, disable=disable):
+        if last_sigma is None:
+            last_sigma = old_sigma
+        elif last_sigma < old_sigma:
+            x = x + torch.randn_like(x) * s_noise * (old_sigma ** 2 - last_sigma ** 2) ** 0.5
+        x = heun_step(x, old_sigma, new_sigma)
+        last_sigma = new_sigma
+
+    return x
```
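The key line in `sample_restart` is the noise re-injection when the schedule jumps back up from `last_sigma` to a larger `old_sigma`: adding noise with standard deviation `sqrt(old_sigma**2 - last_sigma**2)` restores exactly the total noise level `old_sigma`, since independent variances add. In isolation:

```python
import torch

last_sigma, old_sigma, s_noise = 0.1, 2.0, 1.0
x = torch.zeros(1024)  # stand-in for a latent already at noise level last_sigma
x = x + torch.randn_like(x) * s_noise * (old_sigma ** 2 - last_sigma ** 2) ** 0.5
print(x.std())  # ~1.997 = sqrt(2.0**2 - 0.1**2); combined with the existing
                # last_sigma noise this totals old_sigma
```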
```diff
@@ -8,7 +8,7 @@ class CLIPEmbeddingNoiseAugmentation(ImageConcatWithNoiseAugmentation):
         if clip_stats_path is None:
             clip_mean, clip_std = torch.zeros(timestep_dim), torch.ones(timestep_dim)
         else:
-            clip_mean, clip_std = torch.load(clip_stats_path, map_location="cpu")
+            clip_mean, clip_std = torch.load(clip_stats_path, map_location="cpu", weights_only=True)
         self.register_buffer("data_mean", clip_mean[None, :], persistent=False)
         self.register_buffer("data_std", clip_std[None, :], persistent=False)
         self.time_embed = Timestep(timestep_dim)
```
@ -1,3 +1,4 @@
|
|||
import torch
|
||||
|
||||
class LatentFormat:
|
||||
scale_factor = 1.0
|
||||
|
|
@@ -34,6 +35,70 @@ class SDXL(LatentFormat):
        ]
        self.taesd_decoder_name = "taesdxl_decoder"

+class SDXL_Playground_2_5(LatentFormat):
+    def __init__(self):
+        self.scale_factor = 0.5
+        self.latents_mean = torch.tensor([-1.6574, 1.886, -1.383, 2.5155]).view(1, 4, 1, 1)
+        self.latents_std = torch.tensor([8.4927, 5.9022, 6.5498, 5.2299]).view(1, 4, 1, 1)
+
+        self.latent_rgb_factors = [
+            #    R        G        B
+            [ 0.3920,  0.4054,  0.4549],
+            [-0.2634, -0.0196,  0.0653],
+            [ 0.0568,  0.1687, -0.0755],
+            [-0.3112, -0.2359, -0.2076]
+        ]
+        self.taesd_decoder_name = "taesdxl_decoder"
+
+    def process_in(self, latent):
+        latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+        latents_std = self.latents_std.to(latent.device, latent.dtype)
+        return (latent - latents_mean) * self.scale_factor / latents_std
+
+    def process_out(self, latent):
+        latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+        latents_std = self.latents_std.to(latent.device, latent.dtype)
+        return latent * latents_std / self.scale_factor + latents_mean
+
+
class SD_X4(LatentFormat):
    def __init__(self):
        self.scale_factor = 0.08333
        self.latent_rgb_factors = [
            [-0.2340, -0.3863, -0.3257],
            [ 0.0994,  0.0885, -0.0908],
            [-0.2833, -0.2349, -0.3741],
            [ 0.2523, -0.0055, -0.1651]
        ]

+class SC_Prior(LatentFormat):
+    def __init__(self):
+        self.scale_factor = 1.0
+        self.latent_rgb_factors = [
+            [-0.0326, -0.0204, -0.0127],
+            [-0.1592, -0.0427,  0.0216],
+            [ 0.0873,  0.0638, -0.0020],
+            [-0.0602,  0.0442,  0.1304],
+            [ 0.0800, -0.0313, -0.1796],
+            [-0.0810, -0.0638, -0.1581],
+            [ 0.1791,  0.1180,  0.0967],
+            [ 0.0740,  0.1416,  0.0432],
+            [-0.1745, -0.1888, -0.1373],
+            [ 0.2412,  0.1577,  0.0928],
+            [ 0.1908,  0.0998,  0.0682],
+            [ 0.0209,  0.0365, -0.0092],
+            [ 0.0448, -0.0650, -0.1728],
+            [-0.1658, -0.1045, -0.1308],
+            [ 0.0542,  0.1545,  0.1325],
+            [-0.0352, -0.1672, -0.2541]
+        ]
+
+class SC_B(LatentFormat):
+    def __init__(self):
+        self.scale_factor = 1.0 / 0.43
+        self.latent_rgb_factors = [
+            [ 0.1121,  0.2006,  0.1023],
+            [-0.2093, -0.0222, -0.0195],
+            [-0.3087, -0.1535,  0.0366],
+            [ 0.0290, -0.1574, -0.4078]
+        ]
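The two process_* methods are exact inverses; the per-channel statistics move Playground v2.5 latents into and out of the unit-variance space the sampler expects. A quick self-contained check of that round trip:

import torch

mean = torch.tensor([-1.6574, 1.886, -1.383, 2.5155]).view(1, 4, 1, 1)
std = torch.tensor([8.4927, 5.9022, 6.5498, 5.2299]).view(1, 4, 1, 1)
scale = 0.5

latent = torch.randn(1, 4, 8, 8)
normed = (latent - mean) * scale / std    # process_in
restored = normed * std / scale + mean    # process_out
assert torch.allclose(latent, restored, atol=1e-5)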
@@ -1,7 +1,7 @@
import torch
-import numpy as np
from ldm_patched.ldm.modules.diffusionmodules.util import make_beta_schedule
import math
+import numpy as np

class EPS:
    def calculate_input(self, sigma, noise):
@@ -12,12 +12,28 @@ class EPS:
        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        return model_input - model_output * sigma

+    def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
+        if max_denoise:
+            noise = noise * torch.sqrt(1.0 + sigma ** 2.0)
+        else:
+            noise = noise * sigma
+
+        noise += latent_image
+        return noise
+
+    def inverse_noise_scaling(self, sigma, latent):
+        return latent
+
class V_PREDICTION(EPS):
    def calculate_denoised(self, sigma, model_output, model_input):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5

+class EDM(V_PREDICTION):
+    def calculate_denoised(self, sigma, model_output, model_input):
+        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
+        return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) + model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5


class ModelSamplingDiscrete(torch.nn.Module):
    def __init__(self, model_config=None):
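The new EDM class differs from V_PREDICTION only in the sign of the model_output term; both implement the Karras et al. (2022) preconditioning denoised = c_skip(sigma) * x + c_out(sigma) * F(x). A sketch of those two weights, with sigma_data = 1.0 as in the classes above:

import torch

def c_skip(sigma, sigma_data=1.0):
    return sigma_data ** 2 / (sigma ** 2 + sigma_data ** 2)

def c_out(sigma, sigma_data=1.0):
    return sigma * sigma_data / (sigma ** 2 + sigma_data ** 2) ** 0.5

sigma = torch.tensor(2.0)
print(c_skip(sigma))  # weight on the noisy input; tends to 0 as sigma grows
print(c_out(sigma))   # weight on the network output; tends to sigma_data as sigma grows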
@@ -42,21 +58,25 @@ class ModelSamplingDiscrete(torch.nn.Module):
        else:
            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
        alphas = 1. - betas
-       alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32)
-       # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
+       alphas_cumprod = torch.cumprod(alphas, dim=0)

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end

        # self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32))
        # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32))
        # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32))

        sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
+       alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32)
        self.set_sigmas(sigmas)
+       self.set_alphas_cumprod(alphas_cumprod.float())

    def set_sigmas(self, sigmas):
-       self.register_buffer('sigmas', sigmas)
-       self.register_buffer('log_sigmas', sigmas.log())
+       self.register_buffer('sigmas', sigmas.float())
+       self.register_buffer('log_sigmas', sigmas.log().float())

+   def set_alphas_cumprod(self, alphas_cumprod):
+       self.register_buffer("alphas_cumprod", alphas_cumprod.float())
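The sigma computation here is the standard change of variables between DDPM and EDM-style notation, sigma_t = sqrt((1 - abar_t) / abar_t), where abar_t is the cumulative product of (1 - beta). A toy schedule makes the monotonicity visible:

import torch

betas = torch.linspace(1e-4, 2e-2, 10)
abar = torch.cumprod(1.0 - betas, dim=0)
sigmas = ((1 - abar) / abar) ** 0.5
print(sigmas)  # strictly increasing noise levels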
@@ -94,8 +114,6 @@ class ModelSamplingDiscrete(torch.nn.Module):
class ModelSamplingContinuousEDM(torch.nn.Module):
    def __init__(self, model_config=None):
        super().__init__()
-       self.sigma_data = 1.0
-
        if model_config is not None:
            sampling_settings = model_config.sampling_settings
        else:
@@ -103,9 +121,11 @@ class ModelSamplingContinuousEDM(torch.nn.Module):

        sigma_min = sampling_settings.get("sigma_min", 0.002)
        sigma_max = sampling_settings.get("sigma_max", 120.0)
-       self.set_sigma_range(sigma_min, sigma_max)
+       sigma_data = sampling_settings.get("sigma_data", 1.0)
+       self.set_parameters(sigma_min, sigma_max, sigma_data)

-   def set_sigma_range(self, sigma_min, sigma_max):
+   def set_parameters(self, sigma_min, sigma_max, sigma_data):
+       self.sigma_data = sigma_data
        sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), 1000).exp()

        self.register_buffer('sigmas', sigmas)  # for compatibility with some schedulers
@@ -134,3 +154,56 @@ class ModelSamplingContinuousEDM(torch.nn.Module):

        log_sigma_min = math.log(self.sigma_min)
        return math.exp((math.log(self.sigma_max) - log_sigma_min) * percent + log_sigma_min)

+class StableCascadeSampling(ModelSamplingDiscrete):
+    def __init__(self, model_config=None):
+        super().__init__()
+
+        if model_config is not None:
+            sampling_settings = model_config.sampling_settings
+        else:
+            sampling_settings = {}
+
+        self.set_parameters(sampling_settings.get("shift", 1.0))
+
+    def set_parameters(self, shift=1.0, cosine_s=8e-3):
+        self.shift = shift
+        self.cosine_s = torch.tensor(cosine_s)
+        self._init_alpha_cumprod = torch.cos(self.cosine_s / (1 + self.cosine_s) * torch.pi * 0.5) ** 2
+
+        # This part is just for compatibility with some schedulers in the codebase
+        self.num_timesteps = 10000
+        sigmas = torch.empty((self.num_timesteps), dtype=torch.float32)
+        for x in range(self.num_timesteps):
+            t = (x + 1) / self.num_timesteps
+            sigmas[x] = self.sigma(t)
+
+        self.set_sigmas(sigmas)
+
+    def sigma(self, timestep):
+        alpha_cumprod = (torch.cos((timestep + self.cosine_s) / (1 + self.cosine_s) * torch.pi * 0.5) ** 2 / self._init_alpha_cumprod)
+
+        if self.shift != 1.0:
+            var = alpha_cumprod
+            logSNR = (var / (1 - var)).log()
+            logSNR += 2 * torch.log(1.0 / torch.tensor(self.shift))
+            alpha_cumprod = logSNR.sigmoid()
+
+        alpha_cumprod = alpha_cumprod.clamp(0.0001, 0.9999)
+        return ((1 - alpha_cumprod) / alpha_cumprod) ** 0.5
+
+    def timestep(self, sigma):
+        var = 1 / ((sigma * sigma) + 1)
+        var = var.clamp(0, 1.0)
+        s, min_var = self.cosine_s.to(var.device), self._init_alpha_cumprod.to(var.device)
+        t = (((var * min_var) ** 0.5).acos() / (torch.pi * 0.5)) * (1 + s) - s
+        return t
+
+    def percent_to_sigma(self, percent):
+        if percent <= 0.0:
+            return 999999999.9
+        if percent >= 1.0:
+            return 0.0
+
+        percent = 1.0 - percent
+        return self.sigma(torch.tensor(percent))
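The shift parameter rescales the cosine schedule in logSNR space: logSNR is lowered by 2*log(shift), so shift > 1 makes every timestep noisier. A standalone sketch of sigma(t) with and without shift, mirroring the method above:

import torch

cosine_s = torch.tensor(8e-3)
init_abar = torch.cos(cosine_s / (1 + cosine_s) * torch.pi * 0.5) ** 2

def sigma_at(t, shift=1.0):
    abar = torch.cos((t + cosine_s) / (1 + cosine_s) * torch.pi * 0.5) ** 2 / init_abar
    if shift != 1.0:
        logsnr = (abar / (1 - abar)).log() + 2 * torch.log(1.0 / torch.tensor(shift))
        abar = logsnr.sigmoid()
    abar = abar.clamp(0.0001, 0.9999)
    return ((1 - abar) / abar) ** 0.5

t = torch.tensor(0.5)
print(sigma_at(t), sigma_at(t, shift=2.0))  # the shifted schedule is noisier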
@@ -523,7 +523,7 @@ class UNIPCBH2(Sampler):

KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "heunpp2", "dpm_2", "dpm_2_ancestral",
                  "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
-                 "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "tcd"]
+                 "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "tcd", "edm_playground_v2.5", "restart"]

class KSAMPLER(Sampler):
    def __init__(self, sampler_function, extra_options={}, inpaint_options={}):
@@ -326,7 +326,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=None):
            except:
                embed_out = safe_load_embed_zip(embed_path)
        else:
-           embed = torch.load(embed_path, map_location="cpu")
+           embed = torch.load(embed_path, map_location="cpu", weights_only=True)
    except Exception as e:
        print(traceback.format_exc())
        print()
@@ -377,15 +377,15 @@ class VQAutoEncoder(nn.Module):
        )

        if model_path is not None:
-           chkpt = torch.load(model_path, map_location="cpu")
+           chkpt = torch.load(model_path, map_location="cpu", weights_only=True)
            if "params_ema" in chkpt:
                self.load_state_dict(
-                   torch.load(model_path, map_location="cpu")["params_ema"]
+                   torch.load(model_path, map_location="cpu", weights_only=True)["params_ema"]
                )
                logger.info(f"vqgan is loaded from: {model_path} [params_ema]")
            elif "params" in chkpt:
                self.load_state_dict(
-                   torch.load(model_path, map_location="cpu")["params"]
+                   torch.load(model_path, map_location="cpu", weights_only=True)["params"]
                )
                logger.info(f"vqgan is loaded from: {model_path} [params]")
            else:
@@ -273,8 +273,8 @@ class GFPGANBilinear(nn.Module):
        if decoder_load_path:
            self.stylegan_decoder.load_state_dict(
                torch.load(
-                   decoder_load_path, map_location=lambda storage, loc: storage
-               )["params_ema"]
+                   decoder_load_path, map_location=lambda storage, loc: storage,
+                   weights_only=True)["params_ema"]
            )
        # fix decoder without updating params
        if fix_decoder:
@@ -373,8 +373,8 @@ class GFPGANv1(nn.Module):
        if decoder_load_path:
            self.stylegan_decoder.load_state_dict(
                torch.load(
-                   decoder_load_path, map_location=lambda storage, loc: storage
-               )["params_ema"]
+                   decoder_load_path, map_location=lambda storage, loc: storage,
+                   weights_only=True)["params_ema"]
            )
        # fix decoder without updating params
        if fix_decoder:
@@ -284,8 +284,8 @@ class GFPGANv1Clean(nn.Module):
        if decoder_load_path:
            self.stylegan_decoder.load_state_dict(
                torch.load(
-                   decoder_load_path, map_location=lambda storage, loc: storage
-               )["params_ema"]
+                   decoder_load_path, map_location=lambda storage, loc: storage,
+                   weights_only=True)["params_ema"]
            )
        # fix decoder without updating params
        if fix_decoder:

(One file's diff is suppressed because it is too large.)
@@ -2,13 +2,14 @@ import os
import json
import math
import numbers

import args_manager
+import tempfile
import modules.flags
import modules.sdxl_styles

from modules.model_loader import load_file_from_url
-from modules.extra_utils import makedirs_with_log, get_files_from_folder
+from modules.extra_utils import makedirs_with_log, get_files_from_folder, try_eval_env_var
from modules.flags import OutputFormat, Performance, MetadataScheme
@@ -97,7 +98,6 @@ def try_load_deprecated_user_path_config():

try_load_deprecated_user_path_config()

-
def get_presets():
    preset_folder = 'presets'
    presets = ['initial']
@@ -105,8 +105,11 @@ def get_presets():
        print('No presets found.')
        return presets

-   return presets + [f[:f.index('.json')] for f in os.listdir(preset_folder) if f.endswith('.json')]
+   return presets + [f[:f.index(".json")] for f in os.listdir(preset_folder) if f.endswith('.json')]
+
+def update_presets():
+    global available_presets
+    available_presets = get_presets()

def try_get_preset_content(preset):
    if isinstance(preset, str):
@@ -197,10 +200,11 @@ path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vision/')
path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion')
path_wildcards = get_dir_or_set_default('path_wildcards', '../wildcards/')
path_safety_checker = get_dir_or_set_default('path_safety_checker', '../models/safety_checker/')
+path_sam = get_dir_or_set_default('path_sam', '../models/sam/')
path_outputs = get_path_output()


-def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False):
+def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False, expected_type=None):
    global config_dict, visited_keys

    if key not in visited_keys:
@@ -208,6 +212,7 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False, expected_type=None):

        v = os.getenv(key)
        if v is not None:
+           v = try_eval_env_var(v, expected_type)
            print(f"Environment: {key} = {v}")
            config_dict[key] = v
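With this hook, any config key can be overridden from the environment and still arrive with the right type, because the raw string from os.getenv is coerced before validation. A hypothetical usage sketch:

import os

# Set before modules.config is imported, e.g. in the launcher or a Docker env block:
os.environ['default_image_number'] = '4'
# get_config_item_or_set_default('default_image_number', 2, ..., expected_type=int)
# then receives the string '4', coerces it to the int 4 via try_eval_env_var, and validates it.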
@@ -252,41 +257,49 @@ temp_path = init_temp_path(get_config_item_or_set_default(
    key='temp_path',
    default_value=default_temp_path,
    validator=lambda x: isinstance(x, str),
+   expected_type=str
), default_temp_path)
temp_path_cleanup_on_launch = get_config_item_or_set_default(
    key='temp_path_cleanup_on_launch',
    default_value=True,
-   validator=lambda x: isinstance(x, bool)
+   validator=lambda x: isinstance(x, bool),
+   expected_type=bool
)
default_base_model_name = default_model = get_config_item_or_set_default(
    key='default_model',
    default_value='model.safetensors',
-   validator=lambda x: isinstance(x, str)
+   validator=lambda x: isinstance(x, str),
+   expected_type=str
)
previous_default_models = get_config_item_or_set_default(
    key='previous_default_models',
    default_value=[],
-   validator=lambda x: isinstance(x, list) and all(isinstance(k, str) for k in x)
+   validator=lambda x: isinstance(x, list) and all(isinstance(k, str) for k in x),
+   expected_type=list
)
default_refiner_model_name = default_refiner = get_config_item_or_set_default(
    key='default_refiner',
    default_value='None',
-   validator=lambda x: isinstance(x, str)
+   validator=lambda x: isinstance(x, str),
+   expected_type=str
)
default_refiner_switch = get_config_item_or_set_default(
    key='default_refiner_switch',
    default_value=0.8,
-   validator=lambda x: isinstance(x, numbers.Number) and 0 <= x <= 1
+   validator=lambda x: isinstance(x, numbers.Number) and 0 <= x <= 1,
+   expected_type=numbers.Number
)
default_loras_min_weight = get_config_item_or_set_default(
    key='default_loras_min_weight',
    default_value=-2,
-   validator=lambda x: isinstance(x, numbers.Number) and -10 <= x <= 10
+   validator=lambda x: isinstance(x, numbers.Number) and -10 <= x <= 10,
+   expected_type=numbers.Number
)
default_loras_max_weight = get_config_item_or_set_default(
    key='default_loras_max_weight',
    default_value=2,
-   validator=lambda x: isinstance(x, numbers.Number) and -10 <= x <= 10
+   validator=lambda x: isinstance(x, numbers.Number) and -10 <= x <= 10,
+   expected_type=numbers.Number
)
default_loras = get_config_item_or_set_default(
    key='default_loras',
@@ -320,38 +333,45 @@ default_loras = get_config_item_or_set_default(
    validator=lambda x: isinstance(x, list) and all(
        len(y) == 3 and isinstance(y[0], bool) and isinstance(y[1], str) and isinstance(y[2], numbers.Number)
        or len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number)
-       for y in x)
+       for y in x),
+   expected_type=list
)
default_loras = [(y[0], y[1], y[2]) if len(y) == 3 else (True, y[0], y[1]) for y in default_loras]
default_max_lora_number = get_config_item_or_set_default(
    key='default_max_lora_number',
    default_value=len(default_loras) if isinstance(default_loras, list) and len(default_loras) > 0 else 5,
-   validator=lambda x: isinstance(x, int) and x >= 1
+   validator=lambda x: isinstance(x, int) and x >= 1,
+   expected_type=int
)
default_cfg_scale = get_config_item_or_set_default(
    key='default_cfg_scale',
    default_value=7.0,
-   validator=lambda x: isinstance(x, numbers.Number)
+   validator=lambda x: isinstance(x, numbers.Number),
+   expected_type=numbers.Number
)
default_sample_sharpness = get_config_item_or_set_default(
    key='default_sample_sharpness',
    default_value=2.0,
-   validator=lambda x: isinstance(x, numbers.Number)
+   validator=lambda x: isinstance(x, numbers.Number),
+   expected_type=numbers.Number
)
default_sampler = get_config_item_or_set_default(
    key='default_sampler',
    default_value='dpmpp_2m_sde_gpu',
-   validator=lambda x: x in modules.flags.sampler_list
+   validator=lambda x: x in modules.flags.sampler_list,
+   expected_type=str
)
default_scheduler = get_config_item_or_set_default(
    key='default_scheduler',
    default_value='karras',
-   validator=lambda x: x in modules.flags.scheduler_list
+   validator=lambda x: x in modules.flags.scheduler_list,
+   expected_type=str
)
default_vae = get_config_item_or_set_default(
    key='default_vae',
    default_value=modules.flags.default_vae,
-   validator=lambda x: isinstance(x, str)
+   validator=lambda x: isinstance(x, str),
+   expected_type=str
)
default_styles = get_config_item_or_set_default(
    key='default_styles',
@@ -360,124 +380,344 @@ default_styles = get_config_item_or_set_default(
        "Fooocus Enhance",
        "Fooocus Sharp"
    ],
-   validator=lambda x: isinstance(x, list) and all(y in modules.sdxl_styles.legal_style_names for y in x)
+   validator=lambda x: isinstance(x, list) and all(y in modules.sdxl_styles.legal_style_names for y in x),
+   expected_type=list
)
default_prompt_negative = get_config_item_or_set_default(
    key='default_prompt_negative',
    default_value='',
    validator=lambda x: isinstance(x, str),
-   disable_empty_as_none=True
+   disable_empty_as_none=True,
+   expected_type=str
)
default_prompt = get_config_item_or_set_default(
    key='default_prompt',
    default_value='',
    validator=lambda x: isinstance(x, str),
-   disable_empty_as_none=True
+   disable_empty_as_none=True,
+   expected_type=str
)
default_performance = get_config_item_or_set_default(
    key='default_performance',
    default_value=Performance.SPEED.value,
-   validator=lambda x: x in Performance.list()
+   validator=lambda x: x in Performance.values(),
+   expected_type=str
)
default_image_prompt_checkbox = get_config_item_or_set_default(
    key='default_image_prompt_checkbox',
    default_value=False,
    validator=lambda x: isinstance(x, bool),
    expected_type=bool
)
default_enhance_checkbox = get_config_item_or_set_default(
    key='default_enhance_checkbox',
    default_value=False,
    validator=lambda x: isinstance(x, bool),
    expected_type=bool
)
default_advanced_checkbox = get_config_item_or_set_default(
    key='default_advanced_checkbox',
    default_value=False,
-   validator=lambda x: isinstance(x, bool)
+   validator=lambda x: isinstance(x, bool),
+   expected_type=bool
)
default_developer_debug_mode_checkbox = get_config_item_or_set_default(
    key='default_developer_debug_mode_checkbox',
    default_value=False,
    validator=lambda x: isinstance(x, bool),
    expected_type=bool
)
default_image_prompt_advanced_checkbox = get_config_item_or_set_default(
    key='default_image_prompt_advanced_checkbox',
    default_value=False,
    validator=lambda x: isinstance(x, bool),
    expected_type=bool
)
default_max_image_number = get_config_item_or_set_default(
    key='default_max_image_number',
    default_value=32,
-   validator=lambda x: isinstance(x, int) and x >= 1
+   validator=lambda x: isinstance(x, int) and x >= 1,
+   expected_type=int
)
default_output_format = get_config_item_or_set_default(
    key='default_output_format',
    default_value='png',
-   validator=lambda x: x in OutputFormat.list()
+   validator=lambda x: x in OutputFormat.list(),
+   expected_type=str
)
default_image_number = get_config_item_or_set_default(
    key='default_image_number',
    default_value=2,
-   validator=lambda x: isinstance(x, int) and 1 <= x <= default_max_image_number
+   validator=lambda x: isinstance(x, int) and 1 <= x <= default_max_image_number,
+   expected_type=int
)
checkpoint_downloads = get_config_item_or_set_default(
    key='checkpoint_downloads',
    default_value={},
-   validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items())
+   validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()),
+   expected_type=dict
)
lora_downloads = get_config_item_or_set_default(
    key='lora_downloads',
    default_value={},
-   validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items())
+   validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()),
+   expected_type=dict
)
embeddings_downloads = get_config_item_or_set_default(
    key='embeddings_downloads',
    default_value={},
-   validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items())
+   validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()),
+   expected_type=dict
)
vae_downloads = get_config_item_or_set_default(
    key='vae_downloads',
    default_value={},
    validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()),
    expected_type=dict
)
available_aspect_ratios = get_config_item_or_set_default(
    key='available_aspect_ratios',
    default_value=modules.flags.sdxl_aspect_ratios,
-   validator=lambda x: isinstance(x, list) and all('*' in v for v in x) and len(x) > 1
+   validator=lambda x: isinstance(x, list) and all('*' in v for v in x) and len(x) > 1,
+   expected_type=list
)
default_aspect_ratio = get_config_item_or_set_default(
    key='default_aspect_ratio',
    default_value='1152*896' if '1152*896' in available_aspect_ratios else available_aspect_ratios[0],
-   validator=lambda x: x in available_aspect_ratios
+   validator=lambda x: x in available_aspect_ratios,
+   expected_type=str
)
default_inpaint_engine_version = get_config_item_or_set_default(
    key='default_inpaint_engine_version',
    default_value='v2.6',
-   validator=lambda x: x in modules.flags.inpaint_engine_versions
+   validator=lambda x: x in modules.flags.inpaint_engine_versions,
+   expected_type=str
)
default_selected_image_input_tab_id = get_config_item_or_set_default(
    key='default_selected_image_input_tab_id',
    default_value=modules.flags.default_input_image_tab,
    validator=lambda x: x in modules.flags.input_image_tab_ids,
    expected_type=str
)
default_uov_method = get_config_item_or_set_default(
    key='default_uov_method',
    default_value=modules.flags.disabled,
    validator=lambda x: x in modules.flags.uov_list,
    expected_type=str
)
default_controlnet_image_count = get_config_item_or_set_default(
    key='default_controlnet_image_count',
    default_value=4,
    validator=lambda x: isinstance(x, int) and x > 0,
    expected_type=int
)
default_ip_images = {}
default_ip_stop_ats = {}
default_ip_weights = {}
default_ip_types = {}

for image_count in range(default_controlnet_image_count):
    image_count += 1
    default_ip_images[image_count] = get_config_item_or_set_default(
        key=f'default_ip_image_{image_count}',
        default_value='None',
        validator=lambda x: x == 'None' or isinstance(x, str) and os.path.exists(x),
        expected_type=str
    )

    if default_ip_images[image_count] == 'None':
        default_ip_images[image_count] = None

    default_ip_types[image_count] = get_config_item_or_set_default(
        key=f'default_ip_type_{image_count}',
        default_value=modules.flags.default_ip,
        validator=lambda x: x in modules.flags.ip_list,
        expected_type=str
    )

    default_end, default_weight = modules.flags.default_parameters[default_ip_types[image_count]]

    default_ip_stop_ats[image_count] = get_config_item_or_set_default(
        key=f'default_ip_stop_at_{image_count}',
        default_value=default_end,
        validator=lambda x: isinstance(x, float) and 0 <= x <= 1,
        expected_type=float
    )
    default_ip_weights[image_count] = get_config_item_or_set_default(
        key=f'default_ip_weight_{image_count}',
        default_value=default_weight,
        validator=lambda x: isinstance(x, float) and 0 <= x <= 2,
        expected_type=float
    )

default_inpaint_advanced_masking_checkbox = get_config_item_or_set_default(
    key='default_inpaint_advanced_masking_checkbox',
    default_value=False,
    validator=lambda x: isinstance(x, bool),
    expected_type=bool
)
default_inpaint_method = get_config_item_or_set_default(
    key='default_inpaint_method',
    default_value=modules.flags.inpaint_option_default,
    validator=lambda x: x in modules.flags.inpaint_options,
    expected_type=str
)
default_cfg_tsnr = get_config_item_or_set_default(
    key='default_cfg_tsnr',
    default_value=7.0,
-   validator=lambda x: isinstance(x, numbers.Number)
+   validator=lambda x: isinstance(x, numbers.Number),
+   expected_type=numbers.Number
)
default_clip_skip = get_config_item_or_set_default(
    key='default_clip_skip',
-   default_value=1,
-   validator=lambda x: isinstance(x, numbers.Number)
+   default_value=2,
+   validator=lambda x: isinstance(x, int) and 1 <= x <= modules.flags.clip_skip_max,
+   expected_type=int
)
default_overwrite_step = get_config_item_or_set_default(
    key='default_overwrite_step',
    default_value=-1,
-   validator=lambda x: isinstance(x, int)
+   validator=lambda x: isinstance(x, int),
+   expected_type=int
)
default_overwrite_switch = get_config_item_or_set_default(
    key='default_overwrite_switch',
    default_value=-1,
-   validator=lambda x: isinstance(x, int)
+   validator=lambda x: isinstance(x, int),
+   expected_type=int
)
default_overwrite_upscale = get_config_item_or_set_default(
    key='default_overwrite_upscale',
    default_value=-1,
    validator=lambda x: isinstance(x, numbers.Number)
)
example_inpaint_prompts = get_config_item_or_set_default(
    key='example_inpaint_prompts',
    default_value=[
        'highly detailed face', 'detailed girl face', 'detailed man face', 'detailed hand', 'beautiful eyes'
    ],
-   validator=lambda x: isinstance(x, list) and all(isinstance(v, str) for v in x)
+   validator=lambda x: isinstance(x, list) and all(isinstance(v, str) for v in x),
+   expected_type=list
)
example_enhance_detection_prompts = get_config_item_or_set_default(
    key='example_enhance_detection_prompts',
    default_value=[
        'face', 'eye', 'mouth', 'hair', 'hand', 'body'
    ],
    validator=lambda x: isinstance(x, list) and all(isinstance(v, str) for v in x),
    expected_type=list
)
default_enhance_tabs = get_config_item_or_set_default(
    key='default_enhance_tabs',
    default_value=3,
    validator=lambda x: isinstance(x, int) and 1 <= x <= 5,
    expected_type=int
)
default_enhance_uov_method = get_config_item_or_set_default(
    key='default_enhance_uov_method',
    default_value=modules.flags.disabled,
    validator=lambda x: x in modules.flags.uov_list,
    expected_type=int
)
default_enhance_uov_processing_order = get_config_item_or_set_default(
    key='default_enhance_uov_processing_order',
    default_value=modules.flags.enhancement_uov_before,
    validator=lambda x: x in modules.flags.enhancement_uov_processing_order,
    expected_type=int
)
default_enhance_uov_prompt_type = get_config_item_or_set_default(
    key='default_enhance_uov_prompt_type',
    default_value=modules.flags.enhancement_uov_prompt_type_original,
    validator=lambda x: x in modules.flags.enhancement_uov_prompt_types,
    expected_type=int
)
default_sam_max_detections = get_config_item_or_set_default(
    key='default_sam_max_detections',
    default_value=0,
    validator=lambda x: isinstance(x, int) and 0 <= x <= 10,
    expected_type=int
)
default_black_out_nsfw = get_config_item_or_set_default(
    key='default_black_out_nsfw',
    default_value=False,
-   validator=lambda x: isinstance(x, bool)
+   validator=lambda x: isinstance(x, bool),
+   expected_type=bool
)
default_save_only_final_enhanced_image = get_config_item_or_set_default(
    key='default_save_only_final_enhanced_image',
    default_value=False,
    validator=lambda x: isinstance(x, bool),
    expected_type=bool
)
default_save_metadata_to_images = get_config_item_or_set_default(
    key='default_save_metadata_to_images',
    default_value=False,
-   validator=lambda x: isinstance(x, bool)
+   validator=lambda x: isinstance(x, bool),
+   expected_type=bool
)
default_metadata_scheme = get_config_item_or_set_default(
    key='default_metadata_scheme',
    default_value=MetadataScheme.FOOOCUS.value,
-   validator=lambda x: x in [y[1] for y in modules.flags.metadata_scheme if y[1] == x]
+   validator=lambda x: x in [y[1] for y in modules.flags.metadata_scheme if y[1] == x],
+   expected_type=str
)
metadata_created_by = get_config_item_or_set_default(
    key='metadata_created_by',
    default_value='',
-   validator=lambda x: isinstance(x, str)
+   validator=lambda x: isinstance(x, str),
+   expected_type=str
)

example_inpaint_prompts = [[x] for x in example_inpaint_prompts]
example_enhance_detection_prompts = [[x] for x in example_enhance_detection_prompts]

default_invert_mask_checkbox = get_config_item_or_set_default(
    key='default_invert_mask_checkbox',
    default_value=False,
    validator=lambda x: isinstance(x, bool),
    expected_type=bool
)

default_inpaint_mask_model = get_config_item_or_set_default(
    key='default_inpaint_mask_model',
    default_value='isnet-general-use',
    validator=lambda x: x in modules.flags.inpaint_mask_models,
    expected_type=str
)

default_enhance_inpaint_mask_model = get_config_item_or_set_default(
    key='default_enhance_inpaint_mask_model',
    default_value='sam',
    validator=lambda x: x in modules.flags.inpaint_mask_models,
    expected_type=str
)

default_inpaint_mask_cloth_category = get_config_item_or_set_default(
    key='default_inpaint_mask_cloth_category',
    default_value='full',
    validator=lambda x: x in modules.flags.inpaint_mask_cloth_category,
    expected_type=str
)

default_inpaint_mask_sam_model = get_config_item_or_set_default(
    key='default_inpaint_mask_sam_model',
    default_value='vit_b',
    validator=lambda x: x in modules.flags.inpaint_mask_sam_model,
    expected_type=str
)

default_describe_apply_prompts_checkbox = get_config_item_or_set_default(
    key='default_describe_apply_prompts_checkbox',
    default_value=True,
    validator=lambda x: isinstance(x, bool),
    expected_type=bool
)
default_describe_content_type = get_config_item_or_set_default(
    key='default_describe_content_type',
    default_value=[modules.flags.describe_type_photo],
    validator=lambda x: all(k in modules.flags.describe_types for k in x),
    expected_type=list
)

config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [[True, 'None', 1.0] for _ in range(default_max_lora_number - len(default_loras))]
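The final assignment normalizes the LoRA list: it truncates to default_max_lora_number entries and pads with enabled 'None' placeholders, so downstream UI code can rely on a fixed row count. In isolation:

default_max_lora_number = 5
default_loras = [(True, 'my_style.safetensors', 0.8)]   # hypothetical single entry
default_loras = default_loras[:default_max_lora_number] + \
    [[True, 'None', 1.0] for _ in range(default_max_lora_number - len(default_loras))]
print(len(default_loras))  # always 5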
@@ -497,6 +737,7 @@ possible_preset_keys = {
    "default_sampler": "sampler",
    "default_scheduler": "scheduler",
    "default_overwrite_step": "steps",
+   "default_overwrite_switch": "overwrite_switch",
    "default_performance": "performance",
    "default_image_number": "image_number",
    "default_prompt": "prompt",
@@ -506,7 +747,11 @@ possible_preset_keys = {
    "default_save_metadata_to_images": "default_save_metadata_to_images",
    "checkpoint_downloads": "checkpoint_downloads",
    "embeddings_downloads": "embeddings_downloads",
-   "lora_downloads": "lora_downloads"
+   "lora_downloads": "lora_downloads",
+   "vae_downloads": "vae_downloads",
+   "default_vae": "vae",
+   # "default_inpaint_method": "inpaint_method", # disabled so inpaint mode doesn't refresh after every preset change
+   "default_inpaint_engine_version": "inpaint_engine_version",
}

REWRITE_PRESET = False
@@ -548,25 +793,9 @@ with open(config_example_path, "w", encoding="utf-8") as json_file:

model_filenames = []
lora_filenames = []
-lora_filenames_no_special = []
vae_filenames = []
wildcard_filenames = []

-sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors'
-sdxl_lightning_lora = 'sdxl_lightning_4step_lora.safetensors'
-sdxl_hyper_sd_lora = 'sdxl_hyper_sd_4step_lora.safetensors'
-loras_metadata_remove = [sdxl_lcm_lora, sdxl_lightning_lora, sdxl_hyper_sd_lora]
-
-
-def remove_special_loras(lora_filenames):
-    global loras_metadata_remove
-
-    loras_no_special = lora_filenames.copy()
-    for lora_to_remove in loras_metadata_remove:
-        if lora_to_remove in loras_no_special:
-            loras_no_special.remove(lora_to_remove)
-    return loras_no_special
-

def get_model_filenames(folder_paths, extensions=None, name_filter=None):
    if extensions is None:
@@ -582,10 +811,9 @@ def get_model_filenames(folder_paths, extensions=None, name_filter=None):


def update_files():
-   global model_filenames, lora_filenames, lora_filenames_no_special, vae_filenames, wildcard_filenames, available_presets
+   global model_filenames, lora_filenames, vae_filenames, wildcard_filenames, available_presets
    model_filenames = get_model_filenames(paths_checkpoints)
    lora_filenames = get_model_filenames(paths_loras)
-   lora_filenames_no_special = remove_special_loras(lora_filenames)
    vae_filenames = get_model_filenames(path_vae)
    wildcard_filenames = get_files_from_folder(path_wildcards, ['.txt'])
    available_presets = get_presets()
@@ -634,26 +862,27 @@ def downloading_sdxl_lcm_lora():
    load_file_from_url(
        url='https://huggingface.co/lllyasviel/misc/resolve/main/sdxl_lcm_lora.safetensors',
        model_dir=paths_loras[0],
-       file_name=sdxl_lcm_lora
+       file_name=modules.flags.PerformanceLoRA.EXTREME_SPEED.value
    )
-   return sdxl_lcm_lora
+   return modules.flags.PerformanceLoRA.EXTREME_SPEED.value


def downloading_sdxl_lightning_lora():
    load_file_from_url(
        url='https://huggingface.co/mashb1t/misc/resolve/main/sdxl_lightning_4step_lora.safetensors',
        model_dir=paths_loras[0],
-       file_name=sdxl_lightning_lora
+       file_name=modules.flags.PerformanceLoRA.LIGHTNING.value
    )
-   return sdxl_lightning_lora
+   return modules.flags.PerformanceLoRA.LIGHTNING.value


def downloading_sdxl_hyper_sd_lora():
    load_file_from_url(
        url='https://huggingface.co/mashb1t/misc/resolve/main/sdxl_hyper_sd_4step_lora.safetensors',
        model_dir=paths_loras[0],
-       file_name=sdxl_hyper_sd_lora
+       file_name=modules.flags.PerformanceLoRA.HYPER_SD.value
    )
-   return sdxl_hyper_sd_lora
+   return modules.flags.PerformanceLoRA.HYPER_SD.value


def downloading_controlnet_canny():
@@ -729,4 +958,40 @@ def downloading_safety_checker_model():
    return os.path.join(path_safety_checker, 'stable-diffusion-safety-checker.bin')


-update_files()
+def download_sam_model(sam_model: str) -> str:
+    match sam_model:
+        case 'vit_b':
+            return downloading_sam_vit_b()
+        case 'vit_l':
+            return downloading_sam_vit_l()
+        case 'vit_h':
+            return downloading_sam_vit_h()
+        case _:
+            raise ValueError(f"sam model {sam_model} does not exist.")
+
+
+def downloading_sam_vit_b():
+    load_file_from_url(
+        url='https://huggingface.co/mashb1t/misc/resolve/main/sam_vit_b_01ec64.pth',
+        model_dir=path_sam,
+        file_name='sam_vit_b_01ec64.pth'
+    )
+    return os.path.join(path_sam, 'sam_vit_b_01ec64.pth')
+
+
+def downloading_sam_vit_l():
+    load_file_from_url(
+        url='https://huggingface.co/mashb1t/misc/resolve/main/sam_vit_l_0b3195.pth',
+        model_dir=path_sam,
+        file_name='sam_vit_l_0b3195.pth'
+    )
+    return os.path.join(path_sam, 'sam_vit_l_0b3195.pth')
+
+
+def downloading_sam_vit_h():
+    load_file_from_url(
+        url='https://huggingface.co/mashb1t/misc/resolve/main/sam_vit_h_4b8939.pth',
+        model_dir=path_sam,
+        file_name='sam_vit_h_4b8939.pth'
+    )
+    return os.path.join(path_sam, 'sam_vit_h_4b8939.pth')
@@ -21,8 +21,7 @@ from modules.lora import match_lora
from modules.util import get_file_from_folder_list
from ldm_patched.modules.lora import model_lora_keys_unet, model_lora_keys_clip
from modules.config import path_embeddings
-from ldm_patched.contrib.external_model_advanced import ModelSamplingDiscrete
-
+from ldm_patched.contrib.external_model_advanced import ModelSamplingDiscrete, ModelSamplingContinuousEDM

opEmptyLatentImage = EmptyLatentImage()
opVAEDecode = VAEDecode()
@@ -32,6 +31,7 @@ opVAEEncodeTiled = VAEEncodeTiled()
opControlNetApplyAdvanced = ControlNetApplyAdvanced()
opFreeU = FreeU_V2()
opModelSamplingDiscrete = ModelSamplingDiscrete()
+opModelSamplingContinuousEDM = ModelSamplingContinuousEDM()


class StableDiffusionModel:
@@ -231,7 +231,7 @@ def get_previewer(model):
    if vae_approx_filename in VAE_approx_models:
        VAE_approx_model = VAE_approx_models[vae_approx_filename]
    else:
-       sd = torch.load(vae_approx_filename, map_location='cpu')
+       sd = torch.load(vae_approx_filename, map_location='cpu', weights_only=True)
        VAE_approx_model = VAEApprox()
        VAE_approx_model.load_state_dict(sd)
        del sd
@@ -1,4 +1,6 @@
import os
+from ast import literal_eval


def makedirs_with_log(path):
    try:
@@ -24,3 +26,16 @@ def get_files_from_folder(folder_path, extensions=None, name_filter=None):
            filenames.append(path)

    return filenames
+
+
+def try_eval_env_var(value: str, expected_type=None):
+    try:
+        value_eval = value
+        if expected_type is bool:
+            value_eval = value.title()
+        value_eval = literal_eval(value_eval)
+        if expected_type is not None and not isinstance(value_eval, expected_type):
+            return value
+        return value_eval
+    except:
+        return value
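Concrete behavior of try_eval_env_var, assuming the definition above (environment values always arrive as strings):

assert try_eval_env_var('32', int) == 32
assert try_eval_env_var('true', bool) is True      # 'true'.title() -> 'True' -> literal True
assert try_eval_env_var('[1, 2]', list) == [1, 2]
assert try_eval_env_var('7.5', int) == '7.5'       # parses as float; type mismatch returns the raw string
assert try_eval_env_var('hello') == 'hello'        # not a Python literal; raw string returned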
@@ -8,9 +8,15 @@ upscale_15 = 'Upscale (1.5x)'
upscale_2 = 'Upscale (2x)'
upscale_fast = 'Upscale (Fast 2x)'

-uov_list = [
-   disabled, subtle_variation, strong_variation, upscale_15, upscale_2, upscale_fast
-]
+uov_list = [disabled, subtle_variation, strong_variation, upscale_15, upscale_2, upscale_fast]
+
+enhancement_uov_before = "Before First Enhancement"
+enhancement_uov_after = "After Last Enhancement"
+enhancement_uov_processing_order = [enhancement_uov_before, enhancement_uov_after]
+
+enhancement_uov_prompt_type_original = 'Original Prompts'
+enhancement_uov_prompt_type_last_filled = 'Last Filled Enhancement Prompts'
+enhancement_uov_prompt_types = [enhancement_uov_prompt_type_original, enhancement_uov_prompt_type_last_filled]

CIVITAI_NO_KARRAS = ["euler", "euler_ancestral", "heun", "dpm_fast", "dpm_adaptive", "ddim", "uni_pc"]
@@ -35,7 +41,8 @@ KSAMPLER = {
    "dpmpp_3m_sde_gpu": "",
    "ddpm": "",
    "lcm": "LCM",
-   "tcd": "TCD"
+   "tcd": "TCD",
+   "restart": "Restart"
}

SAMPLER_EXTRA = {
@@ -48,16 +55,21 @@ SAMPLERS = KSAMPLER | SAMPLER_EXTRA

KSAMPLER_NAMES = list(KSAMPLER.keys())

-SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo", "align_your_steps", "tcd"]
+SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo", "align_your_steps", "tcd", "edm_playground_v2.5"]
SAMPLER_NAMES = KSAMPLER_NAMES + list(SAMPLER_EXTRA.keys())

sampler_list = SAMPLER_NAMES
scheduler_list = SCHEDULER_NAMES

+clip_skip_max = 12
+
default_vae = 'Default (model)'

refiner_swap_method = 'joint'

+default_input_image_tab = 'uov_tab'
+input_image_tab_ids = ['uov_tab', 'ip_tab', 'inpaint_tab', 'describe_tab', 'enhance_tab', 'metadata_tab']
+
cn_ip = "ImagePrompt"
cn_ip_face = "FaceSwap"
cn_canny = "PyraCanny"
|
|||
|
||||
output_formats = ['png', 'jpeg', 'webp']
|
||||
|
||||
inpaint_mask_models = ['u2net', 'u2netp', 'u2net_human_seg', 'u2net_cloth_seg', 'silueta', 'isnet-general-use', 'isnet-anime', 'sam']
|
||||
inpaint_mask_cloth_category = ['full', 'upper', 'lower']
|
||||
inpaint_mask_sam_model = ['vit_b', 'vit_l', 'vit_h']
|
||||
|
||||
inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6']
|
||||
inpaint_option_default = 'Inpaint or Outpaint (default)'
|
||||
inpaint_option_detail = 'Improve Detail (face, hand, eyes, etc.)'
|
||||
inpaint_option_modify = 'Modify Content (add objects, change background, etc.)'
|
||||
inpaint_options = [inpaint_option_default, inpaint_option_detail, inpaint_option_modify]
|
||||
|
||||
desc_type_photo = 'Photograph'
|
||||
desc_type_anime = 'Art/Anime'
|
||||
describe_type_photo = 'Photograph'
|
||||
describe_type_anime = 'Art/Anime'
|
||||
describe_types = [describe_type_photo, describe_type_anime]
|
||||
|
||||
sdxl_aspect_ratios = [
|
||||
'704*1408', '704*1344', '768*1344', '768*1280', '832*1216', '832*1152',
|
||||
|
|
@@ -89,6 +106,7 @@ sdxl_aspect_ratios = [
    '1664*576', '1728*576'
]


class MetadataScheme(Enum):
    FOOOCUS = 'fooocus'
    A1111 = 'a1111'
@@ -99,9 +117,6 @@ metadata_scheme = [
    (f'{MetadataScheme.A1111.value} (plain text)', MetadataScheme.A1111.value),
]

-controlnet_image_count = 4
-preparation_step_count = 13
-

class OutputFormat(Enum):
    PNG = 'png'
@@ -113,6 +128,14 @@ class OutputFormat(Enum):
        return list(map(lambda c: c.value, cls))


+class PerformanceLoRA(Enum):
+    QUALITY = None
+    SPEED = None
+    EXTREME_SPEED = 'sdxl_lcm_lora.safetensors'
+    LIGHTNING = 'sdxl_lightning_4step_lora.safetensors'
+    HYPER_SD = 'sdxl_hyper_sd_4step_lora.safetensors'
+
+
class Steps(IntEnum):
    QUALITY = 60
    SPEED = 30
@@ -120,6 +143,10 @@ class Steps(IntEnum):
    LIGHTNING = 4
    HYPER_SD = 4

+    @classmethod
+    def keys(cls) -> list:
+        return list(map(lambda c: c, Steps.__members__))
+

class StepsUOV(IntEnum):
    QUALITY = 36
@@ -138,8 +165,16 @@ class Performance(Enum):

    @classmethod
    def list(cls) -> list:
        return list(map(lambda c: (c.name, c.value), cls))

+    @classmethod
+    def values(cls) -> list:
+        return list(map(lambda c: c.value, cls))
+
+    @classmethod
+    def by_steps(cls, steps: int | str):
+        return cls[Steps(int(steps)).name]
+
    @classmethod
    def has_restricted_features(cls, x) -> bool:
        if isinstance(x, Performance):
@@ -147,7 +182,10 @@ class Performance(Enum):
        return x in [cls.EXTREME_SPEED.value, cls.LIGHTNING.value, cls.HYPER_SD.value]

    def steps(self) -> int | None:
-        return Steps[self.name].value if Steps[self.name] else None
+        return Steps[self.name].value if self.name in Steps.__members__ else None

    def steps_uov(self) -> int | None:
-        return StepsUOV[self.name].value if Steps[self.name] else None
+        return StepsUOV[self.name].value if self.name in StepsUOV.__members__ else None
+
+    def lora_filename(self) -> str | None:
+        return PerformanceLoRA[self.name].value if self.name in PerformanceLoRA.__members__ else None
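Taken together, these enums let a performance mode be recovered from a step count and mapped to its LoRA file. An illustrative round trip (this assumes Steps.EXTREME_SPEED == 8, as in Fooocus):

from modules.flags import Performance

p = Performance.by_steps(8)                  # Steps(8).name == 'EXTREME_SPEED'
print(p.steps())                             # 8
print(p.lora_filename())                     # 'sdxl_lcm_lora.safetensors'
print(Performance.QUALITY.lora_filename())   # None: no dedicated performance LoRA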
@@ -0,0 +1,83 @@
import json
import os
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count

import args_manager
from modules.util import sha256, HASH_SHA256_LENGTH, get_file_from_folder_list

hash_cache_filename = 'hash_cache.txt'
hash_cache = {}


def sha256_from_cache(filepath):
    global hash_cache
    if filepath not in hash_cache:
        print(f"[Cache] Calculating sha256 for {filepath}")
        hash_value = sha256(filepath)
        print(f"[Cache] sha256 for {filepath}: {hash_value}")
        hash_cache[filepath] = hash_value
        save_cache_to_file(filepath, hash_value)

    return hash_cache[filepath]


def load_cache_from_file():
    global hash_cache

    try:
        if os.path.exists(hash_cache_filename):
            with open(hash_cache_filename, 'rt', encoding='utf-8') as fp:
                for line in fp:
                    entry = json.loads(line)
                    for filepath, hash_value in entry.items():
                        if not os.path.exists(filepath) or not isinstance(hash_value, str) and len(hash_value) != HASH_SHA256_LENGTH:
                            print(f'[Cache] Skipping invalid cache entry: {filepath}')
                            continue
                        hash_cache[filepath] = hash_value
    except Exception as e:
        print(f'[Cache] Loading failed: {e}')


def save_cache_to_file(filename=None, hash_value=None):
    global hash_cache

    if filename is not None and hash_value is not None:
        items = [(filename, hash_value)]
        mode = 'at'
    else:
        items = sorted(hash_cache.items())
        mode = 'wt'

    try:
        with open(hash_cache_filename, mode, encoding='utf-8') as fp:
            for filepath, hash_value in items:
                json.dump({filepath: hash_value}, fp)
                fp.write('\n')
    except Exception as e:
        print(f'[Cache] Saving failed: {e}')


def init_cache(model_filenames, paths_checkpoints, lora_filenames, paths_loras):
    load_cache_from_file()

    if args_manager.args.rebuild_hash_cache:
        max_workers = args_manager.args.rebuild_hash_cache if args_manager.args.rebuild_hash_cache > 0 else cpu_count()
        rebuild_cache(lora_filenames, model_filenames, paths_checkpoints, paths_loras, max_workers)

    # write cache to file again for sorting and cleanup of invalid cache entries
    save_cache_to_file()


def rebuild_cache(lora_filenames, model_filenames, paths_checkpoints, paths_loras, max_workers=cpu_count()):
    def thread(filename, paths):
        filepath = get_file_from_folder_list(filename, paths)
        sha256_from_cache(filepath)

    print('[Cache] Rebuilding hash cache')
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        for model_filename in model_filenames:
            executor.submit(thread, model_filename, paths_checkpoints)
        for lora_filename in lora_filenames:
            executor.submit(thread, lora_filename, paths_loras)
    print('[Cache] Done')
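The cache file is JSON Lines: one {filepath: sha256} object per line. Single lookups append ('at') so the hot path stays cheap, while init_cache rewrites the whole file ('wt') to sort entries and drop stale ones. Hypothetical use:

from modules.hash_cache import sha256_from_cache

digest = sha256_from_cache('/models/checkpoints/example_model.safetensors')  # made-up path
print(digest)  # computed once, then served from hash_cache / hash_cache.txt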
@@ -196,7 +196,7 @@ class InpaintWorker:

    if inpaint_head_model is None:
        inpaint_head_model = InpaintHead()
-       sd = torch.load(inpaint_head_model_path, map_location='cpu')
+       sd = torch.load(inpaint_head_model_path, map_location='cpu', weights_only=True)
        inpaint_head_model.load_state_dict(sd)

    feed = torch.cat([
@@ -11,16 +11,15 @@ import modules.config
import modules.sdxl_styles
from modules.flags import MetadataScheme, Performance, Steps
from modules.flags import SAMPLERS, CIVITAI_NO_KARRAS
-from modules.util import quote, unquote, extract_styles_from_prompt, is_json, get_file_from_folder_list, sha256
+from modules.hash_cache import sha256_from_cache
+from modules.util import quote, unquote, extract_styles_from_prompt, is_json, get_file_from_folder_list

re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)'
re_param = re.compile(re_param_code)
re_imagesize = re.compile(r"^(\d+)x(\d+)$")

-hash_cache = {}
-
-
-def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
+def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool, inpaint_mode: str):
    loaded_parameter_dict = raw_metadata
    if isinstance(raw_metadata, str):
        loaded_parameter_dict = json.loads(raw_metadata)
@@ -32,7 +31,7 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
    get_str('prompt', 'Prompt', loaded_parameter_dict, results)
    get_str('negative_prompt', 'Negative Prompt', loaded_parameter_dict, results)
    get_list('styles', 'Styles', loaded_parameter_dict, results)
-   get_str('performance', 'Performance', loaded_parameter_dict, results)
+   performance = get_str('performance', 'Performance', loaded_parameter_dict, results)
    get_steps('steps', 'Steps', loaded_parameter_dict, results)
    get_number('overwrite_switch', 'Overwrite Switch', loaded_parameter_dict, results)
    get_resolution('resolution', 'Resolution', loaded_parameter_dict, results)
@@ -49,6 +48,8 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool, inpaint_mode: str):
    get_str('scheduler', 'Scheduler', loaded_parameter_dict, results)
    get_str('vae', 'VAE', loaded_parameter_dict, results)
    get_seed('seed', 'Seed', loaded_parameter_dict, results)
+   get_inpaint_engine_version('inpaint_engine_version', 'Inpaint Engine Version', loaded_parameter_dict, results, inpaint_mode)
+   get_inpaint_method('inpaint_method', 'Inpaint Mode', loaded_parameter_dict, results)

    if is_generating:
        results.append(gr.update())
@@ -59,19 +60,27 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool, inpaint_mode: str):

    get_freeu('freeu', 'FreeU', loaded_parameter_dict, results)

+   # prevent performance LoRAs to be added twice, by performance and by lora
+   performance_filename = None
+   if performance is not None and performance in Performance.values():
+       performance = Performance(performance)
+       performance_filename = performance.lora_filename()
+
    for i in range(modules.config.default_max_lora_number):
-       get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results)
+       get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results, performance_filename)

    return results


-def get_str(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
+def get_str(key: str, fallback: str | None, source_dict: dict, results: list, default=None) -> str | None:
    try:
        h = source_dict.get(key, source_dict.get(fallback, default))
        assert isinstance(h, str)
        results.append(h)
+       return h
    except:
        results.append(gr.update())
+       return None


def get_list(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
@@ -111,8 +120,9 @@ def get_steps(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
        assert h is not None
        h = int(h)
-       # if not in steps or in steps and performance is not the same
-       if h not in iter(Steps) or Steps(h).name.casefold() != source_dict.get('performance', '').replace(' ', '_').casefold():
+       performance_name = source_dict.get('performance', '').replace(' ', '_').replace('-', '_').casefold()
+       performance_candidates = [key for key in Steps.keys() if key.casefold() == performance_name and Steps[key] == h]
+       if len(performance_candidates) == 0:
            results.append(h)
            return
        results.append(-1)
@@ -151,6 +161,36 @@ def get_seed(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
        results.append(gr.update())


+def get_inpaint_engine_version(key: str, fallback: str | None, source_dict: dict, results: list, inpaint_mode: str, default=None) -> str | None:
+    try:
+        h = source_dict.get(key, source_dict.get(fallback, default))
+        assert isinstance(h, str) and h in modules.flags.inpaint_engine_versions
+        if inpaint_mode != modules.flags.inpaint_option_detail:
+            results.append(h)
+        else:
+            results.append(gr.update())
+        results.append(h)
+        return h
+    except:
+        results.append(gr.update())
+        results.append('empty')
+        return None
+
+
+def get_inpaint_method(key: str, fallback: str | None, source_dict: dict, results: list, default=None) -> str | None:
+    try:
+        h = source_dict.get(key, source_dict.get(fallback, default))
+        assert isinstance(h, str) and h in modules.flags.inpaint_options
+        results.append(h)
+        for i in range(modules.config.default_enhance_tabs):
+            results.append(h)
+        return h
+    except:
+        results.append(gr.update())
+        for i in range(modules.config.default_enhance_tabs):
+            results.append(gr.update())
+
+
def get_adm_guidance(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
    try:
        h = source_dict.get(key, source_dict.get(fallback, default))
@@ -181,7 +221,7 @@ def get_freeu(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
        results.append(gr.update())


-def get_lora(key: str, fallback: str | None, source_dict: dict, results: list):
+def get_lora(key: str, fallback: str | None, source_dict: dict, results: list, performance_filename: str | None):
    try:
        split_data = source_dict.get(key, source_dict.get(fallback)).split(' : ')
        enabled = True
@@ -193,6 +233,9 @@ def get_lora(key: str, fallback: str | None, source_dict: dict, results: list, performance_filename: str | None):
            name = split_data[1]
            weight = split_data[2]

+       if name == performance_filename:
+           raise Exception
+
        weight = float(weight)
        results.append(enabled)
        results.append(name)
@@ -203,14 +246,6 @@ def get_lora(key: str, fallback: str | None, source_dict: dict, results: list, performance_filename: str | None):
    results.append(1)


-def get_sha256(filepath):
-    global hash_cache
-    if filepath not in hash_cache:
-        hash_cache[filepath] = sha256(filepath)
-
-    return hash_cache[filepath]
-
-
def parse_meta_from_preset(preset_content):
    assert isinstance(preset_content, dict)
    preset_prepared = {}
@ -221,7 +256,7 @@ def parse_meta_from_preset(preset_content):
|
|||
loras = getattr(modules.config, settings_key)
|
||||
if settings_key in items:
|
||||
loras = items[settings_key]
|
||||
for index, lora in enumerate(loras[:5]):
|
||||
for index, lora in enumerate(loras[:modules.config.default_max_lora_number]):
|
||||
preset_prepared[f'lora_combined_{index + 1}'] = ' : '.join(map(str, lora))
|
||||
elif settings_key == "default_aspect_ratio":
|
||||
if settings_key in items and items[settings_key] is not None:
|
||||
|
|
@ -233,8 +268,7 @@ def parse_meta_from_preset(preset_content):
|
|||
height = height[:height.index(" ")]
|
||||
preset_prepared[meta_key] = (width, height)
|
||||
else:
|
||||
preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[
|
||||
settings_key] is not None else getattr(modules.config, settings_key)
|
||||
preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[settings_key] is not None else getattr(modules.config, settings_key)
|
||||
|
||||
if settings_key == "default_styles" or settings_key == "default_aspect_ratio":
|
||||
preset_prepared[meta_key] = str(preset_prepared[meta_key])
|
||||
|
|
@ -248,7 +282,7 @@ class MetadataParser(ABC):
|
|||
self.full_prompt: str = ''
|
||||
self.raw_negative_prompt: str = ''
|
||||
self.full_negative_prompt: str = ''
|
||||
self.steps: int = 30
|
||||
self.steps: int = Steps.SPEED.value
|
||||
self.base_model_name: str = ''
|
||||
self.base_model_hash: str = ''
|
||||
self.refiner_model_name: str = ''
|
||||
|
|
@ -261,11 +295,11 @@ class MetadataParser(ABC):
|
|||
raise NotImplementedError
|
||||
|
||||
@abstractmethod
|
||||
def parse_json(self, metadata: dict | str) -> dict:
|
||||
def to_json(self, metadata: dict | str) -> dict:
|
||||
raise NotImplementedError
|
||||
|
||||
@abstractmethod
|
||||
def parse_string(self, metadata: dict) -> str:
|
||||
def to_string(self, metadata: dict) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name,
|
||||
|
|
@ -278,18 +312,18 @@ class MetadataParser(ABC):
|
|||
self.base_model_name = Path(base_model_name).stem
|
||||
|
||||
base_model_path = get_file_from_folder_list(base_model_name, modules.config.paths_checkpoints)
|
||||
self.base_model_hash = get_sha256(base_model_path)
|
||||
self.base_model_hash = sha256_from_cache(base_model_path)
|
||||
|
||||
if refiner_model_name not in ['', 'None']:
|
||||
self.refiner_model_name = Path(refiner_model_name).stem
|
||||
refiner_model_path = get_file_from_folder_list(refiner_model_name, modules.config.paths_checkpoints)
|
||||
self.refiner_model_hash = get_sha256(refiner_model_path)
|
||||
self.refiner_model_hash = sha256_from_cache(refiner_model_path)
|
||||
|
||||
self.loras = []
|
||||
for (lora_name, lora_weight) in loras:
|
||||
if lora_name != 'None':
|
||||
lora_path = get_file_from_folder_list(lora_name, modules.config.paths_loras)
|
||||
lora_hash = get_sha256(lora_path)
|
||||
lora_hash = sha256_from_cache(lora_path)
|
||||
self.loras.append((Path(lora_name).stem, lora_weight, lora_hash))
|
||||
self.vae_name = Path(vae_name).stem
|
||||
|
||||
|
|
@ -328,7 +362,7 @@ class A1111MetadataParser(MetadataParser):
|
|||
'version': 'Version'
|
||||
}
|
||||
|
||||
def parse_json(self, metadata: str) -> dict:
|
||||
def to_json(self, metadata: str) -> dict:
|
||||
metadata_prompt = ''
|
||||
metadata_negative_prompt = ''
|
||||
|
||||
|
|
@ -382,9 +416,9 @@ class A1111MetadataParser(MetadataParser):
|
|||
data['styles'] = str(found_styles)
|
||||
|
||||
# try to load performance based on steps, fallback for direct A1111 imports
|
||||
if 'steps' in data and 'performance' not in data:
|
||||
if 'steps' in data and 'performance' in data is None:
|
||||
try:
|
||||
data['performance'] = Performance[Steps(int(data['steps'])).name].value
|
||||
data['performance'] = Performance.by_steps(data['steps']).value
|
||||
except ValueError | KeyError:
|
||||
pass
|
||||
|
||||
|
|
@ -414,7 +448,7 @@ class A1111MetadataParser(MetadataParser):
|
|||
lora_split = lora.split(': ')
|
||||
lora_name = lora_split[0]
|
||||
lora_weight = lora_split[2] if len(lora_split) == 3 else lora_split[1]
|
||||
for filename in modules.config.lora_filenames_no_special:
|
||||
for filename in modules.config.lora_filenames:
|
||||
path = Path(filename)
|
||||
if lora_name == path.stem:
|
||||
data[f'lora_combined_{li + 1}'] = f'{filename} : {lora_weight}'
|
||||
|
|
@ -422,7 +456,7 @@ class A1111MetadataParser(MetadataParser):
|
|||
|
||||
return data
|
||||
|
||||
def parse_string(self, metadata: dict) -> str:
|
||||
def to_string(self, metadata: dict) -> str:
|
||||
data = {k: v for _, k, v in metadata}
|
||||
|
||||
width, height = eval(data['resolution'])
|
||||
|
|
@ -502,14 +536,14 @@ class FooocusMetadataParser(MetadataParser):
|
|||
def get_scheme(self) -> MetadataScheme:
|
||||
return MetadataScheme.FOOOCUS
|
||||
|
||||
def parse_json(self, metadata: dict) -> dict:
|
||||
def to_json(self, metadata: dict) -> dict:
|
||||
for key, value in metadata.items():
|
||||
if value in ['', 'None']:
|
||||
continue
|
||||
if key in ['base_model', 'refiner_model']:
|
||||
metadata[key] = self.replace_value_with_filename(key, value, modules.config.model_filenames)
|
||||
elif key.startswith('lora_combined_'):
|
||||
metadata[key] = self.replace_value_with_filename(key, value, modules.config.lora_filenames_no_special)
|
||||
metadata[key] = self.replace_value_with_filename(key, value, modules.config.lora_filenames)
|
||||
elif key == 'vae':
|
||||
metadata[key] = self.replace_value_with_filename(key, value, modules.config.vae_filenames)
|
||||
else:
|
||||
|
|
@ -517,7 +551,7 @@ class FooocusMetadataParser(MetadataParser):
|
|||
|
||||
return metadata
|
||||
|
||||
def parse_string(self, metadata: list) -> str:
|
||||
def to_string(self, metadata: list) -> str:
|
||||
for li, (label, key, value) in enumerate(metadata):
|
||||
# remove model folder paths from metadata
|
||||
if key.startswith('lora_combined_'):
|
||||
|
|
@ -557,6 +591,8 @@ class FooocusMetadataParser(MetadataParser):
|
|||
elif value == path.stem:
|
||||
return filename
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_metadata_parser(metadata_scheme: MetadataScheme) -> MetadataParser:
|
||||
match metadata_scheme:
|
||||
|
|
@ -568,9 +604,8 @@ def get_metadata_parser(metadata_scheme: MetadataScheme) -> MetadataParser:
|
|||
raise NotImplementedError
|
||||
|
||||
|
||||
def read_info_from_image(filepath) -> tuple[str | None, MetadataScheme | None]:
|
||||
with Image.open(filepath) as image:
|
||||
items = (image.info or {}).copy()
|
||||
def read_info_from_image(file) -> tuple[str | None, MetadataScheme | None]:
|
||||
items = (file.info or {}).copy()
|
||||
|
||||
parameters = items.pop('parameters', None)
|
||||
metadata_scheme = items.pop('fooocus_scheme', None)
|
||||
|
|
@ -579,7 +614,7 @@ def read_info_from_image(filepath) -> tuple[str | None, MetadataScheme | None]:
|
|||
if parameters is not None and is_json(parameters):
|
||||
parameters = json.loads(parameters)
|
||||
elif exif is not None:
|
||||
exif = image.getexif()
|
||||
exif = file.getexif()
|
||||
# 0x9286 = UserComment
|
||||
parameters = exif.get(0x9286, None)
|
||||
# 0x927C = MakerNote
|
||||
|
|
|
|||
|
|
@@ -21,13 +21,13 @@ def get_current_html_path(output_format=None):
     return html_name
 
 
-def log(img, metadata, metadata_parser: MetadataParser | None = None, output_format=None, task=None) -> str:
-    path_outputs = modules.config.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs
+def log(img, metadata, metadata_parser: MetadataParser | None = None, output_format=None, task=None, persist_image=True) -> str:
+    path_outputs = modules.config.temp_path if args_manager.args.disable_image_log or not persist_image else modules.config.path_outputs
     output_format = output_format if output_format else modules.config.default_output_format
     date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format)
     os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True)
 
-    parsed_parameters = metadata_parser.parse_string(metadata.copy()) if metadata_parser is not None else ''
+    parsed_parameters = metadata_parser.to_string(metadata.copy()) if metadata_parser is not None else ''
     image = Image.fromarray(img)
 
     if output_format == OutputFormat.PNG.value:
@@ -175,7 +175,7 @@ def calculate_sigmas_scheduler_hacked(model, scheduler_name, steps):
     elif scheduler_name == "sgm_uniform":
         sigmas = normal_scheduler(model, steps, sgm=True)
     elif scheduler_name == "turbo":
-        sigmas = SDTurboScheduler().get_sigmas(namedtuple('Patcher', ['model'])(model=model), steps=steps, denoise=1.0)[0]
+        sigmas = SDTurboScheduler().get_sigmas(model=model, steps=steps, denoise=1.0)[0]
     elif scheduler_name == "align_your_steps":
         model_type = 'SDXL' if isinstance(model.latent_format, ldm_patched.modules.latent_formats.SDXL) else 'SD1'
         sigmas = AlignYourStepsScheduler().get_sigmas(model_type=model_type, steps=steps, denoise=1.0)[0]
@@ -59,7 +59,7 @@ def get_random_style(rng: Random) -> str:
 
 def apply_style(style, positive):
     p, n = styles[style]
-    return p.replace('{prompt}', positive).splitlines(), n.splitlines()
+    return p.replace('{prompt}', positive).splitlines(), n.splitlines(), '{prompt}' in p
 
 
 def get_words(arrays, total_mult, index):
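The new third return value reports whether the style template actually contains a `{prompt}` placeholder, so callers can re-attach the user prompt when it does not. A minimal usage sketch; the style dict and fallback handling below are illustrative assumptions, not the surrounding Fooocus code:

```python
# Illustrative styles: the second one lacks a {prompt} placeholder.
styles = {
    'with placeholder': ('cinematic still {prompt}, moody', 'cartoon'),
    'without placeholder': ('masterpiece, best quality', 'worst quality'),
}

def apply_style(style, positive):
    p, n = styles[style]
    return p.replace('{prompt}', positive).splitlines(), n.splitlines(), '{prompt}' in p

positive_lines, negative_lines, placeholder_used = apply_style('without placeholder', 'forest elf')
if not placeholder_used:
    # without this, a style lacking {prompt} would silently drop the user prompt
    positive_lines.append('forest elf')
```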
@@ -1,13 +1,11 @@
 import os
-import torch
-import modules.core as core
-
-from ldm_patched.pfn.architecture.RRDB import RRDBNet as ESRGAN
-from ldm_patched.contrib.external_upscale_model import ImageUpscaleWithModel
 from collections import OrderedDict
-from modules.config import path_upscale_models
 
-model_filename = os.path.join(path_upscale_models, 'fooocus_upscaler_s409985e5.bin')
+import modules.core as core
+import torch
+from ldm_patched.contrib.external_upscale_model import ImageUpscaleWithModel
+from ldm_patched.pfn.architecture.RRDB import RRDBNet as ESRGAN
+from modules.config import downloading_upscale_model
 
 opImageUpscaleWithModel = ImageUpscaleWithModel()
 model = None

@@ -18,7 +16,8 @@ def perform_upscale(img):
     print(f'Upscaling image with shape {str(img.shape)} ...')
 
     if model is None:
-        sd = torch.load(model_filename)
+        model_filename = downloading_upscale_model()
+        sd = torch.load(model_filename, weights_only=True)
         sdo = OrderedDict()
         for k, v in sd.items():
            sdo[k.replace('residual_block_', 'RDB')] = v
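For context, `weights_only=True` restricts `torch.load` to plain tensor data instead of arbitrary pickled objects, which is what the changelog's "preventing harmful code injection" entry refers to. A minimal illustration; the path is hypothetical:

```python
import torch

# Unpickling arbitrary objects can execute code embedded in a checkpoint file.
# weights_only=True only allows tensors and other safe primitive types.
state_dict = torch.load('some_model.bin', weights_only=True)
```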
@@ -16,6 +16,7 @@ from PIL import Image
 
 import modules.config
 import modules.sdxl_styles
+from modules.flags import Performance
 
 LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)

@@ -175,13 +176,11 @@ def generate_temp_filename(folder='./outputs/', extension='png'):
 
 
 def sha256(filename, use_addnet_hash=False, length=HASH_SHA256_LENGTH):
-    print(f"Calculating sha256 for {filename}: ", end='')
     if use_addnet_hash:
         with open(filename, "rb") as file:
             sha256_value = addnet_hash_safetensors(file)
     else:
         sha256_value = calculate_sha256(filename)
-    print(f"{sha256_value}")
 
     return sha256_value[:length] if length is not None else sha256_value

@@ -381,26 +380,24 @@ def get_file_from_folder_list(name, folders):
 
     return os.path.abspath(os.path.realpath(os.path.join(folders[0], name)))
 
 
 def ordinal_suffix(number: int) -> str:
     return 'th' if 10 <= number % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(number % 10, 'th')
 
 
 def makedirs_with_log(path):
     try:
         os.makedirs(path, exist_ok=True)
     except OSError as error:
         print(f'Directory {path} could not be created, reason: {error}')
 
 
 def get_enabled_loras(loras: list, remove_none=True) -> list:
     return [(lora[1], lora[2]) for lora in loras if lora[0] and (lora[1] != 'None' if remove_none else True)]
 
 
 def parse_lora_references_from_prompt(prompt: str, loras: List[Tuple[AnyStr, float]], loras_limit: int = 5,
-                                      skip_file_check=False, prompt_cleanup=True, deduplicate_loras=True) -> tuple[List[Tuple[AnyStr, float]], str]:
+                                      skip_file_check=False, prompt_cleanup=True, deduplicate_loras=True,
+                                      lora_filenames=None) -> tuple[List[Tuple[AnyStr, float]], str]:
     # prevent unintended side effects when returning without detection
     loras = loras.copy()
 
+    if lora_filenames is None:
+        lora_filenames = []
+
     found_loras = []
     prompt_without_loras = ''
     cleaned_prompt = ''
 
     for token in prompt.split(','):
         matches = LORAS_PROMPT_PATTERN.findall(token)

@@ -410,7 +407,7 @@ def parse_lora_references_from_prompt(prompt: str, loras: List[Tuple[AnyStr, flo
         for match in matches:
             lora_name = match[1] + '.safetensors'
             if not skip_file_check:
-                lora_name = get_filname_by_stem(match[1], modules.config.lora_filenames_no_special)
+                lora_name = get_filname_by_stem(match[1], lora_filenames)
             if lora_name is not None:
                 found_loras.append((lora_name, float(match[2])))
             token = token.replace(match[0], '')

@@ -440,6 +437,22 @@ def parse_lora_references_from_prompt(prompt: str, loras: List[Tuple[AnyStr, flo
     return updated_loras[:loras_limit], cleaned_prompt
 
 
+def remove_performance_lora(filenames: list, performance: Performance | None):
+    loras_without_performance = filenames.copy()
+
+    if performance is None:
+        return loras_without_performance
+
+    performance_lora = performance.lora_filename()
+
+    for filename in filenames:
+        path = Path(filename)
+        if performance_lora == path.name:
+            loras_without_performance.remove(filename)
+
+    return loras_without_performance
+
+
 def cleanup_prompt(prompt):
     prompt = re.sub(' +', ' ', prompt)
     prompt = re.sub(',+', ',', prompt)
Binary file not shown.
Binary file not shown.
@@ -2,5 +2,7 @@
 !anime.json
 !default.json
 !lcm.json
+!playground_v2.5.json
+!pony_v6.json
 !realistic.json
 !sai.json
@@ -1,5 +1,5 @@
 {
-    "default_model": "animaPencilXL_v310.safetensors",
+    "default_model": "animaPencilXL_v500.safetensors",
     "default_refiner": "None",
     "default_refiner_switch": 0.5,
     "default_loras": [

@@ -42,12 +42,15 @@
         "Fooocus Masterpiece"
     ],
     "default_aspect_ratio": "896*1152",
+    "default_overwrite_step": -1,
     "checkpoint_downloads": {
-        "animaPencilXL_v310.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/animaPencilXL_v310.safetensors"
+        "animaPencilXL_v500.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/animaPencilXL_v500.safetensors"
     },
     "embeddings_downloads": {},
     "lora_downloads": {},
     "previous_default_models": [
+        "animaPencilXL_v400.safetensors",
         "animaPencilXL_v310.safetensors",
         "animaPencilXL_v300.safetensors",
         "animaPencilXL_v260.safetensors",
         "animaPencilXL_v210.safetensors",
@@ -42,6 +42,7 @@
         "Fooocus Sharp"
     ],
     "default_aspect_ratio": "1152*896",
+    "default_overwrite_step": -1,
     "checkpoint_downloads": {
         "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
     },
@@ -42,6 +42,7 @@
         "Fooocus Sharp"
     ],
     "default_aspect_ratio": "1152*896",
+    "default_overwrite_step": -1,
     "checkpoint_downloads": {
         "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
     },
@@ -0,0 +1,51 @@
+{
+    "default_model": "playground-v2.5-1024px-aesthetic.fp16.safetensors",
+    "default_refiner": "None",
+    "default_refiner_switch": 0.5,
+    "default_loras": [
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ]
+    ],
+    "default_cfg_scale": 2.0,
+    "default_sample_sharpness": 2.0,
+    "default_sampler": "dpmpp_2m",
+    "default_scheduler": "edm_playground_v2.5",
+    "default_performance": "Speed",
+    "default_prompt": "",
+    "default_prompt_negative": "",
+    "default_styles": [
+        "Fooocus V2"
+    ],
+    "default_aspect_ratio": "1024*1024",
+    "default_overwrite_step": -1,
+    "default_inpaint_engine_version": "None",
+    "checkpoint_downloads": {
+        "playground-v2.5-1024px-aesthetic.fp16.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/playground-v2.5-1024px-aesthetic.fp16.safetensors"
+    },
+    "embeddings_downloads": {},
+    "lora_downloads": {},
+    "previous_default_models": []
+}
@@ -0,0 +1,54 @@
+{
+    "default_model": "ponyDiffusionV6XL.safetensors",
+    "default_refiner": "None",
+    "default_refiner_switch": 0.5,
+    "default_vae": "ponyDiffusionV6XL_vae.safetensors",
+    "default_loras": [
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ]
+    ],
+    "default_cfg_scale": 7.0,
+    "default_sample_sharpness": 2.0,
+    "default_sampler": "dpmpp_2m_sde_gpu",
+    "default_scheduler": "karras",
+    "default_performance": "Speed",
+    "default_prompt": "",
+    "default_prompt_negative": "",
+    "default_styles": [
+        "Fooocus Pony"
+    ],
+    "default_aspect_ratio": "896*1152",
+    "default_overwrite_step": -1,
+    "default_inpaint_engine_version": "None",
+    "checkpoint_downloads": {
+        "ponyDiffusionV6XL.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/ponyDiffusionV6XL.safetensors"
+    },
+    "embeddings_downloads": {},
+    "lora_downloads": {},
+    "vae_downloads": {
+        "ponyDiffusionV6XL_vae.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/ponyDiffusionV6XL_vae.safetensors"
+    }
+}
@@ -5,7 +5,7 @@
     "default_loras": [
         [
             true,
-            "SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors",
+            "SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors",
             0.25
         ],
         [

@@ -42,12 +42,13 @@
         "Fooocus Negative"
     ],
     "default_aspect_ratio": "896*1152",
+    "default_overwrite_step": -1,
     "checkpoint_downloads": {
         "realisticStockPhoto_v20.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/realisticStockPhoto_v20.safetensors"
     },
     "embeddings_downloads": {},
     "lora_downloads": {
-        "SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors"
+        "SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/SDXL_FILM_PHOTOGRAPHY_STYLE_V1.safetensors"
     },
     "previous_default_models": ["realisticStockPhoto_v10.safetensors"]
 }
@@ -41,6 +41,7 @@
         "Fooocus Cinematic"
     ],
     "default_aspect_ratio": "1152*896",
+    "default_overwrite_step": -1,
     "checkpoint_downloads": {
         "sd_xl_base_1.0_0.9vae.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0_0.9vae.safetensors",
         "sd_xl_refiner_1.0_0.9vae.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0_0.9vae.safetensors"
readme.md

@@ -1,40 +1,30 @@
-<div align=center>
-<img src="https://github.com/lllyasviel/Fooocus/assets/19834515/483fb86d-c9a2-4c20-997c-46dafc124f25">
-
-**Non-cherry-picked** random batch by just typing two words "forest elf",
-
-without any parameter tweaking, without any strange prompt tags.
-
-See also **non-cherry-picked** generalization and diversity tests [here](https://github.com/lllyasviel/Fooocus/discussions/2067) and [here](https://github.com/lllyasviel/Fooocus/discussions/808) and [here](https://github.com/lllyasviel/Fooocus/discussions/679) and [here](https://github.com/lllyasviel/Fooocus/discussions/679#realistic).
-
-In the entire open source community, only Fooocus can achieve this level of **non-cherry-picked** quality.
-
-</div>
-
 # Fooocus
 
-Fooocus is an image generating software (based on [Gradio](https://www.gradio.app/)).
+[>>> Click Here to Install Fooocus <<<](#download)
 
-Fooocus is a rethinking of Stable Diffusion and Midjourney’s designs:
+Fooocus is an image generating software (based on [Gradio](https://www.gradio.app/) <a href='https://github.com/gradio-app/gradio'><img src='https://img.shields.io/github/stars/gradio-app/gradio'></a>).
 
-* Learned from Stable Diffusion, the software is offline, open source, and free.
-
-* Learned from Midjourney, the manual tweaking is not needed, and users only need to focus on the prompts and images.
-
-Fooocus has included and automated [lots of inner optimizations and quality improvements](#tech_list). Users can forget all those difficult technical parameters, and just enjoy the interaction between human and computer to "explore new mediums of thought and expanding the imaginative powers of the human species" `[1]`.
-
-Fooocus has simplified the installation. Between pressing "download" and generating the first image, the number of needed mouse clicks is strictly limited to less than 3. Minimal GPU memory requirement is 4GB (Nvidia).
-
-`[1]` David Holz, 2019.
+Fooocus presents a rethinking of image generator designs. The software is offline, open source, and free, while at the same time, similar to many online image generators like Midjourney, the manual tweaking is not needed, and users only need to focus on the prompts and images. Fooocus has also simplified the installation: between pressing "download" and generating the first image, the number of needed mouse clicks is strictly limited to less than 3. Minimal GPU memory requirement is 4GB (Nvidia).
 
-**Recently many fake websites exist on Google when you search “fooocus”. Do not trust those – here is the only official source of Fooocus.**
+# Project Status: Limited Long-Term Support (LTS) with Bug Fixes Only
 
-## [Installing Fooocus](#download)
+The Fooocus project, built entirely on the **Stable Diffusion XL** architecture, is now in a state of limited long-term support (LTS) with bug fixes only. As the existing functionalities are considered nearly free of programmatic issues (thanks to [mashb1t](https://github.com/mashb1t)'s huge efforts), future updates will focus exclusively on addressing any bugs that may arise.
 
-# Moving from Midjourney to Fooocus
+**There are no current plans to migrate to or incorporate newer model architectures.** However, this may change over time with the development of the open-source community. For example, if the community converges on one single dominant method of image generation (which may really happen in half a year or one year, given the current status), Fooocus may also migrate to that exact method.
+
+For those interested in utilizing newer models such as **Flux**, we recommend exploring alternative platforms such as [WebUI Forge](https://github.com/lllyasviel/stable-diffusion-webui-forge) (also from us) or [ComfyUI/SwarmUI](https://github.com/comfyanonymous/ComfyUI). Additionally, several [excellent forks of Fooocus](https://github.com/lllyasviel/Fooocus?tab=readme-ov-file#forks) are available for experimentation.
 
-Using Fooocus is as easy as (probably easier than) Midjourney – but this does not mean we lack functionality. Below are the details.
+Again, recently many fake websites exist on Google when you search “fooocus”. Do **NOT** get Fooocus from those websites – this page is the only official source of Fooocus. We never have any website such as “fooocus.com”, “fooocus.net”, “fooocus.co”, “fooocus.ai”, “fooocus.org”, “fooocus.pro” or “fooocus.one”. Those websites are ALL FAKE. **They have ABSOLUTELY no relationship to us. Fooocus is a 100% non-commercial offline open-source software.**
 
 # Features
 
 Below is a quick list using Midjourney's examples:
 
 | Midjourney | Fooocus |
 | - | - |

@@ -55,7 +45,7 @@ Using Fooocus is as easy as (probably easier than) Midjourney – but this does
 | InsightFace | Input Image -> Image Prompt -> Advanced -> FaceSwap |
 | Describe | Input Image -> Describe |
 
-We also have a few things borrowed from the best parts of LeonardoAI:
+Below is a quick list using LeonardoAI's examples:
 
 | LeonardoAI | Fooocus |
 | - | - |

@@ -63,7 +53,7 @@ We also have a few things borrowed from the best parts of LeonardoAI:
 | Advanced Sampler Parameters (like Contrast/Sharpness/etc) | Advanced -> Advanced -> Sampling Sharpness / etc |
 | User-friendly ControlNets | Input Image -> Image Prompt -> Advanced |
 
-Fooocus also developed many "fooocus-only" features for advanced users to get perfect results. [Click here to browse the advanced features.](https://github.com/lllyasviel/Fooocus/discussions/117)
+Also, [click here to browse the advanced features.](https://github.com/lllyasviel/Fooocus/discussions/117)
 
 # Download
 

@@ -71,7 +61,7 @@ Fooocus also developed many "fooocus-only" features for advanced users to get pe
 
 You can directly download Fooocus with:
 
-**[>>> Click here to download <<<](https://github.com/lllyasviel/Fooocus/releases/download/release/Fooocus_win64_2-1-831.7z)**
+**[>>> Click here to download <<<](https://github.com/lllyasviel/Fooocus/releases/download/v2.5.0/Fooocus_win64_2-5-0.7z)**
 
 After you download the file, please uncompress it and then run the "run.bat".
 

@@ -119,7 +109,7 @@ See also the common problems and troubleshoots [here](troubleshoot.md).
 
 ### Colab
 
-(Last tested - 2024 Mar 18 by [mashb1t](https://github.com/mashb1t))
+(Last tested - 2024 Aug 12 by [mashb1t](https://github.com/mashb1t))
 
 | Colab | Info
 | --- | --- |

@@ -225,7 +215,7 @@ Then run the `run.bat`.
 
 AMD is not intensively tested, however. The AMD support is in beta.
 
-For AMD, use `.\python_embeded\python.exe entry_with_update.py --directml --preset anime` or `.\python_embeded\python.exe entry_with_update.py --directml --preset realistic` for Fooocus Anime/Realistic Edition.
+For AMD, use `.\python_embeded\python.exe Fooocus\entry_with_update.py --directml --preset anime` or `.\python_embeded\python.exe Fooocus\entry_with_update.py --directml --preset realistic` for Fooocus Anime/Realistic Edition.
 
 ### Mac
 

@@ -285,11 +275,11 @@ See the common problems [here](troubleshoot.md).
 
 Given different goals, the default models and configs of Fooocus are different:
 
-| Task | Windows | Linux args | Main Model | Refiner | Config |
-| --- | --- | --- | --- | --- |--------------------------------------------------------------------------------|
-| General | run.bat | | juggernautXL_v8Rundiffusion | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/default.json) |
-| Realistic | run_realistic.bat | --preset realistic | realisticStockPhoto_v20 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/realistic.json) |
-| Anime | run_anime.bat | --preset anime | animaPencilXL_v100 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/anime.json) |
+| Task | Windows | Linux args | Main Model | Refiner | Config |
+|-----------| --- | --- |-----------------------------| --- |--------------------------------------------------------------------------------|
+| General | run.bat | | juggernautXL_v8Rundiffusion | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/default.json) |
+| Realistic | run_realistic.bat | --preset realistic | realisticStockPhoto_v20 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/realistic.json) |
+| Anime | run_anime.bat | --preset anime | animaPencilXL_v500 | not used | [here](https://github.com/lllyasviel/Fooocus/blob/main/presets/anime.json) |
 
 Note that the download is **automatic** - you do not need to do anything if the internet connection is okay. However, you can also download the models manually if you have your own preparation (or move them from somewhere else).
 

@@ -303,7 +293,8 @@ In both ways the access is unauthenticated by default. You can add basic authent
 ## List of "Hidden" Tricks
 <a name="tech_list"></a>
 
-The below things are already inside the software, and **users do not need to do anything about these**.
+<details>
+<summary>Click to see a list of tricks. These are based on SDXL and are not very up-to-date with the latest models.</summary>
 
 1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processing and "raw" mode, or the LeonardoAI's Prompt Magic).
 2. Native refiner swap inside one single k-sampler. The advantage is that the refiner model can now reuse the base model's momentum (or ODE's history parameters) collected from k-sampling to achieve more coherent sampling. In Automatic1111's high-res fix and ComfyUI's node system, the base model and refiner use two independent k-samplers, which means the momentum is largely wasted, and the sampling continuity is broken. Fooocus uses its own advanced k-diffusion sampling that ensures seamless, native, and continuous swap in a refiner setup. (Update Aug 13: Actually, I discussed this with Automatic1111 several days ago, and it seems that the “native refiner swap inside one single k-sampler” is [merged](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371) into the dev branch of webui. Great!)

@@ -319,6 +310,7 @@ The below things are already inside the software, and **users do not need to do
 12. Using automatic1111's method to normalize prompt emphasizing. This significantly improves results when users directly copy prompts from civitai.
 13. The joint swap system of the refiner now also supports img2img and upscale in a seamless way.
 14. CFG Scale and TSNR correction (tuned for SDXL) when CFG is bigger than 10.
+</details>
 
 ## Customization
@@ -370,42 +362,91 @@ entry_with_update.py [-h] [--listen [IP]] [--port PORT]
                             [--web-upload-size WEB_UPLOAD_SIZE]
+                            [--hf-mirror HF_MIRROR]
                             [--external-working-path PATH [PATH ...]]
-                            [--output-path OUTPUT_PATH] [--temp-path TEMP_PATH]
-                            [--cache-path CACHE_PATH] [--in-browser]
-                            [--disable-in-browser] [--gpu-device-id DEVICE_ID]
+                            [--output-path OUTPUT_PATH]
+                            [--temp-path TEMP_PATH] [--cache-path CACHE_PATH]
+                            [--in-browser] [--disable-in-browser]
+                            [--gpu-device-id DEVICE_ID]
                             [--async-cuda-allocation | --disable-async-cuda-allocation]
-                            [--disable-attention-upcast] [--all-in-fp32 | --all-in-fp16]
+                            [--disable-attention-upcast]
+                            [--all-in-fp32 | --all-in-fp16]
                             [--unet-in-bf16 | --unet-in-fp16 | --unet-in-fp8-e4m3fn | --unet-in-fp8-e5m2]
                             [--vae-in-fp16 | --vae-in-fp32 | --vae-in-bf16]
+                            [--vae-in-cpu]
                             [--clip-in-fp8-e4m3fn | --clip-in-fp8-e5m2 | --clip-in-fp16 | --clip-in-fp32]
-                            [--directml [DIRECTML_DEVICE]] [--disable-ipex-hijack]
+                            [--directml [DIRECTML_DEVICE]]
+                            [--disable-ipex-hijack]
                             [--preview-option [none,auto,fast,taesd]]
                             [--attention-split | --attention-quad | --attention-pytorch]
                             [--disable-xformers]
-                            [--always-gpu | --always-high-vram | --always-normal-vram |
-                             --always-low-vram | --always-no-vram | --always-cpu [CPU_NUM_THREADS]]
-                            [--always-offload-from-vram] [--disable-server-log]
+                            [--always-gpu | --always-high-vram | --always-normal-vram | --always-low-vram | --always-no-vram | --always-cpu [CPU_NUM_THREADS]]
+                            [--always-offload-from-vram]
+                            [--pytorch-deterministic] [--disable-server-log]
                             [--debug-mode] [--is-windows-embedded-python]
-                            [--disable-server-info] [--share] [--preset PRESET]
-                            [--language LANGUAGE] [--disable-offload-from-vram]
-                            [--theme THEME] [--disable-image-log]
+                            [--disable-server-info] [--multi-user] [--share]
+                            [--preset PRESET] [--disable-preset-selection]
+                            [--language LANGUAGE]
+                            [--disable-offload-from-vram] [--theme THEME]
+                            [--disable-image-log] [--disable-analytics]
+                            [--disable-metadata] [--disable-preset-download]
+                            [--disable-enhance-output-sorting]
+                            [--enable-auto-describe-image]
+                            [--always-download-new-model]
+                            [--rebuild-hash-cache [CPU_NUM_THREADS]]
 ```
+## Inline Prompt Features
+
+### Wildcards
+
+Example prompt: `__color__ flower`
+
+Processed for both the positive and the negative prompt.
+
+Selects a random wildcard from a predefined list of options, in this case the `wildcards/color.txt` file.
+The wildcard will be replaced with a random color (randomness based on seed).
+You can also disable randomness and process a wildcard file from top to bottom by enabling the checkbox `Read wildcards in order` in Developer Debug Mode.
+
+Wildcards can be nested and combined, and multiple wildcards can be used in the same prompt (for an example, see `wildcards/color_flower.txt`).
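To make the seed-based behaviour above concrete, here is a minimal sketch of wildcard substitution; the helper and file layout are illustrative assumptions, not the actual Fooocus implementation:

```python
import random
import re

def apply_wildcards(prompt: str, seed: int, wildcard_dir: str = 'wildcards') -> str:
    rng = random.Random(seed)  # the same seed always yields the same substitutions

    def replace(match: re.Match) -> str:
        # read wildcards/<name>.txt and pick one non-empty line at random
        with open(f'{wildcard_dir}/{match.group(1)}.txt', encoding='utf-8') as f:
            options = [line.strip() for line in f if line.strip()]
        return rng.choice(options)

    # re-run a bounded number of times so nested wildcards resolve too
    for _ in range(10):
        expanded = re.sub(r'__([\w-]+)__', replace, prompt)
        if expanded == prompt:
            break
        prompt = expanded
    return prompt
```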
+### Array Processing
+
+Example prompt: `[[red, green, blue]] flower`
+
+Processed only for the positive prompt.
+
+Processes the array from left to right, generating a separate image for each element in the array. In this case 3 images would be generated, one for each color.
+Increase the image number to 3 to generate all 3 variants.
+
+Arrays can not be nested, but multiple arrays can be used in the same prompt.
+Inline LoRAs are supported as array elements!
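A rough sketch of how such left-to-right array expansion can be implemented; the helper below is hypothetical and handles only a single array, so the real parsing may differ:

```python
import re

ARRAY_PATTERN = re.compile(r'\[\[([^\[\]]+)\]\]')  # assumed syntax: [[a, b, c]]

def expand_prompt_arrays(prompt: str, image_number: int) -> list[str]:
    match = ARRAY_PATTERN.search(prompt)
    if match is None:
        return [prompt] * image_number
    elements = [e.strip() for e in match.group(1).split(',')]
    # element i is used for image i; extra images reuse the last element
    return [
        prompt[:match.start()] + elements[min(i, len(elements) - 1)] + prompt[match.end():]
        for i in range(image_number)
    ]

print(expand_prompt_arrays('[[red, green, blue]] flower', 3))
# ['red flower', 'green flower', 'blue flower']
```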
+### Inline LoRAs
+
+Example prompt: `flower <lora:sunflowers:1.2>`
+
+Processed only for the positive prompt.
+
+Applies a LoRA to the prompt. The LoRA file must be located in the `models/loras` directory.
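A simplified sketch of parsing such inline LoRA tokens out of a prompt; the regex here is an assumption, not necessarily the exact pattern Fooocus uses:

```python
import re

LORA_TOKEN = re.compile(r'<lora:([^:<>]+):(-?\d+(?:\.\d+)?)>')

def extract_inline_loras(prompt: str) -> tuple[list[tuple[str, float]], str]:
    # collect (name, weight) pairs, then strip the tokens from the prompt
    loras = [(name, float(weight)) for name, weight in LORA_TOKEN.findall(prompt)]
    cleaned = LORA_TOKEN.sub('', prompt).strip(' ,')
    return loras, cleaned

print(extract_inline_loras('flower <lora:sunflowers:1.2>'))
# ([('sunflowers', 1.2)], 'flower')
```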
 ## Advanced Features
 
 [Click here to browse the advanced features.](https://github.com/lllyasviel/Fooocus/discussions/117)
 
-Fooocus also has many community forks, just like SD-WebUI's [vladmandic/automatic](https://github.com/vladmandic/automatic) and [anapnoe/stable-diffusion-webui-ux](https://github.com/anapnoe/stable-diffusion-webui-ux), for enthusiastic users who want to try!
+## Forks
+
+Below are some forks of Fooocus:
 
 | Fooocus' forks |
 | - |
-| [fenneishi/Fooocus-Control](https://github.com/fenneishi/Fooocus-Control) </br>[runew0lf/RuinedFooocus](https://github.com/runew0lf/RuinedFooocus) </br> [MoonRide303/Fooocus-MRE](https://github.com/MoonRide303/Fooocus-MRE) </br> [metercai/SimpleSDXL](https://github.com/metercai/SimpleSDXL) </br> and so on ... |
-
-See also [About Forking and Promotion of Forks](https://github.com/lllyasviel/Fooocus/discussions/699).
+| [fenneishi/Fooocus-Control](https://github.com/fenneishi/Fooocus-Control) </br>[runew0lf/RuinedFooocus](https://github.com/runew0lf/RuinedFooocus) </br> [MoonRide303/Fooocus-MRE](https://github.com/MoonRide303/Fooocus-MRE) </br> [mashb1t/Fooocus](https://github.com/mashb1t/Fooocus) </br> and so on ... |
 
 ## Thanks
 
-Special thanks to [twri](https://github.com/twri) and [3Diva](https://github.com/3Diva) and [Marc K3nt3L](https://github.com/K3nt3L) for creating additional SDXL styles available in Fooocus. Thanks [daswer123](https://github.com/daswer123) for contributing the Canvas Zoom!
+Many thanks to [twri](https://github.com/twri) and [3Diva](https://github.com/3Diva) and [Marc K3nt3L](https://github.com/K3nt3L) for creating additional SDXL styles available in Fooocus.
 
 The project starts from a mixture of [Stable Diffusion WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) and [ComfyUI](https://github.com/comfyanonymous/ComfyUI) codebases.
+
+Also, thanks [daswer123](https://github.com/daswer123) for contributing the Canvas Zoom!
 
 ## Update Log
 
@@ -413,8 +454,6 @@ The log is [here](update_log.md).
 
 ## Localization/Translation/I18N
 
-**We need your help!** Please help translate Fooocus into international languages.
-
 You can put json files in the `language` folder to translate the user interface.
 
 For example, below is the content of `Fooocus/language/example.json`:
@@ -1,18 +1,24 @@
-torchsde==0.2.5
-einops==0.4.1
-transformers==4.30.2
-safetensors==0.3.1
-accelerate==0.21.0
-pyyaml==6.0
-Pillow==9.2.0
-scipy==1.9.3
-tqdm==4.64.1
-psutil==5.9.5
-pytorch_lightning==1.9.4
-omegaconf==2.2.3
+torchsde==0.2.6
+einops==0.8.0
+transformers==4.42.4
+safetensors==0.4.3
+accelerate==0.32.1
+pyyaml==6.0.1
+pillow==10.4.0
+scipy==1.14.0
+tqdm==4.66.4
+psutil==6.0.0
+pytorch_lightning==2.3.3
+omegaconf==2.3.0
 gradio==3.41.2
-pygit2==1.12.2
-opencv-contrib-python==4.8.0.74
-httpx==0.24.1
-onnxruntime==1.16.3
-timm==0.9.2
+pygit2==1.15.1
+opencv-contrib-python-headless==4.10.0.84
+httpx==0.27.0
+onnxruntime==1.18.1
+timm==1.0.7
+numpy==1.26.4
+tokenizers==0.19.1
+packaging==24.1
+rembg==2.0.57
+groundingdino-py==0.4.0
+segment_anything==1.0
Binary file not shown.

(New image added: 5.1 KiB)
@@ -14,7 +14,7 @@
     },
     {
         "name": "Fooocus Masterpiece",
-        "prompt": "(masterpiece), (best quality), (ultra-detailed), {prompt}, illustration, disheveled hair, detailed eyes, perfect composition, moist skin, intricate details, earrings, by wlop",
+        "prompt": "(masterpiece), (best quality), (ultra-detailed), {prompt}, illustration, disheveled hair, detailed eyes, perfect composition, moist skin, intricate details, earrings",
         "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, pubic hair,extra digit, fewer digits, cropped, worst quality, low quality"
     },
     {

@@ -30,5 +30,10 @@
         "name": "Fooocus Cinematic",
         "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
         "negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured"
+    },
+    {
+        "name": "Fooocus Pony",
+        "prompt": "score_9, score_8_up, score_7_up, {prompt}",
+        "negative_prompt": "score_6, score_5, score_4"
     }
 ]
@@ -0,0 +1,74 @@
+import numbers
+import os
+import unittest
+
+import modules.flags
+from modules import extra_utils
+
+
+class TestUtils(unittest.TestCase):
+    def test_try_eval_env_var(self):
+        test_cases = [
+            {
+                "input": ("foo", str),
+                "output": "foo"
+            },
+            {
+                "input": ("1", int),
+                "output": 1
+            },
+            {
+                "input": ("1.0", float),
+                "output": 1.0
+            },
+            {
+                "input": ("1", numbers.Number),
+                "output": 1
+            },
+            {
+                "input": ("1.0", numbers.Number),
+                "output": 1.0
+            },
+            {
+                "input": ("true", bool),
+                "output": True
+            },
+            {
+                "input": ("True", bool),
+                "output": True
+            },
+            {
+                "input": ("false", bool),
+                "output": False
+            },
+            {
+                "input": ("False", bool),
+                "output": False
+            },
+            {
+                "input": ("True", str),
+                "output": "True"
+            },
+            {
+                "input": ("False", str),
+                "output": "False"
+            },
+            {
+                "input": ("['a', 'b', 'c']", list),
+                "output": ['a', 'b', 'c']
+            },
+            {
+                "input": ("{'a':1}", dict),
+                "output": {'a': 1}
+            },
+            {
+                "input": ("('foo', 1)", tuple),
+                "output": ('foo', 1)
+            }
+        ]
+
+        for test in test_cases:
+            value, expected_type = test["input"]
+            expected = test["output"]
+            actual = extra_utils.try_eval_env_var(value, expected_type)
+            self.assertEqual(expected, actual)
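The test cases above pin down the expected contract; a sketch of a parser that satisfies them could look like this (an assumed implementation, not the actual `extra_utils.try_eval_env_var`):

```python
import ast
import numbers

def try_eval_env_var(value: str, expected_type: type):
    try:
        if expected_type is str:
            return value                      # "True" stays a string when str is expected
        if expected_type is bool:
            return value.strip().lower() == 'true'
        parsed = ast.literal_eval(value)      # safe eval for numbers/lists/dicts/tuples
        return parsed if isinstance(parsed, expected_type) else value
    except (ValueError, SyntaxError, TypeError):
        return value                          # fall back to the raw string
```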
@@ -1,5 +1,7 @@
+import os
 import unittest
 
+import modules.flags
 from modules import util
 
 

@@ -77,5 +79,59 @@ class TestUtils(unittest.TestCase):
         for test in test_cases:
             prompt, loras, loras_limit, skip_file_check = test["input"]
             expected = test["output"]
-            actual = util.parse_lora_references_from_prompt(prompt, loras, loras_limit=loras_limit, skip_file_check=skip_file_check)
+            actual = util.parse_lora_references_from_prompt(prompt, loras, loras_limit=loras_limit,
+                                                            skip_file_check=skip_file_check)
             self.assertEqual(expected, actual)
+
+    def test_can_parse_tokens_and_strip_performance_lora(self):
+        lora_filenames = [
+            'hey-lora.safetensors',
+            modules.flags.PerformanceLoRA.EXTREME_SPEED.value,
+            modules.flags.PerformanceLoRA.LIGHTNING.value,
+            os.path.join('subfolder', modules.flags.PerformanceLoRA.HYPER_SD.value)
+        ]
+
+        test_cases = [
+            {
+                "input": ("some prompt, <lora:hey-lora:0.4>", [], 5, True, modules.flags.Performance.QUALITY),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            },
+            {
+                "input": ("some prompt, <lora:hey-lora:0.4>", [], 5, True, modules.flags.Performance.SPEED),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            },
+            {
+                "input": ("some prompt, <lora:sdxl_lcm_lora:1>, <lora:hey-lora:0.4>", [], 5, True, modules.flags.Performance.EXTREME_SPEED),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            },
+            {
+                "input": ("some prompt, <lora:sdxl_lightning_4step_lora:1>, <lora:hey-lora:0.4>", [], 5, True, modules.flags.Performance.LIGHTNING),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            },
+            {
+                "input": ("some prompt, <lora:sdxl_hyper_sd_4step_lora:1>, <lora:hey-lora:0.4>", [], 5, True, modules.flags.Performance.HYPER_SD),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            }
+        ]
+
+        for test in test_cases:
+            prompt, loras, loras_limit, skip_file_check, performance = test["input"]
+            lora_filenames = modules.util.remove_performance_lora(lora_filenames, performance)
+            expected = test["output"]
+            actual = util.parse_lora_references_from_prompt(prompt, loras, loras_limit=loras_limit, lora_filenames=lora_filenames)
+            self.assertEqual(expected, actual)
@@ -1,3 +1,83 @@
+# [2.5.5](https://github.com/lllyasviel/Fooocus/releases/tag/v2.5.5)
+
+* Fix colab inpaint issue by moving an import statement
+
+# [2.5.4](https://github.com/lllyasviel/Fooocus/releases/tag/v2.5.4)
+
+* Fix validation for default_ip_image_* and default_inpaint_mask_sam_model
+* Fix enhance mask debugging in combination with image sorting
+* Fix loading of checkpoints and LoRAs when using multiple directories in config and then switching presets
+
+# [2.5.3](https://github.com/lllyasviel/Fooocus/releases/tag/v2.5.3)
+
+* Only load weights from non-safetensors files, preventing harmful code injection
+* Add checkbox for applying/resetting styles when describing images, also allowing multiple describe content types
+
+# [2.5.2](https://github.com/lllyasviel/Fooocus/releases/tag/v2.5.2)
+
+* Fix not adding the positive prompt when styles didn't have a {prompt} placeholder in the positive prompt
+* Extend config settings for input image, see list in [PR](https://github.com/lllyasviel/Fooocus/pull/3382)
+
+# [2.5.1](https://github.com/lllyasviel/Fooocus/releases/tag/v2.5.1)
+
+* Update download URL in readme
+* Increase speed of metadata loading
+* Fix reading of metadata from jpeg, jpg and webp (exif)
+* Fix debug preprocessor
+* Update attributes and add inline prompt features section to readme
+* Add checkbox, config and handling for saving only the final enhanced image. Use config `default_save_only_final_enhanced_image`, default False.
+* Add sorting of final images when enhance is enabled. Use argument `--disable-enhance-output-sorting` to disable.
+
+# [2.5.0](https://github.com/lllyasviel/Fooocus/releases/tag/v2.5.0)
+
+This version includes various package updates. If the auto-update doesn't work, you can do one of the following:
+1. Open a terminal in the Fooocus folder (location of config.txt) and run `git pull`
+2. Update packages
+   - Windows (installation through zip file): open a terminal in the Fooocus folder (location of config.txt) and run `..\python_embeded\python.exe -m pip install -r .\requirements_versions.txt`, or download Fooocus again (zip file attached to this release)
+   - other: manually update the packages using `python.exe -m pip install -r requirements_versions.txt` or use the docker image
+
+---
+
+* Update python dependencies, add segment_anything
+* Add enhance feature, which offers easy image refinement steps (similar to adetailer, but based on dynamic image detection instead of specific mask detection models). See [documentation](https://github.com/lllyasviel/Fooocus/discussions/3281).
+* Rewrite async worker code to make it much more reusable and to allow iterations
+* Improve GroundingDINO and SAM image masking
+* Fix inference tensor version counter tracking issue for GroundingDINO after using Enhance (see [discussion](https://github.com/lllyasviel/Fooocus/discussions/3213))
+* Move checkboxes Enable Mask Upload and Invert Mask When Generating from Developer Debug Mode to Inpaint Or Outpaint
+* Add persistent model cache for metadata. Use `--rebuild-hash-cache X` (X = int, number of CPU cores, default all) to manually rebuild the cache for all non-cached hashes
+* Rename `--enable-describe-uov-image` to `--enable-auto-describe-image`, now also works for enhance image upload
+* Rename checkbox `Enable Mask Upload` to `Enable Advanced Masking Features` to better hint at the mask auto-generation feature
+* Get the upscale model filepath by calling downloading_upscale_model() to ensure the model exists
+* Rename tab titles and translations from singular to plural
+* Rename document to documentation
+* Update default models to latest versions
+  * animaPencilXL_v400 => animaPencilXL_v500
+  * DreamShaperXL_Turbo_dpmppSdeKarras => DreamShaperXL_Turbo_v2_1
+  * SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4 => SDXL_FILM_PHOTOGRAPHY_STYLE_V1
+* Add preset for pony_v6 (using ponyDiffusionV6XL)
+* Add style `Fooocus Pony`
+* Add restart sampler ([paper](https://arxiv.org/abs/2306.14878))
+* Add config option for default_inpaint_engine_version, sets the inpaint engine for pony_v6 and playground_v2.5 to None for improved results (incompatible with inpaint engine)
+* Add image editor functionality to mask upload (same as for inpaint, now correctly resizes and allows more detailed mask creation)
+
 # [2.4.3](https://github.com/lllyasviel/Fooocus/releases/tag/v2.4.3)
 
 * Fix alphas_cumprod setter for TCD sampler
 * Add parser for env var strings to expected config value types to allow override of all non-path config keys
 
 # [2.4.2](https://github.com/lllyasviel/Fooocus/releases/tag/v2.4.2)
 
 * Fix some small bugs (tcd scheduler when gamma is 0, chown in Dockerfile, update cmd args in readme, translation for aspect ratios, vae default after file reload)
 * Fix performance LoRA replacement when data is loaded from history log and inline prompt
 * Add support and preset for playground v2.5 (only works with performance Quality or Speed, use with scheduler edm_playground_v2)
 * Make textboxes (incl. positive prompt) resizable
 * Hide intermediate images when the performance of Gradio would bottleneck the generation process (Extreme Speed, Lightning, Hyper-SD)
 
 # [2.4.1](https://github.com/lllyasviel/Fooocus/releases/tag/v2.4.1)
 
 * Fix some small bugs (e.g. adjust clip skip default value from 1 to 2, add type check to aspect ratios js update function)
 * Add automated docker build on push to main, tagged with `edge`. See [available docker images](https://github.com/lllyasviel/Fooocus/pkgs/container/fooocus).
 
 # [2.4.0](https://github.com/lllyasviel/Fooocus/releases/tag/v2.4.0)
 
 * Change settings tab elements to be more compact
593
webui.py
593
webui.py
|
|
@ -16,6 +16,7 @@ import modules.meta_parser
|
|||
import args_manager
|
||||
import copy
|
||||
import launch
|
||||
from extras.inpaint_mask import SAMOptions
|
||||
|
||||
from modules.sdxl_styles import legal_style_names
|
||||
from modules.private_logger import get_current_html_path
|
||||
|
|
@ -72,6 +73,9 @@ def generate_clicked(task: worker.AsyncTask):
|
|||
gr.update(visible=True, value=product), \
|
||||
gr.update(visible=False)
|
||||
if flag == 'finish':
|
||||
if not args_manager.args.disable_enhance_output_sorting:
|
||||
product = sort_enhance_images(product, task)
|
||||
|
||||
yield gr.update(visible=False), \
|
||||
gr.update(visible=False), \
|
||||
gr.update(visible=False), \
|
||||
|
|
@ -89,6 +93,56 @@ def generate_clicked(task: worker.AsyncTask):
|
|||
return
|
||||
|
||||
|
||||
def sort_enhance_images(images, task):
|
||||
if not task.should_enhance or len(images) <= task.images_to_enhance_count:
|
||||
return images
|
||||
|
||||
sorted_images = []
|
||||
walk_index = task.images_to_enhance_count
|
||||
|
||||
for index, enhanced_img in enumerate(images[:task.images_to_enhance_count]):
|
||||
sorted_images.append(enhanced_img)
|
||||
if index not in task.enhance_stats:
|
||||
continue
|
||||
target_index = walk_index + task.enhance_stats[index]
|
||||
if walk_index < len(images) and target_index <= len(images):
|
||||
sorted_images += images[walk_index:target_index]
|
||||
walk_index += task.enhance_stats[index]
|
||||
|
||||
return sorted_images
|
||||
|
||||
|
||||
def inpaint_mode_change(mode, inpaint_engine_version):
|
||||
assert mode in modules.flags.inpaint_options
|
||||
|
||||
# inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts,
|
||||
# inpaint_disable_initial_latent, inpaint_engine,
|
||||
# inpaint_strength, inpaint_respective_field
|
||||
|
||||
if mode == modules.flags.inpaint_option_detail:
|
||||
return [
|
||||
gr.update(visible=True), gr.update(visible=False, value=[]),
|
||||
gr.Dataset.update(visible=True, samples=modules.config.example_inpaint_prompts),
|
||||
False, 'None', 0.5, 0.0
|
||||
]
|
||||
|
||||
if inpaint_engine_version == 'empty':
|
||||
inpaint_engine_version = modules.config.default_inpaint_engine_version
|
||||
|
||||
if mode == modules.flags.inpaint_option_modify:
|
||||
return [
|
||||
gr.update(visible=True), gr.update(visible=False, value=[]),
|
||||
gr.Dataset.update(visible=False, samples=modules.config.example_inpaint_prompts),
|
||||
True, inpaint_engine_version, 1.0, 0.0
|
||||
]
|
||||
|
||||
return [
|
||||
gr.update(visible=False, value=''), gr.update(visible=True),
|
||||
gr.Dataset.update(visible=False, samples=modules.config.example_inpaint_prompts),
|
||||
False, inpaint_engine_version, 1.0, 0.618
|
||||
]
|
||||
|
||||
|
||||
reload_javascript()
|
||||
|
||||
title = f'Fooocus {fooocus_version.version}'
|
||||
|
|
@ -100,6 +154,7 @@ shared.gradio_root = gr.Blocks(title=title).queue()
|
|||
|
||||
with shared.gradio_root:
|
||||
currentTask = gr.State(worker.AsyncTask(args=[]))
|
||||
inpaint_engine_state = gr.State('empty')
|
||||
with gr.Row():
|
||||
with gr.Column(scale=2):
|
||||
with gr.Row():
|
||||
|
|
@ -112,10 +167,10 @@ with shared.gradio_root:
|
|||
gallery = gr.Gallery(label='Gallery', show_label=False, object_fit='contain', visible=True, height=768,
|
||||
elem_classes=['resizable_area', 'main_view', 'final_gallery', 'image_gallery'],
|
||||
elem_id='final_gallery')
|
||||
with gr.Row(elem_classes='type_row'):
|
||||
with gr.Row():
|
||||
with gr.Column(scale=17):
|
||||
prompt = gr.Textbox(show_label=False, placeholder="Type prompt here or paste parameters.", elem_id='positive_prompt',
|
||||
container=False, autofocus=True, elem_classes='type_row', lines=1024)
|
||||
autofocus=True, lines=3)
|
||||
|
||||
default_prompt = modules.config.default_prompt
|
||||
if isinstance(default_prompt, str) and default_prompt != '':
|
||||
|
|
@@ -145,18 +200,19 @@ with shared.gradio_root:
                    stop_button.click(stop_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False, _js='cancelGenerateForever')
                    skip_button.click(skip_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False)
            with gr.Row(elem_classes='advanced_check_row'):
                input_image_checkbox = gr.Checkbox(label='Input Image', value=False, container=False, elem_classes='min_check')
                input_image_checkbox = gr.Checkbox(label='Input Image', value=modules.config.default_image_prompt_checkbox, container=False, elem_classes='min_check')
                enhance_checkbox = gr.Checkbox(label='Enhance', value=modules.config.default_enhance_checkbox, container=False, elem_classes='min_check')
                advanced_checkbox = gr.Checkbox(label='Advanced', value=modules.config.default_advanced_checkbox, container=False, elem_classes='min_check')
            with gr.Row(visible=False) as image_input_panel:
                with gr.Tabs():
                    with gr.TabItem(label='Upscale or Variation') as uov_tab:
            with gr.Row(visible=modules.config.default_image_prompt_checkbox) as image_input_panel:
                with gr.Tabs(selected=modules.config.default_selected_image_input_tab_id):
                    with gr.Tab(label='Upscale or Variation', id='uov_tab') as uov_tab:
                        with gr.Row():
                            with gr.Column():
                                uov_input_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False)
                            with gr.Column():
                                uov_method = gr.Radio(label='Upscale or Variation:', choices=flags.uov_list, value=flags.disabled)
                                gr.HTML('<a href="https://github.com/lllyasviel/Fooocus/discussions/390" target="_blank">\U0001F4D4 Document</a>')
                    with gr.TabItem(label='Image Prompt') as ip_tab:
                                uov_method = gr.Radio(label='Upscale or Variation:', choices=flags.uov_list, value=modules.config.default_uov_method)
                                gr.HTML('<a href="https://github.com/lllyasviel/Fooocus/discussions/390" target="_blank">\U0001F4D4 Documentation</a>')
                    with gr.Tab(label='Image Prompt', id='ip_tab') as ip_tab:
                        with gr.Row():
                            ip_images = []
                            ip_types = []
@@ -164,31 +220,30 @@ with shared.gradio_root:
                            ip_weights = []
                            ip_ctrls = []
                            ip_ad_cols = []
                            for _ in range(flags.controlnet_image_count):
                            for image_count in range(modules.config.default_controlnet_image_count):
                                image_count += 1
                                with gr.Column():
                                    ip_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False, height=300)
                                    ip_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False, height=300, value=modules.config.default_ip_images[image_count])
                                    ip_images.append(ip_image)
                                    ip_ctrls.append(ip_image)
                                    with gr.Column(visible=False) as ad_col:
                                    with gr.Column(visible=modules.config.default_image_prompt_advanced_checkbox) as ad_col:
                                        with gr.Row():
                                            default_end, default_weight = flags.default_parameters[flags.default_ip]

                                            ip_stop = gr.Slider(label='Stop At', minimum=0.0, maximum=1.0, step=0.001, value=default_end)
                                            ip_stop = gr.Slider(label='Stop At', minimum=0.0, maximum=1.0, step=0.001, value=modules.config.default_ip_stop_ats[image_count])
                                            ip_stops.append(ip_stop)
                                            ip_ctrls.append(ip_stop)

                                            ip_weight = gr.Slider(label='Weight', minimum=0.0, maximum=2.0, step=0.001, value=default_weight)
                                            ip_weight = gr.Slider(label='Weight', minimum=0.0, maximum=2.0, step=0.001, value=modules.config.default_ip_weights[image_count])
                                            ip_weights.append(ip_weight)
                                            ip_ctrls.append(ip_weight)

                                        ip_type = gr.Radio(label='Type', choices=flags.ip_list, value=flags.default_ip, container=False)
                                        ip_type = gr.Radio(label='Type', choices=flags.ip_list, value=modules.config.default_ip_types[image_count], container=False)
                                        ip_types.append(ip_type)
                                        ip_ctrls.append(ip_type)

                                        ip_type.change(lambda x: flags.default_parameters[x], inputs=[ip_type], outputs=[ip_stop, ip_weight], queue=False, show_progress=False)
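                                        # flags.default_parameters maps each Image Prompt type to its default
                                        # (stop_at, weight) pair, so switching the Type radio resets both sliders.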
                                    ip_ad_cols.append(ad_col)
                            ip_advanced = gr.Checkbox(label='Advanced', value=False, container=False)
                            gr.HTML('* \"Image Prompt\" is powered by Fooocus Image Mixture Engine (v1.0.1). <a href="https://github.com/lllyasviel/Fooocus/discussions/557" target="_blank">\U0001F4D4 Document</a>')
                            ip_advanced = gr.Checkbox(label='Advanced', value=modules.config.default_image_prompt_advanced_checkbox, container=False)
                            gr.HTML('* \"Image Prompt\" is powered by Fooocus Image Mixture Engine (v1.0.1). <a href="https://github.com/lllyasviel/Fooocus/discussions/557" target="_blank">\U0001F4D4 Documentation</a>')

                            def ip_advance_checked(x):
                                return [gr.update(visible=x)] * len(ip_ad_cols) + \
@@ -199,46 +254,119 @@ with shared.gradio_root:
                            ip_advanced.change(ip_advance_checked, inputs=ip_advanced,
                                               outputs=ip_ad_cols + ip_types + ip_stops + ip_weights,
                                               queue=False, show_progress=False)
                    with gr.TabItem(label='Inpaint or Outpaint') as inpaint_tab:
                        with gr.Row():
                            inpaint_input_image = grh.Image(label='Image', source='upload', type='numpy', tool='sketch', height=500, brush_color="#FFFFFF", elem_id='inpaint_canvas', show_label=False)
                            inpaint_mask_image = grh.Image(label='Mask Upload', source='upload', type='numpy', height=500, visible=False)

                        with gr.Row():
                            inpaint_additional_prompt = gr.Textbox(placeholder="Describe what you want to inpaint.", elem_id='inpaint_additional_prompt', label='Inpaint Additional Prompt', visible=False)
                            outpaint_selections = gr.CheckboxGroup(choices=['Left', 'Right', 'Top', 'Bottom'], value=[], label='Outpaint Direction')
                            inpaint_mode = gr.Dropdown(choices=modules.flags.inpaint_options, value=modules.flags.inpaint_option_default, label='Method')
                            example_inpaint_prompts = gr.Dataset(samples=modules.config.example_inpaint_prompts, label='Additional Prompt Quick List', components=[inpaint_additional_prompt], visible=False)
                        gr.HTML('* Powered by Fooocus Inpaint Engine <a href="https://github.com/lllyasviel/Fooocus/discussions/414" target="_blank">\U0001F4D4 Document</a>')
                        example_inpaint_prompts.click(lambda x: x[0], inputs=example_inpaint_prompts, outputs=inpaint_additional_prompt, show_progress=False, queue=False)
                    with gr.TabItem(label='Describe') as desc_tab:
                    with gr.Tab(label='Inpaint or Outpaint', id='inpaint_tab') as inpaint_tab:
                        with gr.Row():
                            with gr.Column():
                                desc_input_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False)
                                inpaint_input_image = grh.Image(label='Image', source='upload', type='numpy', tool='sketch', height=500, brush_color="#FFFFFF", elem_id='inpaint_canvas', show_label=False)
                                inpaint_advanced_masking_checkbox = gr.Checkbox(label='Enable Advanced Masking Features', value=modules.config.default_inpaint_advanced_masking_checkbox)
                                inpaint_mode = gr.Dropdown(choices=modules.flags.inpaint_options, value=modules.config.default_inpaint_method, label='Method')
                                inpaint_additional_prompt = gr.Textbox(placeholder="Describe what you want to inpaint.", elem_id='inpaint_additional_prompt', label='Inpaint Additional Prompt', visible=False)
                                outpaint_selections = gr.CheckboxGroup(choices=['Left', 'Right', 'Top', 'Bottom'], value=[], label='Outpaint Direction')
                                example_inpaint_prompts = gr.Dataset(samples=modules.config.example_inpaint_prompts,
                                                                     label='Additional Prompt Quick List',
                                                                     components=[inpaint_additional_prompt],
                                                                     visible=False)
                                gr.HTML('* Powered by Fooocus Inpaint Engine <a href="https://github.com/lllyasviel/Fooocus/discussions/414" target="_blank">\U0001F4D4 Documentation</a>')
                                example_inpaint_prompts.click(lambda x: x[0], inputs=example_inpaint_prompts, outputs=inpaint_additional_prompt, show_progress=False, queue=False)

                            with gr.Column(visible=modules.config.default_inpaint_advanced_masking_checkbox) as inpaint_mask_generation_col:
                                inpaint_mask_image = grh.Image(label='Mask Upload', source='upload', type='numpy', tool='sketch', height=500, brush_color="#FFFFFF", mask_opacity=1, elem_id='inpaint_mask_canvas')
                                invert_mask_checkbox = gr.Checkbox(label='Invert Mask When Generating', value=modules.config.default_invert_mask_checkbox)
                                inpaint_mask_model = gr.Dropdown(label='Mask generation model',
                                                                 choices=flags.inpaint_mask_models,
                                                                 value=modules.config.default_inpaint_mask_model)
                                inpaint_mask_cloth_category = gr.Dropdown(label='Cloth category',
                                                                          choices=flags.inpaint_mask_cloth_category,
                                                                          value=modules.config.default_inpaint_mask_cloth_category,
                                                                          visible=False)
                                inpaint_mask_dino_prompt_text = gr.Textbox(label='Detection prompt', value='', visible=False, info='Use singular whenever possible', placeholder='Describe what you want to detect.')
                                example_inpaint_mask_dino_prompt_text = gr.Dataset(
                                    samples=modules.config.example_enhance_detection_prompts,
                                    label='Detection Prompt Quick List',
                                    components=[inpaint_mask_dino_prompt_text],
                                    visible=modules.config.default_inpaint_mask_model == 'sam')
                                example_inpaint_mask_dino_prompt_text.click(lambda x: x[0],
                                                                            inputs=example_inpaint_mask_dino_prompt_text,
                                                                            outputs=inpaint_mask_dino_prompt_text,
                                                                            show_progress=False, queue=False)

                                with gr.Accordion("Advanced options", visible=False, open=False) as inpaint_mask_advanced_options:
                                    inpaint_mask_sam_model = gr.Dropdown(label='SAM model', choices=flags.inpaint_mask_sam_model, value=modules.config.default_inpaint_mask_sam_model)
                                    inpaint_mask_box_threshold = gr.Slider(label="Box Threshold", minimum=0.0, maximum=1.0, value=0.3, step=0.05)
                                    inpaint_mask_text_threshold = gr.Slider(label="Text Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.05)
                                    inpaint_mask_sam_max_detections = gr.Slider(label="Maximum number of detections", info="Set to 0 to detect all", minimum=0, maximum=10, value=modules.config.default_sam_max_detections, step=1, interactive=True)
                                generate_mask_button = gr.Button(value='Generate mask from image')

                                def generate_mask(image, mask_model, cloth_category, dino_prompt_text, sam_model, box_threshold, text_threshold, sam_max_detections, dino_erode_or_dilate, dino_debug):
                                    from extras.inpaint_mask import generate_mask_from_image

                                    extras = {}
                                    sam_options = None
                                    if mask_model == 'u2net_cloth_seg':
                                        extras['cloth_category'] = cloth_category
                                    elif mask_model == 'sam':
                                        sam_options = SAMOptions(
                                            dino_prompt=dino_prompt_text,
                                            dino_box_threshold=box_threshold,
                                            dino_text_threshold=text_threshold,
                                            dino_erode_or_dilate=dino_erode_or_dilate,
                                            dino_debug=dino_debug,
                                            max_detections=sam_max_detections,
                                            model_type=sam_model
                                        )

                                    mask, _, _, _ = generate_mask_from_image(image, mask_model, extras, sam_options)

                                    return mask
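
                                # Only u2net_cloth_seg consumes the cloth category, and only 'sam'
                                # builds SAMOptions; generate_mask_from_image also returns three
                                # further values that are unused here, hence the discarded tuple slots.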

                                inpaint_mask_model.change(lambda x: [gr.update(visible=x == 'u2net_cloth_seg')] +
                                                                    [gr.update(visible=x == 'sam')] * 2 +
                                                                    [gr.Dataset.update(visible=x == 'sam',
                                                                                       samples=modules.config.example_enhance_detection_prompts)],
                                                          inputs=inpaint_mask_model,
                                                          outputs=[inpaint_mask_cloth_category,
                                                                   inpaint_mask_dino_prompt_text,
                                                                   inpaint_mask_advanced_options,
                                                                   example_inpaint_mask_dino_prompt_text],
                                                          queue=False, show_progress=False)
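                                # The lambda's update list lines up with outputs: the cloth-category
                                # dropdown shows only for u2net_cloth_seg, the detection-prompt textbox
                                # and the advanced-options accordion only for 'sam', and the Dataset
                                # quick list is refreshed alongside its visibility.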

                    with gr.Tab(label='Describe', id='describe_tab') as describe_tab:
                        with gr.Row():
                            with gr.Column():
                                desc_method = gr.Radio(
                                describe_input_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False)
                            with gr.Column():
                                describe_methods = gr.CheckboxGroup(
                                    label='Content Type',
                                    choices=[flags.desc_type_photo, flags.desc_type_anime],
                                    value=flags.desc_type_photo)
                                desc_btn = gr.Button(value='Describe this Image into Prompt')
                                desc_image_size = gr.Textbox(label='Image Size and Recommended Size', elem_id='desc_image_size', visible=False)
                                gr.HTML('<a href="https://github.com/lllyasviel/Fooocus/discussions/1363" target="_blank">\U0001F4D4 Document</a>')
                                    choices=flags.describe_types,
                                    value=modules.config.default_describe_content_type)
                                describe_apply_styles = gr.Checkbox(label='Apply Styles', value=modules.config.default_describe_apply_prompts_checkbox)
                                describe_btn = gr.Button(value='Describe this Image into Prompt')
                                describe_image_size = gr.Textbox(label='Image Size and Recommended Size', elem_id='describe_image_size', visible=False)
                                gr.HTML('<a href="https://github.com/lllyasviel/Fooocus/discussions/1363" target="_blank">\U0001F4D4 Documentation</a>')

                        def trigger_show_image_properties(image):
                            value = modules.util.get_image_size_info(image, modules.flags.sdxl_aspect_ratios)
                            return gr.update(value=value, visible=True)

                        desc_input_image.upload(trigger_show_image_properties, inputs=desc_input_image,
                                                outputs=desc_image_size, show_progress=False, queue=False)
                        describe_input_image.upload(trigger_show_image_properties, inputs=describe_input_image,
                                                    outputs=describe_image_size, show_progress=False, queue=False)

                    with gr.TabItem(label='Metadata') as metadata_tab:
                    with gr.Tab(label='Enhance', id='enhance_tab') as enhance_tab:
                        with gr.Row():
                            with gr.Column():
                                enhance_input_image = grh.Image(label='Use with Enhance, skips image generation', source='upload', type='numpy')
                                gr.HTML('<a href="https://github.com/lllyasviel/Fooocus/discussions/3281" target="_blank">\U0001F4D4 Documentation</a>')

                    with gr.Tab(label='Metadata', id='metadata_tab') as metadata_tab:
                        with gr.Column():
                            metadata_input_image = grh.Image(label='For images created by Fooocus', source='upload', type='filepath')
                            metadata_input_image = grh.Image(label='For images created by Fooocus', source='upload', type='pil')
                            metadata_json = gr.JSON(label='Metadata')
                            metadata_import_button = gr.Button(value='Apply Metadata')

                        def trigger_metadata_preview(filepath):
                            parameters, metadata_scheme = modules.meta_parser.read_info_from_image(filepath)
                        def trigger_metadata_preview(file):
                            parameters, metadata_scheme = modules.meta_parser.read_info_from_image(file)

                            results = {}
                            if parameters is not None:
@@ -252,6 +380,164 @@ with shared.gradio_root:
                        metadata_input_image.upload(trigger_metadata_preview, inputs=metadata_input_image,
                                                    outputs=metadata_json, queue=False, show_progress=True)

            with gr.Row(visible=modules.config.default_enhance_checkbox) as enhance_input_panel:
                with gr.Tabs():
                    with gr.Tab(label='Upscale or Variation'):
                        with gr.Row():
                            with gr.Column():
                                enhance_uov_method = gr.Radio(label='Upscale or Variation:', choices=flags.uov_list,
                                                              value=modules.config.default_enhance_uov_method)
                                enhance_uov_processing_order = gr.Radio(label='Order of Processing',
                                                                        info='Use before to enhance small details and after to enhance large areas.',
                                                                        choices=flags.enhancement_uov_processing_order,
                                                                        value=modules.config.default_enhance_uov_processing_order)
                                enhance_uov_prompt_type = gr.Radio(label='Prompt',
                                                                   info='Choose which prompt to use for Upscale or Variation.',
                                                                   choices=flags.enhancement_uov_prompt_types,
                                                                   value=modules.config.default_enhance_uov_prompt_type,
                                                                   visible=modules.config.default_enhance_uov_processing_order == flags.enhancement_uov_after)

                                enhance_uov_processing_order.change(lambda x: gr.update(visible=x == flags.enhancement_uov_after),
                                                                    inputs=enhance_uov_processing_order,
                                                                    outputs=enhance_uov_prompt_type,
                                                                    queue=False, show_progress=False)
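                                # The prompt-type radio only matters when enhancement runs after the
                                # main pipeline, so its visibility tracks the processing-order radio.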
                                gr.HTML('<a href="https://github.com/lllyasviel/Fooocus/discussions/3281" target="_blank">\U0001F4D4 Documentation</a>')
                enhance_ctrls = []
                enhance_inpaint_mode_ctrls = []
                enhance_inpaint_engine_ctrls = []
                enhance_inpaint_update_ctrls = []
                for index in range(modules.config.default_enhance_tabs):
                    with gr.Tab(label=f'#{index + 1}') as enhance_tab_item:
                        enhance_enabled = gr.Checkbox(label='Enable', value=False, elem_classes='min_check',
                                                      container=False)

                        enhance_mask_dino_prompt_text = gr.Textbox(label='Detection prompt',
                                                                   info='Use singular whenever possible',
                                                                   placeholder='Describe what you want to detect.',
                                                                   interactive=True,
                                                                   visible=modules.config.default_enhance_inpaint_mask_model == 'sam')
                        example_enhance_mask_dino_prompt_text = gr.Dataset(
                            samples=modules.config.example_enhance_detection_prompts,
                            label='Detection Prompt Quick List',
                            components=[enhance_mask_dino_prompt_text],
                            visible=modules.config.default_enhance_inpaint_mask_model == 'sam')
                        example_enhance_mask_dino_prompt_text.click(lambda x: x[0],
                                                                    inputs=example_enhance_mask_dino_prompt_text,
                                                                    outputs=enhance_mask_dino_prompt_text,
                                                                    show_progress=False, queue=False)

                        enhance_prompt = gr.Textbox(label="Enhancement positive prompt",
                                                    placeholder="Uses original prompt instead if empty.",
                                                    elem_id='enhance_prompt')
                        enhance_negative_prompt = gr.Textbox(label="Enhancement negative prompt",
                                                             placeholder="Uses original negative prompt instead if empty.",
                                                             elem_id='enhance_negative_prompt')

                        with gr.Accordion("Detection", open=False):
                            enhance_mask_model = gr.Dropdown(label='Mask generation model',
                                                             choices=flags.inpaint_mask_models,
                                                             value=modules.config.default_enhance_inpaint_mask_model)
                            enhance_mask_cloth_category = gr.Dropdown(label='Cloth category',
                                                                      choices=flags.inpaint_mask_cloth_category,
                                                                      value=modules.config.default_inpaint_mask_cloth_category,
                                                                      visible=modules.config.default_enhance_inpaint_mask_model == 'u2net_cloth_seg',
                                                                      interactive=True)

                            with gr.Accordion("SAM Options",
                                              visible=modules.config.default_enhance_inpaint_mask_model == 'sam',
                                              open=False) as sam_options:
                                enhance_mask_sam_model = gr.Dropdown(label='SAM model',
                                                                     choices=flags.inpaint_mask_sam_model,
                                                                     value=modules.config.default_inpaint_mask_sam_model,
                                                                     interactive=True)
                                enhance_mask_box_threshold = gr.Slider(label="Box Threshold", minimum=0.0,
                                                                       maximum=1.0, value=0.3, step=0.05,
                                                                       interactive=True)
                                enhance_mask_text_threshold = gr.Slider(label="Text Threshold", minimum=0.0,
                                                                        maximum=1.0, value=0.25, step=0.05,
                                                                        interactive=True)
                                enhance_mask_sam_max_detections = gr.Slider(label="Maximum number of detections",
                                                                            info="Set to 0 to detect all",
                                                                            minimum=0, maximum=10,
                                                                            value=modules.config.default_sam_max_detections,
                                                                            step=1, interactive=True)

                        with gr.Accordion("Inpaint", visible=True, open=False):
                            enhance_inpaint_mode = gr.Dropdown(choices=modules.flags.inpaint_options,
                                                               value=modules.config.default_inpaint_method,
                                                               label='Method', interactive=True)
                            enhance_inpaint_disable_initial_latent = gr.Checkbox(
                                label='Disable initial latent in inpaint', value=False)
                            enhance_inpaint_engine = gr.Dropdown(label='Inpaint Engine',
                                                                 value=modules.config.default_inpaint_engine_version,
                                                                 choices=flags.inpaint_engine_versions,
                                                                 info='Version of Fooocus inpaint model. If set, use performance Quality or Speed (no performance LoRAs) for best results.')
                            enhance_inpaint_strength = gr.Slider(label='Inpaint Denoising Strength',
                                                                 minimum=0.0, maximum=1.0, step=0.001,
                                                                 value=1.0,
                                                                 info='Same as the denoising strength in A1111 inpaint. '
                                                                      'Only used in inpaint, not used in outpaint. '
                                                                      '(Outpaint always use 1.0)')
                            enhance_inpaint_respective_field = gr.Slider(label='Inpaint Respective Field',
                                                                         minimum=0.0, maximum=1.0, step=0.001,
                                                                         value=0.618,
                                                                         info='The area to inpaint. '
                                                                              'Value 0 is same as "Only Masked" in A1111. '
                                                                              'Value 1 is same as "Whole Image" in A1111. '
                                                                              'Only used in inpaint, not used in outpaint. '
                                                                              '(Outpaint always use 1.0)')
                            enhance_inpaint_erode_or_dilate = gr.Slider(label='Mask Erode or Dilate',
                                                                        minimum=-64, maximum=64, step=1, value=0,
                                                                        info='Positive value will make white area in the mask larger, '
                                                                             'negative value will make white area smaller. '
                                                                             '(default is 0, always processed before any mask invert)')
                            enhance_mask_invert = gr.Checkbox(label='Invert Mask', value=False)

                        gr.HTML('<a href="https://github.com/lllyasviel/Fooocus/discussions/3281" target="_blank">\U0001F4D4 Documentation</a>')

                        enhance_ctrls += [
                            enhance_enabled,
                            enhance_mask_dino_prompt_text,
                            enhance_prompt,
                            enhance_negative_prompt,
                            enhance_mask_model,
                            enhance_mask_cloth_category,
                            enhance_mask_sam_model,
                            enhance_mask_text_threshold,
                            enhance_mask_box_threshold,
                            enhance_mask_sam_max_detections,
                            enhance_inpaint_disable_initial_latent,
                            enhance_inpaint_engine,
                            enhance_inpaint_strength,
                            enhance_inpaint_respective_field,
                            enhance_inpaint_erode_or_dilate,
                            enhance_mask_invert
                        ]

                        enhance_inpaint_mode_ctrls += [enhance_inpaint_mode]
                        enhance_inpaint_engine_ctrls += [enhance_inpaint_engine]

                        enhance_inpaint_update_ctrls += [[
                            enhance_inpaint_mode, enhance_inpaint_disable_initial_latent, enhance_inpaint_engine,
                            enhance_inpaint_strength, enhance_inpaint_respective_field
                        ]]
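                        # enhance_ctrls is appended to the flat ctrls list further down, so this
                        # per-tab ordering is load-bearing; enhance_inpaint_update_ctrls keeps the
                        # same controls grouped per tab for the startup load loop below.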

                        enhance_inpaint_mode.change(inpaint_mode_change, inputs=[enhance_inpaint_mode, inpaint_engine_state], outputs=[
                            inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts,
                            enhance_inpaint_disable_initial_latent, enhance_inpaint_engine,
                            enhance_inpaint_strength, enhance_inpaint_respective_field
                        ], show_progress=False, queue=False)

                        enhance_mask_model.change(
                            lambda x: [gr.update(visible=x == 'u2net_cloth_seg')] +
                                      [gr.update(visible=x == 'sam')] * 2 +
                                      [gr.Dataset.update(visible=x == 'sam',
                                                         samples=modules.config.example_enhance_detection_prompts)],
                            inputs=enhance_mask_model,
                            outputs=[enhance_mask_cloth_category, enhance_mask_dino_prompt_text, sam_options,
                                     example_enhance_mask_dino_prompt_text],
                            queue=False, show_progress=False)

        switch_js = "(x) => {if(x){viewer_to_bottom(100);viewer_to_bottom(500);}else{viewer_to_top();} return x;}"
        down_js = "() => {viewer_to_bottom();}"

@@ -263,20 +549,25 @@ with shared.gradio_root:
        uov_tab.select(lambda: 'uov', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
        inpaint_tab.select(lambda: 'inpaint', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
        ip_tab.select(lambda: 'ip', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
        desc_tab.select(lambda: 'desc', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
        describe_tab.select(lambda: 'desc', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
        enhance_tab.select(lambda: 'enhance', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
        metadata_tab.select(lambda: 'metadata', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
        enhance_checkbox.change(lambda x: gr.update(visible=x), inputs=enhance_checkbox,
                                outputs=enhance_input_panel, queue=False, show_progress=False, _js=switch_js)

        with gr.Column(scale=1, visible=modules.config.default_advanced_checkbox) as advanced_column:
            with gr.Tab(label='Setting'):
            with gr.Tab(label='Settings'):
                if not args_manager.args.disable_preset_selection:
                    preset_selection = gr.Dropdown(label='Preset',
                                                   choices=modules.config.available_presets,
                                                   value=args_manager.args.preset if args_manager.args.preset else "initial",
                                                   interactive=True)

                performance_selection = gr.Radio(label='Performance',
                                                 choices=flags.Performance.list(),
                                                 choices=flags.Performance.values(),
                                                 value=modules.config.default_performance,
                                                 elem_classes=['performance_selection'])

                with gr.Accordion(label='Aspect Ratios', open=False, elem_id='aspect_ratios_accordion') as aspect_ratios_accordion:
                    aspect_ratios_selection = gr.Radio(label='Aspect Ratios', show_label=False,
                                                       choices=modules.config.available_aspect_ratios_labels,
@@ -327,7 +618,7 @@ with shared.gradio_root:
                    history_link = gr.HTML()
                    shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False)

            with gr.Tab(label='Style', elem_classes=['style_selections_tab']):
            with gr.Tab(label='Styles', elem_classes=['style_selections_tab']):
                style_sorter.try_load_sorted_styles(
                    style_names=legal_style_names,
                    default_selected=modules.config.default_styles)
@@ -360,7 +651,7 @@ with shared.gradio_root:
                                 show_progress=False).then(
                    lambda: None, _js='()=>{refresh_style_localization();}')

            with gr.Tab(label='Model'):
            with gr.Tab(label='Models'):
                with gr.Group():
                    with gr.Row():
                        base_model = gr.Dropdown(label='Base Model (SDXL only)', choices=modules.config.model_filenames, value=modules.config.default_base_model_name, show_label=True)
@@ -401,10 +692,10 @@ with shared.gradio_root:
                sharpness = gr.Slider(label='Image Sharpness', minimum=0.0, maximum=30.0, step=0.001,
                                      value=modules.config.default_sample_sharpness,
                                      info='Higher value means image and texture are sharper.')
                gr.HTML('<a href="https://github.com/lllyasviel/Fooocus/discussions/117" target="_blank">\U0001F4D4 Document</a>')
                dev_mode = gr.Checkbox(label='Developer Debug Mode', value=False, container=False)
                gr.HTML('<a href="https://github.com/lllyasviel/Fooocus/discussions/117" target="_blank">\U0001F4D4 Documentation</a>')
                dev_mode = gr.Checkbox(label='Developer Debug Mode', value=modules.config.default_developer_debug_mode_checkbox, container=False)

                with gr.Column(visible=False) as dev_tools:
                with gr.Column(visible=modules.config.default_developer_debug_mode_checkbox) as dev_tools:
                    with gr.Tab(label='Debug Tools'):
                        adm_scaler_positive = gr.Slider(label='Positive ADM Guidance Scaler', minimum=0.1, maximum=3.0,
                                                        step=0.001, value=1.5, info='The scaler multiplied to positive ADM (use 1.0 to disable). ')
@@ -421,9 +712,9 @@ with shared.gradio_root:
                                                 value=modules.config.default_cfg_tsnr,
                                                 info='Enabling Fooocus\'s implementation of CFG mimicking for TSNR '
                                                      '(effective when real CFG > mimicked CFG).')
                        clip_skip = gr.Slider(label='CLIP Skip', minimum=1, maximum=10, step=1,
                        clip_skip = gr.Slider(label='CLIP Skip', minimum=1, maximum=flags.clip_skip_max, step=1,
                                              value=modules.config.default_clip_skip,
                                              info='Bypass CLIP layers to avoid overfitting (use 1 to disable).')
                                              info='Bypass CLIP layers to avoid overfitting (use 1 to not skip any layers, 2 is recommended).')
                        sampler_name = gr.Dropdown(label='Sampler', choices=flags.sampler_list,
                                                   value=modules.config.default_sampler)
                        scheduler_name = gr.Dropdown(label='Scheduler', choices=flags.scheduler_list,
@@ -455,22 +746,23 @@ with shared.gradio_root:
                                                              minimum=-1, maximum=1.0, step=0.001, value=-1,
                                                              info='Set as negative number to disable. For developer debugging.')
                        overwrite_upscale_strength = gr.Slider(label='Forced Overwrite of Denoising Strength of "Upscale"',
                                                               minimum=-1, maximum=1.0, step=0.001, value=-1,
                                                               minimum=-1, maximum=1.0, step=0.001,
                                                               value=modules.config.default_overwrite_upscale,
                                                               info='Set as negative number to disable. For developer debugging.')

                        disable_preview = gr.Checkbox(label='Disable Preview', value=modules.config.default_black_out_nsfw,
                                                      interactive=not modules.config.default_black_out_nsfw,
                                                      info='Disable preview during generation.')
                        disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
                                                                   value=modules.config.default_performance == flags.Performance.EXTREME_SPEED.value,
                                                                   interactive=modules.config.default_performance != flags.Performance.EXTREME_SPEED.value,
                                                                   value=flags.Performance.has_restricted_features(modules.config.default_performance),
                                                                   info='Disable intermediate results during generation, only show final gallery.')

                        disable_seed_increment = gr.Checkbox(label='Disable seed increment',
                                                             info='Disable automatic seed increment when image number is > 1.',
                                                             value=False)
                        read_wildcards_in_order = gr.Checkbox(label="Read wildcards in order", value=False)

                        black_out_nsfw = gr.Checkbox(label='Black Out NSFW',
                                                     value=modules.config.default_black_out_nsfw,
                        black_out_nsfw = gr.Checkbox(label='Black Out NSFW', value=modules.config.default_black_out_nsfw,
                                                     interactive=not modules.config.default_black_out_nsfw,
                                                     info='Use black image if NSFW is detected.')

@@ -478,6 +770,10 @@ with shared.gradio_root:
                                             inputs=black_out_nsfw, outputs=disable_preview, queue=False,
                                             show_progress=False)

                        if not args_manager.args.disable_image_log:
                            save_final_enhanced_image_only = gr.Checkbox(label='Save only final enhanced image',
                                                                         value=modules.config.default_save_only_final_enhanced_image)

                        if not args_manager.args.disable_metadata:
                            save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images,
                                                                  info='Adds parameters to generated images allowing manual regeneration.')
@@ -511,11 +807,15 @@ with shared.gradio_root:

                    with gr.Tab(label='Inpaint'):
                        debugging_inpaint_preprocessor = gr.Checkbox(label='Debug Inpaint Preprocessing', value=False)
                        debugging_enhance_masks_checkbox = gr.Checkbox(label='Debug Enhance Masks', value=False,
                                                                       info='Show enhance masks in preview and final results')
                        debugging_dino = gr.Checkbox(label='Debug GroundingDINO', value=False,
                                                     info='Use GroundingDINO boxes instead of more detailed SAM masks')
                        inpaint_disable_initial_latent = gr.Checkbox(label='Disable initial latent in inpaint', value=False)
                        inpaint_engine = gr.Dropdown(label='Inpaint Engine',
                                                     value=modules.config.default_inpaint_engine_version,
                                                     choices=flags.inpaint_engine_versions,
                                                     info='Version of Fooocus inpaint model')
                                                     info='Version of Fooocus inpaint model. If set, use performance Quality or Speed (no performance LoRAs) for best results.')
                        inpaint_strength = gr.Slider(label='Inpaint Denoising Strength',
                                                     minimum=0.0, maximum=1.0, step=0.001, value=1.0,
                                                     info='Same as the denoising strength in A1111 inpaint. '
@@ -531,21 +831,24 @@ with shared.gradio_root:
                        inpaint_erode_or_dilate = gr.Slider(label='Mask Erode or Dilate',
                                                            minimum=-64, maximum=64, step=1, value=0,
                                                            info='Positive value will make white area in the mask larger, '
                                                                 'negative value will make white area smaller.'
                                                                 '(default is 0, always process before any mask invert)')
                        inpaint_mask_upload_checkbox = gr.Checkbox(label='Enable Mask Upload', value=False)
                        invert_mask_checkbox = gr.Checkbox(label='Invert Mask', value=False)
                                                                 'negative value will make white area smaller. '
                                                                 '(default is 0, always processed before any mask invert)')
                        dino_erode_or_dilate = gr.Slider(label='GroundingDINO Box Erode or Dilate',
                                                         minimum=-64, maximum=64, step=1, value=0,
                                                         info='Positive value will make white area in the mask larger, '
                                                              'negative value will make white area smaller. '
                                                              '(default is 0, processed before SAM)')

                        inpaint_mask_color = gr.ColorPicker(label='Inpaint brush color', value='#FFFFFF', elem_id='inpaint_brush_color')

                        inpaint_ctrls = [debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine,
                                         inpaint_strength, inpaint_respective_field,
                                         inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate]
                                         inpaint_advanced_masking_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate]

                        inpaint_mask_upload_checkbox.change(lambda x: gr.update(visible=x),
                                                            inputs=inpaint_mask_upload_checkbox,
                                                            outputs=inpaint_mask_image, queue=False,
                                                            show_progress=False)
                        inpaint_advanced_masking_checkbox.change(lambda x: [gr.update(visible=x)] * 2,
                                                                 inputs=inpaint_advanced_masking_checkbox,
                                                                 outputs=[inpaint_mask_image, inpaint_mask_generation_col],
                                                                 queue=False, show_progress=False)

                        inpaint_mask_color.change(lambda x: gr.update(brush_color=x), inputs=inpaint_mask_color,
                                                  outputs=inpaint_input_image,
@@ -569,7 +872,7 @@ with shared.gradio_root:
            modules.config.update_files()
            results = [gr.update(choices=modules.config.model_filenames)]
            results += [gr.update(choices=['None'] + modules.config.model_filenames)]
            results += [gr.update(choices=['None'] + modules.config.vae_filenames)]
            results += [gr.update(choices=[flags.default_vae] + modules.config.vae_filenames)]
            if not args_manager.args.disable_preset_selection:
                results += [gr.update(choices=modules.config.available_presets)]
            for i in range(modules.config.default_max_lora_number):
@@ -590,10 +893,12 @@ with shared.gradio_root:
                             overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive,
                             adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, clip_skip,
                             base_model, refiner_model, refiner_switch, sampler_name, scheduler_name, vae_name,
                             seed_random, image_seed, generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls
                             seed_random, image_seed, inpaint_engine, inpaint_engine_state,
                             inpaint_mode] + enhance_inpaint_mode_ctrls + [generate_button,
                             load_parameter_button] + freeu_ctrls + lora_ctrls

        if not args_manager.args.disable_preset_selection:
            def preset_selection_change(preset, is_generating):
            def preset_selection_change(preset, is_generating, inpaint_mode):
                preset_content = modules.config.try_get_preset_content(preset) if preset != 'initial' else {}
                preset_prepared = modules.meta_parser.parse_meta_from_preset(preset_content)

@@ -602,21 +907,39 @@ with shared.gradio_root:
                checkpoint_downloads = preset_prepared.get('checkpoint_downloads', {})
                embeddings_downloads = preset_prepared.get('embeddings_downloads', {})
                lora_downloads = preset_prepared.get('lora_downloads', {})
                vae_downloads = preset_prepared.get('vae_downloads', {})

                preset_prepared['base_model'], preset_prepared['lora_downloads'] = launch.download_models(
                    default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads)
                preset_prepared['base_model'], preset_prepared['checkpoint_downloads'] = launch.download_models(
                    default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads,
                    vae_downloads)

                if 'prompt' in preset_prepared and preset_prepared.get('prompt') == '':
                    del preset_prepared['prompt']

                return modules.meta_parser.load_parameter_button_click(json.dumps(preset_prepared), is_generating)
                return modules.meta_parser.load_parameter_button_click(json.dumps(preset_prepared), is_generating, inpaint_mode)
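                # Preset loading funnels through the same loader as metadata import; the
                # extra inpaint_mode argument lets the loader account for the currently
                # selected inpaint method when applying values.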

            preset_selection.change(preset_selection_change, inputs=[preset_selection, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
                .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)

            def inpaint_engine_state_change(inpaint_engine_version, *args):
                if inpaint_engine_version == 'empty':
                    inpaint_engine_version = modules.config.default_inpaint_engine_version

                result = []
                for inpaint_mode in args:
                    if inpaint_mode != modules.flags.inpaint_option_detail:
                        result.append(gr.update(value=inpaint_engine_version))
                    else:
                        result.append(gr.update())

                return result
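            # Propagates the remembered engine version to every enhance inpaint-engine
            # dropdown; tabs set to the 'detail' option do not use an inpaint engine,
            # so they receive a bare gr.update() and keep their current value.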

            preset_selection.change(preset_selection_change, inputs=[preset_selection, state_is_generating, inpaint_mode], outputs=load_data_outputs, queue=False, show_progress=True) \
                .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
                .then(lambda: None, _js='()=>{refresh_style_localization();}') \
                .then(inpaint_engine_state_change, inputs=[inpaint_engine_state] + enhance_inpaint_mode_ctrls, outputs=enhance_inpaint_engine_ctrls, queue=False, show_progress=False)

        performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 +
                                               [gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 +
                                               [gr.update(interactive=not flags.Performance.has_restricted_features(x), value=flags.Performance.has_restricted_features(x))] * 1,
                                               [gr.update(value=flags.Performance.has_restricted_features(x))] * 1,
                                     inputs=performance_selection,
                                     outputs=[
                                         guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive,
@@ -630,39 +953,27 @@ with shared.gradio_root:
                                     queue=False, show_progress=False) \
            .then(fn=lambda: None, _js='refresh_grid_delayed', queue=False, show_progress=False)

        def inpaint_mode_change(mode):
            assert mode in modules.flags.inpaint_options

            # inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts,
            # inpaint_disable_initial_latent, inpaint_engine,
            # inpaint_strength, inpaint_respective_field

            if mode == modules.flags.inpaint_option_detail:
                return [
                    gr.update(visible=True), gr.update(visible=False, value=[]),
                    gr.Dataset.update(visible=True, samples=modules.config.example_inpaint_prompts),
                    False, 'None', 0.5, 0.0
                ]

            if mode == modules.flags.inpaint_option_modify:
                return [
                    gr.update(visible=True), gr.update(visible=False, value=[]),
                    gr.Dataset.update(visible=False, samples=modules.config.example_inpaint_prompts),
                    True, modules.config.default_inpaint_engine_version, 1.0, 0.0
                ]

            return [
                gr.update(visible=False, value=''), gr.update(visible=True),
                gr.Dataset.update(visible=False, samples=modules.config.example_inpaint_prompts),
                False, modules.config.default_inpaint_engine_version, 1.0, 0.618
            ]

        inpaint_mode.input(inpaint_mode_change, inputs=inpaint_mode, outputs=[
        inpaint_mode.change(inpaint_mode_change, inputs=[inpaint_mode, inpaint_engine_state], outputs=[
            inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts,
            inpaint_disable_initial_latent, inpaint_engine,
            inpaint_strength, inpaint_respective_field
        ], show_progress=False, queue=False)

        # load configured default_inpaint_method
        default_inpaint_ctrls = [inpaint_mode, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field]
        for mode, disable_initial_latent, engine, strength, respective_field in [default_inpaint_ctrls] + enhance_inpaint_update_ctrls:
            shared.gradio_root.load(inpaint_mode_change, inputs=[mode, inpaint_engine_state], outputs=[
                inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts, disable_initial_latent,
                engine, strength, respective_field
            ], show_progress=False, queue=False)
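        # On page load, the same inpaint_mode_change handler initializes the main
        # inpaint tab and each enhance tab from the configured default inpaint method.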

        generate_mask_button.click(fn=generate_mask,
                                   inputs=[inpaint_input_image, inpaint_mask_model, inpaint_mask_cloth_category,
                                           inpaint_mask_dino_prompt_text, inpaint_mask_sam_model,
                                           inpaint_mask_box_threshold, inpaint_mask_text_threshold,
                                           inpaint_mask_sam_max_detections, dino_erode_or_dilate, debugging_dino],
                                   outputs=inpaint_mask_image, show_progress=True, queue=True)

        ctrls = [currentTask, generate_image_grid]
        ctrls += [
            prompt, negative_prompt, style_selections,
@@ -684,10 +995,17 @@ with shared.gradio_root:
        ctrls += freeu_ctrls
        ctrls += inpaint_ctrls

        if not args_manager.args.disable_image_log:
            ctrls += [save_final_enhanced_image_only]

        if not args_manager.args.disable_metadata:
            ctrls += [save_metadata_to_images, metadata_scheme]

        ctrls += ip_ctrls
        ctrls += [debugging_dino, dino_erode_or_dilate, debugging_enhance_masks_checkbox,
                  enhance_input_image, enhance_checkbox, enhance_uov_method, enhance_uov_processing_order,
                  enhance_uov_prompt_type]
        ctrls += enhance_ctrls

        def parse_meta(raw_prompt_txt, is_generating):
            loaded_json = None
@@ -704,18 +1022,18 @@ with shared.gradio_root:

        prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False)

        load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False)
        load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating, inpaint_mode], outputs=load_data_outputs, queue=False, show_progress=False)

        def trigger_metadata_import(filepath, state_is_generating):
            parameters, metadata_scheme = modules.meta_parser.read_info_from_image(filepath)
        def trigger_metadata_import(file, state_is_generating):
            parameters, metadata_scheme = modules.meta_parser.read_info_from_image(file)
            if parameters is None:
                print('Could not find metadata in the image!')
                parsed_parameters = {}
            else:
                metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
                parsed_parameters = metadata_parser.parse_json(parameters)
                parsed_parameters = metadata_parser.to_json(parameters)

            return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating)
            return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating, inpaint_mode)

        metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
            .then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)
@@ -743,27 +1061,54 @@ with shared.gradio_root:
                gr.Audio(interactive=False, value=notification_file, elem_id='audio_notification', visible=False)
                break

        def trigger_describe(mode, img):
            if mode == flags.desc_type_photo:
        def trigger_describe(modes, img, apply_styles):
            describe_prompts = []
            styles = set()

            if flags.describe_type_photo in modes:
                from extras.interrogate import default_interrogator as default_interrogator_photo
                return default_interrogator_photo(img), ["Fooocus V2", "Fooocus Enhance", "Fooocus Sharp"]
            if mode == flags.desc_type_anime:
                describe_prompts.append(default_interrogator_photo(img))
                styles.update(["Fooocus V2", "Fooocus Enhance", "Fooocus Sharp"])

            if flags.describe_type_anime in modes:
                from extras.wd14tagger import default_interrogator as default_interrogator_anime
                return default_interrogator_anime(img), ["Fooocus V2", "Fooocus Masterpiece"]
            return mode, ["Fooocus V2"]
                describe_prompts.append(default_interrogator_anime(img))
                styles.update(["Fooocus V2", "Fooocus Masterpiece"])

        desc_btn.click(trigger_describe, inputs=[desc_method, desc_input_image],
                       outputs=[prompt, style_selections], show_progress=True, queue=True)
            if len(styles) == 0 or not apply_styles:
                styles = gr.update()
            else:
                styles = list(styles)

        if args_manager.args.enable_describe_uov_image:
            def trigger_uov_describe(mode, img, prompt):
            if len(describe_prompts) == 0:
                describe_prompt = gr.update()
            else:
                describe_prompt = ', '.join(describe_prompts)

            return describe_prompt, styles
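            # Prompts from the selected interrogators are comma-joined; styles are the
            # union across modes and are only written back when 'Apply Styles' is checked.
            # A bare gr.update() leaves the existing prompt or style selection untouched.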

        describe_btn.click(trigger_describe, inputs=[describe_methods, describe_input_image, describe_apply_styles],
                           outputs=[prompt, style_selections], show_progress=True, queue=True) \
            .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
            .then(lambda: None, _js='()=>{refresh_style_localization();}')

        if args_manager.args.enable_auto_describe_image:
            def trigger_auto_describe(mode, img, prompt, apply_styles):
                # keep prompt if not empty
                if prompt == '':
                    return trigger_describe(mode, img)
                    return trigger_describe(mode, img, apply_styles)
                return gr.update(), gr.update()

            uov_input_image.upload(trigger_uov_describe, inputs=[desc_method, uov_input_image, prompt],
                                   outputs=[prompt, style_selections], show_progress=True, queue=True)
            uov_input_image.upload(trigger_auto_describe, inputs=[describe_methods, uov_input_image, prompt, describe_apply_styles],
                                   outputs=[prompt, style_selections], show_progress=True, queue=True) \
                .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
                .then(lambda: None, _js='()=>{refresh_style_localization();}')

            enhance_input_image.upload(lambda: gr.update(value=True), outputs=enhance_checkbox, queue=False, show_progress=False) \
                .then(trigger_auto_describe, inputs=[describe_methods, enhance_input_image, prompt, describe_apply_styles],
                      outputs=[prompt, style_selections], show_progress=True, queue=True) \
                .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
                .then(lambda: None, _js='()=>{refresh_style_localization();}')


def dump_default_english_config():
    from modules.localization import dump_english_config