From 510b587e845ea7ddb09f0e3d0f756f146084f31b Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Thu, 25 Jan 2024 21:08:19 +0100
Subject: [PATCH 01/52] fix: use pnginfo "parameters" instead of "Comments"

see https://github.com/RupertAvery/DiffusionToolkit/issues/202 and
https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/cf2772fab0af5573da775e7437e6acdca424f26e/modules/processing.py#L939
---
 modules/private_logger.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/private_logger.py b/modules/private_logger.py
index 9cfbfc92..223463b3 100644
--- a/modules/private_logger.py
+++ b/modules/private_logger.py
@@ -28,7 +28,7 @@ def log(img, dic, metadata=None, save_metadata_to_image=False):

     if save_metadata_to_image:
         pnginfo = PngInfo()
-        pnginfo.add_text("Comment", metadata)
+        pnginfo.add_text('parameters', metadata)
     else:
         pnginfo = None

     Image.fromarray(img).save(local_temp_filename, pnginfo=pnginfo)

From 20b79788a09e00c27d28bf63dd6a51cec29d64c2 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Thu, 25 Jan 2024 23:48:47 +0100
Subject: [PATCH 02/52] feat: add resolved prompts to metadata

---
 modules/async_worker.py | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index 4ce29384..4ee8b833 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -202,7 +202,7 @@ def worker():
             modules.patch.adm_scaler_end = advanced_parameters.adm_scaler_end = 0.0
             steps = 8

-        if not args_manager.args.disable_metadata:
+        if save_metadata_to_images:
             base_model_path = os.path.join(modules.config.path_checkpoints, base_model_name)
             base_model_hash = calculate_sha256(base_model_path)[0:10]

@@ -796,19 +796,23 @@
             metadata_string = ''
             if save_metadata_to_images and metadata_scheme == 'fooocus':
                 metadata = {
-                    'prompt': raw_prompt, 'negative_prompt': raw_negative_prompt, 'styles': str(raw_style_selections),
+                    # prompt with wildcards
+                    'prompt': raw_prompt, 'negative_prompt': raw_negative_prompt,
+                    # prompt with resolved wildcards
                     'real_prompt': task['log_positive_prompt'], 'real_negative_prompt': task['log_negative_prompt'],
+                    # prompt with resolved wildcards, styles and prompt expansion
+                    'complete_prompt_positive': task['positive'], 'complete_prompt_negative': task['negative'],
+                    'styles': str(raw_style_selections),
                     'seed': task['task_seed'], 'width': width, 'height': height,
                     'sampler': sampler_name, 'scheduler': scheduler_name, 'performance': performance_selection,
                     'steps': steps, 'refiner_switch': refiner_switch, 'sharpness': sharpness, 'cfg': cfg_scale,
-                    'base_model': base_model_name, 'refiner_model': refiner_model_name,
+                    'base_model': base_model_name, 'base_model_hash': base_model_hash, 'refiner_model': refiner_model_name,
                     'denoising_strength': denoising_strength,
                     'freeu': advanced_parameters.freeu_enabled,
                     'img2img': input_image_checkbox,
                     'prompt_expansion': task['expansion']
                 }
-
                 if advanced_parameters.freeu_enabled:
                     metadata |= {
                         'freeu_b1': advanced_parameters.freeu_b1, 'freeu_b2': advanced_parameters.freeu_b2, 'freeu_s1': advanced_parameters.freeu_s1, 'freeu_s2': advanced_parameters.freeu_s2
                     }
@@ -829,12 +833,12 @@
                         metadata |= {
                             'outpaint_selections': outpaint_selections
                         }
-                    else:
-                        metadata |= {
-                            'inpaint_additional_prompt': inpaint_additional_prompt, 'inpaint_mask_upload': advanced_parameters.inpaint_mask_upload_checkbox, 'invert_mask': advanced_parameters.invert_mask_checkbox,
-                            'inpaint_disable_initial_latent':
advanced_parameters.inpaint_disable_initial_latent, 'inpaint_engine': advanced_parameters.inpaint_engine, - 'inpaint_strength': advanced_parameters.inpaint_strength, 'inpaint_respective_field': advanced_parameters.inpaint_respective_field, - } + + metadata |= { + 'inpaint_additional_prompt': inpaint_additional_prompt, 'inpaint_mask_upload': advanced_parameters.inpaint_mask_upload_checkbox, 'invert_mask': advanced_parameters.invert_mask_checkbox, + 'inpaint_disable_initial_latent': advanced_parameters.inpaint_disable_initial_latent, 'inpaint_engine': advanced_parameters.inpaint_engine, + 'inpaint_strength': advanced_parameters.inpaint_strength, 'inpaint_respective_field': advanced_parameters.inpaint_respective_field, + } if 'cn' in goals: metadata |= { From 051faf78b823deaaaece9e381550670ee5bf5310 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Thu, 25 Jan 2024 23:49:25 +0100 Subject: [PATCH 03/52] fix: use correct default value in metadata check for created_by --- modules/async_worker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index 4ee8b833..e563e667 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -860,7 +860,7 @@ def worker(): metadata |= { 'software': f'Fooocus v{fooocus_version.version}', } - if modules.config.metadata_created_by != 'None': + if modules.config.metadata_created_by != '': metadata |= { 'created_by': modules.config.metadata_created_by } @@ -879,7 +879,7 @@ def worker(): "Version": f'Fooocus v{fooocus_version.version}' } - if modules.config.metadata_created_by != 'None': + if modules.config.metadata_created_by != '': generation_params |= { 'Created By': f'{modules.config.metadata_created_by}' } From f3010313fc49c40a42c9141618060fa36418c633 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Sat, 27 Jan 2024 20:03:10 +0100 Subject: [PATCH 04/52] wip: add metadata mapping, reading and writing applying data after reading currently not functional for A1111 --- modules/async_worker.py | 147 +++++------------------- modules/config.py | 3 +- modules/flags.py | 9 +- modules/meta_parser.py | 233 ++++++++++++++++++-------------------- modules/metadata.py | 209 ++++++++++++++++++++++++++++++++++ modules/private_logger.py | 22 ++-- modules/util.py | 13 ++- webui.py | 83 ++++++++------ 8 files changed, 434 insertions(+), 285 deletions(-) create mode 100644 modules/metadata.py diff --git a/modules/async_worker.py b/modules/async_worker.py index e563e667..ddef06c1 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -17,7 +17,6 @@ def worker(): import os import traceback import math - import json import numpy as np import torch import time @@ -43,8 +42,9 @@ def worker(): from modules.private_logger import log from extras.expansion import safe_str from modules.util import remove_empty_str, HWC3, resize_image, \ - get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, calculate_sha256, quote + get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, calculate_sha256 from modules.upscaler import perform_upscale + from modules.metadata import MetadataScheme try: async_gradio_app = shared.gradio_root @@ -144,7 +144,8 @@ def worker(): inpaint_additional_prompt = args.pop() inpaint_mask_image_upload = args.pop() save_metadata_to_images = args.pop() if not args_manager.args.disable_metadata else False - metadata_scheme = args.pop() if not args_manager.args.disable_metadata else 'fooocus' + 
metadata_scheme = args.pop() if not args_manager.args.disable_metadata else MetadataScheme.FOOOCUS.value + assert metadata_scheme in [item.value for item in MetadataScheme] cn_tasks = {x: [] for x in flags.ip_list} for _ in range(4): @@ -793,129 +794,37 @@ def worker(): if inpaint_worker.current_task is not None: imgs = [inpaint_worker.current_task.post_process(x) for x in imgs] - metadata_string = '' - if save_metadata_to_images and metadata_scheme == 'fooocus': - metadata = { - # prompt with wildcards - 'prompt': raw_prompt, 'negative_prompt': raw_negative_prompt, - # prompt with resolved wildcards - 'real_prompt': task['log_positive_prompt'], 'real_negative_prompt': task['log_negative_prompt'], - # prompt with resolved wildcards, styles and prompt expansion - 'complete_prompt_positive': task['positive'], 'complete_prompt_negative': task['negative'], - 'styles': str(raw_style_selections), - 'seed': task['task_seed'], 'width': width, 'height': height, - 'sampler': sampler_name, 'scheduler': scheduler_name, 'performance': performance_selection, - 'steps': steps, 'refiner_switch': refiner_switch, 'sharpness': sharpness, 'cfg': cfg_scale, - 'base_model': base_model_name, 'base_model_hash': base_model_hash, 'refiner_model': refiner_model_name, - 'denoising_strength': denoising_strength, - 'freeu': advanced_parameters.freeu_enabled, - 'img2img': input_image_checkbox, - 'prompt_expansion': task['expansion'] - } - - if advanced_parameters.freeu_enabled: - metadata |= { - 'freeu_b1': advanced_parameters.freeu_b1, 'freeu_b2': advanced_parameters.freeu_b2, 'freeu_s1': advanced_parameters.freeu_s1, 'freeu_s2': advanced_parameters.freeu_s2 - } - - if 'vary' in goals: - metadata |= { - 'uov_method': uov_method - } - - if 'upscale' in goals: - metadata |= { - 'uov_method': uov_method, 'scale': f - } - - if 'inpaint' in goals: - if len(outpaint_selections) > 0: - metadata |= { - 'outpaint_selections': outpaint_selections - } - - metadata |= { - 'inpaint_additional_prompt': inpaint_additional_prompt, 'inpaint_mask_upload': advanced_parameters.inpaint_mask_upload_checkbox, 'invert_mask': advanced_parameters.invert_mask_checkbox, - 'inpaint_disable_initial_latent': advanced_parameters.inpaint_disable_initial_latent, 'inpaint_engine': advanced_parameters.inpaint_engine, - 'inpaint_strength': advanced_parameters.inpaint_strength, 'inpaint_respective_field': advanced_parameters.inpaint_respective_field, - } - - if 'cn' in goals: - metadata |= { - 'canny_low_threshold': advanced_parameters.canny_low_threshold, 'canny_high_threshold': advanced_parameters.canny_high_threshold, - } - - ip_list = {x: [] for x in flags.ip_list} - cn_task_index = 1 - for cn_type in ip_list: - for cn_task in cn_tasks[cn_type]: - cn_img, cn_stop, cn_weight = cn_task - metadata |= { - f'image_prompt_{cn_task_index}': { - 'cn_type': cn_type, 'cn_stop': cn_stop, 'cn_weight': cn_weight, - } - } - cn_task_index += 1 - - metadata |= { - 'software': f'Fooocus v{fooocus_version.version}', - } - if modules.config.metadata_created_by != '': - metadata |= { - 'created_by': modules.config.metadata_created_by - } - metadata_string = json.dumps(metadata, ensure_ascii=False) - elif save_metadata_to_images and metadata_scheme == 'a1111': - generation_params = { - "Steps": steps, - "Sampler": sampler_name, - "CFG scale": cfg_scale, - "Seed": task['task_seed'], - "Size": f"{width}x{height}", - "Model hash": base_model_hash, - "Model": base_model_name.split('.')[0], - "Lora hashes": lora_hashes_string, - "Denoising strength": denoising_strength, - 
"Version": f'Fooocus v{fooocus_version.version}' - } - - if modules.config.metadata_created_by != '': - generation_params |= { - 'Created By': f'{modules.config.metadata_created_by}' - } - - generation_params_text = ", ".join([k if k == v else f'{k}: {quote(v)}' for k, v in generation_params.items() if v is not None]) - positive_prompt_resolved = ', '.join(task['positive']) - negative_prompt_resolved = ', '.join(task['negative']) - negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else "" - metadata_string = f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip() - for x in imgs: d = [ - ('Prompt', task['log_positive_prompt']), - ('Negative Prompt', task['log_negative_prompt']), - ('Fooocus V2 Expansion', task['expansion']), - ('Styles', str(raw_style_selections)), - ('Performance', performance_selection), - ('Resolution', str((width, height))), - ('Sharpness', sharpness), - ('Guidance Scale', guidance_scale), - ('ADM Guidance', str(( + ('Prompt', 'prompt', task['log_positive_prompt'], True, True), + ('Full Positive Prompt', 'full_prompt', task['positive'], False, False), + ('Negative Prompt', 'negative_prompt', task['log_negative_prompt'], True, True), + ('Full Negative Prompt', 'full_negative_prompt', task['negative'], False, False), + ('Fooocus V2 Expansion', 'prompt_expansion', task['expansion'], True, True), + ('Styles', 'styles', str(raw_style_selections), True, True), + ('Performance', 'performance', performance_selection, True, True), + ('Steps', 'steps', steps, False, False), + ('Resolution', 'resolution', str((width, height)), True, True), + ('Sharpness', 'sharpness', sharpness, True, True), + ('Guidance Scale', 'guidance_scale', guidance_scale, True, True), + ('ADM Guidance', 'adm_guidance', str(( modules.patch.positive_adm_scale, modules.patch.negative_adm_scale, - modules.patch.adm_scaler_end))), - ('Base Model', base_model_name), - ('Refiner Model', refiner_model_name), - ('Refiner Switch', refiner_switch), - ('Sampler', sampler_name), - ('Scheduler', scheduler_name), - ('Seed', task['task_seed']), + modules.patch.adm_scaler_end)), True, True), + ('Base Model', 'base_model', base_model_name, True, True), + ('Refiner Model', 'refiner_model', refiner_model_name, True, True), + ('Refiner Switch', 'refiner_switch', refiner_switch, True, True), + ('Sampler', 'sampler', sampler_name, True, True), + ('Scheduler', 'scheduler', scheduler_name, True, True), + ('Seed', 'seed', task['task_seed'], True, True) ] for li, (n, w) in enumerate(loras): if n != 'None': - d.append((f'LoRA {li + 1}', f'{n} : {w}')) - d.append(('Version', 'v' + fooocus_version.version)) - log(x, d, metadata_string, save_metadata_to_images) + d.append((f'LoRA {li + 1}', f'lora{li + 1}_combined', f'{n} : {w}', True, True)) + # d.append((f'LoRA {li + 1} Name', f'lora{li + 1}_name', n, False, False)) + # d.append((f'LoRA {li + 1} Weight', f'lora{li + 1}_weight', n, False, False)) + d.append(('Version', 'version', 'v' + fooocus_version.version, True, True)) + log(x, d, save_metadata_to_images, metadata_scheme) yield_result(async_task, imgs, do_not_show_finished_images=len(tasks) == 1) except ldm_patched.modules.model_management.InterruptProcessingException as e: diff --git a/modules/config.py b/modules/config.py index 924284c5..4a9b6837 100644 --- a/modules/config.py +++ b/modules/config.py @@ -6,6 +6,7 @@ import args_manager import modules.flags import modules.sdxl_styles +from modules.metadata import MetadataScheme from 
modules.model_loader import load_file_from_url from modules.util import get_files_from_folder @@ -322,7 +323,7 @@ default_save_metadata_to_images = get_config_item_or_set_default( ) default_metadata_scheme = get_config_item_or_set_default( key='default_metadata_scheme', - default_value='fooocus', + default_value=MetadataScheme.FOOOCUS.value, validator=lambda x: x in [y[1] for y in modules.flags.metadata_scheme if y[1] == x] ) metadata_created_by = get_config_item_or_set_default( diff --git a/modules/flags.py b/modules/flags.py index fd346f2e..abcd3f60 100644 --- a/modules/flags.py +++ b/modules/flags.py @@ -1,3 +1,5 @@ +from modules.metadata import MetadataScheme + disabled = 'Disabled' enabled = 'Enabled' subtle_variation = 'Vary (Subtle)' @@ -32,9 +34,10 @@ default_parameters = { cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0) } # stop, weight -metadata_scheme =[ - ('Fooocus (json)', 'fooocus'), - ('A1111 (plain text)', 'a1111'), +# TODO use translation here +metadata_scheme = [ + ('Fooocus (json)', MetadataScheme.FOOOCUS.value), + ('A1111 (plain text)', MetadataScheme.A1111.value), ] inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6'] diff --git a/modules/meta_parser.py b/modules/meta_parser.py index 07b42a16..e7cf8a47 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -1,138 +1,38 @@ import json + import gradio as gr + import modules.config -def load_parameter_button_click(raw_prompt_txt, is_generating): - loaded_parameter_dict = json.loads(raw_prompt_txt) +def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool): + loaded_parameter_dict = raw_metadata + if isinstance(raw_metadata, str): + loaded_parameter_dict = json.loads(raw_metadata) assert isinstance(loaded_parameter_dict, dict) results = [True, 1] - try: - h = loaded_parameter_dict.get('Prompt', None) - assert isinstance(h, str) - results.append(h) - except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Negative Prompt', None) - assert isinstance(h, str) - results.append(h) - except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Styles', None) - h = eval(h) - assert isinstance(h, list) - results.append(h) - except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Performance', None) - assert isinstance(h, str) - results.append(h) - except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Resolution', None) - width, height = eval(h) - formatted = modules.config.add_ratio(f'{width}*{height}') - if formatted in modules.config.available_aspect_ratios: - results.append(formatted) - results.append(-1) - results.append(-1) - else: - results.append(gr.update()) - results.append(width) - results.append(height) - except: - results.append(gr.update()) - results.append(gr.update()) - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Sharpness', None) - assert h is not None - h = float(h) - results.append(h) - except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Guidance Scale', None) - assert h is not None - h = float(h) - results.append(h) - except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('ADM Guidance', None) - p, n, e = eval(h) - results.append(float(p)) - results.append(float(n)) - results.append(float(e)) - except: - results.append(gr.update()) - results.append(gr.update()) - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Base Model', None) - assert 
isinstance(h, str) - results.append(h) - except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Refiner Model', None) - assert isinstance(h, str) - results.append(h) - except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Refiner Switch', None) - assert h is not None - h = float(h) - results.append(h) - except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Sampler', None) - assert isinstance(h, str) - results.append(h) - except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Scheduler', None) - assert isinstance(h, str) - results.append(h) - except: - results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Seed', None) - assert h is not None - h = int(h) - results.append(False) - results.append(h) - except: - results.append(gr.update()) - results.append(gr.update()) + get_str('prompt', 'Prompt', loaded_parameter_dict, results) + get_str('negative_prompt', 'Negative Prompt', loaded_parameter_dict, results) + get_list('styles', 'Styles', loaded_parameter_dict, results) + get_str('performance', 'Performance', loaded_parameter_dict, results) + get_resolution('resolution', 'Resolution', loaded_parameter_dict, results) + get_float('sharpness', 'Sharpness', loaded_parameter_dict, results) + get_float('guidance_scale', 'Guidance Scale', loaded_parameter_dict, results) + get_adm_guidance('adm_guidance', 'ADM Guidance', loaded_parameter_dict, results) + get_str('base_model', 'Base Model', loaded_parameter_dict, results) + get_str('refiner_model', 'Refiner Model', loaded_parameter_dict, results) + get_float('refiner_switch', 'Refiner Switch', loaded_parameter_dict, results) + get_str('sampler', 'Sampler', loaded_parameter_dict, results) + get_str('scheduler', 'Scheduler', loaded_parameter_dict, results) + get_seed('seed', 'Seed', loaded_parameter_dict, results) if is_generating: results.append(gr.update()) else: results.append(gr.update(visible=True)) - + results.append(gr.update(visible=False)) for i in range(1, 6): @@ -146,3 +46,94 @@ def load_parameter_button_click(raw_prompt_txt, is_generating): results.append(gr.update()) return results + + +def get_str(key: str, fallback: str | None, source_dict: dict, results: list, default=None): + try: + h = source_dict.get(key, default) + assert isinstance(h, str) + results.append(h) + except: + if fallback is not None: + get_str(fallback, None, source_dict, results, default) + return + results.append(gr.update()) + + +def get_list(key: str, fallback: str | None, source_dict: dict, results: list, default=None): + try: + h = source_dict.get(key, default) + h = eval(h) + assert isinstance(h, list) + results.append(h) + except: + if fallback is not None: + get_list(fallback, None, source_dict, results, default) + return + results.append(gr.update()) + + +def get_float(key: str, fallback: str | None, source_dict: dict, results: list, default=None): + try: + h = source_dict.get(key, default) + assert h is not None + h = float(h) + results.append(h) + except: + if fallback is not None: + get_float(fallback, None, source_dict, results, default) + return + results.append(gr.update()) + + +def get_resolution(key: str, fallback: str | None, source_dict: dict, results: list, default=None): + try: + h = source_dict.get(key, default) + width, height = eval(h) + formatted = modules.config.add_ratio(f'{width}*{height}') + if formatted in modules.config.available_aspect_ratios: + results.append(formatted) + results.append(-1) + results.append(-1) + else: + 
results.append(gr.update()) + results.append(width) + results.append(height) + except: + if fallback is not None: + get_resolution(fallback, None, source_dict, results, default) + return + results.append(gr.update()) + results.append(gr.update()) + results.append(gr.update()) + + +def get_seed(key: str, fallback: str | None, source_dict: dict, results: list, default=None): + try: + h = source_dict.get(key, default) + assert h is not None + h = int(h) + results.append(False) + results.append(h) + except: + if fallback is not None: + get_seed(fallback, None, source_dict, results, default) + return + results.append(gr.update()) + results.append(gr.update()) + + +def get_adm_guidance(key: str, fallback: str | None, source_dict: dict, results: list, default=None): + try: + h = source_dict.get(key, default) + p, n, e = eval(h) + results.append(float(p)) + results.append(float(n)) + results.append(float(e)) + except: + if fallback is not None: + get_adm_guidance(fallback, None, source_dict, results, default) + return + results.append(gr.update()) + results.append(gr.update()) + results.append(gr.update()) diff --git a/modules/metadata.py b/modules/metadata.py new file mode 100644 index 00000000..397f7333 --- /dev/null +++ b/modules/metadata.py @@ -0,0 +1,209 @@ +import json +from abc import ABC, abstractmethod +from enum import Enum +from PIL import Image + +import modules.config +import fooocus_version +# import advanced_parameters +from modules.util import quote, is_json + + +class MetadataScheme(Enum): + FOOOCUS = 'fooocus' + A1111 = 'a1111' + + +class MetadataParser(ABC): + @abstractmethod + def parse_json(self, metadata: dict): + raise NotImplementedError + + # TODO add data to parse + @abstractmethod + def parse_string(self, metadata: dict) -> str: + raise NotImplementedError + + +class A1111MetadataParser(MetadataParser): + + def parse_json(self, metadata: dict): + # TODO add correct mapping + pass + + def parse_string(self, metadata: dict) -> str: + # TODO add correct mapping + + data = {k: v for _, k, v, _, _ in metadata} + + # TODO check if correct + width, heigth = data['resolution'].split(', ') + + generation_params = { + "Steps": data['steps'], + "Sampler": data['sampler'], + "CFG scale": data['guidance_scale'], + "Seed": data['seed'], + "Size": f"{width}x{heigth}", + # "Model hash": base_model_hash, + "Model": data['base_model'].split('.')[0], + # "Lora hashes": lora_hashes_string, + # "Denoising strength": data['denoising_strength'], + "Version": f"Fooocus {data['version']}" + } + + generation_params_text = ", ".join( + [k if k == v else f'{k}: {quote(v)}' for k, v in generation_params.items() if v is not None]) + positive_prompt_resolved = ', '.join(data['full_prompt']) + negative_prompt_resolved = ', '.join(data['full_negative_prompt']) + negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else "" + return f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip() + + +class FooocusMetadataParser(MetadataParser): + + def parse_json(self, metadata: dict): + # TODO add mapping if necessary + return metadata + + def parse_string(self, metadata: dict) -> str: + + return json.dumps({k: v for _, k, v, _, _ in metadata}) + # metadata = { + # # prompt with wildcards + # 'prompt': raw_prompt, 'negative_prompt': raw_negative_prompt, + # # prompt with resolved wildcards + # 'real_prompt': task['log_positive_prompt'], 'real_negative_prompt': task['log_negative_prompt'], + # # prompt with resolved wildcards, styles and 
prompt expansion + # 'complete_prompt_positive': task['positive'], 'complete_prompt_negative': task['negative'], + # 'styles': str(raw_style_selections), + # 'seed': task['task_seed'], 'width': width, 'height': height, + # 'sampler': sampler_name, 'scheduler': scheduler_name, 'performance': performance_selection, + # 'steps': steps, 'refiner_switch': refiner_switch, 'sharpness': sharpness, 'cfg': cfg_scale, + # 'base_model': base_model_name, 'base_model_hash': base_model_hash, 'refiner_model': refiner_model_name, + # 'denoising_strength': denoising_strength, + # 'freeu': advanced_parameters.freeu_enabled, + # 'img2img': input_image_checkbox, + # 'prompt_expansion': task['expansion'] + # } + # + # if advanced_parameters.freeu_enabled: + # metadata |= { + # 'freeu_b1': advanced_parameters.freeu_b1, 'freeu_b2': advanced_parameters.freeu_b2, + # 'freeu_s1': advanced_parameters.freeu_s1, 'freeu_s2': advanced_parameters.freeu_s2 + # } + # + # if 'vary' in goals: + # metadata |= { + # 'uov_method': uov_method + # } + # + # if 'upscale' in goals: + # metadata |= { + # 'uov_method': uov_method, 'scale': f + # } + # + # if 'inpaint' in goals: + # if len(outpaint_selections) > 0: + # metadata |= { + # 'outpaint_selections': outpaint_selections + # } + # + # metadata |= { + # 'inpaint_additional_prompt': inpaint_additional_prompt, + # 'inpaint_mask_upload': advanced_parameters.inpaint_mask_upload_checkbox, + # 'invert_mask': advanced_parameters.invert_mask_checkbox, + # 'inpaint_disable_initial_latent': advanced_parameters.inpaint_disable_initial_latent, + # 'inpaint_engine': advanced_parameters.inpaint_engine, + # 'inpaint_strength': advanced_parameters.inpaint_strength, + # 'inpaint_respective_field': advanced_parameters.inpaint_respective_field, + # } + # + # if 'cn' in goals: + # metadata |= { + # 'canny_low_threshold': advanced_parameters.canny_low_threshold, + # 'canny_high_threshold': advanced_parameters.canny_high_threshold, + # } + # + # ip_list = {x: [] for x in flags.ip_list} + # cn_task_index = 1 + # for cn_type in ip_list: + # for cn_task in cn_tasks[cn_type]: + # cn_img, cn_stop, cn_weight = cn_task + # metadata |= { + # f'image_prompt_{cn_task_index}': { + # 'cn_type': cn_type, 'cn_stop': cn_stop, 'cn_weight': cn_weight, + # } + # } + # cn_task_index += 1 + # + # metadata |= { + # 'software': f'Fooocus v{fooocus_version.version}', + # } + # if modules.config.metadata_created_by != '': + # metadata |= { + # 'created_by': modules.config.metadata_created_by + # } + # # return json.dumps(metadata, ensure_ascii=True) TODO check if possible + # return json.dumps(metadata, ensure_ascii=False) + + +def get_metadata_parser(metadata_scheme: str) -> MetadataParser: + match metadata_scheme: + case MetadataScheme.FOOOCUS.value: + return FooocusMetadataParser() + case MetadataScheme.A1111.value: + return A1111MetadataParser() + case _: + raise NotImplementedError + +# IGNORED_INFO_KEYS = { +# 'jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif', +# 'loop', 'background', 'timestamp', 'duration', 'progressive', 'progression', +# 'icc_profile', 'chromaticity', 'photoshop', +# } + + +def read_info_from_image(filepath) -> tuple[str | None, dict, str | None]: + with Image.open(filepath) as image: + items = (image.info or {}).copy() + + parameters = items.pop('parameters', None) + if parameters is not None and is_json(parameters): + parameters = json.loads(parameters) + + metadata_scheme = items.pop('fooocus_scheme', None) + + # if "exif" in items: + # exif_data = items["exif"] + # try: + # 
exif = piexif.load(exif_data) + # except OSError: + # # memory / exif was not valid so piexif tried to read from a file + # exif = None + # exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'') + # try: + # exif_comment = piexif.helper.UserComment.load(exif_comment) + # except ValueError: + # exif_comment = exif_comment.decode('utf8', errors="ignore") + # + # if exif_comment: + # items['exif comment'] = exif_comment + # parameters = exif_comment + + # for field in IGNORED_INFO_KEYS: + # items.pop(field, None) + + # if items.get("Software", None) == "NovelAI": + # try: + # json_info = json.loads(items["Comment"]) + # sampler = sd_samplers.samplers_map.get(json_info["sampler"], "Euler a") + # + # geninfo = f"""{items["Description"]} + # Negative prompt: {json_info["uc"]} + # Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337""" + # except Exception: + # errors.report("Error parsing NovelAI image generation parameters", + # exc_info=True) + + return parameters, items, metadata_scheme diff --git a/modules/private_logger.py b/modules/private_logger.py index 223463b3..3e186e1a 100644 --- a/modules/private_logger.py +++ b/modules/private_logger.py @@ -7,6 +7,7 @@ import urllib.parse from PIL import Image from PIL.PngImagePlugin import PngInfo from modules.util import generate_temp_filename +from modules.metadata import MetadataScheme log_cache = {} @@ -19,7 +20,9 @@ def get_current_html_path(): return html_name -def log(img, dic, metadata=None, save_metadata_to_image=False): +def log(img, metadata, save_metadata_to_image=False, metadata_scheme: str = MetadataScheme.FOOOCUS.value): + assert metadata_scheme in [item.value for item in MetadataScheme] + if args_manager.args.disable_image_log: return @@ -27,8 +30,12 @@ def log(img, dic, metadata=None, save_metadata_to_image=False): os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True) if save_metadata_to_image: + metadata_parser = modules.metadata.get_metadata_parser(metadata_scheme) + parsed_parameters = metadata_parser.parse_string(metadata) + pnginfo = PngInfo() - pnginfo.add_text('parameters', metadata) + pnginfo.add_text('parameters', parsed_parameters) + pnginfo.add_text('fooocus_scheme', metadata_scheme) else: pnginfo = None Image.fromarray(img).save(local_temp_filename, pnginfo=pnginfo) @@ -40,7 +47,7 @@ def log(img, dic, metadata=None, save_metadata_to_image=False): "body { background-color: #121212; color: #E0E0E0; } " "a { color: #BB86FC; } " ".metadata { border-collapse: collapse; width: 100%; } " - ".metadata .key { width: 15%; } " + ".metadata .label { width: 15%; } " ".metadata .value { width: 85%; font-weight: bold; } " ".metadata th, .metadata td { border: 1px solid #4d4d4d; padding: 4px; } " ".image-container img { height: auto; max-width: 512px; display: block; padding-right:10px; } " @@ -93,12 +100,13 @@ def log(img, dic, metadata=None, save_metadata_to_image=False): item = f"
<div id=\"{div_name}\" class=\"image-container\"><hr><table><tr>\n"
         item += f"<td><a href=\"{urllib.parse.quote(only_name)}\" target=\"_blank\"><img src='{urllib.parse.quote(only_name)}' onerror=\"this.closest('.image-container').style.display='none';\" loading='lazy'></img></a><div>{only_name}</div></td>"
         item += "<td><table class='metadata'>"
-        for key, value in dic:
-            value_txt = str(value).replace('\n', ' </br> ')
-            item += f"<tr><td class='key'>{key}</td><td class='value'>{value_txt}</td></tr>\n"
+        for label, key, value, show_in_log, show_in_metadata in metadata:
+            if show_in_log:
+                value_txt = str(value).replace('\n', ' </br> ')
+                item += f"<tr><td class='label'>{label}</td><td class='value'>{value_txt}</td></tr>\n"
         item += "</table></td>"
         item += "</tr></table></div>\n"
diff --git a/modules/util.py b/modules/util.py
index 89270138..8e304d6c 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -178,6 +178,7 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None):

     return sorted(filenames, key=lambda x: -1 if os.sep in x else 1)

+
 def calculate_sha256(filename):
     hash_sha256 = sha256()
     blksize = 1024 * 1024
@@ -188,8 +189,18 @@
     return hash_sha256.hexdigest()

+
 def quote(text):
     if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
         return text
-    return json.dumps(text, ensure_ascii=False)
\ No newline at end of file
+    return json.dumps(text, ensure_ascii=False)
+
+
+def is_json(data: str) -> bool:
+    try:
+        loaded_json = json.loads(data)
+        assert isinstance(loaded_json, dict)
+    except ValueError:
+        return False
+    return True
diff --git a/webui.py b/webui.py
index 21ed6210..0688215a 100644
--- a/webui.py
+++ b/webui.py
@@ -14,6 +14,7 @@ import modules.gradio_hijack as grh
 import modules.advanced_parameters as advanced_parameters
 import modules.style_sorter as style_sorter
 import modules.meta_parser
+import modules.metadata
 import args_manager
 import copy

@@ -21,6 +22,7 @@ from modules.sdxl_styles import legal_style_names
 from modules.private_logger import get_current_html_path
 from modules.ui_gradio_extensions import reload_javascript
 from modules.auth import auth_enabled, check_auth
+from modules.util import is_json


 def generate_clicked(*args):
@@ -208,6 +210,28 @@
                                               value=flags.desc_type_photo)
                         desc_btn = gr.Button(value='Describe this Image into Prompt')
                         gr.HTML('\U0001F4D4 Document')
+                with gr.TabItem(label='Load Metadata') as load_tab:
+                    with gr.Column():
+                        metadata_input_image = grh.Image(label='Drag any image generated by Fooocus here', source='upload', type='filepath')
+                        metadata_json = gr.JSON(label='Metadata')
+                        metadata_import_button = gr.Button(value='Overwrite Input Values')
+
+                        def trigger_metadata_preview(filepath):
+                            parameters, items, metadata_scheme = modules.metadata.read_info_from_image(filepath)
+
+                            results = {}
+                            if parameters is not None:
+                                results['parameters'] = parameters
+                            if items:
+                                results['items'] = items
+                            if metadata_scheme is not None:
+                                results['metadata_scheme'] = metadata_scheme
+
+                            return results
+
+                        metadata_input_image.upload(trigger_metadata_preview, inputs=metadata_input_image,
+                                                    outputs=metadata_json)
+
         switch_js = "(x) => {if(x){viewer_to_bottom(100);viewer_to_bottom(500);}else{viewer_to_top();} return x;}"
         down_js = "() => {viewer_to_bottom();}"

@@ -548,14 +572,8 @@
         def parse_meta(raw_prompt_txt, is_generating):
             loaded_json = None
-            try:
-                if '{' in raw_prompt_txt:
-                    if '}' in raw_prompt_txt:
-                        if ':' in raw_prompt_txt:
-                            loaded_json = json.loads(raw_prompt_txt)
-                            assert isinstance(loaded_json, dict)
-            except:
-                loaded_json = None
+            if is_json(raw_prompt_txt):
+                loaded_json = json.loads(raw_prompt_txt)

             if loaded_json is None:
                 if is_generating:
@@ -567,31 +585,30 @@

         prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False)

-        load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=[
-            advanced_checkbox,
-            image_number,
-            prompt,
-            negative_prompt,
-            style_selections,
-            performance_selection,
-            aspect_ratios_selection,
-            overwrite_width,
-            overwrite_height,
-            sharpness,
-            guidance_scale,
-
adm_scaler_positive, - adm_scaler_negative, - adm_scaler_end, - base_model, - refiner_model, - refiner_switch, - sampler_name, - scheduler_name, - seed_random, - image_seed, - generate_button, - load_parameter_button - ] + lora_ctrls, queue=False, show_progress=False) + load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections, + performance_selection, aspect_ratios_selection, overwrite_width, overwrite_height, + sharpness, guidance_scale, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, + base_model, refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, + image_seed, generate_button, load_parameter_button] + lora_ctrls + + load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False) + + def trigger_metadata_import(filepath, state_is_generating): + parameters, items, metadata_scheme = modules.metadata.read_info_from_image(filepath) + + if parameters is None: + pass + + if metadata_scheme is None and isinstance(parameters, dict): + metadata_scheme = modules.metadata.MetadataScheme.FOOOCUS.value + + + metadata_parser = modules.metadata.get_metadata_parser(metadata_scheme) + parsed_parameters = metadata_parser.parse_json(parameters) + return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating) + + + metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False) generate_button.click(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), [], True), outputs=[stop_button, skip_button, generate_button, gallery, state_is_generating]) \ From ee21c2b6bc55beb5ec058df2dc8ed025a7208727 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Sun, 28 Jan 2024 18:02:00 +0100 Subject: [PATCH 05/52] feat: rename metadata tab and import button label --- webui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/webui.py b/webui.py index 0688215a..0ed8b379 100644 --- a/webui.py +++ b/webui.py @@ -210,11 +210,11 @@ with shared.gradio_root: value=flags.desc_type_photo) desc_btn = gr.Button(value='Describe this Image into Prompt') gr.HTML('\U0001F4D4 Document') - with gr.TabItem(label='Load Metadata') as load_tab: + with gr.TabItem(label='Metadata') as load_tab: with gr.Column(): metadata_input_image = grh.Image(label='Drag any image generated by Fooocus here', source='upload', type='filepath') metadata_json = gr.JSON(label='Metadata') - metadata_import_button = gr.Button(value='Overwrite Input Values') + metadata_import_button = gr.Button(value='Apply Metadata') def trigger_metadata_preview(filepath): parameters, items, metadata_scheme = modules.metadata.read_info_from_image(filepath) From e19596c2df7201c94cf81039570bb397e56fe59a Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Sun, 28 Jan 2024 18:04:40 +0100 Subject: [PATCH 06/52] feat: map basic information for scheme A1111 --- modules/async_worker.py | 25 +++++---- modules/metadata.py | 120 ++++++++++++++++++++++++++++++++++------ modules/util.py | 10 ++++ 3 files changed, 128 insertions(+), 27 deletions(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index ddef06c1..ea4018ed 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -203,16 +203,16 @@ def worker(): modules.patch.adm_scaler_end = 
advanced_parameters.adm_scaler_end = 0.0 steps = 8 - if save_metadata_to_images: - base_model_path = os.path.join(modules.config.path_checkpoints, base_model_name) - base_model_hash = calculate_sha256(base_model_path)[0:10] + base_model_path = os.path.join(modules.config.path_checkpoints, base_model_name) + base_model_hash = calculate_sha256(base_model_path)[0:10] - lora_hashes = [] - for (n, w) in loras: - if n != 'None': - lora_path = os.path.join(modules.config.path_loras, n) - lora_hashes.append(f'{n.split(".")[0]}: {calculate_sha256(lora_path)[0:10]}') - lora_hashes_string = ", ".join(lora_hashes) + refiner_model_path = os.path.join(modules.config.path_checkpoints, refiner_model_name) + refiner_model_hash = calculate_sha256(refiner_model_path)[0:10] if refiner_model_name != 'None' else '' + + lora_hashes = [] + for (n, w) in loras: + lora_path = os.path.join(modules.config.path_loras, n) if n != 'None' else '' + lora_hashes.append(calculate_sha256(lora_path)[0:10] if n != 'None' else '') modules.patch.adaptive_cfg = advanced_parameters.adaptive_cfg print(f'[Parameters] Adaptive CFG = {modules.patch.adaptive_cfg}') @@ -812,7 +812,9 @@ def worker(): modules.patch.negative_adm_scale, modules.patch.adm_scaler_end)), True, True), ('Base Model', 'base_model', base_model_name, True, True), + ('Base Model Hash', 'base_model_hash', base_model_hash, False, False), ('Refiner Model', 'refiner_model', refiner_model_name, True, True), + ('Refiner Model Hash', 'refiner_model_hash', refiner_model_hash, False, False), ('Refiner Switch', 'refiner_switch', refiner_switch, True, True), ('Sampler', 'sampler', sampler_name, True, True), ('Scheduler', 'scheduler', scheduler_name, True, True), @@ -821,8 +823,9 @@ def worker(): for li, (n, w) in enumerate(loras): if n != 'None': d.append((f'LoRA {li + 1}', f'lora{li + 1}_combined', f'{n} : {w}', True, True)) - # d.append((f'LoRA {li + 1} Name', f'lora{li + 1}_name', n, False, False)) - # d.append((f'LoRA {li + 1} Weight', f'lora{li + 1}_weight', n, False, False)) + d.append((f'LoRA {li + 1} Name', f'lora_name_{li + 1}', n, False, False)) + d.append((f'LoRA {li + 1} Weight', f'lora_weight_{li + 1}', w, False, False)) + d.append((f'LoRA {li + 1} Hash', f'lora_hash_{li + 1}', lora_hashes[li], False, False)) d.append(('Version', 'version', 'v' + fooocus_version.version, True, True)) log(x, d, save_metadata_to_images, metadata_scheme) diff --git a/modules/metadata.py b/modules/metadata.py index 397f7333..4cdf534a 100644 --- a/modules/metadata.py +++ b/modules/metadata.py @@ -1,4 +1,5 @@ import json +import re from abc import ABC, abstractmethod from enum import Enum from PIL import Image @@ -6,7 +7,11 @@ from PIL import Image import modules.config import fooocus_version # import advanced_parameters -from modules.util import quote, is_json +from modules.util import quote, unquote, is_json + +re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)' +re_param = re.compile(re_param_code) +re_imagesize = re.compile(r"^(\d+)x(\d+)$") class MetadataScheme(Enum): @@ -16,7 +21,7 @@ class MetadataScheme(Enum): class MetadataParser(ABC): @abstractmethod - def parse_json(self, metadata: dict): + def parse_json(self, metadata: dict) -> dict: raise NotImplementedError # TODO add data to parse @@ -27,9 +32,67 @@ class MetadataParser(ABC): class A1111MetadataParser(MetadataParser): - def parse_json(self, metadata: dict): + fooocus_to_a1111 = { + 'negative_prompt': 'Negative prompt', + 'steps': 'Steps', + 'sampler': 'Sampler', + 'guidance_scale': 'CFG scale', + 
'seed': 'Seed', + 'resolution': 'Size', + 'base_model': 'Model', + 'base_model_hash': 'Model hash', + 'refiner_model': 'Refiner', + 'refiner_model_hash': 'Refiner hash', + 'lora_hashes': 'Lora hashes', + 'version': 'Version' + } + + def parse_json(self, metadata: str) -> dict: # TODO add correct mapping - pass + + prompt = '' + negative_prompt = '' + + done_with_prompt = False + + *lines, lastline = metadata.strip().split("\n") + if len(re_param.findall(lastline)) < 3: + lines.append(lastline) + lastline = '' + + for line in lines: + line = line.strip() + if line.startswith(f"{self.fooocus_to_a1111['negative_prompt']}:"): + done_with_prompt = True + line = line[len(f"{self.fooocus_to_a1111['negative_prompt']}:"):].strip() + if done_with_prompt: + negative_prompt += ('' if negative_prompt == '' else "\n") + line + else: + prompt += ('' if prompt == '' else "\n") + line + + + data = { + 'prompt': prompt, + 'negative_prompt': negative_prompt + } + + for k, v in re_param.findall(lastline): + try: + if v[0] == '"' and v[-1] == '"': + v = unquote(v) + + m = re_imagesize.match(v) + if m is not None: + # TODO check + data[f"{k}-1"] = m.group(1) + data[f"{k}-2"] = m.group(2) + else: + key = list(self.fooocus_to_a1111.keys())[list(self.fooocus_to_a1111.values()).index(k)] + data[key] = v + except Exception: + print(f"Error parsing \"{k}: {v}\"") + + return data def parse_string(self, metadata: dict) -> str: # TODO add correct mapping @@ -39,30 +102,54 @@ class A1111MetadataParser(MetadataParser): # TODO check if correct width, heigth = data['resolution'].split(', ') + lora_hashes = [] + for index in range(5): + name = f'lora_name_{index + 1}' + if name in data: + # weight = f'lora_weight_{index}' + hash = data[f'lora_hash_{index + 1}'] + lora_hashes.append(f'{name.split(".")[0]}: {hash}') + lora_hashes_string = ", ".join(lora_hashes) + + # set static defaults generation_params = { - "Steps": data['steps'], - "Sampler": data['sampler'], - "CFG scale": data['guidance_scale'], - "Seed": data['seed'], - "Size": f"{width}x{heigth}", - # "Model hash": base_model_hash, - "Model": data['base_model'].split('.')[0], - # "Lora hashes": lora_hashes_string, + 'styles': [], + } + + generation_params |= { + self.fooocus_to_a1111['steps']: data['steps'], + self.fooocus_to_a1111['sampler']: data['sampler'], + self.fooocus_to_a1111['guidance_scale']: data['guidance_scale'], + self.fooocus_to_a1111['seed']: data['seed'], + self.fooocus_to_a1111['resolution']: f'{width}x{heigth}', + self.fooocus_to_a1111['base_model']: data['base_model'].split('.')[0], + self.fooocus_to_a1111['base_model_hash']: data['base_model_hash'] + } + + if 'refiner_model' in data and data['refiner_model'] != 'None' and 'refiner_model_hash' in data: + generation_params |= { + self.fooocus_to_a1111['refiner_model']: data['refiner_model'].split('.')[0], + self.fooocus_to_a1111['refiner_model_hash']: data['refiner_model_hash'], + } + + generation_params |= { + self.fooocus_to_a1111['lora_hashes']: lora_hashes_string, # "Denoising strength": data['denoising_strength'], - "Version": f"Fooocus {data['version']}" + self.fooocus_to_a1111['version']: f"Fooocus {data['version']}" } generation_params_text = ", ".join( [k if k == v else f'{k}: {quote(v)}' for k, v in generation_params.items() if v is not None]) - positive_prompt_resolved = ', '.join(data['full_prompt']) - negative_prompt_resolved = ', '.join(data['full_negative_prompt']) + # TODO check if multiline positive prompt is correctly processed + positive_prompt_resolved = ', 
'.join(data['full_prompt']) #TODO add loras to positive prompt if even possible + negative_prompt_resolved = ', '.join(data['full_negative_prompt']) #TODO add loras to positive prompt if even possible negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else "" return f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip() class FooocusMetadataParser(MetadataParser): - def parse_json(self, metadata: dict): + def parse_json(self, metadata: dict) -> dict: # TODO add mapping if necessary return metadata @@ -140,6 +227,7 @@ class FooocusMetadataParser(MetadataParser): # metadata |= { # 'software': f'Fooocus v{fooocus_version.version}', # } + # TODO add metadata_created_by # if modules.config.metadata_created_by != '': # metadata |= { # 'created_by': modules.config.metadata_created_by diff --git a/modules/util.py b/modules/util.py index 8e304d6c..f7fcc4e7 100644 --- a/modules/util.py +++ b/modules/util.py @@ -197,6 +197,16 @@ def quote(text): return json.dumps(text, ensure_ascii=False) +def unquote(text): + if len(text) == 0 or text[0] != '"' or text[-1] != '"': + return text + + try: + return json.loads(text) + except Exception: + return text + + def is_json(data: str) -> bool: try: loaded_json = json.loads(data) From 7ddd4e5209218f40a2f7ef0872599cf4c79e6e46 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Sun, 28 Jan 2024 19:58:36 +0100 Subject: [PATCH 07/52] wip: optimize handling for metadata in Gradio calls --- webui.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/webui.py b/webui.py index 0ed8b379..32fddb98 100644 --- a/webui.py +++ b/webui.py @@ -595,14 +595,11 @@ with shared.gradio_root: def trigger_metadata_import(filepath, state_is_generating): parameters, items, metadata_scheme = modules.metadata.read_info_from_image(filepath) - + # TODO check what happens if metadata_scheme is empty and A1111 string if parameters is None: + print('Could not find metadata in the image!') pass - if metadata_scheme is None and isinstance(parameters, dict): - metadata_scheme = modules.metadata.MetadataScheme.FOOOCUS.value - - metadata_parser = modules.metadata.get_metadata_parser(metadata_scheme) parsed_parameters = metadata_parser.parse_json(parameters) return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating) From cbc63ebba38042473bf2803b192d58994d03fe1c Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Sun, 28 Jan 2024 20:01:33 +0100 Subject: [PATCH 08/52] feat: add enums for Performance, Steps and StepsUOV also move MetadataSchema enum to prevent circular dependency --- modules/async_worker.py | 34 ++++++------------------- modules/config.py | 6 ++--- modules/flags.py | 40 +++++++++++++++++++++++++++-- modules/metadata.py | 53 ++++++++++++++++++++++++++++----------- modules/private_logger.py | 6 ++--- webui.py | 6 +++-- 6 files changed, 93 insertions(+), 52 deletions(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index ea4018ed..e6ec8c39 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -44,7 +44,7 @@ def worker(): from modules.util import remove_empty_str, HWC3, resize_image, \ get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, calculate_sha256 from modules.upscaler import perform_upscale - from modules.metadata import MetadataScheme + from modules.flags import Performance, MetadataScheme try: async_gradio_app = shared.gradio_root @@ -125,7 +125,7 @@ def worker(): prompt = 
args.pop() negative_prompt = args.pop() style_selections = args.pop() - performance_selection = args.pop() + performance_selection = Performance(args.pop()) aspect_ratios_selection = args.pop() image_number = args.pop() image_seed = args.pop() @@ -144,8 +144,7 @@ def worker(): inpaint_additional_prompt = args.pop() inpaint_mask_image_upload = args.pop() save_metadata_to_images = args.pop() if not args_manager.args.disable_metadata else False - metadata_scheme = args.pop() if not args_manager.args.disable_metadata else MetadataScheme.FOOOCUS.value - assert metadata_scheme in [item.value for item in MetadataScheme] + metadata_scheme = MetadataScheme(args.pop()) if not args_manager.args.disable_metadata else MetadataScheme.FOOOCUS cn_tasks = {x: [] for x in flags.ip_list} for _ in range(4): @@ -173,17 +172,9 @@ def worker(): print(f'Refiner disabled because base model and refiner are same.') refiner_model_name = 'None' - assert performance_selection in ['Speed', 'Quality', 'Extreme Speed'] + steps = performance_selection.steps() - steps = 30 - - if performance_selection == 'Speed': - steps = 30 - - if performance_selection == 'Quality': - steps = 60 - - if performance_selection == 'Extreme Speed': + if performance_selection == Performance.EXTREME_SPEED: print('Enter LCM mode.') progressbar(async_task, 1, 'Downloading LCM components ...') loras += [(modules.config.downloading_sdxl_lcm_lora(), 1.0)] @@ -201,7 +192,6 @@ def worker(): modules.patch.positive_adm_scale = advanced_parameters.adm_scaler_positive = 1.0 modules.patch.negative_adm_scale = advanced_parameters.adm_scaler_negative = 1.0 modules.patch.adm_scaler_end = advanced_parameters.adm_scaler_end = 0.0 - steps = 8 base_model_path = os.path.join(modules.config.path_checkpoints, base_model_name) base_model_hash = calculate_sha256(base_model_path)[0:10] @@ -274,16 +264,7 @@ def worker(): if 'fast' in uov_method: skip_prompt_processing = True else: - steps = 18 - - if performance_selection == 'Speed': - steps = 18 - - if performance_selection == 'Quality': - steps = 36 - - if performance_selection == 'Extreme Speed': - steps = 8 + steps = performance_selection.steps_uov() progressbar(async_task, 1, 'Downloading upscale models ...') modules.config.downloading_upscale_model() @@ -802,11 +783,12 @@ def worker(): ('Full Negative Prompt', 'full_negative_prompt', task['negative'], False, False), ('Fooocus V2 Expansion', 'prompt_expansion', task['expansion'], True, True), ('Styles', 'styles', str(raw_style_selections), True, True), - ('Performance', 'performance', performance_selection, True, True), + ('Performance', 'performance', performance_selection.value, True, True), ('Steps', 'steps', steps, False, False), ('Resolution', 'resolution', str((width, height)), True, True), ('Sharpness', 'sharpness', sharpness, True, True), ('Guidance Scale', 'guidance_scale', guidance_scale, True, True), + # ('Denoising Strength', 'denoising_strength', denoising_strength, False, False), ('ADM Guidance', 'adm_guidance', str(( modules.patch.positive_adm_scale, modules.patch.negative_adm_scale, diff --git a/modules/config.py b/modules/config.py index 4a9b6837..7b42ed62 100644 --- a/modules/config.py +++ b/modules/config.py @@ -6,9 +6,9 @@ import args_manager import modules.flags import modules.sdxl_styles -from modules.metadata import MetadataScheme from modules.model_loader import load_file_from_url from modules.util import get_files_from_folder +from modules.flags import Performance, MetadataScheme config_path = os.path.abspath("./config.txt") @@ -236,8 +236,8 
@@ default_prompt = get_config_item_or_set_default(
 )
 default_performance = get_config_item_or_set_default(
     key='default_performance',
-    default_value='Speed',
-    validator=lambda x: x in modules.flags.performance_selections
+    default_value=Performance.SPEED.value,
+    validator=lambda x: x in Performance.list()
 )
 default_advanced_checkbox = get_config_item_or_set_default(
     key='default_advanced_checkbox',
diff --git a/modules/flags.py b/modules/flags.py
index abcd3f60..5b22c5ec 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -1,4 +1,4 @@
-from modules.metadata import MetadataScheme
+from enum import Enum

 disabled = 'Disabled'
 enabled = 'Enabled'
@@ -34,6 +34,12 @@ default_parameters = {
     cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0)
 }  # stop, weight

+
+class MetadataScheme(Enum):
+    FOOOCUS = 'fooocus'
+    A1111 = 'a1111'
+
+
 # TODO use translation here
 metadata_scheme = [
     ('Fooocus (json)', MetadataScheme.FOOOCUS.value),
@@ -41,7 +47,37 @@ metadata_scheme = [
 ]

 inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6']
-performance_selections = ['Speed', 'Quality', 'Extreme Speed']
+
+
+class Steps(Enum):
+    QUALITY = 60
+    SPEED = 30
+    EXTREME_SPEED = 8
+
+
+class StepsUOV(Enum):
+    QUALITY = 36
+    SPEED = 18
+    EXTREME_SPEED = 8
+
+
+class Performance(Enum):
+    QUALITY = 'Quality'
+    SPEED = 'Speed'
+    EXTREME_SPEED = 'Extreme Speed'
+
+    @classmethod
+    def list(cls) -> list:
+        return list(map(lambda c: c.value, cls))
+
+    def steps(self) -> int:
+        return Steps[self.name].value if Steps[self.name] else None
+
+    def steps_uov(self) -> int:
+        return StepsUOV[self.name].value if StepsUOV[self.name] else None
+
+
+performance_selections = Performance.list()

 inpaint_option_default = 'Inpaint or Outpaint (default)'
 inpaint_option_detail = 'Improve Detail (face, hand, eyes, etc.)'
 inpaint_option_modify = 'Modify Content (add objects, change background, etc.)'
 inpaint_options = [inpaint_option_default, inpaint_option_detail, inpaint_option_modify]
diff --git a/modules/metadata.py b/modules/metadata.py
index 4cdf534a..79a37719 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -8,17 +8,13 @@ import modules.config
 import fooocus_version
 # import advanced_parameters
 from modules.util import quote, unquote, is_json
+from modules.flags import MetadataScheme, Performance, Steps

 re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)'
 re_param = re.compile(re_param_code)
 re_imagesize = re.compile(r"^(\d+)x(\d+)$")

-class MetadataScheme(Enum):
-    FOOOCUS = 'fooocus'
-    A1111 = 'a1111'
-
-
 class MetadataParser(ABC):
     @abstractmethod
     def parse_json(self, metadata: dict) -> dict:
@@ -70,6 +66,14 @@ class A1111MetadataParser(MetadataParser):
             else:
                 prompt += ('' if prompt == '' else "\n") + line

+        # if shared.opts.infotext_styles != "Ignore":
+        #     found_styles, prompt, negative_prompt = shared.prompt_styles.extract_styles_from_prompt(prompt,
+        #                                                                                             negative_prompt)
+        #
+        #     if shared.opts.infotext_styles == "Apply":
+        #         res["Styles array"] = found_styles
+        #     elif shared.opts.infotext_styles == "Apply if any" and found_styles:
+        #         res["Styles array"] = found_styles

         data = {
             'prompt': prompt,
@@ -87,11 +91,17 @@ class A1111MetadataParser(MetadataParser):
                     data[f"{k}-1"] = m.group(1)
                     data[f"{k}-2"] = m.group(2)
                 else:
-                    key = list(self.fooocus_to_a1111.keys())[list(self.fooocus_to_a1111.values()).index(k)]
-                    data[key] = v
+                    data[list(self.fooocus_to_a1111.keys())[list(self.fooocus_to_a1111.values()).index(k)]] = v
             except Exception:
                 print(f"Error parsing \"{k}: {v}\"")

+        # try to load performance based on steps
+        if 'steps' in data:
+            try:
+                data['performance'] = Performance[Steps(int(data['steps'])).name].value
+            except Exception:
+                pass
+
         return data

     def parse_string(self, metadata: dict) -> str:
@@ -104,9 +114,10 @@ class A1111MetadataParser(MetadataParser):

         lora_hashes = []
         for index in range(5):
-            name = f'lora_name_{index + 1}'
-            if name in data:
-                # weight = f'lora_weight_{index}'
+            key = f'lora_name_{index + 1}'
+            if key in data:
+                name = data[f'lora_name_{index + 1}']
+                # weight = data[f'lora_weight_{index + 1}']
                 hash = data[f'lora_hash_{index + 1}']
                 lora_hashes.append(f'{name.split(".")[0]}: {hash}')
         lora_hashes_string = ", ".join(lora_hashes)
@@ -121,6 +132,7 @@ class A1111MetadataParser(MetadataParser):
             self.fooocus_to_a1111['sampler']: data['sampler'],
             self.fooocus_to_a1111['guidance_scale']: data['guidance_scale'],
             self.fooocus_to_a1111['seed']: data['seed'],
+            # TODO check resolution value, should be string
             self.fooocus_to_a1111['resolution']: f'{width}x{heigth}',
             self.fooocus_to_a1111['base_model']: data['base_model'].split('.')[0],
             self.fooocus_to_a1111['base_model_hash']: data['base_model_hash']
@@ -236,11 +248,11 @@ class FooocusMetadataParser(MetadataParser):
 #         return json.dumps(metadata, ensure_ascii=False)


-def get_metadata_parser(metadata_scheme: str) -> MetadataParser:
+def get_metadata_parser(metadata_scheme: MetadataScheme) -> MetadataParser:
     match metadata_scheme:
-        case MetadataScheme.FOOOCUS.value:
+        case MetadataScheme.FOOOCUS:
             return FooocusMetadataParser()
-        case MetadataScheme.A1111.value:
+        case MetadataScheme.A1111:
             return A1111MetadataParser()
         case _:
             raise NotImplementedError
@@ -252,7 +264,7 @@ def get_metadata_parser(metadata_scheme: str) -> MetadataParser:
 # }


-def read_info_from_image(filepath) -> tuple[str | None, dict, str | None]:
+def read_info_from_image(filepath) -> tuple[str | None, dict, MetadataScheme | None]:
     with Image.open(filepath) as image:
         items = (image.info or {}).copy()

     if parameters is not None and is_json(parameters):
         parameters = json.loads(parameters)

-    metadata_scheme = items.pop('fooocus_scheme', None)
+    try:
+        metadata_scheme = MetadataScheme(items.pop('fooocus_scheme', None))
+    except Exception:
+        metadata_scheme = None
+
+    # broad fallback
+    if metadata_scheme is None and isinstance(parameters, dict):
+        metadata_scheme = modules.metadata.MetadataScheme.FOOOCUS
+
+    if metadata_scheme is None and isinstance(parameters, str):
+        metadata_scheme = modules.metadata.MetadataScheme.A1111
+
+    # TODO code cleanup
     # if "exif" in items:
     #     exif_data = items["exif"]
     #     try:
diff --git a/modules/private_logger.py b/modules/private_logger.py
index 3e186e1a..1afcaa55 100644
--- a/modules/private_logger.py
+++ b/modules/private_logger.py
@@ -20,9 +20,7 @@ def get_current_html_path():
     return html_name


-def log(img, metadata, save_metadata_to_image=False, metadata_scheme: str = MetadataScheme.FOOOCUS.value):
-    assert metadata_scheme in [item.value for item in MetadataScheme]
-
+def log(img, metadata, save_metadata_to_image=False, metadata_scheme: MetadataScheme = MetadataScheme.FOOOCUS):
     if args_manager.args.disable_image_log:
         return

@@ -35,7 +33,7 @@ def log(img, metadata, save_metadata_to_image=False, metadata_scheme: MetadataSc

         pnginfo = PngInfo()
         pnginfo.add_text('parameters', parsed_parameters)
-        pnginfo.add_text('fooocus_scheme', metadata_scheme)
+        pnginfo.add_text('fooocus_scheme', metadata_scheme.value)
     else:
         pnginfo = None
     Image.fromarray(img).save(local_temp_filename, pnginfo=pnginfo)
diff --git a/webui.py b/webui.py
index 32fddb98..5c2d4a8c 100644
--- a/webui.py
+++ b/webui.py
@@ -222,10 +222,12 @@ with shared.gradio_root:
             results = {}
             if parameters is not None:
                 results['parameters'] = parameters
+
             if items:
                 results['items'] = items
-            if metadata_scheme is not None:
-                results['metadata_scheme'] = metadata_scheme
+
+            if isinstance(metadata_scheme, flags.MetadataScheme):
+                results['metadata_scheme'] = metadata_scheme.value

             return results

From 5dcb2bc57388f6b4621252934d1e563c8ead94e0 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Sun, 28 Jan 2024 20:42:58 +0100
Subject: [PATCH 09/52] fix: correctly map resolution, use empty styles for
 A1111

---
 modules/metadata.py | 23 +++++++++--------------
 1 file changed, 9 insertions(+), 14 deletions(-)

diff --git a/modules/metadata.py b/modules/metadata.py
index 79a37719..eb97498f 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -1,7 +1,6 @@
 import json
 import re
 from abc import ABC, abstractmethod
-from enum import Enum

 from PIL import Image

 import modules.config
@@ -30,6 +29,7 @@ class A1111MetadataParser(MetadataParser):

     fooocus_to_a1111 = {
         'negative_prompt': 'Negative prompt',
+        'styles': 'Styles',
         'steps': 'Steps',
         'sampler': 'Sampler',
         'guidance_scale': 'CFG scale',
@@ -44,8 +44,6 @@ class A1111MetadataParser(MetadataParser):
     }

     def parse_json(self, metadata: str) -> dict:
-        # TODO add correct mapping
-
         prompt = ''
         negative_prompt = ''

@@ -66,6 +64,10 @@ class A1111MetadataParser(MetadataParser):
             else:
                 prompt += ('' if prompt == '' else "\n") + line

+        # set defaults
+        data = {
+            'styles': '[]'
+        }

         # if shared.opts.infotext_styles != "Ignore":
         #     found_styles, prompt, negative_prompt = shared.prompt_styles.extract_styles_from_prompt(prompt,
         #                                                                                             negative_prompt)
         #
         #     if shared.opts.infotext_styles == "Apply":
         #         res["Styles array"] = found_styles
         #     elif shared.opts.infotext_styles == "Apply if any" and found_styles:
         #         res["Styles array"] = found_styles

-        data = {
+        data |= {
             'prompt': prompt,
-            'negative_prompt': negative_prompt
+            'negative_prompt': negative_prompt,
         }

         for k, v in re_param.findall(lastline):
@@ -87,9 +89,7 @@ class A1111MetadataParser(MetadataParser):

                 m = re_imagesize.match(v)
                 if m is not None:
-                    # TODO check
-                    data[f"{k}-1"] = m.group(1)
-                    data[f"{k}-2"] = m.group(2)
+                    data[f'resolution'] = str((m.group(1), m.group(2)))
                 else:
                     data[list(self.fooocus_to_a1111.keys())[list(self.fooocus_to_a1111.values()).index(k)]] = v
             except Exception:
@@ -110,7 +110,7 @@ class A1111MetadataParser(MetadataParser):
         data = {k: v for _, k, v, _, _ in metadata}

         # TODO check if correct
-        width, heigth = data['resolution'].split(', ')
+        width, heigth = eval(data['resolution'])

         lora_hashes = []
@@ -122,12 +122,7 @@ class A1111MetadataParser(MetadataParser):
                 lora_hashes.append(f'{name.split(".")[0]}: {hash}')
         lora_hashes_string = ", ".join(lora_hashes)

-        # set static defaults
         generation_params = {
-            'styles': [],
-        }
-
-        generation_params |= {
             self.fooocus_to_a1111['steps']: data['steps'],
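To see how the Performance/Steps enums introduced above behave, here is a minimal self-contained sketch; the enum definitions are copied from the modules/flags.py hunk, and nothing in it is part of the patch series itself:

    from enum import Enum

    class Steps(Enum):
        QUALITY = 60
        SPEED = 30
        EXTREME_SPEED = 8

    class Performance(Enum):
        QUALITY = 'Quality'
        SPEED = 'Speed'
        EXTREME_SPEED = 'Extreme Speed'

    # Enum lookup by value, then by name: Steps(30) is Steps.SPEED,
    # so Performance[Steps(30).name] is Performance.SPEED.
    print(Performance[Steps(int('30')).name].value)  # prints: Speed

    # A step count with no matching preset raises ValueError, which is why
    # the A1111 parser wraps the lookup in try/except and skips 'performance'.
    try:
        Steps(31)
    except ValueError:
        print('no matching performance preset')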
From 236278948b0ee133dedea7e39670c029b5446dda Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Sun, 28 Jan 2024 23:50:56 +0100
Subject: [PATCH 10/52] chore: code cleanup

---
 modules/config.py |  1 -
 modules/flags.py  | 19 +++++++++----------
 2 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/modules/config.py b/modules/config.py
index 7b42ed62..6c5da0e9 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -10,7 +10,6 @@ from modules.model_loader import load_file_from_url
 from modules.util import get_files_from_folder
 from modules.flags import Performance, MetadataScheme
-
 config_path = os.path.abspath("./config.txt")
 config_example_path = os.path.abspath("config_modification_tutorial.txt")
 config_dict = {}
diff --git a/modules/flags.py b/modules/flags.py
index 5b22c5ec..5008d5cd 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -34,6 +34,15 @@ default_parameters = {
     cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0)
 }  # stop, weight

+inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6']
+inpaint_option_default = 'Inpaint or Outpaint (default)'
+inpaint_option_detail = 'Improve Detail (face, hand, eyes, etc.)'
+inpaint_option_modify = 'Modify Content (add objects, change background, etc.)'
+inpaint_options = [inpaint_option_default, inpaint_option_detail, inpaint_option_modify]
+
+desc_type_photo = 'Photograph'
+desc_type_anime = 'Art/Anime'
+

 class MetadataScheme(Enum):
     FOOOCUS = 'fooocus'
@@ -46,8 +55,6 @@ metadata_scheme = [
     ('A1111 (plain text)', MetadataScheme.A1111.value),
 ]

-inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6']
-

 class Steps(Enum):
     QUALITY = 60
@@ -78,11 +85,3 @@ class Performance(Enum):


 performance_selections = Performance.list()
-
-inpaint_option_default = 'Inpaint or Outpaint (default)'
-inpaint_option_detail = 'Improve Detail (face, hand, eyes, etc.)'
-inpaint_option_modify = 'Modify Content (add objects, change background, etc.)'
-inpaint_options = [inpaint_option_default, inpaint_option_detail, inpaint_option_modify]
-
-desc_type_photo = 'Photograph'
-desc_type_anime = 'Art/Anime'

From 5e84a45e22eb969e7dfce8e31f7c3b44b318627b Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Sun, 28 Jan 2024 23:52:06 +0100
Subject: [PATCH 11/52] feat: add A1111 prompt style detection

only detects one style as Fooocus doesn't wrap {prompt} with the whole
style, but has a separate prompt string for each style
---
 modules/metadata.py | 17 +++-----
 modules/util.py     | 99 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 104 insertions(+), 12 deletions(-)

diff --git a/modules/metadata.py b/modules/metadata.py
index eb97498f..0d585f1c 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -6,7 +6,7 @@
 import modules.config
 import fooocus_version
 # import advanced_parameters
-from modules.util import quote, unquote, is_json
+from modules.util import quote, unquote, extract_styles_from_prompt, is_json
 from modules.flags import MetadataScheme, Performance, Steps

@@ -65,17 +65,10 @@ class A1111MetadataParser(MetadataParser):
             else:
                 prompt += ('' if prompt == '' else "\n") + line

         # set defaults
-        data = {
-            'styles': '[]'
-        }
+        data = {}

-        # if shared.opts.infotext_styles != "Ignore":
-        #     found_styles, prompt, negative_prompt = shared.prompt_styles.extract_styles_from_prompt(prompt,
-        #                                                                                             negative_prompt)
-        #
-        #     if shared.opts.infotext_styles == "Apply":
-        #         res["Styles array"] = found_styles
-        #     elif shared.opts.infotext_styles == "Apply if any" and found_styles:
-        #         res["Styles array"] = found_styles
+        found_styles, prompt, negative_prompt = extract_styles_from_prompt(prompt, negative_prompt)
+        data['styles'] = str(found_styles)

         data |= {
             'prompt': prompt,
diff --git a/modules/util.py b/modules/util.py
index f7fcc4e7..a6804a9a 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -1,3 +1,5 @@
+import typing
+
 import numpy as np
 import datetime
 import random
@@ -9,6 +11,7 @@ import json

 from PIL import Image
 from hashlib import sha256
+import modules.sdxl_styles

 LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)

@@ -207,6 +210,102 @@ def unquote(text):
     return text


+def unwrap_style_text_from_prompt(style_text, prompt):
+    """
+    Checks the prompt to see if the style text is wrapped around it. If so,
+    returns True plus the prompt text without the style text. Otherwise, returns
+    False with the original prompt.
+
+    Note that the "cleaned" version of the style text is only used for matching
+    purposes here. It isn't returned; the original style text is not modified.
+    """
+    stripped_prompt = prompt
+    stripped_style_text = style_text
+    if "{prompt}" in stripped_style_text:
+        # Work out whether the prompt is wrapped in the style text. If so, we
+        # return True and the "inner" prompt text that isn't part of the style.
+        try:
+            left, right = stripped_style_text.split("{prompt}", 2)
+        except ValueError as e:
+            # If the style text has multiple "{prompt}"s, we can't split it into
+            # two parts. This is an error, but we can't do anything about it.
+            print(f"Unable to compare style text to prompt:\n{style_text}")
+            print(f"Error: {e}")
+            return False, prompt
+        if stripped_prompt.startswith(left) and stripped_prompt.endswith(right):
+            prompt = stripped_prompt[len(left) : len(stripped_prompt) - len(right)]
+            return True, prompt
+    else:
+        # Work out whether the given prompt ends with the style text. If so, we
+        # return True and the prompt text up to where the style text starts.
+        if stripped_prompt.endswith(stripped_style_text):
+            prompt = stripped_prompt[: len(stripped_prompt) - len(stripped_style_text)]
+            if prompt.endswith(", "):
+                prompt = prompt[:-2]
+            return True, prompt
+
+    return False, prompt
+
+
+def extract_original_prompts(style, prompt, negative_prompt):
+    """
+    Takes a style and compares it to the prompt and negative prompt. If the style
+    matches, returns True plus the prompt and negative prompt with the style text
+    removed. Otherwise, returns False with the original prompt and negative prompt.
+    """
+    if not style.prompt and not style.negative_prompt:
+        return False, prompt, negative_prompt
+
+    match_positive, extracted_positive = unwrap_style_text_from_prompt(
+        style.prompt, prompt
+    )
+    if not match_positive:
+        return False, prompt, negative_prompt
+
+    match_negative, extracted_negative = unwrap_style_text_from_prompt(
+        style.negative_prompt, negative_prompt
+    )
+    if not match_negative:
+        return False, prompt, negative_prompt
+
+    return True, extracted_positive, extracted_negative
+
+
+def extract_styles_from_prompt(prompt, negative_prompt):
+    extracted = []
+    applicable_styles = []
+
+    for style_name, (style_prompt, style_negative_prompt) in modules.sdxl_styles.styles.items():
+        applicable_styles.append(PromptStyle(name=style_name, prompt=style_prompt, negative_prompt=style_negative_prompt))
+
+    while True:
+        found_style = None
+
+        for style in applicable_styles:
+            is_match, new_prompt, new_neg_prompt = extract_original_prompts(
+                style, prompt, negative_prompt
+            )
+            if is_match:
+                found_style = style
+                prompt = new_prompt
+                negative_prompt = new_neg_prompt
+                break
+
+        if not found_style:
+            break
+
+        applicable_styles.remove(found_style)
+        extracted.append(found_style.name)
+
+    return list(reversed(extracted)), prompt, negative_prompt
+
+
+class PromptStyle(typing.NamedTuple):
+    name: str
+    prompt: str
+    negative_prompt: str
+
+
 def is_json(data: str) -> bool:
     try:
         loaded_json = json.loads(data)
From f94b96f6eb7aae9103cef7fe64ff7013dffaf49f Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 01:52:12 +0100
Subject: [PATCH 12/52] wip: add prompt style extraction for A1111 scheme

---
 modules/util.py | 47 +++++++++++++++++++++++++++++++++--------------
 1 file changed, 33 insertions(+), 14 deletions(-)

diff --git a/modules/util.py b/modules/util.py
index a6804a9a..4a709c31 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -231,20 +231,28 @@ def unwrap_style_text_from_prompt(style_text, prompt):
             # two parts. This is an error, but we can't do anything about it.
             print(f"Unable to compare style text to prompt:\n{style_text}")
             print(f"Error: {e}")
-            return False, prompt
-        if stripped_prompt.startswith(left) and stripped_prompt.endswith(right):
-            prompt = stripped_prompt[len(left) : len(stripped_prompt) - len(right)]
-            return True, prompt
+            return False, prompt, ''
+
+        left_pos = stripped_prompt.find(left)
+        right_pos = stripped_prompt.find(right)
+        if 0 <= left_pos < right_pos:
+            real_prompt = stripped_prompt[left_pos + len(left):right_pos]
+            prompt = stripped_prompt.replace(left + real_prompt + right, '', 1)
+            if prompt.startswith(", "):
+                prompt = prompt[2:]
+            if prompt.endswith(", "):
+                prompt = prompt[:-2]
+            return True, prompt, real_prompt
     else:
-        # Work out whether the given prompt ends with the style text. If so, we
+        # Work out whether the given prompt starts with the style text. If so, we
         # return True and the prompt text up to where the style text starts.
         if stripped_prompt.endswith(stripped_style_text):
             prompt = stripped_prompt[: len(stripped_prompt) - len(stripped_style_text)]
             if prompt.endswith(", "):
                 prompt = prompt[:-2]
-            return True, prompt
+            return True, prompt, prompt

-    return False, prompt
+    return False, prompt, ''


 def extract_original_prompts(style, prompt, negative_prompt):
@@ -256,19 +264,19 @@ def extract_original_prompts(style, prompt, negative_prompt):
     if not style.prompt and not style.negative_prompt:
         return False, prompt, negative_prompt

-    match_positive, extracted_positive = unwrap_style_text_from_prompt(
+    match_positive, extracted_positive, real_prompt = unwrap_style_text_from_prompt(
         style.prompt, prompt
     )
     if not match_positive:
-        return False, prompt, negative_prompt
+        return False, prompt, negative_prompt, ''

-    match_negative, extracted_negative = unwrap_style_text_from_prompt(
+    match_negative, extracted_negative, _ = unwrap_style_text_from_prompt(
         style.negative_prompt, negative_prompt
     )
     if not match_negative:
-        return False, prompt, negative_prompt
+        return False, prompt, negative_prompt, ''

-    return True, extracted_positive, extracted_negative
+    return True, extracted_positive, extracted_negative, real_prompt


 def extract_styles_from_prompt(prompt, negative_prompt):
@@ -278,17 +286,22 @@ def extract_styles_from_prompt(prompt, negative_prompt):
     for style_name, (style_prompt, style_negative_prompt) in modules.sdxl_styles.styles.items():
         applicable_styles.append(PromptStyle(name=style_name, prompt=style_prompt, negative_prompt=style_negative_prompt))

+    real_prompt = ''
+
     while True:
         found_style = None

         for style in applicable_styles:
-            is_match, new_prompt, new_neg_prompt = extract_original_prompts(
+            is_match, new_prompt, new_neg_prompt, new_real_prompt = extract_original_prompts(
                 style, prompt, negative_prompt
             )
             if is_match:
                 found_style = style
                 prompt = new_prompt
                 negative_prompt = new_neg_prompt
+                # TODO this is a bit hacky tbh but works perfectly fine, check if all conditions are needed
+                if real_prompt == '' and new_real_prompt != '' and new_real_prompt != prompt:
+                    real_prompt = new_real_prompt
                 break

         if not found_style:
@@ -297,7 +310,13 @@ def extract_styles_from_prompt(prompt, negative_prompt):
         applicable_styles.remove(found_style)
         extracted.append(found_style.name)

-    return list(reversed(extracted)), prompt, negative_prompt
+    # add prompt expansion if not all styles could be resolved
+    # TODO check if it's better to not add fooocus_expansion but just return prompt incl. fooocus_expansion words
+    # TODO evaluate if adding prompt expansion to metadata is a good idea
+    if prompt != '' and prompt != real_prompt:
+        extracted.append(modules.sdxl_styles.fooocus_expansion)
+
+    return list(reversed(extracted)), real_prompt, negative_prompt


 class PromptStyle(typing.NamedTuple):

From 13d0341a025712f9849e861842de5999162fc95d Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 01:52:42 +0100
Subject: [PATCH 13/52] feat: sort styles after metadata import

---
 webui.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/webui.py b/webui.py
index 5c2d4a8c..b245df17 100644
--- a/webui.py
+++ b/webui.py
@@ -607,7 +607,8 @@ with shared.gradio_root:

                 return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating)

-            metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False)
+            metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False) \
+                .then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)

             generate_button.click(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), [], True),
                                   outputs=[stop_button, skip_button, generate_button, gallery, state_is_generating]) \
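The `.then()` chaining used above is plain Gradio event sequencing: run the metadata import first, then re-sort the styles checkbox group in a follow-up step. A minimal hypothetical app showing the same pattern (the handler names and choices here are stand-ins, not the real webui.py wiring):

    import gradio as gr

    def import_styles():
        # stand-in for trigger_metadata_import
        return ['Fooocus V2', 'Fooocus Enhance']

    with gr.Blocks() as demo:
        styles = gr.CheckboxGroup(choices=['Fooocus Enhance', 'Fooocus V2'], value=[])
        import_button = gr.Button('Import')
        # First event updates the selection, chained event re-sorts it.
        import_button.click(import_styles, outputs=styles, queue=False) \
            .then(lambda s: sorted(s), inputs=styles, outputs=styles, queue=False)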
From c3ab9f1f3071b9c32a74802288a3f8615ba1c6d8 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 14:22:36 +0100
Subject: [PATCH 14/52] refactor: use central flag for LoRA count

---
 modules/async_worker.py | 4 ++--
 modules/config.py       | 4 ++--
 modules/flags.py        | 2 ++
 modules/meta_parser.py  | 3 ++-
 modules/metadata.py     | 4 ++--
 webui.py                | 2 +-
 6 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index e6ec8c39..1c788b62 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -44,7 +44,7 @@ def worker():
     from modules.util import remove_empty_str, HWC3, resize_image, \
         get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, calculate_sha256
     from modules.upscaler import perform_upscale
-    from modules.flags import Performance, MetadataScheme
+    from modules.flags import Performance, MetadataScheme, lora_count

     try:
         async_gradio_app = shared.gradio_root
@@ -134,7 +134,7 @@ def worker():
         base_model_name = args.pop()
         refiner_model_name = args.pop()
         refiner_switch = args.pop()
-        loras = [[str(args.pop()), float(args.pop())] for _ in range(5)]
+        loras = [[str(args.pop()), float(args.pop())] for _ in range(lora_count)]
         input_image_checkbox = args.pop()
         current_tab = args.pop()
         uov_method = args.pop()
diff --git a/modules/config.py b/modules/config.py
index 6c5da0e9..e9152b40 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -8,7 +8,7 @@ import modules.sdxl_styles

 from modules.model_loader import load_file_from_url
 from modules.util import get_files_from_folder
-from modules.flags import Performance, MetadataScheme
+from modules.flags import Performance, MetadataScheme, lora_count

 config_path = os.path.abspath("./config.txt")
 config_example_path = os.path.abspath("config_modification_tutorial.txt")
@@ -333,7 +333,7 @@ metadata_created_by = get_config_item_or_set_default(

 example_inpaint_prompts = [[x] for x in example_inpaint_prompts]

-config_dict["default_loras"] = default_loras = default_loras[:5] + [['None', 1.0] for _ in range(5 - len(default_loras))]
+config_dict["default_loras"] = default_loras = default_loras[:lora_count] + [['None', 1.0] for _ in range(lora_count - len(default_loras))]

 possible_preset_keys = [
     "default_model",
diff --git a/modules/flags.py b/modules/flags.py
index 5008d5cd..f0297783 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -55,6 +55,8 @@ metadata_scheme = [
     ('A1111 (plain text)', MetadataScheme.A1111.value),
 ]

+lora_count = 5
+lora_count_with_lcm = lora_count + 1

 class Steps(Enum):
     QUALITY = 60
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index e7cf8a47..3c3e416b 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -3,6 +3,7 @@ import json
 import gradio as gr

 import modules.config
+from modules.flags import lora_count_with_lcm


 def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
@@ -35,7 +36,7 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):

     results.append(gr.update(visible=False))

-    for i in range(1, 6):
+    for i in range(1, lora_count_with_lcm):
         try:
             n, w = loaded_parameter_dict.get(f'LoRA {i}').split(' : ')
             w = float(w)
diff --git a/modules/metadata.py b/modules/metadata.py
index 0d585f1c..0978e862 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -7,7 +7,7 @@ import modules.config
 import fooocus_version
 # import advanced_parameters
 from modules.util import quote, unquote, extract_styles_from_prompt, is_json
-from modules.flags import MetadataScheme, Performance, Steps
+from modules.flags import MetadataScheme, Performance, Steps, lora_count_with_lcm

@@ -106,7 +106,7 @@ class A1111MetadataParser(MetadataParser):

         lora_hashes = []
-        for index in range(5):
+        for index in range(lora_count_with_lcm):
             key = f'lora_name_{index + 1}'
             if key in data:
diff --git a/webui.py b/webui.py
index b245df17..60d6540d 100644
--- a/webui.py
+++ b/webui.py
@@ -502,7 +502,7 @@ with shared.gradio_root:
                 modules.config.update_all_model_names()
                 results = []
                 results += [gr.update(choices=modules.config.model_filenames), gr.update(choices=['None'] + modules.config.model_filenames)]
-                for i in range(5):
+                for i in range(flags.lora_count):
                     results += [gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
                 return results

From 20e53028a4c5baa7b348092793f93907db4fc289 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 14:27:51 +0100
Subject: [PATCH 15/52] refactor: use central flag for ControlNet image count

---
 modules/async_worker.py | 2 +-
 modules/flags.py        | 2 ++
 webui.py                | 2 +-
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index 1c788b62..28104a94 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -147,7 +147,7 @@ def worker():

         cn_tasks = {x: [] for x in flags.ip_list}
-        for _ in range(4):
+        for _ in range(flags.controlnet_image_count):
             cn_img = args.pop()
             cn_stop = args.pop()
             cn_weight = args.pop()
diff --git a/modules/flags.py b/modules/flags.py
index f0297783..31100284 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -58,6 +58,8 @@ lora_count = 5
 lora_count_with_lcm = lora_count + 1

+controlnet_image_count = 4
+

 class Steps(Enum):
     QUALITY = 60
     SPEED = 30
diff --git a/webui.py b/webui.py
index 60d6540d..666d0a00 100644
--- a/webui.py
+++ b/webui.py
@@ -152,7 +152,7 @@ with shared.gradio_root:
                         ip_weights = []
                         ip_ctrls = []
                         ip_ad_cols = []
-                        for _ in range(4):
+                        for _ in range(flags.controlnet_image_count):
                             with gr.Column():
                                 ip_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False, height=300)
                                 ip_images.append(ip_image)

From c80011b1d115197bdbc821106b7ec7dde5c09cd9 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 15:45:55 +0100
Subject: [PATCH 16/52] fix: use correct LoRA mapping, add fallback for
 backwards compatibility

---
 modules/async_worker.py |  2 +-
 modules/meta_parser.py  | 29 ++++++++++++++++-----------
 2 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index 28104a94..539c58fd 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -804,7 +804,7 @@ def worker():
                     for li, (n, w) in enumerate(loras):
                         if n != 'None':
-                            d.append((f'LoRA {li + 1}', f'lora{li + 1}_combined', f'{n} : {w}', True, True))
+                            d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}', True, True))
                             d.append((f'LoRA {li + 1} Name', f'lora_name_{li + 1}', n, False, False))
                             d.append((f'LoRA {li + 1} Weight', f'lora_weight_{li + 1}', w, False, False))
                             d.append((f'LoRA {li + 1} Hash', f'lora_hash_{li + 1}', lora_hashes[li], False, False))
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 3c3e416b..d9ee7048 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -3,7 +3,7 @@ import json
 import gradio as gr

 import modules.config
-from modules.flags import lora_count_with_lcm
+from modules.flags import lora_count


 def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
@@ -29,6 +29,9 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
     get_str('scheduler', 'Scheduler', loaded_parameter_dict, results)
     get_seed('seed', 'Seed', loaded_parameter_dict, results)

+    for i in range(lora_count):
+        get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results)
+
     if is_generating:
         results.append(gr.update())
     else:

     results.append(gr.update(visible=False))

-    for i in range(1, lora_count_with_lcm):
-        try:
-            n, w = loaded_parameter_dict.get(f'LoRA {i}').split(' : ')
-            w = float(w)
-            results.append(n)
-            results.append(w)
-        except:
-            results.append(gr.update())
-            results.append(gr.update())
-
     return results

@@ -138,3 +131,17 @@ def get_adm_guidance(key: str, fallback: str | None, source_dict: dict, results:
     results.append(gr.update())
     results.append(gr.update())
     results.append(gr.update())
+
+
+def get_lora(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
+    try:
+        n, w = source_dict.get(key).split(' : ')
+        w = float(w)
+        results.append(n)
+        results.append(w)
+    except:
+        if fallback is not None:
+            get_lora(fallback, None, source_dict, results, default)
+            return
+        results.append(gr.update())
+        results.append(gr.update())
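For reference, the 'LoRA N' / 'lora_combined_N' value format these patches standardize on is '<name> : <weight>'; parsing it is the one-liner get_lora() wraps in a try/except (sketch with an example filename):

    raw = 'sd_xl_offset_example-lora_1.0.safetensors : 0.5'  # example value
    name, weight = raw.split(' : ')
    print(name)           # sd_xl_offset_example-lora_1.0.safetensors
    print(float(weight))  # 0.5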
From 7fefe3a3c25ae75768ce8e1bbfa56d73a2a3399b Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 16:28:47 +0100
Subject: [PATCH 17/52] feat: add created_by again

---
 modules/async_worker.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index 539c58fd..6de720dd 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -809,6 +809,9 @@ def worker():
                             d.append((f'LoRA {li + 1} Weight', f'lora_weight_{li + 1}', w, False, False))
                             d.append((f'LoRA {li + 1} Hash', f'lora_hash_{li + 1}', lora_hashes[li], False, False))
                     d.append(('Version', 'version', 'v' + fooocus_version.version, True, True))
+                    if modules.config.metadata_created_by != '':
+                        d.append(('Created By', 'created_by', modules.config.metadata_created_by, False, False))
+
                     log(x, d, save_metadata_to_images, metadata_scheme)

From 33d644f4a53ccf2852511c1201a818135ee2d0c2 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 16:29:40 +0100
Subject: [PATCH 18/52] feat: add prefix "Fooocus" to version

---
 modules/async_worker.py | 2 +-
 modules/metadata.py     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index 6de720dd..89302a02 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -808,7 +808,7 @@ def worker():
                             d.append((f'LoRA {li + 1} Name', f'lora_name_{li + 1}', n, False, False))
                             d.append((f'LoRA {li + 1} Weight', f'lora_weight_{li + 1}', w, False, False))
                             d.append((f'LoRA {li + 1} Hash', f'lora_hash_{li + 1}', lora_hashes[li], False, False))
-                    d.append(('Version', 'version', 'v' + fooocus_version.version, True, True))
+                    d.append(('Version', 'version', 'Fooocus v' + fooocus_version.version, True, True))
                     if modules.config.metadata_created_by != '':
                         d.append(('Created By', 'created_by', modules.config.metadata_created_by, False, False))

diff --git a/modules/metadata.py b/modules/metadata.py
index 0978e862..576619a8 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -135,7 +135,7 @@ class A1111MetadataParser(MetadataParser):
         generation_params |= {
             self.fooocus_to_a1111['lora_hashes']: lora_hashes_string,
             # "Denoising strength": data['denoising_strength'],
-            self.fooocus_to_a1111['version']: f"Fooocus {data['version']}"
+            self.fooocus_to_a1111['version']: {data['version']}
         }

From e388f6fff6a887e4c805d4216ed000f138c1eb89 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 16:38:39 +0100
Subject: [PATCH 19/52] wip: code cleanup, update todos

---
 modules/flags.py    |  1 +
 modules/metadata.py | 13 ++++---------
 2 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/modules/flags.py b/modules/flags.py
index 31100284..ae11586e 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -60,6 +60,7 @@ lora_count_with_lcm = lora_count + 1

 controlnet_image_count = 4

+
 class Steps(Enum):
     QUALITY = 60
     SPEED = 30
diff --git a/modules/metadata.py b/modules/metadata.py
index 576619a8..0f24f9f6 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -64,15 +64,12 @@ class A1111MetadataParser(MetadataParser):
             else:
                 prompt += ('' if prompt == '' else "\n") + line

-        # set defaults
-        data = {}
-
         found_styles, prompt, negative_prompt = extract_styles_from_prompt(prompt, negative_prompt)
-        data['styles'] = str(found_styles)

-        data |= {
+        data = {
             'prompt': prompt,
             'negative_prompt': negative_prompt,
+            'styles': str(found_styles)
         }

         for k, v in re_param.findall(lastline):
@@ -98,8 +95,6 @@ class A1111MetadataParser(MetadataParser):
         return data

     def parse_string(self, metadata: dict) -> str:
-        # TODO add correct mapping
-
         data = {k: v for _, k, v, _, _ in metadata}

         # TODO check if correct
@@ -110,6 +105,7 @@ class A1111MetadataParser(MetadataParser):
             key = f'lora_name_{index + 1}'
             if key in data:
                 name = data[f'lora_name_{index + 1}']
+                # TODO handle LoRA weight
                 # weight = data[f'lora_weight_{index + 1}']
                 hash = data[f'lora_hash_{index + 1}']
                 lora_hashes.append(f'{name.split(".")[0]}: {hash}')
@@ -122,6 +118,7 @@ class A1111MetadataParser(MetadataParser):
             self.fooocus_to_a1111['seed']: data['seed'],
             # TODO check resolution value, should be string
             self.fooocus_to_a1111['resolution']: f'{width}x{heigth}',
+            # TODO load model by name / hash
             self.fooocus_to_a1111['base_model']: data['base_model'].split('.')[0],
             self.fooocus_to_a1111['base_model_hash']: data['base_model_hash']
         }
@@ -150,11 +147,9 @@ class FooocusMetadataParser(MetadataParser):

     def parse_json(self, metadata: dict) -> dict:
-        # TODO add mapping if necessary
         return metadata

     def parse_string(self, metadata: dict) -> str:
-
         return json.dumps({k: v for _, k, v, _, _ in metadata})

 #         metadata = {

From 26563562064b504e1d57548660ac70d225164c2e Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 18:17:51 +0100
Subject: [PATCH 20/52] fix: use correct order to read LoRA in meta parser

---
 modules/meta_parser.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index d9ee7048..a93ef9c8 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -29,9 +29,6 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
     get_str('scheduler', 'Scheduler', loaded_parameter_dict, results)
     get_seed('seed', 'Seed', loaded_parameter_dict, results)

-    for i in range(lora_count):
-        get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results)
-
     if is_generating:
         results.append(gr.update())
     else:

     results.append(gr.update(visible=False))

+    for i in range(lora_count):
+        get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results)
+
     return results
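A standalone sketch of how the A1111 parameter line is tokenized by the re_param regex that parse_json relies on; the regex is copied from modules/metadata.py, and the sample line is made up:

    import re

    re_param = re.compile(r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)')

    lastline = 'Steps: 30, Sampler: dpmpp_2m_sde_gpu, CFG scale: 4.0, Seed: 12345, Size: 1152x896'
    # findall yields (key, value) pairs; keys may contain spaces, values stop at commas.
    print(dict(re_param.findall(lastline)))
    # {'Steps': '30', 'Sampler': 'dpmpp_2m_sde_gpu', 'CFG scale': '4.0', 'Seed': '12345', 'Size': '1152x896'}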
From e54109745166fa0c3cf484a24d6eaa86f71f0850 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 21:52:41 +0100
Subject: [PATCH 21/52] wip: code cleanup, update todos

---
 modules/async_worker.py | 2 +-
 modules/metadata.py     | 4 +---
 modules/util.py         | 2 +-
 webui.py                | 2 +-
 4 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index 89302a02..237d7ce6 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -193,6 +193,7 @@ def worker():
             modules.patch.negative_adm_scale = advanced_parameters.adm_scaler_negative = 1.0
             modules.patch.adm_scaler_end = advanced_parameters.adm_scaler_end = 0.0

+        # TODO move hashing to metadata mapper as this slows down the generation process
         base_model_path = os.path.join(modules.config.path_checkpoints, base_model_name)
         base_model_hash = calculate_sha256(base_model_path)[0:10]
@@ -788,7 +789,6 @@ def worker():
                         ('Resolution', 'resolution', str((width, height)), True, True),
                         ('Sharpness', 'sharpness', sharpness, True, True),
                         ('Guidance Scale', 'guidance_scale', guidance_scale, True, True),
-                        # ('Denoising Strength', 'denoising_strength', denoising_strength, False, False),
                         ('ADM Guidance', 'adm_guidance', str((
                             modules.patch.positive_adm_scale,
                             modules.patch.negative_adm_scale,
diff --git a/modules/metadata.py b/modules/metadata.py
index 0f24f9f6..818494c2 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -114,7 +114,6 @@ class A1111MetadataParser(MetadataParser):
         generation_params = {
             self.fooocus_to_a1111['steps']: data['steps'],
             self.fooocus_to_a1111['sampler']: data['sampler'],
-            self.fooocus_to_a1111['guidance_scale']: data['guidance_scale'],
             self.fooocus_to_a1111['seed']: data['seed'],
             # TODO check resolution value, should be string
             self.fooocus_to_a1111['resolution']: f'{width}x{heigth}',
@@ -131,7 +130,6 @@ class A1111MetadataParser(MetadataParser):
         generation_params |= {
             self.fooocus_to_a1111['lora_hashes']: lora_hashes_string,
-            # "Denoising strength": data['denoising_strength'],
             self.fooocus_to_a1111['version']: {data['version']}
         }

@@ -139,7 +137,7 @@ class A1111MetadataParser(MetadataParser):
         generation_params_text = ", ".join(
             [k if k == v else f'{k}: {quote(v)}' for k, v in generation_params.items() if v is not None])
         # TODO check if multiline positive prompt is correctly processed
         positive_prompt_resolved = ', '.join(data['full_prompt'])  #TODO add loras to positive prompt if even possible
-        negative_prompt_resolved = ', '.join(data['full_negative_prompt'])  #TODO add loras to positive prompt if even possible
+        negative_prompt_resolved = ', '.join(data['full_negative_prompt'])  #TODO add loras to negative prompt if even possible
         negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else ""
         return f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip()
diff --git a/modules/util.py b/modules/util.py
index 4a709c31..21a00ef6 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -175,7 +175,7 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None):
         relative_path = ""
         for filename in files:
             _, file_extension = os.path.splitext(filename)
-            if (exensions == None or file_extension.lower() in exensions) and (name_filter == None or name_filter in _):
+            if (exensions is None or file_extension.lower() in exensions) and (name_filter is None or name_filter in _):
                 path = os.path.join(relative_path, filename)
                 filenames.append(path)

diff --git a/webui.py b/webui.py
index 666d0a00..8a22072c 100644
--- a/webui.py
+++ b/webui.py
@@ -607,7 +607,7 @@ with shared.gradio_root:

                 return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating)

-            metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False) \
+            metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
                 .then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)

From 89c8e3a812c19733e0d035ed5b605cb2d4273a68 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 21:54:39 +0100
Subject: [PATCH 22/52] feat: make sha256 with length 10 default

---
 modules/async_worker.py | 6 +++---
 modules/util.py         | 7 ++++---
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index 237d7ce6..f58ab79f 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -195,15 +195,15 @@ def worker():

         # TODO move hashing to metadata mapper as this slows down the generation process
         base_model_path = os.path.join(modules.config.path_checkpoints, base_model_name)
-        base_model_hash = calculate_sha256(base_model_path)[0:10]
+        base_model_hash = calculate_sha256(base_model_path)

         refiner_model_path = os.path.join(modules.config.path_checkpoints, refiner_model_name)
-        refiner_model_hash = calculate_sha256(refiner_model_path)[0:10] if refiner_model_name != 'None' else ''
+        refiner_model_hash = calculate_sha256(refiner_model_path) if refiner_model_name != 'None' else ''

         lora_hashes = []
         for (n, w) in loras:
             lora_path = os.path.join(modules.config.path_loras, n) if n != 'None' else ''
-            lora_hashes.append(calculate_sha256(lora_path)[0:10] if n != 'None' else '')
+            lora_hashes.append(calculate_sha256(lora_path) if n != 'None' else '')

         modules.patch.adaptive_cfg = advanced_parameters.adaptive_cfg
         print(f'[Parameters] Adaptive CFG = {modules.patch.adaptive_cfg}')
diff --git a/modules/util.py b/modules/util.py
index 21a00ef6..046ac5e7 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -14,7 +14,7 @@ from hashlib import sha256
 import modules.sdxl_styles

 LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
-
+HASH_SHA256_LENGTH = 10

 def erode_or_dilate(x, k):
     k = int(k)
@@ -182,7 +182,7 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None):
     return sorted(filenames, key=lambda x: -1 if os.sep in x else 1)


-def calculate_sha256(filename):
+def calculate_sha256(filename, length=HASH_SHA256_LENGTH):
     hash_sha256 = sha256()
     blksize = 1024 * 1024

@@ -190,7 +190,8 @@ def calculate_sha256(filename):
         for chunk in iter(lambda: f.read(blksize), b""):
             hash_sha256.update(chunk)

-    return hash_sha256.hexdigest()
+    res = hash_sha256.hexdigest()
+    return res[:length] if length else res


 def quote(text):

From 78d1ad3962aedcb27629e9b78c8a06f1a6bc61bb Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 21:56:10 +0100
Subject: [PATCH 23/52] feat: add lora handling to A1111 scheme

---
 modules/metadata.py | 39 ++++++++++++++++++++++++++++++---------
 1 file changed, 30 insertions(+), 9 deletions(-)

diff --git a/modules/metadata.py b/modules/metadata.py
index 818494c2..f88f804f 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -1,5 +1,6 @@
 import json
 import re
+from pathlib import Path
 from abc import ABC, abstractmethod

 from PIL import Image
@@ -40,6 +41,7 @@ class A1111MetadataParser(MetadataParser):
         'refiner_model': 'Refiner',
         'refiner_model_hash': 'Refiner hash',
         'lora_hashes': 'Lora hashes',
+        'lora_weights': 'Lora weights',
         'version': 'Version'
     }

@@ -92,6 +94,25 @@ class A1111MetadataParser(MetadataParser):
             except Exception:
                 pass

+        if 'base_model' in data:
+            for filename in modules.config.model_filenames:
+                path = Path(filename)
+                if data['base_model'] == path.stem:
+                    data['base_model'] = path.name
+                    break
+
+        if 'lora_hashes' in data:
+            # TODO optimize by using hash for matching. Problem is speed of creating the hash per model, even on startup
+            lora_filenames = modules.config.lora_filenames.copy()
+            lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora())
+            for li, lora in enumerate(data['lora_hashes'].split(', ')):
+                name, _, weight = lora.split(': ')
+                for filename in lora_filenames:
+                    path = Path(filename)
+                    if name == path.stem:
+                        data[f'lora_combined_{li + 1}'] = f'{path.name} : {weight}'
+                        break
+
         return data

     def parse_string(self, metadata: dict) -> str:
@@ def parse_string(self, metadata: dict) -> str:
         width, heigth = eval(data['resolution'])

         lora_hashes = []
+        lora_weights = []
         for index in range(lora_count_with_lcm):
             key = f'lora_name_{index + 1}'
             if key in data:
-                name = data[f'lora_name_{index + 1}']
-                # TODO handle LoRA weight
-                # weight = data[f'lora_weight_{index + 1}']
-                hash = data[f'lora_hash_{index + 1}']
-                lora_hashes.append(f'{name.split(".")[0]}: {hash}')
+                lora_name = Path(data[f'lora_name_{index + 1}']).stem
+                lora_weight = data[f'lora_weight_{index + 1}']
+                lora_hash = data[f'lora_hash_{index + 1}']
+                # workaround for Fooocus not knowing LoRA name in LoRA metadata
+                lora_hashes.append(f'{lora_name}: {lora_hash}: {lora_weight}')
         lora_hashes_string = ', '.join(lora_hashes)

         generation_params = {
             self.fooocus_to_a1111['steps']: data['steps'],
             self.fooocus_to_a1111['sampler']: data['sampler'],
             self.fooocus_to_a1111['seed']: data['seed'],
             self.fooocus_to_a1111['resolution']: f'{width}x{heigth}',
             # TODO load model by name / hash
-            self.fooocus_to_a1111['base_model']: data['base_model'].split('.')[0],
+            self.fooocus_to_a1111['base_model']: Path(data['base_model']).stem,
             self.fooocus_to_a1111['base_model_hash']: data['base_model_hash']
         }

         if 'refiner_model' in data and data['refiner_model'] != 'None' and 'refiner_model_hash' in data:
             generation_params |= {
-                self.fooocus_to_a1111['refiner_model']: data['refiner_model'].split('.')[0],
+                self.fooocus_to_a1111['refiner_model']: Path(data['refiner_model']).stem,
                 self.fooocus_to_a1111['refiner_model_hash']: data['refiner_model_hash'],
             }

From dcc48744551d7e325b23e8a65eeb2647c4d1fbbc Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 21:57:02 +0100
Subject: [PATCH 24/52] feat: override existing LoRA values when importing,
 would cause images to differ

---
 modules/meta_parser.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index a93ef9c8..33772140 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -143,5 +143,5 @@ def get_lora(key: str, fallback: str | None, source_dict: dict, results: list, d
         if fallback is not None:
             get_lora(fallback, None, source_dict, results, default)
             return
-        results.append(gr.update())
-        results.append(gr.update())
+        results.append('None')
+        results.append(1)
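Usage of the reworked hashing helper, for orientation (hypothetical file name; the example digest is invented, shortened to the 10-char default from patch 22):

    from modules.util import calculate_sha256

    print(calculate_sha256('model.safetensors'))        # e.g. '31e35c80fc' (10 chars)
    print(calculate_sha256('model.safetensors', None))  # full 64-char hex digest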
From 6939f7904a6feb43e551bfb462b3844432c42ff5 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 22:27:33 +0100
Subject: [PATCH 25/52] fix: correctly extract prompt style when only prompt
 expansion is selected

---
 modules/util.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/modules/util.py b/modules/util.py
index 046ac5e7..663e7975 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -314,8 +314,17 @@ def extract_styles_from_prompt(prompt, negative_prompt):
     # add prompt expansion if not all styles could be resolved
     # TODO check if it's better to not add fooocus_expansion but just return prompt incl. fooocus_expansion words
     # TODO evaluate if adding prompt expansion to metadata is a good idea
-    if prompt != '' and prompt != real_prompt:
-        extracted.append(modules.sdxl_styles.fooocus_expansion)
+    if prompt != '':
+        if prompt != real_prompt:
+            extracted.append(modules.sdxl_styles.fooocus_expansion)
+
+        # find real_prompt when only prompt expansion is selected
+        if real_prompt == '':
+            first_word = prompt.split(', ')[0]
+            first_word_positions = [i for i in range(len(prompt)) if prompt.startswith(first_word, i)]
+            real_prompt = prompt[:first_word_positions[-1]]
+            if real_prompt.endswith(', '):
+                real_prompt = real_prompt[:-2]

     return list(reversed(extracted)), real_prompt, negative_prompt

From 5811234d23b0f54920796a6256ee2c5e20409a7c Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 29 Jan 2024 22:45:30 +0100
Subject: [PATCH 26/52] feat: allow model / LoRA loading from subfolders

---
 modules/metadata.py | 46 ++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 41 insertions(+), 5 deletions(-)

diff --git a/modules/metadata.py b/modules/metadata.py
index f88f804f..87bb08b0 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -98,7 +98,7 @@ class A1111MetadataParser(MetadataParser):
             for filename in modules.config.model_filenames:
                 path = Path(filename)
                 if data['base_model'] == path.stem:
-                    data['base_model'] = path.name
+                    data['base_model'] = filename
                     break

@@ -110,7 +110,7 @@ class A1111MetadataParser(MetadataParser):
                 for filename in lora_filenames:
                     path = Path(filename)
                     if name == path.stem:
-                        data[f'lora_combined_{li + 1}'] = f'{path.name} : {weight}'
+                        data[f'lora_combined_{li + 1}'] = f'{filename} : {weight}'
                         break

@@ -146,12 +146,12 @@ class A1111MetadataParser(MetadataParser):
         if 'refiner_model' in data and data['refiner_model'] != 'None' and 'refiner_model_hash' in data:
             generation_params |= {
                 self.fooocus_to_a1111['refiner_model']: Path(data['refiner_model']).stem,
-                self.fooocus_to_a1111['refiner_model_hash']: data['refiner_model_hash'],
+                self.fooocus_to_a1111['refiner_model_hash']: data['refiner_model_hash']
             }

         generation_params |= {
             self.fooocus_to_a1111['lora_hashes']: lora_hashes_string,
-            self.fooocus_to_a1111['version']: {data['version']}
+            self.fooocus_to_a1111['version']: data['version']
         }

         generation_params_text = ", ".join(
@@ -166,9 +166,35 @@ class FooocusMetadataParser(MetadataParser):

     def parse_json(self, metadata: dict) -> dict:
+        model_filenames = modules.config.model_filenames.copy()
+        lora_filenames = modules.config.lora_filenames.copy()
+
+        for key, value in metadata.items():
+            if value == '' or value == 'None':
+                continue
+            if key in ['base_model', 'refiner_model']:
+                metadata[key] = self.replace_value_with_filename(key, value, model_filenames)
+            elif key.startswith(('lora_combined_', 'lora_name_')):
+                metadata[key] = self.replace_value_with_filename(key, value, lora_filenames)
+            else:
+                continue
+
         return metadata

-    def parse_string(self, metadata: dict) -> str:
+    def parse_string(self, metadata: list) -> str:
+        # remove model folder paths from metadata
+        for li, (label, key, value, show_in_log, copy_in_log) in enumerate(metadata):
+            if value == '' or value == 'None':
+                continue
+            if key in ['base_model', 'refiner_model'] or key.startswith(('lora_combined_', 'lora_name_')):
+                if key.startswith('lora_combined_'):
+                    name, weight = value.split(' : ')
+                    name = Path(name).stem
+                    value = f'{name} : {weight}'
+                else:
+                    value = Path(value).stem
+                metadata[li] = (label, key, value, show_in_log, copy_in_log)
+
         return json.dumps({k: v for _, k, v, _, _ in metadata})

 #         metadata = {
@@ -249,6 +275,16 @@ class FooocusMetadataParser(MetadataParser):
 #         # return json.dumps(metadata, ensure_ascii=True) TODO check if possible
 #         return json.dumps(metadata, ensure_ascii=False)

+    def replace_value_with_filename(self, key, value, filenames):
+        for filename in filenames:
+            path = Path(filename)
+            if key.startswith('lora_combined_'):
+                name, weight = value.split(' : ')
+                if name == path.stem:
+                    return f'{filename} : {weight}'
+            elif value == path.stem:
+                return filename

From e93a345ccf83a4b045e924dfa11316676d77d5b9 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Tue, 30 Jan 2024 00:20:46 +0100
Subject: [PATCH 27/52] feat: code cleanup, do not queue metadata preview on
 image upload

---
 modules/metadata.py | 5 ++---
 webui.py            | 2 +-
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/modules/metadata.py b/modules/metadata.py
index 87bb08b0..9d68fd17 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -118,11 +118,9 @@ class A1111MetadataParser(MetadataParser):
     def parse_string(self, metadata: dict) -> str:
         data = {k: v for _, k, v, _, _ in metadata}

-        # TODO check if correct
         width, heigth = eval(data['resolution'])

         lora_hashes = []
-        lora_weights = []
         for index in range(lora_count_with_lcm):
             key = f'lora_name_{index + 1}'
             if key in data:
@@ -275,7 +273,8 @@ class FooocusMetadataParser(MetadataParser):

-    def replace_value_with_filename(self, key, value, filenames):
+    @staticmethod
+    def replace_value_with_filename(key, value, filenames):
         for filename in filenames:
             path = Path(filename)
             if key.startswith('lora_combined_'):
diff --git a/webui.py b/webui.py
index 8a22072c..9703e908 100644
--- a/webui.py
+++ b/webui.py
@@ -232,7 +232,7 @@ with shared.gradio_root:
                 return results

             metadata_input_image.upload(trigger_metadata_preview, inputs=metadata_input_image,
-                                        outputs=metadata_json)
+                                        outputs=metadata_json, queue=False, show_progress=True)

             switch_js = "(x) => {if(x){viewer_to_bottom(100);viewer_to_bottom(500);}else{viewer_to_top();} return x;}"
             down_js = "() => {viewer_to_bottom();}"
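The subfolder handling in patch 26 boils down to comparing pathlib stems; a small sketch with invented paths showing how a bare model name from metadata resolves to a possibly nested filename from the scanned list:

    from pathlib import Path

    lora_filenames = ['subfolder/my-lora.safetensors', 'other.safetensors']  # invented
    value = 'my-lora : 0.8'

    name, weight = value.split(' : ')
    resolved = next((f for f in lora_filenames if Path(f).stem == name), None)
    print(f'{resolved} : {weight}')  # subfolder/my-lora.safetensors : 0.8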
From 7772eb7965fa565a8354fae5015da4e1ba249d20 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Wed, 31 Jan 2024 01:17:10 +0100
Subject: [PATCH 28/52] refactor: add flag for refiner_swap_method

---
 modules/flags.py | 2 ++
 webui.py         | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/modules/flags.py b/modules/flags.py
index ae11586e..cbfd904d 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -22,6 +22,8 @@ SAMPLER_NAMES = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"]
 sampler_list = SAMPLER_NAMES
 scheduler_list = SCHEDULER_NAMES

+refiner_swap_method = 'joint'
+
 cn_ip = "ImagePrompt"
 cn_ip_face = "FaceSwap"
 cn_canny = "PyraCanny"
diff --git a/webui.py b/webui.py
index 9703e908..816a122f 100644
--- a/webui.py
+++ b/webui.py
@@ -367,7 +367,7 @@ with shared.gradio_root:
                                                step=0.001, value=0.3,
                                                info='When to end the guidance from positive/negative ADM. ')
-                    refiner_swap_method = gr.Dropdown(label='Refiner swap method', value='joint',
+                    refiner_swap_method = gr.Dropdown(label='Refiner swap method', value=flags.refiner_swap_method,
                                                      choices=['joint', 'separate', 'vae'])

                    adaptive_cfg = gr.Slider(label='CFG Mimicking from TSNR', minimum=1.0, maximum=30.0, step=0.01,
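Patch 29 below leans on this central flag: optional values are only written to the metadata list when they differ from the default in modules/flags.py. In sketch form, with stand-in values and the same tuple shape as the worker's d list:

    refiner_swap_method = 'vae'     # current UI value
    default_swap_method = 'joint'   # flags.refiner_swap_method

    d = []
    # Only log the swap method when the user changed it from the default.
    if refiner_swap_method != default_swap_method:
        d.append(('Refiner Swap Method', 'refiner_swap_method', refiner_swap_method, True, True))
    print(d)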
From 9bdb65ec5d77410ce92c75299476ad7d5193e686 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Wed, 31 Jan 2024 01:18:09 +0100
Subject: [PATCH 29/52] feat: add metadata handling for all non-img2img
 parameters

---
 modules/async_worker.py | 71 +++++++++++++++++++++++--------------
 modules/meta_parser.py  | 77 +++++++++++++++++++++++++----------------
 modules/metadata.py     | 35 +++++++++++++++++--
 webui.py                |  9 ++---
 4 files changed, 130 insertions(+), 62 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index f58ab79f..ff12f5ec 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -274,7 +274,7 @@ def worker():
                 and isinstance(inpaint_input_image, dict):
             inpaint_image = inpaint_input_image['image']
             inpaint_mask = inpaint_input_image['mask'][:, :, 0]
-
+
             if advanced_parameters.inpaint_mask_upload_checkbox:
                 if isinstance(inpaint_mask_image_upload, np.ndarray):
                     if inpaint_mask_image_upload.ndim == 3:
@@ -777,38 +777,57 @@ def worker():
                 imgs = [inpaint_worker.current_task.post_process(x) for x in imgs]

                 for x in imgs:
-                    d = [
-                        ('Prompt', 'prompt', task['log_positive_prompt'], True, True),
-                        ('Full Positive Prompt', 'full_prompt', task['positive'], False, False),
-                        ('Negative Prompt', 'negative_prompt', task['log_negative_prompt'], True, True),
-                        ('Full Negative Prompt', 'full_negative_prompt', task['negative'], False, False),
-                        ('Fooocus V2 Expansion', 'prompt_expansion', task['expansion'], True, True),
-                        ('Styles', 'styles', str(raw_style_selections), True, True),
-                        ('Performance', 'performance', performance_selection.value, True, True),
-                        ('Steps', 'steps', steps, False, False),
-                        ('Resolution', 'resolution', str((width, height)), True, True),
-                        ('Sharpness', 'sharpness', sharpness, True, True),
-                        ('Guidance Scale', 'guidance_scale', guidance_scale, True, True),
-                        ('ADM Guidance', 'adm_guidance', str((
-                            modules.patch.positive_adm_scale,
-                            modules.patch.negative_adm_scale,
-                            modules.patch.adm_scaler_end)), True, True),
-                        ('Base Model', 'base_model', base_model_name, True, True),
-                        ('Base Model Hash', 'base_model_hash', base_model_hash, False, False),
-                        ('Refiner Model', 'refiner_model', refiner_model_name, True, True),
-                        ('Refiner Model Hash', 'refiner_model_hash', refiner_model_hash, False, False),
-                        ('Refiner Switch', 'refiner_switch', refiner_switch, True, True),
-                        ('Sampler', 'sampler', sampler_name, True, True),
-                        ('Scheduler', 'scheduler', scheduler_name, True, True),
-                        ('Seed', 'seed', task['task_seed'], True, True)
-                    ]
+                    d = [('Prompt', 'prompt', task['log_positive_prompt'], True, True),
+                         ('Full Positive Prompt', 'full_prompt', task['positive'], False, False),
+                         ('Negative Prompt', 'negative_prompt', task['log_negative_prompt'], True, True),
+                         ('Full Negative Prompt', 'full_negative_prompt', task['negative'], False, False),
+                         ('Fooocus V2 Expansion', 'prompt_expansion', task['expansion'], True, True),
+                         ('Styles', 'styles', str(raw_style_selections), True, True),
+                         ('Performance', 'performance', performance_selection.value, True, True),
+                         ('Steps', 'steps', steps, False, False),
+                         ('Resolution', 'resolution', str((width, height)), True, True),
+                         ('Guidance Scale', 'guidance_scale', guidance_scale, True, True),
+                         ('Sharpness', 'sharpness', sharpness, True, True),
+                         ('ADM Guidance', 'adm_guidance', str((
+                             modules.patch.positive_adm_scale,
+                             modules.patch.negative_adm_scale,
+                             modules.patch.adm_scaler_end)), True, True),
+                         ('Base Model', 'base_model', base_model_name, True, True),
+                         ('Base Model Hash', 'base_model_hash', base_model_hash, False, False),  # TODO move to metadata and use cache
+                         ('Refiner Model', 'refiner_model', refiner_model_name, True, True),
+                         ('Refiner Model Hash', 'refiner_model_hash', refiner_model_hash, False, False),  # TODO move to metadata and use cache
+                         ('Refiner Switch', 'refiner_switch', refiner_switch, True, True)]
+
+                    # TODO evaluate if this should always be added
+                    if refiner_model_name != 'None':
+                        if advanced_parameters.overwrite_switch > 0:
+                            d.append(('Overwrite Switch', 'overwrite_switch', advanced_parameters.overwrite_switch, True, True))
+                        if refiner_swap_method != flags.refiner_swap_method:
+                            d.append(('Refiner Swap Method', 'refiner_swap_method', refiner_swap_method, True, True))
+                    if advanced_parameters.adaptive_cfg != modules.config.default_cfg_tsnr:
+                        d.append(('CFG Mimicking from TSNR', 'adaptive_cfg', advanced_parameters.adaptive_cfg, True, True))
+
+                    d.append(('Sampler', 'sampler', sampler_name, True, True))
+                    d.append(('Scheduler', 'scheduler', scheduler_name, True, True))
+                    d.append(('Seed', 'seed', task['task_seed'], True, True))
+
+                    if advanced_parameters.freeu_enabled:
+                        d.append(('FreeU', 'freeu', str((
+                            advanced_parameters.freeu_b1,
+                            advanced_parameters.freeu_b2,
+                            advanced_parameters.freeu_s1,
+                            advanced_parameters.freeu_s2)), True, True))
+
                     for li, (n, w) in enumerate(loras):
                         if n != 'None':
                             d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}', True, True))
                             d.append((f'LoRA {li + 1} Name', f'lora_name_{li + 1}', n, False, False))
                             d.append((f'LoRA {li + 1} Weight', f'lora_weight_{li + 1}', w, False, False))
+                            # TODO move hashes to metadata handling
                             d.append((f'LoRA {li + 1} Hash', f'lora_hash_{li + 1}', lora_hashes[li], False, False))
+
                     d.append(('Version', 'version', 'Fooocus v' + fooocus_version.version, True, True))
+
                     if modules.config.metadata_created_by != '':
                         d.append(('Created By', 'created_by', modules.config.metadata_created_by, False, False))
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 33772140..6d4e542c 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -3,7 +3,7 @@ import json
 import gradio as gr

 import modules.config
-from modules.flags import lora_count
+from modules.flags import lora_count, Steps


 def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
@@ -18,10 +18,14 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
     get_str('negative_prompt', 'Negative Prompt', loaded_parameter_dict, results)
     get_list('styles', 'Styles', loaded_parameter_dict, results)
     get_str('performance', 'Performance', loaded_parameter_dict, results)
+    get_steps('steps', 'Steps', loaded_parameter_dict, results)
+    get_float('overwrite_switch', 'Overwrite Switch', loaded_parameter_dict, results)
     get_resolution('resolution', 'Resolution', loaded_parameter_dict, results)
-    get_float('sharpness', 'Sharpness', loaded_parameter_dict, results)
     get_float('guidance_scale', 'Guidance Scale', loaded_parameter_dict, results)
+    get_float('sharpness', 'Sharpness', loaded_parameter_dict, results)
     get_adm_guidance('adm_guidance', 'ADM Guidance', loaded_parameter_dict, results)
+    get_str('refiner_swap_method', 'Refiner Swap Method', loaded_parameter_dict, results)
+    get_str('adaptive_cfg', 'CFG Mimicking from TSNR', loaded_parameter_dict, results)
     get_str('base_model', 'Base Model', loaded_parameter_dict, results)
     get_str('refiner_model', 'Refiner Model', loaded_parameter_dict, results)
     get_float('refiner_switch', 'Refiner Switch', loaded_parameter_dict, results)
@@ -36,6 +40,8 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):

     results.append(gr.update(visible=False))

+    get_freeu('freeu', 'FreeU', loaded_parameter_dict, results)
+
     for i in range(lora_count):
         get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results)

     return results

@@ def get_str(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
     try:
-        h = source_dict.get(key, default)
+        h = source_dict.get(key, source_dict.get(fallback, default))
         assert isinstance(h, str)
         results.append(h)
     except:
-        if fallback is not None:
-            get_str(fallback, None, source_dict, results, default)
-            return
         results.append(gr.update())


 def get_list(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
     try:
-        h = source_dict.get(key, default)
+        h = source_dict.get(key, source_dict.get(fallback, default))
         h = eval(h)
         assert isinstance(h, list)
         results.append(h)
     except:
-        if fallback is not None:
-            get_list(fallback, None, source_dict, results, default)
-            return
         results.append(gr.update())


+# TODO try get generic
+
 def get_float(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
     try:
-        h = source_dict.get(key, default)
+        h = source_dict.get(key, source_dict.get(fallback, default))
         assert h is not None
         h = float(h)
         results.append(h)
     except:
-        if fallback is not None:
-            get_float(fallback, None, source_dict, results, default)
-            return
         results.append(gr.update())


+def get_steps(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
+    try:
+        h = source_dict.get(key, source_dict.get(fallback, default))
+        assert h is not None
+        h = int(h)
+        if h not in set(item.value for item in Steps):
+            results.append(h)
+            return
+        results.append(-1)
+    except:
+        results.append(-1)
+
+
 def get_resolution(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
     try:
-        h = source_dict.get(key, default)
+        h = source_dict.get(key, source_dict.get(fallback, default))
         width, height = eval(h)
         formatted = modules.config.add_ratio(f'{width}*{height}')
         if formatted in modules.config.available_aspect_ratios:
@@ def get_resolution(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
             results.append(width)
             results.append(height)
     except:
-        if fallback is not None:
-            get_resolution(fallback, None, source_dict, results, default)
-            return
         results.append(gr.update())
         results.append(gr.update())
         results.append(gr.update())


 def get_seed(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
     try:
-        h = source_dict.get(key, default)
+        h = source_dict.get(key, source_dict.get(fallback, default))
         assert h is not None
         h = int(h)
         results.append(False)
         results.append(h)
     except:
-        if fallback is not None:
-            get_seed(fallback, None, source_dict, results, default)
-            return
         results.append(gr.update())
         results.append(gr.update())


 def get_adm_guidance(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
     try:
-        h = source_dict.get(key, default)
+        h = source_dict.get(key, source_dict.get(fallback, default))
         p, n, e = eval(h)
         results.append(float(p))
         results.append(float(n))
         results.append(float(e))
     except:
-        if fallback is not None:
-            get_adm_guidance(fallback, None, source_dict, results, default)
-            return
+        results.append(gr.update())
+        results.append(gr.update())
+        results.append(gr.update())
+
+
+def get_freeu(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
+    try:
+        h = source_dict.get(key, source_dict.get(fallback, default))
+        b1, b2, s1, s2 = eval(h)
+        results.append(True)
+        results.append(float(b1))
+        results.append(float(b2))
+        results.append(float(s1))
+        results.append(float(s2))
+    except:
+        results.append(False)
+        results.append(gr.update())
         results.append(gr.update())
         results.append(gr.update())
         results.append(gr.update())
+        results.append(gr.update())


 def get_lora(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
     try:
-        n, w = source_dict.get(key).split(' : ')
+        n, w = source_dict.get(key, source_dict.get(fallback)).split(' : ')
         w = float(w)
         results.append(n)
         results.append(w)
     except:
-        if fallback is not None:
-            get_lora(fallback, None, source_dict, results, default)
-            return
         results.append('None')
         results.append(1)
diff --git a/modules/metadata.py b/modules/metadata.py
index 9d68fd17..34cc1923 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -31,11 +31,18 @@ class A1111MetadataParser(MetadataParser):
     fooocus_to_a1111 = {
         'negative_prompt': 'Negative prompt',
         'styles': 'Styles',
+        'performance': 'Performance',
         'steps': 'Steps',
         'sampler': 'Sampler',
         'guidance_scale': 'CFG scale',
         'seed': 'Seed',
         'resolution': 'Size',
+        'sharpness': 'Sharpness',
+        'adm_guidance': 'ADM Guidance',
+        'refiner_swap_method': 'Refiner Swap Method',
+        'adaptive_cfg': 'Adaptive CFG',
+        'overwrite_switch': 'Overwrite Switch',
+        'freeu': 'FreeU',
         'base_model': 'Model',
         'base_model_hash': 'Model hash',
         'refiner_model': 'Refiner',
@@ def parse_json(self, metadata: str) -> dict:
-        # try to load performance based on steps
-        if 'steps' in data:
+        # try to load performance based on steps, fallback for direct A1111 imports
+        if 'steps' in data and 'performance' not in data:
             try:
                 data['performance'] = Performance[Steps(int(data['steps'])).name].value
             except Exception:
                 pass
@@ def parse_string(self, metadata: dict) -> str:
         generation_params = {
+            self.fooocus_to_a1111['performance']: data['performance'],
             self.fooocus_to_a1111['steps']: data['steps'],
             self.fooocus_to_a1111['sampler']: data['sampler'],
             self.fooocus_to_a1111['seed']: data['seed'],
             self.fooocus_to_a1111['resolution']: f'{width}x{heigth}',
+            self.fooocus_to_a1111['guidance_scale']: data['guidance_scale'],
+            self.fooocus_to_a1111['sharpness']: data['sharpness'],
+            self.fooocus_to_a1111['adm_guidance']: data['adm_guidance'],
             # TODO load model by name / hash
             self.fooocus_to_a1111['base_model']: Path(data['base_model']).stem,
             self.fooocus_to_a1111['base_model_hash']: data['base_model_hash']
         }
@@ def parse_string(self, metadata: dict) -> str:
                 self.fooocus_to_a1111['refiner_model_hash']: data['refiner_model_hash']
             }

+        if 'refiner_swap_method' in data:
+            generation_params |= {
+                self.fooocus_to_a1111['refiner_swap_method']: data['refiner_swap_method'],
+            }
+
+        # TODO unify with for and call with key
+
+        if 'freeu' in data:
+            generation_params |= {
+                self.fooocus_to_a1111['freeu']: data['freeu'],
+            }
+        if 'adaptive_cfg' in data:
+            generation_params |= {
+                self.fooocus_to_a1111['adaptive_cfg']: data['adaptive_cfg'],
+            }
+        if 'overwrite_switch' in data:
+            generation_params |= {
+                self.fooocus_to_a1111['overwrite_switch']: data['overwrite_switch'],
+            }
+
         generation_params |= {
             self.fooocus_to_a1111['lora_hashes']: lora_hashes_string,
             self.fooocus_to_a1111['version']: data['version']
         }
diff --git a/webui.py b/webui.py
index 816a122f..51193fac 100644
--- a/webui.py
+++ b/webui.py
@@ -588,10 +588,11 @@ with shared.gradio_root:
             prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False)

             load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections,
-                                 performance_selection, aspect_ratios_selection, overwrite_width, overwrite_height,
-                                 sharpness, guidance_scale, adm_scaler_positive, adm_scaler_negative, adm_scaler_end,
-                                 base_model, refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random,
-                                 image_seed, generate_button, load_parameter_button] + lora_ctrls
+                                 performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection,
+                                 overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive,
+                                 adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, base_model,
+                                 refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, image_seed,
+                                 generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls

             load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False)

From 6b9c0bd448a2ef0ddea16730e4c61e5eb5c1ef22 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Wed, 31 Jan 2024 01:35:51 +0100
Subject: [PATCH 30/52] refactor: code cleanup

---
 modules/meta_parser.py |  4 +---
 modules/metadata.py    | 22 +++-------------------
 2 files changed, 4 insertions(+), 22 deletions(-)

diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 6d4e542c..e4f3c813 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -67,8 +67,6 @@ def get_list(key: str, fallback: str | None, source_dict: dict, results: list, d
         results.append(gr.update())


-# TODO try get generic
-
 def get_float(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
     try:
         h = source_dict.get(key, source_dict.get(fallback, default))
@@ -153,7 +151,7 @@ def get_freeu(key: str, fallback: str | None, source_dict: dict, results: list,
         results.append(gr.update())


-def get_lora(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
+def get_lora(key: str, fallback: str | None, source_dict: dict, results: list):
     try:
         n, w = source_dict.get(key, source_dict.get(fallback)).split(' : ')
         w = float(w)
diff --git a/modules/metadata.py b/modules/metadata.py
index 34cc1923..aa0925d6 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -158,25 +158,9 @@ class A1111MetadataParser(MetadataParser):
                 self.fooocus_to_a1111['refiner_model_hash']: data['refiner_model_hash']
             }

-        if 'refiner_swap_method' in data:
-            generation_params |= {
-                self.fooocus_to_a1111['refiner_swap_method']: data['refiner_swap_method'],
-            }
-
-        # TODO unify with for and call with key
-
-        if 'freeu' in data:
-            generation_params |= {
-                self.fooocus_to_a1111['freeu']: data['freeu'],
-            }
-        if
'adaptive_cfg' in data: - generation_params |= { - self.fooocus_to_a1111['adaptive_cfg']: data['adaptive_cfg'], - } - if 'overwrite_switch' in data: - generation_params |= { - self.fooocus_to_a1111['overwrite_switch']: data['overwrite_switch'], - } + for key in ['adaptive_cfg', 'overwrite_switch', 'refiner_swap_method', 'freeu']: + if key in data: + generation_params[self.fooocus_to_a1111[key]] = data[key] generation_params |= { self.fooocus_to_a1111['lora_hashes']: lora_hashes_string, From 23ba05015af2cfe5f9efea7058ba88d592f62f99 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Fri, 2 Feb 2024 01:10:26 +0100 Subject: [PATCH 31/52] chore: use str as return type in calculate_sha256 --- modules/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/util.py b/modules/util.py index 663e7975..d1935848 100644 --- a/modules/util.py +++ b/modules/util.py @@ -182,7 +182,7 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None): return sorted(filenames, key=lambda x: -1 if os.sep in x else 1) -def calculate_sha256(filename, length=HASH_SHA256_LENGTH): +def calculate_sha256(filename, length=HASH_SHA256_LENGTH) -> str: hash_sha256 = sha256() blksize = 1024 * 1024 From bc9b6252215428f21671085630e891600c5b9ffe Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Fri, 2 Feb 2024 01:11:50 +0100 Subject: [PATCH 32/52] feat: add hash cache to metadata --- modules/metadata.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/modules/metadata.py b/modules/metadata.py index aa0925d6..60b1dd4c 100644 --- a/modules/metadata.py +++ b/modules/metadata.py @@ -7,13 +7,23 @@ from PIL import Image import modules.config import fooocus_version # import advanced_parameters -from modules.util import quote, unquote, extract_styles_from_prompt, is_json from modules.flags import MetadataScheme, Performance, Steps, lora_count_with_lcm +from modules.util import quote, unquote, extract_styles_from_prompt, is_json, calculate_sha256 re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)' re_param = re.compile(re_param_code) re_imagesize = re.compile(r"^(\d+)x(\d+)$") +hash_cache = {} + + +def get_sha256(filepath): + global hash_cache + + if filepath not in hash_cache: + hash_cache[filepath] = calculate_sha256(filepath) + + return hash_cache[filepath] class MetadataParser(ABC): @abstractmethod From ea6839be83affae1f80c2b683c1cc53499115186 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Fri, 2 Feb 2024 01:14:40 +0100 Subject: [PATCH 33/52] chore: code cleanup --- modules/metadata.py | 122 +------------------------------------------- 1 file changed, 1 insertion(+), 121 deletions(-) diff --git a/modules/metadata.py b/modules/metadata.py index 60b1dd4c..d80bad01 100644 --- a/modules/metadata.py +++ b/modules/metadata.py @@ -1,12 +1,10 @@ import json import re -from pathlib import Path from abc import ABC, abstractmethod +from pathlib import Path from PIL import Image import modules.config -import fooocus_version -# import advanced_parameters from modules.flags import MetadataScheme, Performance, Steps, lora_count_with_lcm from modules.util import quote, unquote, extract_styles_from_prompt, is_json, calculate_sha256 @@ -30,7 +28,6 @@ class MetadataParser(ABC): def parse_json(self, metadata: dict) -> dict: raise NotImplementedError - # TODO add data to parse @abstractmethod def parse_string(self, metadata: dict) -> str: raise NotImplementedError @@ -219,84 +216,6 @@ class FooocusMetadataParser(MetadataParser): metadata[li] = (label, key, 
value, show_in_log, copy_in_log) return json.dumps({k: v for _, k, v, _, _ in metadata}) - # metadata = { - # # prompt with wildcards - # 'prompt': raw_prompt, 'negative_prompt': raw_negative_prompt, - # # prompt with resolved wildcards - # 'real_prompt': task['log_positive_prompt'], 'real_negative_prompt': task['log_negative_prompt'], - # # prompt with resolved wildcards, styles and prompt expansion - # 'complete_prompt_positive': task['positive'], 'complete_prompt_negative': task['negative'], - # 'styles': str(raw_style_selections), - # 'seed': task['task_seed'], 'width': width, 'height': height, - # 'sampler': sampler_name, 'scheduler': scheduler_name, 'performance': performance_selection, - # 'steps': steps, 'refiner_switch': refiner_switch, 'sharpness': sharpness, 'cfg': cfg_scale, - # 'base_model': base_model_name, 'base_model_hash': base_model_hash, 'refiner_model': refiner_model_name, - # 'denoising_strength': denoising_strength, - # 'freeu': advanced_parameters.freeu_enabled, - # 'img2img': input_image_checkbox, - # 'prompt_expansion': task['expansion'] - # } - # - # if advanced_parameters.freeu_enabled: - # metadata |= { - # 'freeu_b1': advanced_parameters.freeu_b1, 'freeu_b2': advanced_parameters.freeu_b2, - # 'freeu_s1': advanced_parameters.freeu_s1, 'freeu_s2': advanced_parameters.freeu_s2 - # } - # - # if 'vary' in goals: - # metadata |= { - # 'uov_method': uov_method - # } - # - # if 'upscale' in goals: - # metadata |= { - # 'uov_method': uov_method, 'scale': f - # } - # - # if 'inpaint' in goals: - # if len(outpaint_selections) > 0: - # metadata |= { - # 'outpaint_selections': outpaint_selections - # } - # - # metadata |= { - # 'inpaint_additional_prompt': inpaint_additional_prompt, - # 'inpaint_mask_upload': advanced_parameters.inpaint_mask_upload_checkbox, - # 'invert_mask': advanced_parameters.invert_mask_checkbox, - # 'inpaint_disable_initial_latent': advanced_parameters.inpaint_disable_initial_latent, - # 'inpaint_engine': advanced_parameters.inpaint_engine, - # 'inpaint_strength': advanced_parameters.inpaint_strength, - # 'inpaint_respective_field': advanced_parameters.inpaint_respective_field, - # } - # - # if 'cn' in goals: - # metadata |= { - # 'canny_low_threshold': advanced_parameters.canny_low_threshold, - # 'canny_high_threshold': advanced_parameters.canny_high_threshold, - # } - # - # ip_list = {x: [] for x in flags.ip_list} - # cn_task_index = 1 - # for cn_type in ip_list: - # for cn_task in cn_tasks[cn_type]: - # cn_img, cn_stop, cn_weight = cn_task - # metadata |= { - # f'image_prompt_{cn_task_index}': { - # 'cn_type': cn_type, 'cn_stop': cn_stop, 'cn_weight': cn_weight, - # } - # } - # cn_task_index += 1 - # - # metadata |= { - # 'software': f'Fooocus v{fooocus_version.version}', - # } - # TODO add metadata_created_by - # if modules.config.metadata_created_by != '': - # metadata |= { - # 'created_by': modules.config.metadata_created_by - # } - # # return json.dumps(metadata, ensure_ascii=True) TODO check if possible - # return json.dumps(metadata, ensure_ascii=False) @staticmethod def replace_value_with_filename(key, value, filenames): @@ -319,12 +238,6 @@ def get_metadata_parser(metadata_scheme: MetadataScheme) -> MetadataParser: case _: raise NotImplementedError -# IGNORED_INFO_KEYS = { -# 'jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif', -# 'loop', 'background', 'timestamp', 'duration', 'progressive', 'progression', -# 'icc_profile', 'chromaticity', 'photoshop', -# } - def read_info_from_image(filepath) -> tuple[str | None, dict, 
MetadataScheme | None]: with Image.open(filepath) as image: @@ -346,37 +259,4 @@ def read_info_from_image(filepath) -> tuple[str | None, dict, MetadataScheme | N if metadata_scheme is None and isinstance(parameters, str): metadata_scheme = modules.metadata.MetadataScheme.A1111 - # TODO code cleanup - # if "exif" in items: - # exif_data = items["exif"] - # try: - # exif = piexif.load(exif_data) - # except OSError: - # # memory / exif was not valid so piexif tried to read from a file - # exif = None - # exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'') - # try: - # exif_comment = piexif.helper.UserComment.load(exif_comment) - # except ValueError: - # exif_comment = exif_comment.decode('utf8', errors="ignore") - # - # if exif_comment: - # items['exif comment'] = exif_comment - # parameters = exif_comment - - # for field in IGNORED_INFO_KEYS: - # items.pop(field, None) - - # if items.get("Software", None) == "NovelAI": - # try: - # json_info = json.loads(items["Comment"]) - # sampler = sd_samplers.samplers_map.get(json_info["sampler"], "Euler a") - # - # geninfo = f"""{items["Description"]} - # Negative prompt: {json_info["uc"]} - # Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337""" - # except Exception: - # errors.report("Error parsing NovelAI image generation parameters", - # exc_info=True) - return parameters, items, metadata_scheme From f4afc4af10a8d3468114e205087e45d63b4f1b0d Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Fri, 2 Feb 2024 01:19:21 +0100 Subject: [PATCH 34/52] feat: add method get_scheme to Metadata --- modules/metadata.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/modules/metadata.py b/modules/metadata.py index d80bad01..5c43bef0 100644 --- a/modules/metadata.py +++ b/modules/metadata.py @@ -23,7 +23,12 @@ def get_sha256(filepath): return hash_cache[filepath] + class MetadataParser(ABC): + @abstractmethod + def get_scheme(self) -> MetadataScheme: + raise NotImplementedError + @abstractmethod def parse_json(self, metadata: dict) -> dict: raise NotImplementedError @@ -34,6 +39,8 @@ class MetadataParser(ABC): class A1111MetadataParser(MetadataParser): + def get_scheme(self) -> MetadataScheme: + return MetadataScheme.A1111 fooocus_to_a1111 = { 'negative_prompt': 'Negative prompt', @@ -184,6 +191,8 @@ class A1111MetadataParser(MetadataParser): class FooocusMetadataParser(MetadataParser): + def get_scheme(self) -> MetadataScheme: + return MetadataScheme.FOOOCUS def parse_json(self, metadata: dict) -> dict: model_filenames = modules.config.model_filenames.copy() From 796cf3c78dbda0623fc54ee30f20084547f000af Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Fri, 2 Feb 2024 01:20:28 +0100 Subject: [PATCH 35/52] fix: align handling for scheme Fooocus by removing lcm lora from json parsing --- modules/metadata.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/metadata.py b/modules/metadata.py index 5c43bef0..868c2b48 100644 --- a/modules/metadata.py +++ b/modules/metadata.py @@ -197,6 +197,7 @@ class FooocusMetadataParser(MetadataParser): def parse_json(self, metadata: dict) -> dict: model_filenames = modules.config.model_filenames.copy() lora_filenames = modules.config.lora_filenames.copy() + lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora()) for key, value in metadata.items(): if value == '' or value == 'None': From e55870124b4ad898c0cca5f9ab74b2884cf881b0 Mon Sep 17 00:00:00 2001 
From: Manuel Schmid Date: Fri, 2 Feb 2024 01:25:35 +0100 Subject: [PATCH 36/52] refactor: add step before parsing to set data in parser - add constructor for MetadataSchema class - remove showable and copyable from log output - add functional hash cache (model hashing takes about 5 seconds, only required once per model, using hash lazy loading) --- modules/async_worker.py | 78 +++++++++-------------- modules/metadata.py | 127 ++++++++++++++++++++++++++------------ modules/private_logger.py | 22 +++---- 3 files changed, 126 insertions(+), 101 deletions(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index ff12f5ec..0de7daa4 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -42,9 +42,10 @@ def worker(): from modules.private_logger import log from extras.expansion import safe_str from modules.util import remove_empty_str, HWC3, resize_image, \ - get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, calculate_sha256 + get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate from modules.upscaler import perform_upscale - from modules.flags import Performance, MetadataScheme, lora_count + from modules.flags import Performance, lora_count + from modules.metadata import get_metadata_parser, MetadataScheme try: async_gradio_app = shared.gradio_root @@ -193,18 +194,6 @@ def worker(): modules.patch.negative_adm_scale = advanced_parameters.adm_scaler_negative = 1.0 modules.patch.adm_scaler_end = advanced_parameters.adm_scaler_end = 0.0 - # TODO move hashing to metadata mapper as this slows down the generation process - base_model_path = os.path.join(modules.config.path_checkpoints, base_model_name) - base_model_hash = calculate_sha256(base_model_path) - - refiner_model_path = os.path.join(modules.config.path_checkpoints, refiner_model_name) - refiner_model_hash = calculate_sha256(refiner_model_path) if refiner_model_name != 'None' else '' - - lora_hashes = [] - for (n, w) in loras: - lora_path = os.path.join(modules.config.path_loras, n) if n != 'None' else '' - lora_hashes.append(calculate_sha256(lora_path) if n != 'None' else '') - modules.patch.adaptive_cfg = advanced_parameters.adaptive_cfg print(f'[Parameters] Adaptive CFG = {modules.patch.adaptive_cfg}') @@ -777,61 +766,54 @@ def worker(): imgs = [inpaint_worker.current_task.post_process(x) for x in imgs] for x in imgs: - d = [('Prompt', 'prompt', task['log_positive_prompt'], True, True), - ('Full Positive Prompt', 'full_prompt', task['positive'], False, False), - ('Negative Prompt', 'negative_prompt', task['log_negative_prompt'], True, True), - ('Full Negative Prompt', 'full_negative_prompt', task['negative'], False, False), - ('Fooocus V2 Expansion', 'prompt_expansion', task['expansion'], True, True), - ('Styles', 'styles', str(raw_style_selections), True, True), - ('Performance', 'performance', performance_selection.value, True, True), - ('Steps', 'steps', steps, False, False), - ('Resolution', 'resolution', str((width, height)), True, True), - ('Guidance Scale', 'guidance_scale', guidance_scale, True, True), - ('Sharpness', 'sharpness', sharpness, True, True), + d = [('Prompt', 'prompt', task['log_positive_prompt']), + ('Negative Prompt', 'negative_prompt', task['log_negative_prompt']), + ('Fooocus V2 Expansion', 'prompt_expansion', task['expansion']), + ('Styles', 'styles', str(raw_style_selections)), + ('Performance', 'performance', performance_selection.value), + ('Resolution', 'resolution', str((width, height))), + ('Guidance 
Scale', 'guidance_scale', guidance_scale), + ('Sharpness', 'sharpness', sharpness), ('ADM Guidance', 'adm_guidance', str(( modules.patch.positive_adm_scale, modules.patch.negative_adm_scale, - modules.patch.adm_scaler_end)), True, True), - ('Base Model', 'base_model', base_model_name, True, True), - ('Base Model Hash', 'base_model_hash', base_model_hash, False, False), # TODO move to metadata and use cache - ('Refiner Model', 'refiner_model', refiner_model_name, True, True), - ('Refiner Model Hash', 'refiner_model_hash', refiner_model_hash, False, False), # TODO move to metadata and use cache - ('Refiner Switch', 'refiner_switch', refiner_switch, True, True)] + modules.patch.adm_scaler_end))), + ('Base Model', 'base_model', base_model_name), + ('Refiner Model', 'refiner_model', refiner_model_name), + ('Refiner Switch', 'refiner_switch', refiner_switch)] # TODO evaluate if this should always be added if refiner_model_name != 'None': if advanced_parameters.overwrite_switch > 0: - d.append(('Overwrite Switch', 'overwrite_switch', advanced_parameters.overwrite_switch, True, True)) + d.append(('Overwrite Switch', 'overwrite_switch', advanced_parameters.overwrite_switch)) if refiner_swap_method != flags.refiner_swap_method: - d.append(('Refiner Swap Method', 'refiner_swap_method', refiner_swap_method, True, True)) + d.append(('Refiner Swap Method', 'refiner_swap_method', refiner_swap_method)) if advanced_parameters.adaptive_cfg != modules.config.default_cfg_tsnr: - d.append(('CFG Mimicking from TSNR', 'adaptive_cfg', advanced_parameters.adaptive_cfg, True, True)) + d.append(('CFG Mimicking from TSNR', 'adaptive_cfg', advanced_parameters.adaptive_cfg)) - d.append(('Sampler', 'sampler', sampler_name, True, True)) - d.append(('Scheduler', 'scheduler', scheduler_name, True, True)) - d.append(('Seed', 'seed', task['task_seed'], True, True)) + d.append(('Sampler', 'sampler', sampler_name)) + d.append(('Scheduler', 'scheduler', scheduler_name)) + d.append(('Seed', 'seed', task['task_seed'])) if advanced_parameters.freeu_enabled: d.append(('FreeU', 'freeu', str(( advanced_parameters.freeu_b1, advanced_parameters.freeu_b2, advanced_parameters.freeu_s1, - advanced_parameters.freeu_s2)), True, True)) + advanced_parameters.freeu_s2)))) + + metadata_parser = None + if save_metadata_to_images: + metadata_parser = modules.metadata.get_metadata_parser(metadata_scheme) + metadata_parser.set_data(task['positive'], task['negative'], steps, base_model_name, refiner_model_name, loras) for li, (n, w) in enumerate(loras): if n != 'None': - d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}', True, True)) - d.append((f'LoRA {li + 1} Name', f'lora_name_{li + 1}', n, False, False)) - d.append((f'LoRA {li + 1} Weight', f'lora_weight_{li + 1}', w, False, False)) - # TODO move hashes to metadata handling - d.append((f'LoRA {li + 1} Hash', f'lora_hash_{li + 1}', lora_hashes[li], False, False)) + d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}')) - d.append(('Version', 'version', 'Fooocus v' + fooocus_version.version, True, True)) + d.append(('Version', 'version', 'Fooocus v' + fooocus_version.version)) - if modules.config.metadata_created_by != '': - d.append(('Created By', 'created_by', modules.config.metadata_created_by, False, False)) - - log(x, d, save_metadata_to_images, metadata_scheme) + log(x, d, metadata_parser) yield_result(async_task, imgs, do_not_show_finished_images=len(tasks) == 1) except ldm_patched.modules.model_management.InterruptProcessingException as e: diff --git 
a/modules/metadata.py b/modules/metadata.py index 868c2b48..6c03edc5 100644 --- a/modules/metadata.py +++ b/modules/metadata.py @@ -1,11 +1,12 @@ import json +import os import re from abc import ABC, abstractmethod from pathlib import Path from PIL import Image import modules.config -from modules.flags import MetadataScheme, Performance, Steps, lora_count_with_lcm +from modules.flags import MetadataScheme, Performance, Steps from modules.util import quote, unquote, extract_styles_from_prompt, is_json, calculate_sha256 re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)' @@ -25,6 +26,16 @@ def get_sha256(filepath): class MetadataParser(ABC): + def __init__(self): + self.full_prompt: str = '' + self.full_negative_prompt: str = '' + self.steps: int = 30 + self.base_model_name: str = '' + self.base_model_hash: str = '' + self.refiner_model_name: str = '' + self.refiner_model_hash: str = '' + self.loras: list = [] + @abstractmethod def get_scheme(self) -> MetadataScheme: raise NotImplementedError @@ -37,6 +48,27 @@ class MetadataParser(ABC): def parse_string(self, metadata: dict) -> str: raise NotImplementedError + def set_data(self, full_prompt, full_negative_prompt, steps, base_model_name, refiner_model_name, loras): + self.full_prompt = full_prompt + self.full_negative_prompt = full_negative_prompt + self.steps = steps + self.base_model_name = Path(base_model_name).stem + + base_model_path = os.path.join(modules.config.path_checkpoints, base_model_name) + self.base_model_hash = get_sha256(base_model_path) + + if refiner_model_name not in ['', 'None']: + self.refiner_model_name = Path(refiner_model_name).stem + refiner_model_path = os.path.join(modules.config.path_checkpoints, refiner_model_name) + self.refiner_model_hash = get_sha256(refiner_model_path) + + self.loras = [] + for (lora_name, lora_weight) in loras: + if lora_name != 'None': + lora_path = os.path.join(modules.config.path_loras, lora_name) + lora_hash = get_sha256(lora_path) + self.loras.append((Path(lora_name).stem, lora_weight, lora_hash)) + class A1111MetadataParser(MetadataParser): def get_scheme(self) -> MetadataScheme: @@ -63,6 +95,7 @@ class A1111MetadataParser(MetadataParser): 'refiner_model_hash': 'Refiner hash', 'lora_hashes': 'Lora hashes', 'lora_weights': 'Lora weights', + 'created_by': 'User', 'version': 'Version' } @@ -127,65 +160,64 @@ class A1111MetadataParser(MetadataParser): lora_filenames = modules.config.lora_filenames.copy() lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora()) for li, lora in enumerate(data['lora_hashes'].split(', ')): - name, _, weight = lora.split(': ') + lora_name, lora_hash, lora_weight = lora.split(': ') for filename in lora_filenames: path = Path(filename) - if name == path.stem: - data[f'lora_combined_{li + 1}'] = f'{filename} : {weight}' + if lora_name == path.stem: + data[f'lora_combined_{li + 1}'] = f'{filename} : {lora_weight}' break return data def parse_string(self, metadata: dict) -> str: - data = {k: v for _, k, v, _, _ in metadata} + data = {k: v for _, k, v in metadata} - width, heigth = eval(data['resolution']) - - lora_hashes = [] - for index in range(lora_count_with_lcm): - key = f'lora_name_{index + 1}' - if key in data: - lora_name = Path(data[f'lora_name_{index + 1}']).stem - lora_weight = data[f'lora_weight_{index + 1}'] - lora_hash = data[f'lora_hash_{index + 1}'] - # workaround for Fooocus not knowing LoRA name in LoRA metadata - lora_hashes.append(f'{lora_name}: {lora_hash}: {lora_weight}') - lora_hashes_string = ', 
'.join(lora_hashes) + width, height = eval(data['resolution']) generation_params = { self.fooocus_to_a1111['performance']: data['performance'], - self.fooocus_to_a1111['steps']: data['steps'], + self.fooocus_to_a1111['steps']: self.steps, self.fooocus_to_a1111['sampler']: data['sampler'], self.fooocus_to_a1111['seed']: data['seed'], - self.fooocus_to_a1111['resolution']: f'{width}x{heigth}', + self.fooocus_to_a1111['resolution']: f'{width}x{height}', self.fooocus_to_a1111['guidance_scale']: data['guidance_scale'], self.fooocus_to_a1111['sharpness']: data['sharpness'], self.fooocus_to_a1111['adm_guidance']: data['adm_guidance'], - # TODO load model by name / hash self.fooocus_to_a1111['base_model']: Path(data['base_model']).stem, - self.fooocus_to_a1111['base_model_hash']: data['base_model_hash'] + self.fooocus_to_a1111['base_model_hash']: self.base_model_hash, } - if 'refiner_model' in data and data['refiner_model'] != 'None' and 'refiner_model_hash' in data: + # TODO evaluate if this should always be added + if self.refiner_model_name not in ['', 'None']: generation_params |= { - self.fooocus_to_a1111['refiner_model']: Path(data['refiner_model']).stem, - self.fooocus_to_a1111['refiner_model_hash']: data['refiner_model_hash'] + self.fooocus_to_a1111['refiner_model']: self.refiner_model_name, + self.fooocus_to_a1111['refiner_model_hash']: self.refiner_model_hash } for key in ['adaptive_cfg', 'overwrite_switch', 'refiner_swap_method', 'freeu']: if key in data: generation_params[self.fooocus_to_a1111[key]] = data[key] + lora_hashes = [] + for index, (lora_name, lora_weight, lora_hash) in enumerate(self.loras): + # workaround for Fooocus not knowing LoRA name in LoRA metadata + lora_hashes.append(f'{lora_name}: {lora_hash}: {lora_weight}') + lora_hashes_string = ', '.join(lora_hashes) + generation_params |= { self.fooocus_to_a1111['lora_hashes']: lora_hashes_string, self.fooocus_to_a1111['version']: data['version'] } + if modules.config.metadata_created_by != '': + generation_params[self.fooocus_to_a1111['created_by']] = modules.config.metadata_created_by + generation_params_text = ", ".join( [k if k == v else f'{k}: {quote(v)}' for k, v in generation_params.items() if v is not None]) # TODO check if multiline positive prompt is correctly processed - positive_prompt_resolved = ', '.join(data['full_prompt']) #TODO add loras to positive prompt if even possible - negative_prompt_resolved = ', '.join(data['full_negative_prompt']) #TODO add loras to negative prompt if even possible + positive_prompt_resolved = ', '.join(self.full_prompt) # TODO add loras to positive prompt if even possible + negative_prompt_resolved = ', '.join( + self.full_negative_prompt) # TODO add loras to negative prompt if even possible negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else "" return f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip() @@ -200,11 +232,11 @@ class FooocusMetadataParser(MetadataParser): lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora()) for key, value in metadata.items(): - if value == '' or value == 'None': + if value in ['', 'None']: continue if key in ['base_model', 'refiner_model']: metadata[key] = self.replace_value_with_filename(key, value, model_filenames) - elif key.startswith(('lora_combined_', 'lora_name_')): + elif key.startswith('lora_combined_'): metadata[key] = self.replace_value_with_filename(key, value, lora_filenames) else: continue @@ -212,20 +244,33 @@ class 
FooocusMetadataParser(MetadataParser): return metadata def parse_string(self, metadata: list) -> str: - # remove model folder paths from metadata - for li, (label, key, value, show_in_log, copy_in_log) in enumerate(metadata): - if value == '' or value == 'None': - continue - if key in ['base_model', 'refiner_model'] or key.startswith(('lora_combined_', 'lora_name_')): - if key.startswith('lora_combined_'): - name, weight = value.split(' : ') - name = Path(name).stem - value = f'{name} : {weight}' - else: - value = Path(value).stem - metadata[li] = (label, key, value, show_in_log, copy_in_log) + for li, (label, key, value) in enumerate(metadata): + # remove model folder paths from metadata + if key.startswith('lora_combined_'): + name, weight = value.split(' : ') + name = Path(name).stem + value = f'{name} : {weight}' + metadata[li] = (label, key, value) - return json.dumps({k: v for _, k, v, _, _ in metadata}) + res = {k: v for _, k, v in metadata} + + res['full_prompt'] = self.full_prompt + res['full_negative_prompt'] = self.full_negative_prompt + res['steps'] = self.steps + res['base_model'] = self.base_model_name + res['base_model_hash'] = self.base_model_hash + + # TODO evaluate if this should always be added + if self.refiner_model_name not in ['', 'None']: + res['refiner_model'] = self.refiner_model_name + res['refiner_model_hash'] = self.refiner_model_hash + + res['loras'] = self.loras + + if modules.config.metadata_created_by != '': + res['created_by'] = modules.config.metadata_created_by + + return json.dumps(res) @staticmethod def replace_value_with_filename(key, value, filenames): diff --git a/modules/private_logger.py b/modules/private_logger.py index 1afcaa55..61264da0 100644 --- a/modules/private_logger.py +++ b/modules/private_logger.py @@ -7,7 +7,7 @@ import urllib.parse from PIL import Image from PIL.PngImagePlugin import PngInfo from modules.util import generate_temp_filename -from modules.metadata import MetadataScheme +from modules.metadata import MetadataParser log_cache = {} @@ -20,22 +20,21 @@ def get_current_html_path(): return html_name -def log(img, metadata, save_metadata_to_image=False, metadata_scheme: MetadataScheme = MetadataScheme.FOOOCUS): +def log(img, metadata, metadata_parser: MetadataParser | None = None): if args_manager.args.disable_image_log: return date_string, local_temp_filename, only_name = generate_temp_filename(folder=modules.config.path_outputs, extension='png') os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True) - if save_metadata_to_image: - metadata_parser = modules.metadata.get_metadata_parser(metadata_scheme) + pnginfo = None + if metadata_parser is not None: parsed_parameters = metadata_parser.parse_string(metadata) pnginfo = PngInfo() pnginfo.add_text('parameters', parsed_parameters) - pnginfo.add_text('fooocus_scheme', metadata_scheme.value) - else: - pnginfo = None + pnginfo.add_text('fooocus_scheme', metadata_parser.get_scheme().value) + Image.fromarray(img).save(local_temp_filename, pnginfo=pnginfo) html_name = os.path.join(os.path.dirname(local_temp_filename), 'log.html') @@ -98,13 +97,12 @@ def log(img, metadata, save_metadata_to_image=False, metadata_scheme: MetadataSc item = f"
...</div>"  [log.html markup in this hunk was stripped during extraction; '...' and the tags shown are approximate placeholders]
     item += f"...{only_name}..."
-        for key, value in dic:
-            value_txt = str(value).replace('\n', ' <br/> ')
-            item += f"...{key}...{value_txt}...\n"
+        for label, key, value, showable, copyable in metadata:
+            if showable:
+                value_txt = str(value).replace('\n', ' <br/> ')
+                item += f"...{label}...{value_txt}...\n"
         item += "</table>"
-    js_txt = urllib.parse.quote(json.dumps({k: v for k, v in dic}, indent=0), safe='')
+    js_txt = urllib.parse.quote(json.dumps({k: v for _, k, v, _, copyable in metadata if copyable}, indent=0), safe='')
     item += f"...{js_txt}..."
     item += "...\n"
     item += f"..."
     item += "..."
\n" item += f"" item += "" From f7e24bdb1fae1a4047f54c94ebc98da42df950b9 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Fri, 2 Feb 2024 01:26:20 +0100 Subject: [PATCH 37/52] feat: sort metadata attributes before writing to image --- modules/metadata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/metadata.py b/modules/metadata.py index 6c03edc5..25a4a332 100644 --- a/modules/metadata.py +++ b/modules/metadata.py @@ -213,7 +213,7 @@ class A1111MetadataParser(MetadataParser): generation_params[self.fooocus_to_a1111['created_by']] = modules.config.metadata_created_by generation_params_text = ", ".join( - [k if k == v else f'{k}: {quote(v)}' for k, v in generation_params.items() if v is not None]) + [k if k == v else f'{k}: {quote(v)}' for k, v in dict(sorted(generation_params.items())).items() if v is not None]) # TODO check if multiline positive prompt is correctly processed positive_prompt_resolved = ', '.join(self.full_prompt) # TODO add loras to positive prompt if even possible negative_prompt_resolved = ', '.join( @@ -270,7 +270,7 @@ class FooocusMetadataParser(MetadataParser): if modules.config.metadata_created_by != '': res['created_by'] = modules.config.metadata_created_by - return json.dumps(res) + return json.dumps(dict(sorted(res.items()))) @staticmethod def replace_value_with_filename(key, value, filenames): From 934bdb180a97d6689d047bf13fc9f70f6d6913d6 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Fri, 2 Feb 2024 01:35:49 +0100 Subject: [PATCH 38/52] feat: add translations and hint for image prompt parameters --- language/en.json | 9 ++++++++- webui.py | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/language/en.json b/language/en.json index fd40ca2f..0617ae1e 100644 --- a/language/en.json +++ b/language/en.json @@ -368,5 +368,12 @@ "* Powered by Fooocus Inpaint Engine (beta)": "* Powered by Fooocus Inpaint Engine (beta)", "Fooocus Enhance": "Fooocus Enhance", "Fooocus Cinematic": "Fooocus Cinematic", - "Fooocus Sharp": "Fooocus Sharp" + "Fooocus Sharp": "Fooocus Sharp", + "Drag any image generated by Fooocus here": "Drag any image generated by Fooocus here", + "Metadata": "Metadata", + "Apply Metadata": "Apply Metadata", + "Metadata Scheme": "Metadata Scheme", + "Image Prompt parameters are not included. Use A1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use A1111 for compatibility with Civitai.", + "Fooocus (json)": "Fooocus (json)", + "A1111 (plain text)": "A1111 (plain text)" } \ No newline at end of file diff --git a/webui.py b/webui.py index 51193fac..cb641b4e 100644 --- a/webui.py +++ b/webui.py @@ -412,7 +412,7 @@ with shared.gradio_root: save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images, info='Adds parameters to generated images allowing manual regeneration.') metadata_scheme = gr.Radio(label='Metadata Scheme', choices=flags.metadata_scheme, value=modules.config.default_metadata_scheme, - info='Use A1111 for compatibility with Civitai.', + info='Image Prompt parameters are not included. 
Use A1111 for compatibility with Civitai.', visible=modules.config.default_save_metadata_to_images) save_metadata_to_images.change(lambda x: gr.update(visible=x), inputs=[save_metadata_to_images], outputs=[metadata_scheme], From b438f7b43f52cad5fb540174c7fdfba0007c4d50 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Fri, 2 Feb 2024 01:45:08 +0100 Subject: [PATCH 39/52] chore: check and remove ToDo's --- modules/flags.py | 1 - modules/metadata.py | 8 +++----- modules/util.py | 3 --- webui.py | 1 - 4 files changed, 3 insertions(+), 10 deletions(-) diff --git a/modules/flags.py b/modules/flags.py index cbfd904d..db26394e 100644 --- a/modules/flags.py +++ b/modules/flags.py @@ -51,7 +51,6 @@ class MetadataScheme(Enum): A1111 = 'a1111' -# TODO use translation here metadata_scheme = [ ('Fooocus (json)', MetadataScheme.FOOOCUS.value), ('A1111 (plain text)', MetadataScheme.A1111.value), diff --git a/modules/metadata.py b/modules/metadata.py index 25a4a332..aba8822d 100644 --- a/modules/metadata.py +++ b/modules/metadata.py @@ -3,6 +3,7 @@ import os import re from abc import ABC, abstractmethod from pathlib import Path + from PIL import Image import modules.config @@ -156,7 +157,6 @@ class A1111MetadataParser(MetadataParser): break if 'lora_hashes' in data: - # TODO optimize by using hash for matching. Problem is speed of creating the hash per model, even on startup lora_filenames = modules.config.lora_filenames.copy() lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora()) for li, lora in enumerate(data['lora_hashes'].split(', ')): @@ -214,10 +214,8 @@ class A1111MetadataParser(MetadataParser): generation_params_text = ", ".join( [k if k == v else f'{k}: {quote(v)}' for k, v in dict(sorted(generation_params.items())).items() if v is not None]) - # TODO check if multiline positive prompt is correctly processed - positive_prompt_resolved = ', '.join(self.full_prompt) # TODO add loras to positive prompt if even possible - negative_prompt_resolved = ', '.join( - self.full_negative_prompt) # TODO add loras to negative prompt if even possible + positive_prompt_resolved = ', '.join(self.full_prompt) + negative_prompt_resolved = ', '.join(self.full_negative_prompt) negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else "" return f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip() diff --git a/modules/util.py b/modules/util.py index d1935848..15594ad6 100644 --- a/modules/util.py +++ b/modules/util.py @@ -300,7 +300,6 @@ def extract_styles_from_prompt(prompt, negative_prompt): found_style = style prompt = new_prompt negative_prompt = new_neg_prompt - # TODO this is a bit hacky tbh but works perfectly fine, check if all conditions are needed if real_prompt == '' and new_real_prompt != '' and new_real_prompt != prompt: real_prompt = new_real_prompt break @@ -312,8 +311,6 @@ def extract_styles_from_prompt(prompt, negative_prompt): extracted.append(found_style.name) # add prompt expansion if not all styles could be resolved - # TODO check if it's better to not add fooocus_expansion but just return prompt incl. 
fooocus_expansion words - # TODO evaluate if adding prompt expansion to metadata is a good idea if prompt != '': if prompt != real_prompt: extracted.append(modules.sdxl_styles.fooocus_expansion) diff --git a/webui.py b/webui.py index cb641b4e..6469ea68 100644 --- a/webui.py +++ b/webui.py @@ -598,7 +598,6 @@ with shared.gradio_root: def trigger_metadata_import(filepath, state_is_generating): parameters, items, metadata_scheme = modules.metadata.read_info_from_image(filepath) - # TODO check what happens if metadata_scheme is empty and A1111 string if parameters is None: print('Could not find metadata in the image!') pass From f745d40687de67bbe2b9c28619ca929d344b1b07 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Fri, 2 Feb 2024 01:55:32 +0100 Subject: [PATCH 40/52] refactor: merge metadata.py into meta_parser.py --- modules/async_worker.py | 4 +- modules/meta_parser.py | 315 +++++++++++++++++++++++++++++++++++++- modules/metadata.py | 315 -------------------------------------- modules/private_logger.py | 2 +- webui.py | 7 +- 5 files changed, 320 insertions(+), 323 deletions(-) delete mode 100644 modules/metadata.py diff --git a/modules/async_worker.py b/modules/async_worker.py index 0de7daa4..c3f9be36 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -45,7 +45,7 @@ def worker(): get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate from modules.upscaler import perform_upscale from modules.flags import Performance, lora_count - from modules.metadata import get_metadata_parser, MetadataScheme + from modules.meta_parser import get_metadata_parser, MetadataScheme try: async_gradio_app = shared.gradio_root @@ -804,7 +804,7 @@ def worker(): metadata_parser = None if save_metadata_to_images: - metadata_parser = modules.metadata.get_metadata_parser(metadata_scheme) + metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme) metadata_parser.set_data(task['positive'], task['negative'], steps, base_model_name, refiner_model_name, loras) for li, (n, w) in enumerate(loras): diff --git a/modules/meta_parser.py b/modules/meta_parser.py index e4f3c813..1e12b6cd 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -1,9 +1,23 @@ import json +import os +import re +from abc import ABC, abstractmethod +from pathlib import Path import gradio as gr +from PIL import Image import modules.config -from modules.flags import lora_count, Steps +import modules.config +from modules.flags import MetadataScheme, Performance, Steps +from modules.flags import lora_count +from modules.util import quote, unquote, extract_styles_from_prompt, is_json, calculate_sha256 + +re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)' +re_param = re.compile(re_param_code) +re_imagesize = re.compile(r"^(\d+)x(\d+)$") + +hash_cache = {} def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool): @@ -160,3 +174,302 @@ def get_lora(key: str, fallback: str | None, source_dict: dict, results: list): except: results.append('None') results.append(1) + + +def get_sha256(filepath): + global hash_cache + + if filepath not in hash_cache: + hash_cache[filepath] = calculate_sha256(filepath) + + return hash_cache[filepath] + + +class MetadataParser(ABC): + def __init__(self): + self.full_prompt: str = '' + self.full_negative_prompt: str = '' + self.steps: int = 30 + self.base_model_name: str = '' + self.base_model_hash: str = '' + self.refiner_model_name: str = '' + self.refiner_model_hash: str = '' + self.loras: list = [] + 
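+    # Editorial note, not part of this patch: a freshly constructed parser is inert;
+    # callers must run set_data() below first, which resolves model/LoRA paths,
+    # fills the fields above and fetches SHA-256 digests through the module-level
+    # hash_cache (so repeated tasks with the same files skip re-hashing), e.g.:
+    #     parser = FooocusMetadataParser()
+    #     parser.set_data(full_prompt, full_negative_prompt, steps,
+    #                     base_model_name, refiner_model_name, loras)
+    #     text = parser.parse_string(metadata)  # metadata: (label, key, value) triples
+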
+ @abstractmethod + def get_scheme(self) -> MetadataScheme: + raise NotImplementedError + + @abstractmethod + def parse_json(self, metadata: dict) -> dict: + raise NotImplementedError + + @abstractmethod + def parse_string(self, metadata: dict) -> str: + raise NotImplementedError + + def set_data(self, full_prompt, full_negative_prompt, steps, base_model_name, refiner_model_name, loras): + self.full_prompt = full_prompt + self.full_negative_prompt = full_negative_prompt + self.steps = steps + self.base_model_name = Path(base_model_name).stem + + base_model_path = os.path.join(modules.config.path_checkpoints, base_model_name) + self.base_model_hash = get_sha256(base_model_path) + + if refiner_model_name not in ['', 'None']: + self.refiner_model_name = Path(refiner_model_name).stem + refiner_model_path = os.path.join(modules.config.path_checkpoints, refiner_model_name) + self.refiner_model_hash = get_sha256(refiner_model_path) + + self.loras = [] + for (lora_name, lora_weight) in loras: + if lora_name != 'None': + lora_path = os.path.join(modules.config.path_loras, lora_name) + lora_hash = get_sha256(lora_path) + self.loras.append((Path(lora_name).stem, lora_weight, lora_hash)) + + +class A1111MetadataParser(MetadataParser): + def get_scheme(self) -> MetadataScheme: + return MetadataScheme.A1111 + + fooocus_to_a1111 = { + 'negative_prompt': 'Negative prompt', + 'styles': 'Styles', + 'performance': 'Performance', + 'steps': 'Steps', + 'sampler': 'Sampler', + 'guidance_scale': 'CFG scale', + 'seed': 'Seed', + 'resolution': 'Size', + 'sharpness': 'Sharpness', + 'adm_guidance': 'ADM Guidance', + 'refiner_swap_method': 'Refiner Swap Method', + 'adaptive_cfg': 'Adaptive CFG', + 'overwrite_switch': 'Overwrite Switch', + 'freeu': 'FreeU', + 'base_model': 'Model', + 'base_model_hash': 'Model hash', + 'refiner_model': 'Refiner', + 'refiner_model_hash': 'Refiner hash', + 'lora_hashes': 'Lora hashes', + 'lora_weights': 'Lora weights', + 'created_by': 'User', + 'version': 'Version' + } + + def parse_json(self, metadata: str) -> dict: + prompt = '' + negative_prompt = '' + + done_with_prompt = False + + *lines, lastline = metadata.strip().split("\n") + if len(re_param.findall(lastline)) < 3: + lines.append(lastline) + lastline = '' + + for line in lines: + line = line.strip() + if line.startswith(f"{self.fooocus_to_a1111['negative_prompt']}:"): + done_with_prompt = True + line = line[len(f"{self.fooocus_to_a1111['negative_prompt']}:"):].strip() + if done_with_prompt: + negative_prompt += ('' if negative_prompt == '' else "\n") + line + else: + prompt += ('' if prompt == '' else "\n") + line + + found_styles, prompt, negative_prompt = extract_styles_from_prompt(prompt, negative_prompt) + + data = { + 'prompt': prompt, + 'negative_prompt': negative_prompt, + 'styles': str(found_styles) + } + + for k, v in re_param.findall(lastline): + try: + if v[0] == '"' and v[-1] == '"': + v = unquote(v) + + m = re_imagesize.match(v) + if m is not None: + data[f'resolution'] = str((m.group(1), m.group(2))) + else: + data[list(self.fooocus_to_a1111.keys())[list(self.fooocus_to_a1111.values()).index(k)]] = v + except Exception: + print(f"Error parsing \"{k}: {v}\"") + + # try to load performance based on steps, fallback for direct A1111 imports + if 'steps' in data and 'performance' not in data: + try: + data['performance'] = Performance[Steps(int(data['steps'])).name].value + except Exception: + pass + + if 'base_model' in data: + for filename in modules.config.model_filenames: + path = Path(filename) + if 
data['base_model'] == path.stem: + data['base_model'] = filename + break + + if 'lora_hashes' in data: + lora_filenames = modules.config.lora_filenames.copy() + lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora()) + for li, lora in enumerate(data['lora_hashes'].split(', ')): + lora_name, lora_hash, lora_weight = lora.split(': ') + for filename in lora_filenames: + path = Path(filename) + if lora_name == path.stem: + data[f'lora_combined_{li + 1}'] = f'{filename} : {lora_weight}' + break + + return data + + def parse_string(self, metadata: dict) -> str: + data = {k: v for _, k, v in metadata} + + width, height = eval(data['resolution']) + + generation_params = { + self.fooocus_to_a1111['performance']: data['performance'], + self.fooocus_to_a1111['steps']: self.steps, + self.fooocus_to_a1111['sampler']: data['sampler'], + self.fooocus_to_a1111['seed']: data['seed'], + self.fooocus_to_a1111['resolution']: f'{width}x{height}', + self.fooocus_to_a1111['guidance_scale']: data['guidance_scale'], + self.fooocus_to_a1111['sharpness']: data['sharpness'], + self.fooocus_to_a1111['adm_guidance']: data['adm_guidance'], + self.fooocus_to_a1111['base_model']: Path(data['base_model']).stem, + self.fooocus_to_a1111['base_model_hash']: self.base_model_hash, + } + + # TODO evaluate if this should always be added + if self.refiner_model_name not in ['', 'None']: + generation_params |= { + self.fooocus_to_a1111['refiner_model']: self.refiner_model_name, + self.fooocus_to_a1111['refiner_model_hash']: self.refiner_model_hash + } + + for key in ['adaptive_cfg', 'overwrite_switch', 'refiner_swap_method', 'freeu']: + if key in data: + generation_params[self.fooocus_to_a1111[key]] = data[key] + + lora_hashes = [] + for index, (lora_name, lora_weight, lora_hash) in enumerate(self.loras): + # workaround for Fooocus not knowing LoRA name in LoRA metadata + lora_hashes.append(f'{lora_name}: {lora_hash}: {lora_weight}') + lora_hashes_string = ', '.join(lora_hashes) + + generation_params |= { + self.fooocus_to_a1111['lora_hashes']: lora_hashes_string, + self.fooocus_to_a1111['version']: data['version'] + } + + if modules.config.metadata_created_by != '': + generation_params[self.fooocus_to_a1111['created_by']] = modules.config.metadata_created_by + + generation_params_text = ", ".join( + [k if k == v else f'{k}: {quote(v)}' for k, v in dict(sorted(generation_params.items())).items() if + v is not None]) + positive_prompt_resolved = ', '.join(self.full_prompt) + negative_prompt_resolved = ', '.join(self.full_negative_prompt) + negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else "" + return f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip() + + +class FooocusMetadataParser(MetadataParser): + def get_scheme(self) -> MetadataScheme: + return MetadataScheme.FOOOCUS + + def parse_json(self, metadata: dict) -> dict: + model_filenames = modules.config.model_filenames.copy() + lora_filenames = modules.config.lora_filenames.copy() + lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora()) + + for key, value in metadata.items(): + if value in ['', 'None']: + continue + if key in ['base_model', 'refiner_model']: + metadata[key] = self.replace_value_with_filename(key, value, model_filenames) + elif key.startswith('lora_combined_'): + metadata[key] = self.replace_value_with_filename(key, value, lora_filenames) + else: + continue + + return metadata + + def parse_string(self, metadata: list) -> str: + for li, (label, key, value) 
in enumerate(metadata):
+            # remove model folder paths from metadata
+            if key.startswith('lora_combined_'):
+                name, weight = value.split(' : ')
+                name = Path(name).stem
+                value = f'{name} : {weight}'
+                metadata[li] = (label, key, value)
+
+        res = {k: v for _, k, v in metadata}
+
+        res['full_prompt'] = self.full_prompt
+        res['full_negative_prompt'] = self.full_negative_prompt
+        res['steps'] = self.steps
+        res['base_model'] = self.base_model_name
+        res['base_model_hash'] = self.base_model_hash
+
+        # TODO evaluate if this should always be added
+        if self.refiner_model_name not in ['', 'None']:
+            res['refiner_model'] = self.refiner_model_name
+            res['refiner_model_hash'] = self.refiner_model_hash
+
+        res['loras'] = self.loras
+
+        if modules.config.metadata_created_by != '':
+            res['created_by'] = modules.config.metadata_created_by
+
+        return json.dumps(dict(sorted(res.items())))
+
+    @staticmethod
+    def replace_value_with_filename(key, value, filenames):
+        for filename in filenames:
+            path = Path(filename)
+            if key.startswith('lora_combined_'):
+                name, weight = value.split(' : ')
+                if name == path.stem:
+                    return f'{filename} : {weight}'
+            elif value == path.stem:
+                return filename
+
+
+def get_metadata_parser(metadata_scheme: MetadataScheme) -> MetadataParser:
+    match metadata_scheme:
+        case MetadataScheme.FOOOCUS:
+            return FooocusMetadataParser()
+        case MetadataScheme.A1111:
+            return A1111MetadataParser()
+        case _:
+            raise NotImplementedError
+
+
+def read_info_from_image(filepath) -> tuple[str | None, dict, MetadataScheme | None]:
+    with Image.open(filepath) as image:
+        items = (image.info or {}).copy()
+
+    parameters = items.pop('parameters', None)
+    if parameters is not None and is_json(parameters):
+        parameters = json.loads(parameters)
+
+    try:
+        metadata_scheme = MetadataScheme(items.pop('fooocus_scheme', None))
+    except Exception:
+        metadata_scheme = None
+
+    # broad fallback
+    if metadata_scheme is None and isinstance(parameters, dict):
+        metadata_scheme = MetadataScheme.FOOOCUS
+
+    if metadata_scheme is None and isinstance(parameters, str):
+        metadata_scheme = MetadataScheme.A1111
+
+    return parameters, items, metadata_scheme
diff --git a/modules/metadata.py b/modules/metadata.py
deleted file mode 100644
index aba8822d..00000000
--- a/modules/metadata.py
+++ /dev/null
@@ -1,315 +0,0 @@
-import json
-import os
-import re
-from abc import ABC, abstractmethod
-from pathlib import Path
-
-from PIL import Image
-
-import modules.config
-from modules.flags import MetadataScheme, Performance, Steps
-from modules.util import quote, unquote, extract_styles_from_prompt, is_json, calculate_sha256
-
-re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)'
-re_param = re.compile(re_param_code)
-re_imagesize = re.compile(r"^(\d+)x(\d+)$")
-
-hash_cache = {}
-
-
-def get_sha256(filepath):
-    global hash_cache
-
-    if filepath not in hash_cache:
-        hash_cache[filepath] = calculate_sha256(filepath)
-
-    return hash_cache[filepath]
-
-
-class MetadataParser(ABC):
-    def __init__(self):
-        self.full_prompt: str = ''
-        self.full_negative_prompt: str = ''
-        self.steps: int = 30
-        self.base_model_name: str = ''
-        self.base_model_hash: str = ''
-        self.refiner_model_name: str = ''
-        self.refiner_model_hash: str = ''
-        self.loras: list = []
-
-    @abstractmethod
-    def get_scheme(self) -> MetadataScheme:
-        raise NotImplementedError
-
-    @abstractmethod
-    def parse_json(self, metadata: dict) -> dict:
-        raise NotImplementedError
-
-    @abstractmethod
-    def parse_string(self, metadata: dict) -> str:
-        raise NotImplementedError
-
-    def set_data(self, full_prompt, full_negative_prompt, steps, base_model_name, refiner_model_name, loras):
-        self.full_prompt = full_prompt
-        self.full_negative_prompt = full_negative_prompt
-        self.steps = steps
-        self.base_model_name = Path(base_model_name).stem
-
-        base_model_path = os.path.join(modules.config.path_checkpoints, base_model_name)
-        self.base_model_hash = get_sha256(base_model_path)
-
-        if refiner_model_name not in ['', 'None']:
-            self.refiner_model_name = Path(refiner_model_name).stem
-            refiner_model_path = os.path.join(modules.config.path_checkpoints, refiner_model_name)
-            self.refiner_model_hash = get_sha256(refiner_model_path)
-
-        self.loras = []
-        for (lora_name, lora_weight) in loras:
-            if lora_name != 'None':
-                lora_path = os.path.join(modules.config.path_loras, lora_name)
-                lora_hash = get_sha256(lora_path)
-                self.loras.append((Path(lora_name).stem, lora_weight, lora_hash))
-
-
-class A1111MetadataParser(MetadataParser):
-    def get_scheme(self) -> MetadataScheme:
-        return MetadataScheme.A1111
-
-    fooocus_to_a1111 = {
-        'negative_prompt': 'Negative prompt',
-        'styles': 'Styles',
-        'performance': 'Performance',
-        'steps': 'Steps',
-        'sampler': 'Sampler',
-        'guidance_scale': 'CFG scale',
-        'seed': 'Seed',
-        'resolution': 'Size',
-        'sharpness': 'Sharpness',
-        'adm_guidance': 'ADM Guidance',
-        'refiner_swap_method': 'Refiner Swap Method',
-        'adaptive_cfg': 'Adaptive CFG',
-        'overwrite_switch': 'Overwrite Switch',
-        'freeu': 'FreeU',
-        'base_model': 'Model',
-        'base_model_hash': 'Model hash',
-        'refiner_model': 'Refiner',
-        'refiner_model_hash': 'Refiner hash',
-        'lora_hashes': 'Lora hashes',
-        'lora_weights': 'Lora weights',
-        'created_by': 'User',
-        'version': 'Version'
-    }
-
-    def parse_json(self, metadata: str) -> dict:
-        prompt = ''
-        negative_prompt = ''
-
-        done_with_prompt = False
-
-        *lines, lastline = metadata.strip().split("\n")
-        if len(re_param.findall(lastline)) < 3:
-            lines.append(lastline)
-            lastline = ''
-
-        for line in lines:
-            line = line.strip()
-            if line.startswith(f"{self.fooocus_to_a1111['negative_prompt']}:"):
-                done_with_prompt = True
-                line = line[len(f"{self.fooocus_to_a1111['negative_prompt']}:"):].strip()
-            if done_with_prompt:
-                negative_prompt += ('' if negative_prompt == '' else "\n") + line
-            else:
-                prompt += ('' if prompt == '' else "\n") + line
-
-        found_styles, prompt, negative_prompt = extract_styles_from_prompt(prompt, negative_prompt)
-
-        data = {
-            'prompt': prompt,
-            'negative_prompt': negative_prompt,
-            'styles': str(found_styles)
-        }
-
-        for k, v in re_param.findall(lastline):
-            try:
-                if v[0] == '"' and v[-1] == '"':
-                    v = unquote(v)
-
-                m = re_imagesize.match(v)
-                if m is not None:
-                    data[f'resolution'] = str((m.group(1), m.group(2)))
-                else:
-                    data[list(self.fooocus_to_a1111.keys())[list(self.fooocus_to_a1111.values()).index(k)]] = v
-            except Exception:
-                print(f"Error parsing \"{k}: {v}\"")
-
-        # try to load performance based on steps, fallback for direct A1111 imports
-        if 'steps' in data and 'performance' not in data:
-            try:
-                data['performance'] = Performance[Steps(int(data['steps'])).name].value
-            except Exception:
-                pass
-
-        if 'base_model' in data:
-            for filename in modules.config.model_filenames:
-                path = Path(filename)
-                if data['base_model'] == path.stem:
-                    data['base_model'] = filename
-                    break
-
-        if 'lora_hashes' in data:
-            lora_filenames = modules.config.lora_filenames.copy()
-            lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora())
-            for li, lora in enumerate(data['lora_hashes'].split(', ')):
-                lora_name, lora_hash, lora_weight = lora.split(': ')
-                for filename in lora_filenames:
-                    path = Path(filename)
-                    if lora_name == path.stem:
-                        data[f'lora_combined_{li + 1}'] = f'{filename} : {lora_weight}'
-                        break
-
-        return data
-
-    def parse_string(self, metadata: dict) -> str:
-        data = {k: v for _, k, v in metadata}
-
-        width, height = eval(data['resolution'])
-
-        generation_params = {
-            self.fooocus_to_a1111['performance']: data['performance'],
-            self.fooocus_to_a1111['steps']: self.steps,
-            self.fooocus_to_a1111['sampler']: data['sampler'],
-            self.fooocus_to_a1111['seed']: data['seed'],
-            self.fooocus_to_a1111['resolution']: f'{width}x{height}',
-            self.fooocus_to_a1111['guidance_scale']: data['guidance_scale'],
-            self.fooocus_to_a1111['sharpness']: data['sharpness'],
-            self.fooocus_to_a1111['adm_guidance']: data['adm_guidance'],
-            self.fooocus_to_a1111['base_model']: Path(data['base_model']).stem,
-            self.fooocus_to_a1111['base_model_hash']: self.base_model_hash,
-        }
-
-        # TODO evaluate if this should always be added
-        if self.refiner_model_name not in ['', 'None']:
-            generation_params |= {
-                self.fooocus_to_a1111['refiner_model']: self.refiner_model_name,
-                self.fooocus_to_a1111['refiner_model_hash']: self.refiner_model_hash
-            }
-
-        for key in ['adaptive_cfg', 'overwrite_switch', 'refiner_swap_method', 'freeu']:
-            if key in data:
-                generation_params[self.fooocus_to_a1111[key]] = data[key]
-
-        lora_hashes = []
-        for index, (lora_name, lora_weight, lora_hash) in enumerate(self.loras):
-            # workaround for Fooocus not knowing LoRA name in LoRA metadata
-            lora_hashes.append(f'{lora_name}: {lora_hash}: {lora_weight}')
-        lora_hashes_string = ', '.join(lora_hashes)
-
-        generation_params |= {
-            self.fooocus_to_a1111['lora_hashes']: lora_hashes_string,
-            self.fooocus_to_a1111['version']: data['version']
-        }
-
-        if modules.config.metadata_created_by != '':
-            generation_params[self.fooocus_to_a1111['created_by']] = modules.config.metadata_created_by
-
-        generation_params_text = ", ".join(
-            [k if k == v else f'{k}: {quote(v)}' for k, v in dict(sorted(generation_params.items())).items() if v is not None])
-        positive_prompt_resolved = ', '.join(self.full_prompt)
-        negative_prompt_resolved = ', '.join(self.full_negative_prompt)
-        negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else ""
-        return f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip()
-
-
-class FooocusMetadataParser(MetadataParser):
-    def get_scheme(self) -> MetadataScheme:
-        return MetadataScheme.FOOOCUS
-
-    def parse_json(self, metadata: dict) -> dict:
-        model_filenames = modules.config.model_filenames.copy()
-        lora_filenames = modules.config.lora_filenames.copy()
-        lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora())
-
-        for key, value in metadata.items():
-            if value in ['', 'None']:
-                continue
-            if key in ['base_model', 'refiner_model']:
-                metadata[key] = self.replace_value_with_filename(key, value, model_filenames)
-            elif key.startswith('lora_combined_'):
-                metadata[key] = self.replace_value_with_filename(key, value, lora_filenames)
-            else:
-                continue
-
-        return metadata
-
-    def parse_string(self, metadata: list) -> str:
-        for li, (label, key, value) in enumerate(metadata):
-            # remove model folder paths from metadata
-            if key.startswith('lora_combined_'):
-                name, weight = value.split(' : ')
-                name = Path(name).stem
-                value = f'{name} : {weight}'
-                metadata[li] = (label, key, value)
-
-        res = {k: v for _, k, v in metadata}
-
-        res['full_prompt'] = self.full_prompt
-        res['full_negative_prompt'] = self.full_negative_prompt
-        res['steps'] = self.steps
-        res['base_model'] = self.base_model_name
-        res['base_model_hash'] = self.base_model_hash
-
-        # TODO evaluate if this should always be added
-        if self.refiner_model_name not in ['', 'None']:
-            res['refiner_model'] = self.refiner_model_name
-            res['refiner_model_hash'] = self.refiner_model_hash
-
-        res['loras'] = self.loras
-
-        if modules.config.metadata_created_by != '':
-            res['created_by'] = modules.config.metadata_created_by
-
-        return json.dumps(dict(sorted(res.items())))
-
-    @staticmethod
-    def replace_value_with_filename(key, value, filenames):
-        for filename in filenames:
-            path = Path(filename)
-            if key.startswith('lora_combined_'):
-                name, weight = value.split(' : ')
-                if name == path.stem:
-                    return f'{filename} : {weight}'
-            elif value == path.stem:
-                return filename
-
-
-def get_metadata_parser(metadata_scheme: MetadataScheme) -> MetadataParser:
-    match metadata_scheme:
-        case MetadataScheme.FOOOCUS:
-            return FooocusMetadataParser()
-        case MetadataScheme.A1111:
-            return A1111MetadataParser()
-        case _:
-            raise NotImplementedError
-
-
-def read_info_from_image(filepath) -> tuple[str | None, dict, MetadataScheme | None]:
-    with Image.open(filepath) as image:
-        items = (image.info or {}).copy()
-
-    parameters = items.pop('parameters', None)
-    if parameters is not None and is_json(parameters):
-        parameters = json.loads(parameters)
-
-    try:
-        metadata_scheme = MetadataScheme(items.pop('fooocus_scheme', None))
-    except Exception:
-        metadata_scheme = None
-
-    # broad fallback
-    if metadata_scheme is None and isinstance(parameters, dict):
-        metadata_scheme = modules.metadata.MetadataScheme.FOOOCUS
-
-    if metadata_scheme is None and isinstance(parameters, str):
-        metadata_scheme = modules.metadata.MetadataScheme.A1111
-
-    return parameters, items, metadata_scheme
diff --git a/modules/private_logger.py b/modules/private_logger.py
index 61264da0..37f2a82c 100644
--- a/modules/private_logger.py
+++ b/modules/private_logger.py
@@ -7,7 +7,7 @@ import urllib.parse
 from PIL import Image
 from PIL.PngImagePlugin import PngInfo
 from modules.util import generate_temp_filename
-from modules.metadata import MetadataParser
+from modules.meta_parser import MetadataParser
 
 log_cache = {}
diff --git a/webui.py b/webui.py
index 6469ea68..8722cb88 100644
--- a/webui.py
+++ b/webui.py
@@ -14,7 +14,6 @@ import modules.gradio_hijack as grh
 import modules.advanced_parameters as advanced_parameters
 import modules.style_sorter as style_sorter
 import modules.meta_parser
-import modules.metadata
 import args_manager
 import copy
@@ -217,7 +216,7 @@ with shared.gradio_root:
             metadata_import_button = gr.Button(value='Apply Metadata')
 
             def trigger_metadata_preview(filepath):
-                parameters, items, metadata_scheme = modules.metadata.read_info_from_image(filepath)
+                parameters, items, metadata_scheme = modules.meta_parser.read_info_from_image(filepath)
 
                 results = {}
                 if parameters is not None:
@@ -597,12 +596,12 @@ with shared.gradio_root:
         load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False)
 
         def trigger_metadata_import(filepath, state_is_generating):
-            parameters, items, metadata_scheme = modules.metadata.read_info_from_image(filepath)
+            parameters, items, metadata_scheme = modules.meta_parser.read_info_from_image(filepath)
             if parameters is None:
                 print('Could not find metadata in the image!')
                 pass
 
-            metadata_parser = modules.metadata.get_metadata_parser(metadata_scheme)
+            metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
             parsed_parameters = metadata_parser.parse_json(parameters)
 
             return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating)
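The scheme detection in the new read_info_from_image degrades gracefully: an explicit fooocus_scheme PNG field wins, otherwise the shape of the parameters payload decides. A minimal standalone sketch of that decision table (function name and sample values are illustrative, not part of the patch):

    import json

    def guess_scheme(parameters, explicit=None):
        # mirrors read_info_from_image: explicit tag first, then payload shape
        if explicit in ('fooocus', 'a1111'):
            return explicit
        if isinstance(parameters, dict):  # Fooocus stores a JSON object
            return 'fooocus'
        if isinstance(parameters, str):   # A1111 stores plain text
            return 'a1111'
        return None

    assert guess_scheme(json.loads('{"seed": 1}')) == 'fooocus'
    assert guess_scheme('a cat\nSteps: 30, Sampler: DPM++ 2M') == 'a1111'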
From 9aa82aa80a19474e410b3d09ed622483d6e9c82d Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Fri, 2 Feb 2024 01:57:33 +0100
Subject: [PATCH 41/52] fix: add missing refiner in A1111 parse_json

---
 modules/meta_parser.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 1e12b6cd..a452686d 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -308,12 +308,13 @@
             except Exception:
                 pass
 
-        if 'base_model' in data:
-            for filename in modules.config.model_filenames:
-                path = Path(filename)
-                if data['base_model'] == path.stem:
-                    data['base_model'] = filename
-                    break
+        for key in ['base_model', 'refiner_model']:
+            if key in data:
+                for filename in modules.config.model_filenames:
+                    path = Path(filename)
+                    if data[key] == path.stem:
+                        data[key] = filename
+                        break
 
         if 'lora_hashes' in data:
             lora_filenames = modules.config.lora_filenames.copy()

From 1c3431e6a7629fb6b0dbb102d8de5afc1750fd57 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Fri, 2 Feb 2024 02:02:41 +0100
Subject: [PATCH 42/52] wip: add TODO for multiline prompt style resolution

---
 modules/util.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/modules/util.py b/modules/util.py
index 15594ad6..d4596799 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -300,6 +300,7 @@ def extract_styles_from_prompt(prompt, negative_prompt):
                 found_style = style
                 prompt = new_prompt
                 negative_prompt = new_neg_prompt
+                # TODO check how to resolve multiline input prompts
                 if real_prompt == '' and new_real_prompt != '' and new_real_prompt != prompt:
                     real_prompt = new_real_prompt
                 break
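Patch 41 generalizes the filename lookup so the refiner model is resolved the same way as the base model. The matching is plain pathlib stem comparison; a hedged sketch with an illustrative file list:

    from pathlib import Path

    model_filenames = ['sdxl/juggernautXL_v8.safetensors', 'realisticVision.safetensors']

    def resolve(stem):
        # map a bare model name from metadata back to a known relative file path
        return next((f for f in model_filenames if Path(f).stem == stem), None)

    assert resolve('juggernautXL_v8') == 'sdxl/juggernautXL_v8.safetensors'
    assert resolve('unknown_model') is None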
From 349556bfa6c1a2d8246957a78a705b6c0ce5ca1f Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Fri, 2 Feb 2024 20:58:16 +0100
Subject: [PATCH 43/52] fix: remove sorting for A1111, change performance key position

fixes https://github.com/lllyasviel/Fooocus/pull/1940#issuecomment-1924444633
---
 modules/meta_parser.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index a452686d..b2931c46 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -335,7 +335,6 @@
         width, height = eval(data['resolution'])
 
         generation_params = {
-            self.fooocus_to_a1111['performance']: data['performance'],
             self.fooocus_to_a1111['steps']: self.steps,
             self.fooocus_to_a1111['sampler']: data['sampler'],
             self.fooocus_to_a1111['seed']: data['seed'],
@@ -345,6 +344,7 @@
             self.fooocus_to_a1111['adm_guidance']: data['adm_guidance'],
             self.fooocus_to_a1111['base_model']: Path(data['base_model']).stem,
             self.fooocus_to_a1111['base_model_hash']: self.base_model_hash,
+            self.fooocus_to_a1111['performance']: data['performance'],
         }
 
         # TODO evaluate if this should always be added
@@ -373,7 +373,7 @@
             generation_params[self.fooocus_to_a1111['created_by']] = modules.config.metadata_created_by
 
         generation_params_text = ", ".join(
-            [k if k == v else f'{k}: {quote(v)}' for k, v in dict(sorted(generation_params.items())).items() if
+            [k if k == v else f'{k}: {quote(v)}' for k, v in generation_params.items() if
              v is not None])
         positive_prompt_resolved = ', '.join(self.full_prompt)
         negative_prompt_resolved = ', '.join(self.full_negative_prompt)
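With the dict(sorted(...)) wrapper gone, the exported A1111 parameter line preserves insertion order, so Steps leads and related keys stay grouped. The serialization itself is a comma-joined key: value list; a small sketch of the resulting format (values illustrative, quoting simplified relative to modules.util.quote):

    import json

    generation_params = {'Steps': 30, 'Sampler': 'DPM++ 2M', 'Seed': 127589, 'Size': '1152x896'}

    def quote(value):
        # simplified stand-in for modules.util.quote: quote only when necessary
        text = str(value)
        return json.dumps(text) if any(c in text for c in ',\n:') else text

    line = ', '.join(f'{k}: {quote(v)}' for k, v in generation_params.items() if v is not None)
    # -> 'Steps: 30, Sampler: DPM++ 2M, Seed: 127589, Size: 1152x896'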
From ed4a958da862b53c5541ab6dd9961073ceea566c Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Fri, 2 Feb 2024 22:04:28 +0100
Subject: [PATCH 44/52] fix: add workaround for multiline prompts

---
 modules/async_worker.py |  7 +++----
 modules/meta_parser.py  | 41 +++++++++++++++++++++++++++++----------
 modules/util.py         | 18 ++++++++++--------
 3 files changed, 44 insertions(+), 22 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index c3f9be36..1e501ebb 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -342,9 +342,6 @@ def worker():
 
         progressbar(async_task, 1, 'Initializing ...')
 
-        raw_prompt = prompt
-        raw_negative_prompt = negative_prompt
-
         if not skip_prompt_processing:
 
             prompts = remove_empty_str([safe_str(p) for p in prompt.splitlines()], default='')
@@ -805,7 +802,9 @@ def worker():
                 metadata_parser = None
                 if save_metadata_to_images:
                     metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
-                    metadata_parser.set_data(task['positive'], task['negative'], steps, base_model_name, refiner_model_name, loras)
+                    metadata_parser.set_data(task['log_positive_prompt'], task['positive'],
+                                             task['log_negative_prompt'], task['negative'],
+                                             steps, base_model_name, refiner_model_name, loras)
 
                 for li, (n, w) in enumerate(loras):
                     if n != 'None':
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index b2931c46..aa0cd10e 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -8,7 +8,7 @@ import gradio as gr
 from PIL import Image
 
 import modules.config
-import modules.config
+import modules.sdxl_styles
 from modules.flags import MetadataScheme, Performance, Steps
 from modules.flags import lora_count
 from modules.util import quote, unquote, extract_styles_from_prompt, is_json, calculate_sha256
@@ -187,7 +187,9 @@ def get_sha256(filepath):
 
 class MetadataParser(ABC):
     def __init__(self):
+        self.raw_prompt: str = ''
         self.full_prompt: str = ''
+        self.raw_negative_prompt: str = ''
         self.full_negative_prompt: str = ''
         self.steps: int = 30
         self.base_model_name: str = ''
@@ -208,8 +210,10 @@ class MetadataParser(ABC):
     def parse_string(self, metadata: dict) -> str:
         raise NotImplementedError
 
-    def set_data(self, full_prompt, full_negative_prompt, steps, base_model_name, refiner_model_name, loras):
+    def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name, refiner_model_name, loras):
+        self.raw_prompt = raw_prompt
         self.full_prompt = full_prompt
+        self.raw_negative_prompt = raw_negative_prompt
         self.full_negative_prompt = full_negative_prompt
         self.steps = steps
         self.base_model_name = Path(base_model_name).stem
@@ -235,6 +239,8 @@ class A1111MetadataParser(MetadataParser):
         return MetadataScheme.A1111
 
     fooocus_to_a1111 = {
+        'raw_prompt': 'Raw prompt',
+        'raw_negative_prompt': 'Raw negative prompt',
         'negative_prompt': 'Negative prompt',
         'styles': 'Styles',
         'performance': 'Performance',
@@ -260,8 +266,8 @@ class A1111MetadataParser(MetadataParser):
     }
 
     def parse_json(self, metadata: str) -> dict:
-        prompt = ''
-        negative_prompt = ''
+        metadata_prompt = ''
+        metadata_negative_prompt = ''
 
         done_with_prompt = False
 
@@ -276,16 +282,15 @@ class A1111MetadataParser(MetadataParser):
                 done_with_prompt = True
                 line = line[len(f"{self.fooocus_to_a1111['negative_prompt']}:"):].strip()
             if done_with_prompt:
-                negative_prompt += ('' if negative_prompt == '' else "\n") + line
+                metadata_negative_prompt += ('' if metadata_negative_prompt == '' else "\n") + line
             else:
-                prompt += ('' if prompt == '' else "\n") + line
+                metadata_prompt += ('' if metadata_prompt == '' else "\n") + line
 
-        found_styles, prompt, negative_prompt = extract_styles_from_prompt(prompt, negative_prompt)
+        found_styles, prompt, negative_prompt = extract_styles_from_prompt(metadata_prompt, metadata_negative_prompt)
 
         data = {
             'prompt': prompt,
-            'negative_prompt': negative_prompt,
-            'styles': str(found_styles)
+            'negative_prompt': negative_prompt
         }
 
         for k, v in re_param.findall(lastline):
@@ -295,12 +300,24 @@ class A1111MetadataParser(MetadataParser):
 
                 m = re_imagesize.match(v)
                 if m is not None:
-                    data[f'resolution'] = str((m.group(1), m.group(2)))
+                    data['resolution'] = str((m.group(1), m.group(2)))
                 else:
                     data[list(self.fooocus_to_a1111.keys())[list(self.fooocus_to_a1111.values()).index(k)]] = v
             except Exception:
                 print(f"Error parsing \"{k}: {v}\"")
 
+        # workaround for multiline prompts
+        if 'raw_prompt' in data:
+            data['prompt'] = data['raw_prompt']
+            raw_prompt = data['raw_prompt'].replace("\n", ', ')
+            if metadata_prompt != raw_prompt and modules.sdxl_styles.fooocus_expansion not in found_styles:
+                found_styles.append(modules.sdxl_styles.fooocus_expansion)
+
+        if 'raw_negative_prompt' in data:
+            data['negative_prompt'] = data['raw_negative_prompt']
+
+        data['styles'] = str(found_styles)
+
         # try to load performance based on steps, fallback for direct A1111 imports
         if 'steps' in data and 'performance' not in data:
             try:
@@ -344,7 +361,11 @@ class A1111MetadataParser(MetadataParser):
             self.fooocus_to_a1111['adm_guidance']: data['adm_guidance'],
             self.fooocus_to_a1111['base_model']: Path(data['base_model']).stem,
             self.fooocus_to_a1111['base_model_hash']: self.base_model_hash,
+
             self.fooocus_to_a1111['performance']: data['performance'],
+            # workaround for multiline prompts
+            self.fooocus_to_a1111['raw_prompt']: self.raw_prompt,
+            self.fooocus_to_a1111['raw_negative_prompt']: self.raw_negative_prompt,
         }
 
diff --git a/modules/util.py b/modules/util.py
index d4596799..4a590f51 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -311,18 +311,20 @@ def extract_styles_from_prompt(prompt, negative_prompt):
         applicable_styles.remove(found_style)
         extracted.append(found_style.name)
 
+    # TODO check multiline prompt
     # add prompt expansion if not all styles could be resolved
     if prompt != '':
-        if prompt != real_prompt:
+        if real_prompt != '':
             extracted.append(modules.sdxl_styles.fooocus_expansion)
-
-        # find real_prompt when only prompt expansion is selected
-        if real_prompt == '':
+        else:
+            # find real_prompt when only prompt expansion is selected
             first_word = prompt.split(', ')[0]
             first_word_positions = [i for i in range(len(prompt)) if prompt.startswith(first_word, i)]
-            real_prompt = prompt[:first_word_positions[-1]]
-            if real_prompt.endswith(', '):
-                real_prompt = real_prompt[:-2]
+            if len(first_word_positions) > 1:
+                real_prompt = prompt[:first_word_positions[-1]]
+                extracted.append(modules.sdxl_styles.fooocus_expansion)
+            if real_prompt.endswith(', '):
+                real_prompt = real_prompt[:-2]
 
     return list(reversed(extracted)), real_prompt, negative_prompt
 
@@ -337,6 +339,6 @@ def is_json(data: str) -> bool:
     try:
         loaded_json = json.loads(data)
         assert isinstance(loaded_json, dict)
-    except ValueError:
+    except (ValueError, AssertionError):
         return False
     return True
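The workaround exists because the A1111 text format is line-oriented: a prompt containing a newline would be reassembled incorrectly on import. Keeping a verbatim copy under a dedicated Raw prompt key sidesteps that; a sketch of the round-trip idea (keys follow fooocus_to_a1111, values illustrative):

    prompt = 'first line\nsecond line'

    # export: keep a lossless copy next to the flattened prompt text
    params = {'Raw prompt': prompt}
    flattened = prompt.replace('\n', ', ')   # what the plain-text block alone would yield

    # import: prefer the raw copy when present, fall back to the flattened text
    restored = params.get('Raw prompt', flattened)
    assert restored == prompt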
From 63403d614e196b54d374058e992f92476d46d040 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Fri, 2 Feb 2024 23:44:47 +0100
Subject: [PATCH 45/52] feat: add sampler mapping

---
 modules/flags.py       | 39 +++++++++++++++++++++++++++++++++++----
 modules/meta_parser.py | 19 +++++++++++++++++--
 2 files changed, 52 insertions(+), 6 deletions(-)

diff --git a/modules/flags.py b/modules/flags.py
index db26394e..06ced601 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -12,12 +12,43 @@ uov_list = [
     disabled, subtle_variation, strong_variation, upscale_15, upscale_2, upscale_fast
 ]
 
-KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "heunpp2","dpm_2", "dpm_2_ancestral",
-                  "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
-                  "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"]
+CIVITAI_NO_KARRAS = ["euler", "euler_ancestral", "heun", "dpm_fast", "dpm_adaptive", "ddim", "uni_pc"]
+
+# fooocus: a1111 (Civitai)
+KSAMPLER = {
+    "euler": "Euler",
+    "euler_ancestral": "Euler a",
+    "heun": "Heun",
+    "heunpp2": "",
+    "dpm_2": "DPM2",
+    "dpm_2_ancestral": "DPM2 a",
+    "lms": "LMS",
+    "dpm_fast": "DPM fast",
+    "dpm_adaptive": "DPM adaptive",
+    "dpmpp_2s_ancestral": "DPM++ 2S a",
+    "dpmpp_sde": "DPM++ SDE",
+    "dpmpp_sde_gpu": "",
+    "dpmpp_2m": "DPM++ 2M",
+    "dpmpp_2m_sde": "DPM++ 2M SDE",
+    "dpmpp_2m_sde_gpu": "",
+    "dpmpp_3m_sde": "",
+    "dpmpp_3m_sde_gpu": "",
+    "ddpm": "",
+    "lcm": "LCM"
+}
+
+SAMPLER_EXTRA = {
+    "ddim": "DDIM",
+    "uni_pc": "UniPC",
+    "uni_pc_bh2": ""
+}
+
+SAMPLERS = KSAMPLER | SAMPLER_EXTRA
+
+KSAMPLER_NAMES = list(KSAMPLER.keys())
 
 SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo"]
-SAMPLER_NAMES = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"]
+SAMPLER_NAMES = KSAMPLER_NAMES + list(SAMPLER_EXTRA.keys())
 
 sampler_list = SAMPLER_NAMES
 scheduler_list = SCHEDULER_NAMES
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index aa0cd10e..1bbb1abf 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -10,7 +10,7 @@ from PIL import Image
 import modules.config
 import modules.sdxl_styles
 from modules.flags import MetadataScheme, Performance, Steps
-from modules.flags import lora_count
+from modules.flags import lora_count, SAMPLERS, CIVITAI_NO_KARRAS
 from modules.util import quote, unquote, extract_styles_from_prompt, is_json, calculate_sha256
 
 re_param_code = r'\s*(\w[\w \-/]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)'
@@ -246,6 +246,7 @@ class A1111MetadataParser(MetadataParser):
         'performance': 'Performance',
         'steps': 'Steps',
         'sampler': 'Sampler',
+        'scheduler': 'Scheduler',
         'guidance_scale': 'CFG scale',
         'seed': 'Seed',
         'resolution': 'Size',
@@ -325,6 +326,12 @@ class A1111MetadataParser(MetadataParser):
             except Exception:
                 pass
 
+        if 'sampler' in data:
+            sampler = data['sampler'].replace(' Karras', '')
+            # get key
+            data['sampler'] = [k for k, v in SAMPLERS.items() if v == sampler][0]
+
         for key in ['base_model', 'refiner_model']:
             if key in data:
                 for filename in modules.config.model_filenames:
@@ -351,9 +358,16 @@ class A1111MetadataParser(MetadataParser):
 
         width, height = eval(data['resolution'])
 
+        sampler = data['sampler']
+        scheduler = data['scheduler']
+        if sampler in SAMPLERS and SAMPLERS[sampler] != '':
+            sampler = SAMPLERS[sampler]
+            if sampler not in CIVITAI_NO_KARRAS and scheduler == 'karras':
+                sampler += f' Karras'
+
         generation_params = {
             self.fooocus_to_a1111['steps']: self.steps,
-            self.fooocus_to_a1111['sampler']: data['sampler'],
+            self.fooocus_to_a1111['sampler']: sampler,
             self.fooocus_to_a1111['seed']: data['seed'],
             self.fooocus_to_a1111['resolution']: f'{width}x{height}',
             self.fooocus_to_a1111['guidance_scale']: data['guidance_scale'],
@@ -363,6 +377,7 @@ class A1111MetadataParser(MetadataParser):
             self.fooocus_to_a1111['base_model_hash']: self.base_model_hash,
             self.fooocus_to_a1111['performance']: data['performance'],
+            self.fooocus_to_a1111['scheduler']: scheduler,
             # workaround for multiline prompts
             self.fooocus_to_a1111['raw_prompt']: self.raw_prompt,
             self.fooocus_to_a1111['raw_negative_prompt']: self.raw_negative_prompt,
         }
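The table is deliberately lossy: several Fooocus samplers share one Civitai-facing name, and '' marks samplers without an A1111 equivalent. A sketch of the export direction, with the dict literal abridged from the KSAMPLER/SAMPLERS tables above and the Karras suffix rule from parse_string:

    SAMPLERS = {'dpmpp_2m': 'DPM++ 2M', 'euler': 'Euler', 'heunpp2': ''}
    CIVITAI_NO_KARRAS = ['euler', 'euler_ancestral', 'heun', 'dpm_fast', 'dpm_adaptive', 'ddim', 'uni_pc']

    def to_a1111(sampler, scheduler):
        name = sampler                       # '' mapping: keep the raw Fooocus key
        if SAMPLERS.get(sampler, ''):
            name = SAMPLERS[sampler]
            if scheduler == 'karras' and sampler not in CIVITAI_NO_KARRAS:
                name += ' Karras'
        return name

    assert to_a1111('dpmpp_2m', 'karras') == 'DPM++ 2M Karras'
    assert to_a1111('euler', 'karras') == 'Euler'   # Civitai lists no Euler Karras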
From 1419231e7451ef87a465102cb6a72a275d07a649 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Sat, 3 Feb 2024 22:41:26 +0100
Subject: [PATCH 46/52] feat: prevent config reset by renaming metadata_scheme to match config options

---
 language/en.json | 6 +++---
 modules/flags.py | 8 ++++----
 webui.py         | 2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/language/en.json b/language/en.json
index 0617ae1e..d9d185a1 100644
--- a/language/en.json
+++ b/language/en.json
@@ -373,7 +373,7 @@
     "Metadata": "Metadata",
     "Apply Metadata": "Apply Metadata",
     "Metadata Scheme": "Metadata Scheme",
-    "Image Prompt parameters are not included. Use A1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use A1111 for compatibility with Civitai.",
-    "Fooocus (json)": "Fooocus (json)",
-    "A1111 (plain text)": "A1111 (plain text)"
+    "Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.",
+    "fooocus (json)": "fooocus (json)",
+    "a1111 (plain text)": "a1111 (plain text)"
 }
\ No newline at end of file
diff --git a/modules/flags.py b/modules/flags.py
index 06ced601..278f7000 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -83,8 +83,8 @@ class MetadataScheme(Enum):
 
 metadata_scheme = [
-    ('Fooocus (json)', MetadataScheme.FOOOCUS.value),
-    ('A1111 (plain text)', MetadataScheme.A1111.value),
+    (f'{MetadataScheme.FOOOCUS.value} (json)', MetadataScheme.FOOOCUS.value),
+    (f'{MetadataScheme.A1111.value} (plain text)', MetadataScheme.A1111.value),
 ]
 
 lora_count = 5
@@ -114,10 +114,10 @@ class Performance(Enum):
     def list(cls) -> list:
         return list(map(lambda c: c.value, cls))
 
-    def steps(self) -> int:
+    def steps(self) -> int | None:
         return Steps[self.name].value if Steps[self.name] else None
 
-    def steps_uov(self) -> int:
+    def steps_uov(self) -> int | None:
         return StepsUOV[self.name].value if Steps[self.name] else None
 
diff --git a/webui.py b/webui.py
index 8722cb88..74778eac 100644
--- a/webui.py
+++ b/webui.py
@@ -411,7 +411,7 @@ with shared.gradio_root:
             save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images,
                                                   info='Adds parameters to generated images allowing manual regeneration.')
             metadata_scheme = gr.Radio(label='Metadata Scheme', choices=flags.metadata_scheme, value=modules.config.default_metadata_scheme,
-                                       info='Image Prompt parameters are not included. Use A1111 for compatibility with Civitai.',
+                                       info='Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.',
                                        visible=modules.config.default_save_metadata_to_images)
 
             save_metadata_to_images.change(lambda x: gr.update(visible=x), inputs=[save_metadata_to_images], outputs=[metadata_scheme],
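Deriving the radio labels from MetadataScheme.value means the string persisted in the config is always one of the choice values, so relabeling the UI can no longer invalidate a saved selection. A minimal sketch of that invariant (enum values inferred from the labels above):

    from enum import Enum

    class MetadataScheme(Enum):
        FOOOCUS = 'fooocus'
        A1111 = 'a1111'

    metadata_scheme = [
        (f'{MetadataScheme.FOOOCUS.value} (json)', MetadataScheme.FOOOCUS.value),
        (f'{MetadataScheme.A1111.value} (plain text)', MetadataScheme.A1111.value),
    ]

    stored = 'a1111'  # value persisted in the user config
    assert stored in [value for _, value in metadata_scheme]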
From 8af73e622f8324f1c3a0f60c64ab62b8cb3d2510 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Sun, 4 Feb 2024 00:44:26 +0100
Subject: [PATCH 47/52] chore: remove remaining todos after analysis

refiner is added when set
restoring multiline prompts has been resolved by using separate parameters "raw_prompt" and "raw_negative_prompt"
---
 modules/async_worker.py | 1 -
 modules/meta_parser.py  | 2 --
 modules/util.py         | 2 --
 3 files changed, 5 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index 1e501ebb..9df44412 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -779,7 +779,6 @@ def worker():
                          ('Refiner Model', 'refiner_model', refiner_model_name),
                          ('Refiner Switch', 'refiner_switch', refiner_switch)]
 
-                    # TODO evaluate if this should always be added
                     if refiner_model_name != 'None':
                         if advanced_parameters.overwrite_switch > 0:
                             d.append(('Overwrite Switch', 'overwrite_switch', advanced_parameters.overwrite_switch))
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 1bbb1abf..d5453f78 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -383,7 +383,6 @@ class A1111MetadataParser(MetadataParser):
             self.fooocus_to_a1111['raw_negative_prompt']: self.raw_negative_prompt,
         }
 
-        # TODO evaluate if this should always be added
         if self.refiner_model_name not in ['', 'None']:
             generation_params |= {
                 self.fooocus_to_a1111['refiner_model']: self.refiner_model_name,
                 self.fooocus_to_a1111['refiner_model_hash']: self.refiner_model_hash
             }
@@ -455,7 +454,6 @@ class FooocusMetadataParser(MetadataParser):
         res['base_model'] = self.base_model_name
         res['base_model_hash'] = self.base_model_hash
 
-        # TODO evaluate if this should always be added
         if self.refiner_model_name not in ['', 'None']:
             res['refiner_model'] = self.refiner_model_name
             res['refiner_model_hash'] = self.refiner_model_hash
diff --git a/modules/util.py b/modules/util.py
index 4a590f51..2f42e3eb 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -300,7 +300,6 @@ def extract_styles_from_prompt(prompt, negative_prompt):
                 found_style = style
                 prompt = new_prompt
                 negative_prompt = new_neg_prompt
-                # TODO check how to resolve multiline input prompts
                 if real_prompt == '' and new_real_prompt != '' and new_real_prompt != prompt:
                     real_prompt = new_real_prompt
                 break
@@ -311,7 +310,6 @@ def extract_styles_from_prompt(prompt, negative_prompt):
         applicable_styles.remove(found_style)
         extracted.append(found_style.name)
 
-    # TODO check multiline prompt
     # add prompt expansion if not all styles could be resolved
     if prompt != '':
         if real_prompt != '':

From c668228fe8facd86e33f751a4329efe3bdd799e4 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Sun, 4 Feb 2024 01:31:24 +0100
Subject: [PATCH 48/52] chore: specify too broad exception types

---
 modules/meta_parser.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index d5453f78..020dd19f 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -323,14 +323,16 @@ class A1111MetadataParser(MetadataParser):
         if 'steps' in data and 'performance' not in data:
             try:
                 data['performance'] = Performance[Steps(int(data['steps'])).name].value
-            except Exception:
+            except (ValueError, KeyError):
                 pass
 
         if 'sampler' in data:
-            sampler = data['sampler'].replace(' Karras', '')
+            data['sampler'] = data['sampler'].replace(' Karras', '')
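The narrowing in patch 48 is safe because each lookup has a known failure mode: Steps(int(...)) raises ValueError for a step count that matches no preset, and Performance[...] raises KeyError for an unknown member name (note the tuple form above; an except clause does not accept a ValueError | KeyError union). A sketch of the failure modes, with preset values mirroring the Fooocus performance modes (illustrative):

    from enum import Enum

    class Steps(Enum):
        QUALITY = 60
        SPEED = 30
        EXTREME_SPEED = 8

    class Performance(Enum):
        QUALITY = 'Quality'
        SPEED = 'Speed'
        EXTREME_SPEED = 'Extreme Speed'

    def performance_from_steps(steps):
        try:
            return Performance[Steps(int(steps)).name].value
        except (ValueError, KeyError):
            return None  # custom step count: leave performance unset

    assert performance_from_steps(30) == 'Speed'
    assert performance_from_steps(25) is None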
             # get key
-            data['sampler'] = [k for k, v in SAMPLERS.items() if v == sampler][0]
-
+            for k, v in SAMPLERS.items():
+                if v == data['sampler']:
+                    data['sampler'] = k
+                    break
 
         for key in ['base_model', 'refiner_model']:
             if key in data:
@@ -497,7 +499,7 @@ def read_info_from_image(filepath) -> tuple[str | None, dict, MetadataScheme | N
     try:
         metadata_scheme = MetadataScheme(items.pop('fooocus_scheme', None))
-    except Exception:
+    except ValueError:
         metadata_scheme = None

From fe33cc71145f6b9510870ff1a7d5e2386af89137 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Sun, 4 Feb 2024 13:54:38 +0100
Subject: [PATCH 49/52] feat: add mapping for _gpu samplers to cpu samplers

gpu samplers are less deterministic than cpu but in general similar, see
https://www.reddit.com/r/comfyui/comments/15hayzo/comment/juqcpep/
---
 modules/flags.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/flags.py b/modules/flags.py
index 278f7000..fe6800ed 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -27,10 +27,10 @@ KSAMPLER = {
     "dpm_adaptive": "DPM adaptive",
     "dpmpp_2s_ancestral": "DPM++ 2S a",
     "dpmpp_sde": "DPM++ SDE",
-    "dpmpp_sde_gpu": "",
+    "dpmpp_sde_gpu": "DPM++ SDE",
     "dpmpp_2m": "DPM++ 2M",
     "dpmpp_2m_sde": "DPM++ 2M SDE",
-    "dpmpp_2m_sde_gpu": "",
+    "dpmpp_2m_sde_gpu": "DPM++ 2M SDE",
     "dpmpp_3m_sde": "",
     "dpmpp_3m_sde_gpu": "",
     "ddpm": "",

From dfb48fd7541b342a138724496c90f26514861cab Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Sun, 4 Feb 2024 19:24:45 +0100
Subject: [PATCH 50/52] feat: add better handling for image import with empty metadata

---
 modules/meta_parser.py | 2 +-
 webui.py               | 7 ++++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 020dd19f..03943373 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -26,7 +26,7 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
         loaded_parameter_dict = json.loads(raw_metadata)
     assert isinstance(loaded_parameter_dict, dict)
 
-    results = [True, 1]
+    results = [len(loaded_parameter_dict) > 0, 1]
 
     get_str('prompt', 'Prompt', loaded_parameter_dict, results)
     get_str('negative_prompt', 'Negative Prompt', loaded_parameter_dict, results)
diff --git a/webui.py b/webui.py
index 74778eac..db508a52 100644
--- a/webui.py
+++ b/webui.py
@@ -599,10 +599,11 @@ with shared.gradio_root:
             parameters, items, metadata_scheme = modules.meta_parser.read_info_from_image(filepath)
             if parameters is None:
                 print('Could not find metadata in the image!')
-                pass
+                parsed_parameters = {}
+            else:
+                metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
+                parsed_parameters = metadata_parser.parse_json(parameters)
 
-            metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
-            parsed_parameters = metadata_parser.parse_json(parameters)
 
             return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating)
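Patch 49's shared names make the metadata round trip collapse GPU samplers onto their CPU twins: the reverse lookup in parse_json takes the first key whose value matches, and each CPU variant precedes its _gpu sibling in the table. A sketch of that behaviour (dict abridged from flags.py):

    SAMPLERS = {'dpmpp_2m_sde': 'DPM++ 2M SDE', 'dpmpp_2m_sde_gpu': 'DPM++ 2M SDE'}

    exported = SAMPLERS['dpmpp_2m_sde_gpu']                        # 'DPM++ 2M SDE'
    imported = next(k for k, v in SAMPLERS.items() if v == exported)
    assert imported == 'dpmpp_2m_sde'   # re-import lands on the cpu variant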
From c104d58f76aaf0c8744286c320a48184e5b8c3a1 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Sun, 4 Feb 2024 19:25:20 +0100
Subject: [PATCH 51/52] fix: parse adaptive_cfg as float instead of string

---
 modules/meta_parser.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 03943373..60a999cb 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -39,7 +39,7 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
     get_float('sharpness', 'Sharpness', loaded_parameter_dict, results)
     get_adm_guidance('adm_guidance', 'ADM Guidance', loaded_parameter_dict, results)
     get_str('refiner_swap_method', 'Refiner Swap Method', loaded_parameter_dict, results)
-    get_str('adaptive_cfg', 'CFG Mimicking from TSNR', loaded_parameter_dict, results)
+    get_float('adaptive_cfg', 'CFG Mimicking from TSNR', loaded_parameter_dict, results)
     get_str('base_model', 'Base Model', loaded_parameter_dict, results)
     get_str('refiner_model', 'Refiner Model', loaded_parameter_dict, results)
     get_float('refiner_switch', 'Refiner Switch', loaded_parameter_dict, results)

From 832441e86d4caeca78208309680c3ab2eb2fef15 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Sun, 4 Feb 2024 19:26:10 +0100
Subject: [PATCH 52/52] chore: loosen strict type for parse_json, fix indent

---
 modules/meta_parser.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 60a999cb..19d24fc4 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -203,7 +203,7 @@ class MetadataParser(ABC):
         raise NotImplementedError
 
     @abstractmethod
-    def parse_json(self, metadata: dict) -> dict:
+    def parse_json(self, metadata: dict | str) -> dict:
         raise NotImplementedError
 
     @abstractmethod
@@ -502,11 +502,11 @@ def read_info_from_image(filepath) -> tuple[str | None, dict, MetadataScheme | N
     except ValueError:
         metadata_scheme = None
 
-    # broad fallback
-    if metadata_scheme is None and isinstance(parameters, dict):
-        metadata_scheme = MetadataScheme.FOOOCUS
+    # broad fallback
+    if isinstance(parameters, dict):
+        metadata_scheme = MetadataScheme.FOOOCUS
 
-    if metadata_scheme is None and isinstance(parameters, str):
-        metadata_scheme = MetadataScheme.A1111
+    if isinstance(parameters, str):
+        metadata_scheme = MetadataScheme.A1111
 
     return parameters, items, metadata_scheme
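With patch 52, the import path in webui.py and the parser API line up end to end. A condensed sketch of the read path as the series leaves it (file path illustrative; assumes the Fooocus modules are importable):

    import modules.meta_parser

    parameters, items, scheme = modules.meta_parser.read_info_from_image('outputs/log.png')
    if parameters is None:
        parsed = {}  # empty dict keeps the Apply Metadata flow inert
    else:
        parser = modules.meta_parser.get_metadata_parser(scheme)
        parsed = parser.parse_json(parameters)  # dict passthrough for Fooocus, text parsing for A1111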
diff --git a/modules/private_logger.py b/modules/private_logger.py
@@ [hunk truncated in source: the HTML string literals around {only_name} and inside the table rows were stripped during extraction] @@
-        for label, key, value, showable, copyable in metadata:
-            if showable:
-                value_txt = str(value).replace('\n', ' </br> ')
-                item += f"<tr><td class='label'>{label}</td><td class='value'>{value_txt}</td></tr>\n"
+        for label, key, value in metadata:
+            value_txt = str(value).replace('\n', ' </br> ')
+            item += f"<tr><td class='label'>{label}</td><td class='value'>{value_txt}</td></tr>\n"
         item += "</table>"
 
-        js_txt = urllib.parse.quote(json.dumps({k: v for _, k, v, _, copyable in metadata if copyable}, indent=0), safe='')
+        js_txt = urllib.parse.quote(json.dumps({k: v for _, k, v in metadata}, indent=0), safe='')
         item += "