From 4127fa410b3efff48e59915c2b575d2a768f7c00 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Thu, 30 May 2024 00:11:07 +0200
Subject: [PATCH 01/12] feat: add remove_performance_lora method

---
 modules/util.py | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/modules/util.py b/modules/util.py
index 8317dd50..3f712214 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -16,6 +16,7 @@ from PIL import Image
 
 import modules.config
 import modules.sdxl_styles
+from modules.flags import Performance
 
 LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
 
@@ -440,6 +441,22 @@ def parse_lora_references_from_prompt(prompt: str, loras: List[Tuple[AnyStr, flo
     return updated_loras[:loras_limit], cleaned_prompt
 
 
+def remove_performance_lora(filenames: list, performance: Performance | None):
+    loras_without_performance = filenames.copy()
+
+    if performance is None:
+        return loras_without_performance
+
+    performance_lora = performance.lora_filename()
+
+    for filename in filenames:
+        path = Path(filename)
+        if performance_lora == path.name:
+            loras_without_performance.remove(filename)
+
+    return loras_without_performance
+
+
 def cleanup_prompt(prompt):
     prompt = re.sub(' +', ' ', prompt)
     prompt = re.sub(',+', ',', prompt)

From 9564341fbd27edd5472efdfba81a10f20565e488 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Thu, 30 May 2024 00:13:14 +0200
Subject: [PATCH 02/12] feat: use class PerformanceLoRA instead of strings in
 config

---
 modules/config.py | 13 +++++++------
 modules/flags.py  |  8 ++++++++
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/modules/config.py b/modules/config.py
index cb651c5b..87b27d90 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -634,26 +634,27 @@ def downloading_sdxl_lcm_lora():
     load_file_from_url(
         url='https://huggingface.co/lllyasviel/misc/resolve/main/sdxl_lcm_lora.safetensors',
         model_dir=paths_loras[0],
-        file_name=sdxl_lcm_lora
+        file_name=modules.flags.PerformanceLoRA.EXTREME_SPEED.value
     )
-    return sdxl_lcm_lora
+    return modules.flags.PerformanceLoRA.EXTREME_SPEED.value
+
 
 def downloading_sdxl_lightning_lora():
     load_file_from_url(
         url='https://huggingface.co/mashb1t/misc/resolve/main/sdxl_lightning_4step_lora.safetensors',
         model_dir=paths_loras[0],
-        file_name=sdxl_lightning_lora
+        file_name=modules.flags.PerformanceLoRA.LIGHTNING.value
     )
-    return sdxl_lightning_lora
+    return modules.flags.PerformanceLoRA.LIGHTNING.value
 
 
 def downloading_sdxl_hyper_sd_lora():
     load_file_from_url(
         url='https://huggingface.co/mashb1t/misc/resolve/main/sdxl_hyper_sd_4step_lora.safetensors',
         model_dir=paths_loras[0],
-        file_name=sdxl_hyper_sd_lora
+        file_name=modules.flags.PerformanceLoRA.HYPER_SD.value
     )
-    return sdxl_hyper_sd_lora
+    return modules.flags.PerformanceLoRA.HYPER_SD.value
 
 
 def downloading_controlnet_canny():

diff --git a/modules/flags.py b/modules/flags.py
index e48052e1..924f8ce2 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -115,6 +115,14 @@ class OutputFormat(Enum):
         return list(map(lambda c: c.value, cls))
 
 
+class PerformanceLoRA(Enum):
+    QUALITY = None
+    SPEED = None
+    EXTREME_SPEED = 'sdxl_lcm_lora.safetensors'
+    LIGHTNING = 'sdxl_lightning_4step_lora.safetensors'
+    HYPER_SD = 'sdxl_hyper_sd_4step_lora.safetensors'
+
+
 class Steps(IntEnum):
     QUALITY = 60
     SPEED = 30
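For reference, the behaviour that patches 01 and 02 combine to produce can be
sketched standalone (a hypothetical demo script, not part of the series): the
selected performance's LoRA filename is removed from a candidate list by
basename, so a copy living in a subfolder is matched as well.

    from enum import Enum
    from pathlib import Path

    class PerformanceLoRA(Enum):
        QUALITY = None
        SPEED = None
        EXTREME_SPEED = 'sdxl_lcm_lora.safetensors'
        LIGHTNING = 'sdxl_lightning_4step_lora.safetensors'
        HYPER_SD = 'sdxl_hyper_sd_4step_lora.safetensors'

    def remove_performance_lora(filenames: list, performance_lora: str | None):
        # simplified mirror of modules.util.remove_performance_lora: it takes
        # the LoRA filename directly instead of a Performance member
        result = filenames.copy()
        if performance_lora is None:
            return result
        for filename in filenames:
            if Path(filename).name == performance_lora:
                result.remove(filename)
        return result

    files = ['hey-lora.safetensors', 'subfolder/sdxl_lcm_lora.safetensors']
    print(remove_performance_lora(files, PerformanceLoRA.EXTREME_SPEED.value))
    # -> ['hey-lora.safetensors']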
From 55b01a81a6f3ee4cce3f510305af172780aec7a7 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Thu, 30 May 2024 00:14:27 +0200
Subject: [PATCH 03/12] refactor: clean up flags, use __members__ to check
 whether an enum contains a key

---
 modules/flags.py | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/modules/flags.py b/modules/flags.py
index 924f8ce2..cc191c92 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -48,7 +48,8 @@ SAMPLERS = KSAMPLER | SAMPLER_EXTRA
 
 KSAMPLER_NAMES = list(KSAMPLER.keys())
 
-SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo", "align_your_steps", "tcd"]
+SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo",
+                   "align_your_steps", "tcd"]
 SAMPLER_NAMES = KSAMPLER_NAMES + list(SAMPLER_EXTRA.keys())
 
 sampler_list = SAMPLER_NAMES
@@ -91,6 +92,7 @@ sdxl_aspect_ratios = [
     '1664*576', '1728*576'
 ]
 
+
 class MetadataScheme(Enum):
     FOOOCUS = 'fooocus'
     A1111 = 'a1111'
@@ -157,7 +159,10 @@ class Performance(Enum):
         return x in [cls.EXTREME_SPEED.value, cls.LIGHTNING.value, cls.HYPER_SD.value]
 
     def steps(self) -> int | None:
-        return Steps[self.name].value if Steps[self.name] else None
+        return Steps[self.name].value if self.name in Steps.__members__ else None
 
     def steps_uov(self) -> int | None:
-        return StepsUOV[self.name].value if Steps[self.name] else None
+        return StepsUOV[self.name].value if self.name in StepsUOV.__members__ else None
+
+    def lora_filename(self) -> str | None:
+        return PerformanceLoRA[self.name].value if self.name in PerformanceLoRA.__members__ else None
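The guard rewritten above matters because indexing an Enum with a missing key
raises KeyError rather than returning a falsy value, so the old form could
never fall through to None; checking __members__ first is the safe spelling.
A minimal, self-contained illustration (Steps truncated to two members here
for brevity):

    from enum import IntEnum

    class Steps(IntEnum):
        QUALITY = 60
        SPEED = 30

    name = 'HYPER_SD'  # a Performance member without a Steps entry in this sketch

    try:
        steps = Steps[name].value if Steps[name] else None  # old guard
    except KeyError:
        print('old guard raises KeyError')

    steps = Steps[name].value if name in Steps.__members__ else None  # new guard
    print(steps)  # -> None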
From e3060e00d4a2ecf587847af370ca71fb47fea1a1 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Thu, 30 May 2024 00:16:34 +0200
Subject: [PATCH 04/12] feat: only filter LoRA of selected performance instead
 of all performance LoRAs

---
 modules/async_worker.py |  4 ++-
 modules/util.py         |  9 +++++--
 tests/test_utils.py     | 58 ++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 67 insertions(+), 4 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index d7d9b9fd..9c16d6fc 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -462,8 +462,10 @@ def worker():
 
         progressbar(async_task, 2, 'Loading models ...')
 
-        loras, prompt = parse_lora_references_from_prompt(prompt, loras, modules.config.default_max_lora_number)
+        lora_filenames = modules.util.remove_performance_lora(modules.config.lora_filenames, performance_selection)
+        loras, prompt = parse_lora_references_from_prompt(prompt, loras, modules.config.default_max_lora_number, lora_filenames=lora_filenames)
         loras += performance_loras
+
         pipeline.refresh_everything(refiner_model_name=refiner_model_name, base_model_name=base_model_name,
                                     loras=loras, base_model_additional_loras=base_model_additional_loras,
                                     use_synthetic_refiner=use_synthetic_refiner, vae_name=vae_name)

diff --git a/modules/util.py b/modules/util.py
index 3f712214..09d36770 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -398,10 +398,15 @@ def get_enabled_loras(loras: list, remove_none=True) -> list:
 
 
 def parse_lora_references_from_prompt(prompt: str, loras: List[Tuple[AnyStr, float]], loras_limit: int = 5,
-                                      skip_file_check=False, prompt_cleanup=True, deduplicate_loras=True) -> tuple[List[Tuple[AnyStr, float]], str]:
+                                      skip_file_check=False, prompt_cleanup=True, deduplicate_loras=True,
+                                      lora_filenames=None) -> tuple[List[Tuple[AnyStr, float]], str]:
+    if lora_filenames is None:
+        lora_filenames = []
+
     found_loras = []
     prompt_without_loras = ''
     cleaned_prompt = ''
+
     for token in prompt.split(','):
         matches = LORAS_PROMPT_PATTERN.findall(token)
 
@@ -411,7 +416,7 @@ def parse_lora_references_from_prompt(prompt: str, loras: List[Tuple[AnyStr, flo
         for match in matches:
             lora_name = match[1] + '.safetensors'
             if not skip_file_check:
-                lora_name = get_filname_by_stem(match[1], modules.config.lora_filenames_no_special)
+                lora_name = get_filname_by_stem(match[1], lora_filenames)
             if lora_name is not None:
                 found_loras.append((lora_name, float(match[2])))
             token = token.replace(match[0], '')

diff --git a/tests/test_utils.py b/tests/test_utils.py
index 6fd550db..c1f49c13 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -1,5 +1,7 @@
+import os
 import unittest
 
+import modules.flags
 from modules import util
 
 
@@ -77,5 +79,59 @@ class TestUtils(unittest.TestCase):
         for test in test_cases:
             prompt, loras, loras_limit, skip_file_check = test["input"]
             expected = test["output"]
-            actual = util.parse_lora_references_from_prompt(prompt, loras, loras_limit=loras_limit, skip_file_check=skip_file_check)
+            actual = util.parse_lora_references_from_prompt(prompt, loras, loras_limit=loras_limit,
+                                                            skip_file_check=skip_file_check)
             self.assertEqual(expected, actual)
+
+    def test_can_parse_tokens_and_strip_performance_lora(self):
+        lora_filenames = [
+            'hey-lora.safetensors',
+            modules.flags.PerformanceLoRA.EXTREME_SPEED.value,
+            modules.flags.PerformanceLoRA.LIGHTNING.value,
+            os.path.join('subfolder', modules.flags.PerformanceLoRA.HYPER_SD.value)
+        ]
+
+        test_cases = [
+            {
+                "input": ("some prompt, <lora:hey-lora:0.4>", [], 5, True, modules.flags.Performance.QUALITY),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            },
+            {
+                "input": ("some prompt, <lora:hey-lora:0.4>", [], 5, True, modules.flags.Performance.SPEED),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            },
+            {
+                "input": ("some prompt, <lora:hey-lora:0.4>, <lora:sdxl_lcm_lora:1>", [], 5, True, modules.flags.Performance.EXTREME_SPEED),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            },
+            {
+                "input": ("some prompt, <lora:hey-lora:0.4>, <lora:sdxl_lightning_4step_lora:1>", [], 5, True, modules.flags.Performance.LIGHTNING),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            },
+            {
+                "input": ("some prompt, <lora:hey-lora:0.4>, <lora:sdxl_hyper_sd_4step_lora:1>", [], 5, True, modules.flags.Performance.HYPER_SD),
+                "output": (
+                    [('hey-lora.safetensors', 0.4)],
+                    'some prompt'
+                ),
+            }
+        ]
+
+        for test in test_cases:
+            prompt, loras, loras_limit, skip_file_check, performance = test["input"]
+            lora_filenames = modules.util.remove_performance_lora(lora_filenames, performance)
+            expected = test["output"]
+            actual = util.parse_lora_references_from_prompt(prompt, loras, loras_limit=loras_limit, lora_filenames=lora_filenames)
+            self.assertEqual(expected, actual)
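Condensed, the ordering patch 04 establishes in the worker looks like the
sketch below (prepare_loras is a hypothetical name; the real code runs inline
in worker()): filter the selected performance's LoRA out of the candidate
list first, resolve prompt references against the filtered list, then append
the LoRAs the performance itself injects.

    import modules.config
    import modules.util

    def prepare_loras(prompt, loras, performance_selection, performance_loras):
        # drop only the LoRA belonging to the currently selected performance
        lora_filenames = modules.util.remove_performance_lora(
            modules.config.lora_filenames, performance_selection)
        # prompt tokens such as <lora:name:0.4> resolve against the filtered list,
        # so the active performance's LoRA can no longer be referenced by hand
        loras, prompt = modules.util.parse_lora_references_from_prompt(
            prompt, loras, modules.config.default_max_lora_number,
            lora_filenames=lora_filenames)
        return loras + performance_loras, prompt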
From 8fea1cd9bb0af13cf4bea61b3ab57ce2c153b017 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Thu, 30 May 2024 00:17:37 +0200
Subject: [PATCH 05/12] fix: disable intermediate results for all restricted
 performances

These performances are too fast for Gradio, which becomes a bottleneck.
---
 webui.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/webui.py b/webui.py
index 0dd86350..de8cb126 100644
--- a/webui.py
+++ b/webui.py
@@ -461,8 +461,8 @@ with shared.gradio_root:
                                                       interactive=not modules.config.default_black_out_nsfw,
                                                       info='Disable preview during generation.')
             disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
-                                                       value=modules.config.default_performance == flags.Performance.EXTREME_SPEED.value,
-                                                       interactive=modules.config.default_performance != flags.Performance.EXTREME_SPEED.value,
+                                                       value=flags.Performance.has_restricted_features(modules.config.default_performance),
+                                                       interactive=not flags.Performance.has_restricted_features(modules.config.default_performance),
                                                        info='Disable intermediate results during generation, only show final gallery.')
             disable_seed_increment = gr.Checkbox(label='Disable seed increment',
                                                  info='Disable automatic seed increment when image number is > 1.',
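The predicate swapped in here generalises the old EXTREME_SPEED equality
check. A short probe, assuming a Fooocus checkout on the import path:

    from modules import flags

    for p in flags.Performance:
        print(p.value, flags.Performance.has_restricted_features(p))
    # EXTREME_SPEED, LIGHTNING and HYPER_SD print True, so any of them now
    # disables intermediate results by default, not only EXTREME_SPEED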
From db8228d8f56c7f0bb1285a6f287346c37c6bdb32 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Thu, 30 May 2024 00:19:35 +0200
Subject: [PATCH 06/12] refactor: rename parse_json to to_json, rename
 parse_string to to_string

---
 modules/meta_parser.py    | 12 ++++++------
 modules/private_logger.py |  2 +-
 webui.py                  |  2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 586e62da..23ec01ce 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -261,11 +261,11 @@ class MetadataParser(ABC):
         raise NotImplementedError
 
     @abstractmethod
-    def parse_json(self, metadata: dict | str) -> dict:
+    def to_json(self, metadata: dict | str) -> dict:
         raise NotImplementedError
 
     @abstractmethod
-    def parse_string(self, metadata: dict) -> str:
+    def to_string(self, metadata: dict) -> str:
         raise NotImplementedError
 
     def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name,
@@ -328,7 +328,7 @@ class A1111MetadataParser(MetadataParser):
         'version': 'Version'
     }
 
-    def parse_json(self, metadata: str) -> dict:
+    def to_json(self, metadata: str) -> dict:
         metadata_prompt = ''
         metadata_negative_prompt = ''
 
@@ -422,7 +422,7 @@ class A1111MetadataParser(MetadataParser):
 
         return data
 
-    def parse_string(self, metadata: dict) -> str:
+    def to_string(self, metadata: dict) -> str:
         data = {k: v for _, k, v in metadata}
 
         width, height = eval(data['resolution'])
@@ -502,7 +502,7 @@ class FooocusMetadataParser(MetadataParser):
     def get_scheme(self) -> MetadataScheme:
         return MetadataScheme.FOOOCUS
 
-    def parse_json(self, metadata: dict) -> dict:
+    def to_json(self, metadata: dict) -> dict:
         for key, value in metadata.items():
             if value in ['', 'None']:
                 continue
@@ -517,7 +517,7 @@ class FooocusMetadataParser(MetadataParser):
 
         return metadata
 
-    def parse_string(self, metadata: list) -> str:
+    def to_string(self, metadata: list) -> str:
         for li, (label, key, value) in enumerate(metadata):
             # remove model folder paths from metadata
             if key.startswith('lora_combined_'):

diff --git a/modules/private_logger.py b/modules/private_logger.py
index eb8f0cc5..6fdb680c 100644
--- a/modules/private_logger.py
+++ b/modules/private_logger.py
@@ -27,7 +27,7 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for
     date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format)
     os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True)
 
-    parsed_parameters = metadata_parser.parse_string(metadata.copy()) if metadata_parser is not None else ''
+    parsed_parameters = metadata_parser.to_string(metadata.copy()) if metadata_parser is not None else ''
     image = Image.fromarray(img)
 
     if output_format == OutputFormat.PNG.value:

diff --git a/webui.py b/webui.py
index de8cb126..6f08757d 100644
--- a/webui.py
+++ b/webui.py
@@ -713,7 +713,7 @@ with shared.gradio_root:
                 parsed_parameters = {}
             else:
                 metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
-                parsed_parameters = metadata_parser.parse_json(parameters)
+                parsed_parameters = metadata_parser.to_json(parameters)
 
             return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating)
 

From f8f2455b65637ef5e451ba7622493ed779ce0e8a Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Thu, 30 May 2024 00:20:23 +0200
Subject: [PATCH 07/12] feat: use speed steps as default instead of hardcoded
 30

---
 modules/meta_parser.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 23ec01ce..315d5fd5 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -248,7 +248,7 @@ class MetadataParser(ABC):
         self.full_prompt: str = ''
         self.raw_negative_prompt: str = ''
         self.full_negative_prompt: str = ''
-        self.steps: int = 30
+        self.steps: int = Steps.SPEED.value
         self.base_model_name: str = ''
         self.base_model_hash: str = ''
         self.refiner_model_name: str = ''

From 1981ace9e14058c257cedef9774ff8b1cd2f495f Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Thu, 30 May 2024 00:20:53 +0200
Subject: [PATCH 08/12] feat: add method by_steps to Performance

---
 modules/flags.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/modules/flags.py b/modules/flags.py
index cc191c92..25b0caae 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -152,6 +152,10 @@ class Performance(Enum):
     def list(cls) -> list:
         return list(map(lambda c: c.value, cls))
 
+    @classmethod
+    def by_steps(cls, steps: int | str):
+        return cls[Steps(int(steps)).name]
+
     @classmethod
     def has_restricted_features(cls, x) -> bool:
         if isinstance(x, Performance):

From 9c8ffbbe1804b45869bef2806d116d7f8a1954d6 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Thu, 30 May 2024 00:21:18 +0200
Subject: [PATCH 09/12] refactor: remove method ordinal_suffix, not needed
 anymore

---
 modules/util.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/modules/util.py b/modules/util.py
index 09d36770..5003f79a 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -382,9 +382,6 @@ def get_file_from_folder_list(name, folders):
     return os.path.abspath(os.path.realpath(os.path.join(folders[0], name)))
 
 
-def ordinal_suffix(number: int) -> str:
-    return 'th' if 10 <= number % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(number % 10, 'th')
-
 
 def makedirs_with_log(path):
     try:
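Performance.by_steps, added in patch 08, inverts the Steps mapping: given a
raw step count it returns the Performance whose Steps entry matches, which
the A1111 metadata importer relies on in the next patch. Strings are accepted
because metadata arrives as text; a count with no Steps entry raises
ValueError.

    from modules.flags import Performance

    print(Performance.by_steps(30))    # Performance.SPEED   (Steps.SPEED == 30)
    print(Performance.by_steps('60'))  # Performance.QUALITY (coerced via int())
    # Performance.by_steps(7) raises ValueError: 7 is not a valid Steps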
From 91281e5561891801c80dfcb971ae5e51b9e57602 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Thu, 30 May 2024 00:22:31 +0200
Subject: [PATCH 10/12] feat: only filter LoRA of selected performance instead
 of all performance LoRAs

both metadata and history log
---
 modules/config.py      | 19 +-----------------
 modules/meta_parser.py | 45 ++++++++++++++++++++++++++++++++++--------
 2 files changed, 38 insertions(+), 26 deletions(-)

diff --git a/modules/config.py b/modules/config.py
index 87b27d90..29a16d6d 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -548,25 +548,9 @@ with open(config_example_path, "w", encoding="utf-8") as json_file:
 
 model_filenames = []
 lora_filenames = []
-lora_filenames_no_special = []
 vae_filenames = []
 wildcard_filenames = []
 
-sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors'
-sdxl_lightning_lora = 'sdxl_lightning_4step_lora.safetensors'
-sdxl_hyper_sd_lora = 'sdxl_hyper_sd_4step_lora.safetensors'
-loras_metadata_remove = [sdxl_lcm_lora, sdxl_lightning_lora, sdxl_hyper_sd_lora]
-
-
-def remove_special_loras(lora_filenames):
-    global loras_metadata_remove
-
-    loras_no_special = lora_filenames.copy()
-    for lora_to_remove in loras_metadata_remove:
-        if lora_to_remove in loras_no_special:
-            loras_no_special.remove(lora_to_remove)
-    return loras_no_special
-
 
 def get_model_filenames(folder_paths, extensions=None, name_filter=None):
     if extensions is None:
@@ -582,10 +566,9 @@ def get_model_filenames(folder_paths, extensions=None, name_filter=None):
 
 
 def update_files():
-    global model_filenames, lora_filenames, lora_filenames_no_special, vae_filenames, wildcard_filenames, available_presets
+    global model_filenames, lora_filenames, vae_filenames, wildcard_filenames, available_presets
    model_filenames = get_model_filenames(paths_checkpoints)
     lora_filenames = get_model_filenames(paths_loras)
-    lora_filenames_no_special = remove_special_loras(lora_filenames)
     vae_filenames = get_model_filenames(path_vae)
     wildcard_filenames = get_files_from_folder(path_wildcards, ['.txt'])
     available_presets = get_presets()

diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 315d5fd5..ec35b428 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -32,7 +32,7 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
     get_str('prompt', 'Prompt', loaded_parameter_dict, results)
     get_str('negative_prompt', 'Negative Prompt', loaded_parameter_dict, results)
     get_list('styles', 'Styles', loaded_parameter_dict, results)
-    get_str('performance', 'Performance', loaded_parameter_dict, results)
+    performance = get_str('performance', 'Performance', loaded_parameter_dict, results)
     get_steps('steps', 'Steps', loaded_parameter_dict, results)
     get_number('overwrite_switch', 'Overwrite Switch', loaded_parameter_dict, results)
     get_resolution('resolution', 'Resolution', loaded_parameter_dict, results)
@@ -59,19 +59,26 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
 
     get_freeu('freeu', 'FreeU', loaded_parameter_dict, results)
 
+    performance_filename = None
+    if performance is not None and performance in Performance.list():
+        performance = Performance(performance)
+        performance_filename = performance.lora_filename()
+
     for i in range(modules.config.default_max_lora_number):
-        get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results)
+        get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results, performance_filename)
 
     return results
 
 
-def get_str(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
+def get_str(key: str, fallback: str | None, source_dict: dict, results: list, default=None) -> str | None:
     try:
         h = source_dict.get(key, source_dict.get(fallback, default))
         assert isinstance(h, str)
         results.append(h)
+        return h
     except:
         results.append(gr.update())
+        return None
 
 
 def get_list(key: str, fallback: str | None, source_dict: dict, results: list, default=None):
@@ -181,7 +188,7 @@ def get_freeu(key: str, fallback: str | None, source_dict: dict, results: list,
         results.append(gr.update())
 
 
-def get_lora(key: str, fallback: str | None, source_dict: dict, results: list):
+def get_lora(key: str, fallback: str | None, source_dict: dict, results: list, performance_filename: str | None):
     try:
         split_data = source_dict.get(key, source_dict.get(fallback)).split(' : ')
         enabled = True
@@ -193,6 +200,9 @@ def get_lora(key: str, fallback: str | None, source_dict: dict, results: list,
             name = split_data[1]
             weight = split_data[2]
 
+        if name == performance_filename:
+            raise Exception
+
         weight = float(weight)
         results.append(enabled)
         results.append(name)
@@ -381,10 +391,19 @@ class A1111MetadataParser(MetadataParser):
 
         data['styles'] = str(found_styles)
 
+        performance: Performance | None = None
+        performance_lora = None
+        if 'performance' in data and data['performance'] in Performance.list():
+            performance = Performance(data['performance'])
+            data['performance'] = performance.value
+            performance_lora = performance.lora_filename()
+
         # try to load performance based on steps, fallback for direct A1111 imports
-        if 'steps' in data and 'performance' not in data:
+        if 'steps' in data and performance is None:
             try:
-                data['performance'] = Performance[Steps(int(data['steps'])).name].value
+                performance = Performance.by_steps(data['steps'])
+                data['performance'] = performance.value
+                performance_lora = performance.lora_filename()
             except ValueError | KeyError:
                 pass
 
@@ -414,8 +433,10 @@ class A1111MetadataParser(MetadataParser):
                     lora_split = lora.split(': ')
                     lora_name = lora_split[0]
                     lora_weight = lora_split[2] if len(lora_split) == 3 else lora_split[1]
-                    for filename in modules.config.lora_filenames_no_special:
+                    for filename in modules.config.lora_filenames:
                         path = Path(filename)
+                        if performance_lora is not None and path.name == performance_lora:
+                            break
                         if lora_name == path.stem:
                             data[f'lora_combined_{li + 1}'] = f'{filename} : {lora_weight}'
                             break
@@ -503,13 +524,19 @@ class FooocusMetadataParser(MetadataParser):
         return MetadataScheme.FOOOCUS
 
     def to_json(self, metadata: dict) -> dict:
+        performance = None
+        if 'performance' in metadata and metadata['performance'] in Performance.list():
+            performance = Performance(metadata['performance'])
+
+        lora_filenames = modules.util.remove_performance_lora(modules.config.lora_filenames, performance)
+
         for key, value in metadata.items():
            if value in ['', 'None']:
                 continue
             if key in ['base_model', 'refiner_model']:
                 metadata[key] = self.replace_value_with_filename(key, value, modules.config.model_filenames)
             elif key.startswith('lora_combined_'):
-                metadata[key] = self.replace_value_with_filename(key, value, modules.config.lora_filenames_no_special)
+                metadata[key] = self.replace_value_with_filename(key, value, lora_filenames)
             elif key == 'vae':
                 metadata[key] = self.replace_value_with_filename(key, value, modules.config.vae_filenames)
             else:
@@ -557,6 +584,8 @@ class FooocusMetadataParser(MetadataParser):
             elif value == path.stem:
                 return filename
 
+        return None
+
 
 def get_metadata_parser(metadata_scheme: MetadataScheme) -> MetadataParser:
     match metadata_scheme:
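Patch 10's get_lora guard exists because replayed metadata can name the same
file twice: once implicitly through the performance setting and once
explicitly in a lora_combined_N slot. A toy version of the effect, using only
values visible above:

    performance_filename = 'sdxl_lcm_lora.safetensors'  # EXTREME_SPEED's LoRA

    def keep_lora(name: str) -> bool:
        # stands in for get_lora's `if name == performance_filename: raise`
        # guard: the matching slot is rejected so the LoRA is not stacked a
        # second time on top of the one the performance already loads
        return name != performance_filename

    slots = ['hey-lora.safetensors', 'sdxl_lcm_lora.safetensors']
    print([s for s in slots if keep_lora(s)])  # -> ['hey-lora.safetensors']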
From 2dca5ba638edd11a7586697bfb7f1c0461d800e0 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Thu, 30 May 2024 01:32:09 +0200
Subject: [PATCH 11/12] feat: do not filter LoRAs in metadata parser but
 rather in metadata load action

---
 modules/meta_parser.py | 24 ++++--------------------
 1 file changed, 4 insertions(+), 20 deletions(-)

diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index ec35b428..0d509a19 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -59,6 +59,7 @@ def load_parameter_button_click(raw_metadata: dict | str, is_generating: bool):
     get_freeu('freeu', 'FreeU', loaded_parameter_dict, results)
 
+    # prevent performance LoRAs to be added twice, by performance and by lora
     performance_filename = None
     if performance is not None and performance in Performance.list():
         performance = Performance(performance)
         performance_filename = performance.lora_filename()
@@ -391,19 +392,10 @@ class A1111MetadataParser(MetadataParser):
 
         data['styles'] = str(found_styles)
 
-        performance: Performance | None = None
-        performance_lora = None
-        if 'performance' in data and data['performance'] in Performance.list():
-            performance = Performance(data['performance'])
-            data['performance'] = performance.value
-            performance_lora = performance.lora_filename()
-
         # try to load performance based on steps, fallback for direct A1111 imports
-        if 'steps' in data and performance is None:
+        if 'steps' in data and 'performance' not in data:
             try:
-                performance = Performance.by_steps(data['steps'])
-                data['performance'] = performance.value
-                performance_lora = performance.lora_filename()
+                data['performance'] = Performance.by_steps(data['steps']).value
             except ValueError | KeyError:
                 pass
 
@@ -435,8 +427,6 @@ class A1111MetadataParser(MetadataParser):
                     lora_weight = lora_split[2] if len(lora_split) == 3 else lora_split[1]
                     for filename in modules.config.lora_filenames:
                         path = Path(filename)
-                        if performance_lora is not None and path.name == performance_lora:
-                            break
                         if lora_name == path.stem:
                             data[f'lora_combined_{li + 1}'] = f'{filename} : {lora_weight}'
                             break
@@ -524,19 +514,13 @@ class FooocusMetadataParser(MetadataParser):
         return MetadataScheme.FOOOCUS
 
     def to_json(self, metadata: dict) -> dict:
-        performance = None
-        if 'performance' in metadata and metadata['performance'] in Performance.list():
-            performance = Performance(metadata['performance'])
-
-        lora_filenames = modules.util.remove_performance_lora(modules.config.lora_filenames, performance)
-
         for key, value in metadata.items():
             if value in ['', 'None']:
                 continue
             if key in ['base_model', 'refiner_model']:
                 metadata[key] = self.replace_value_with_filename(key, value, modules.config.model_filenames)
             elif key.startswith('lora_combined_'):
-                metadata[key] = self.replace_value_with_filename(key, value, lora_filenames)
+                metadata[key] = self.replace_value_with_filename(key, value, modules.config.lora_filenames)
             elif key == 'vae':
                 metadata[key] = self.replace_value_with_filename(key, value, modules.config.vae_filenames)
             else:
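After patch 11 the A1111 fallback is back to its compact form. Reduced to its
core as a hypothetical free function (in the repo this logic sits inside
A1111MetadataParser.to_json):

    from modules.flags import Performance

    def infer_performance(data: dict) -> None:
        # infer the performance from the step count only when none was stored
        if 'steps' in data and 'performance' not in data:
            try:
                data['performance'] = Performance.by_steps(data['steps']).value
            except (ValueError, KeyError):
                # note: the repo code writes `except ValueError | KeyError`,
                # which would itself fail with TypeError if it were ever hit;
                # the tuple used here is the portable spelling
                pass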
From 83ef32a88b6cc29445b2ba5015425827181acb3f Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Thu, 30 May 2024 14:39:43 +0200
Subject: [PATCH 12/12] feat: make disable_intermediate_results interactive
 again even if performance has restricted features

Users who want to disable this option should be able to do so, even if
performance will be impacted.
---
 webui.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/webui.py b/webui.py
index 6f08757d..7fc6f2cf 100644
--- a/webui.py
+++ b/webui.py
@@ -462,7 +462,6 @@ with shared.gradio_root:
                                                       info='Disable preview during generation.')
             disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
                                                        value=flags.Performance.has_restricted_features(modules.config.default_performance),
-                                                       interactive=not flags.Performance.has_restricted_features(modules.config.default_performance),
                                                        info='Disable intermediate results during generation, only show final gallery.')
             disable_seed_increment = gr.Checkbox(label='Disable seed increment',
                                                  info='Disable automatic seed increment when image number is > 1.',
@@ -616,7 +615,7 @@ with shared.gradio_root:
 
         performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 +
                                                [gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 +
-                                               [gr.update(interactive=not flags.Performance.has_restricted_features(x), value=flags.Performance.has_restricted_features(x))] * 1,
+                                               [gr.update(value=flags.Performance.has_restricted_features(x))] * 1,
                                      inputs=performance_selection, outputs=[
                                          guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive,