diff --git a/modules/flags.py b/modules/flags.py
index cbfd904d..db26394e 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -51,7 +51,6 @@ class MetadataScheme(Enum):
     A1111 = 'a1111'
 
 
-# TODO use translation here
 metadata_scheme = [
     ('Fooocus (json)', MetadataScheme.FOOOCUS.value),
     ('A1111 (plain text)', MetadataScheme.A1111.value),
diff --git a/modules/metadata.py b/modules/metadata.py
index 25a4a332..aba8822d 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -3,6 +3,7 @@
 import os
 import re
 from abc import ABC, abstractmethod
 from pathlib import Path
+
 from PIL import Image
 import modules.config
@@ -156,7 +157,6 @@ class A1111MetadataParser(MetadataParser):
                     break
 
         if 'lora_hashes' in data:
-            # TODO optimize by using hash for matching. Problem is speed of creating the hash per model, even on startup
             lora_filenames = modules.config.lora_filenames.copy()
             lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora())
             for li, lora in enumerate(data['lora_hashes'].split(', ')):
@@ -214,10 +214,8 @@ class A1111MetadataParser(MetadataParser):
         generation_params_text = ", ".join(
             [k if k == v else f'{k}: {quote(v)}' for k, v in dict(sorted(generation_params.items())).items() if
              v is not None])
-        # TODO check if multiline positive prompt is correctly processed
-        positive_prompt_resolved = ', '.join(self.full_prompt)  # TODO add loras to positive prompt if even possible
-        negative_prompt_resolved = ', '.join(
-            self.full_negative_prompt)  # TODO add loras to negative prompt if even possible
+        positive_prompt_resolved = ', '.join(self.full_prompt)
+        negative_prompt_resolved = ', '.join(self.full_negative_prompt)
         negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else ""
         return f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip()
 
diff --git a/modules/util.py b/modules/util.py
index d1935848..15594ad6 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -300,7 +300,6 @@ def extract_styles_from_prompt(prompt, negative_prompt):
             found_style = style
             prompt = new_prompt
             negative_prompt = new_neg_prompt
-            # TODO this is a bit hacky tbh but works perfectly fine, check if all conditions are needed
             if real_prompt == '' and new_real_prompt != '' and new_real_prompt != prompt:
                 real_prompt = new_real_prompt
             break
@@ -312,8 +311,6 @@ def extract_styles_from_prompt(prompt, negative_prompt):
         extracted.append(found_style.name)
 
     # add prompt expansion if not all styles could be resolved
-    # TODO check if it's better to not add fooocus_expansion but just return prompt incl. fooocus_expansion words
-    # TODO evaluate if adding prompt expansion to metadata is a good idea
     if prompt != '':
         if prompt != real_prompt:
             extracted.append(modules.sdxl_styles.fooocus_expansion)
diff --git a/webui.py b/webui.py
index cb641b4e..6469ea68 100644
--- a/webui.py
+++ b/webui.py
@@ -598,7 +598,6 @@ with shared.gradio_root:
 
         def trigger_metadata_import(filepath, state_is_generating):
             parameters, items, metadata_scheme = modules.metadata.read_info_from_image(filepath)
-            # TODO check what happens if metadata_scheme is empty and A1111 string
             if parameters is None:
                 print('Could not find metadata in the image!')
                 pass