diff --git a/modules/async_worker.py b/modules/async_worker.py
index 89302a02..237d7ce6 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -193,6 +193,7 @@ def worker():
             modules.patch.negative_adm_scale = advanced_parameters.adm_scaler_negative = 1.0
             modules.patch.adm_scaler_end = advanced_parameters.adm_scaler_end = 0.0
 
+        # TODO move hashing to metadata mapper as this slows down the generation process
         base_model_path = os.path.join(modules.config.path_checkpoints, base_model_name)
         base_model_hash = calculate_sha256(base_model_path)[0:10]
 
@@ -788,7 +789,6 @@ def worker():
                     ('Resolution', 'resolution', str((width, height)), True, True),
                     ('Sharpness', 'sharpness', sharpness, True, True),
                     ('Guidance Scale', 'guidance_scale', guidance_scale, True, True),
-                    # ('Denoising Strength', 'denoising_strength', denoising_strength, False, False),
                     ('ADM Guidance', 'adm_guidance', str((
                         modules.patch.positive_adm_scale,
                         modules.patch.negative_adm_scale,
diff --git a/modules/metadata.py b/modules/metadata.py
index 0f24f9f6..818494c2 100644
--- a/modules/metadata.py
+++ b/modules/metadata.py
@@ -114,7 +114,6 @@ class A1111MetadataParser(MetadataParser):
         generation_params = {
             self.fooocus_to_a1111['steps']: data['steps'],
             self.fooocus_to_a1111['sampler']: data['sampler'],
-            self.fooocus_to_a1111['guidance_scale']: data['guidance_scale'],
             self.fooocus_to_a1111['seed']: data['seed'],
             # TODO check resolution value, should be string
             self.fooocus_to_a1111['resolution']: f'{width}x{heigth}',
@@ -131,7 +130,6 @@ class A1111MetadataParser(MetadataParser):
 
         generation_params |= {
             self.fooocus_to_a1111['lora_hashes']: lora_hashes_string,
-            # "Denoising strength": data['denoising_strength'],
             self.fooocus_to_a1111['version']: {data['version']}
         }
 
@@ -139,7 +137,7 @@ class A1111MetadataParser(MetadataParser):
             [k if k == v else f'{k}: {quote(v)}' for k, v in generation_params.items() if v is not None])
         # TODO check if multiline positive prompt is correctly processed
         positive_prompt_resolved = ', '.join(data['full_prompt']) #TODO add loras to positive prompt if even possible
-        negative_prompt_resolved = ', '.join(data['full_negative_prompt']) #TODO add loras to positive prompt if even possible
+        negative_prompt_resolved = ', '.join(data['full_negative_prompt']) #TODO add loras to negative prompt if even possible
         negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else ""
         return f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip()
diff --git a/modules/util.py b/modules/util.py
index 4a709c31..21a00ef6 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -175,7 +175,7 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None):
             relative_path = ""
         for filename in files:
             _, file_extension = os.path.splitext(filename)
-            if (exensions == None or file_extension.lower() in exensions) and (name_filter == None or name_filter in _):
+            if (exensions is None or file_extension.lower() in exensions) and (name_filter is None or name_filter in _):
                 path = os.path.join(relative_path, filename)
                 filenames.append(path)
 
diff --git a/webui.py b/webui.py
index 666d0a00..8a22072c 100644
--- a/webui.py
+++ b/webui.py
@@ -607,7 +607,7 @@ with shared.gradio_root:
 
                 return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating)
 
-            metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False) \
+            metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
                 .then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)
 
         generate_button.click(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), [], True),
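The TODO added in async_worker.py points out that hashing the checkpoint file on every generation is slow. Below is a minimal sketch of how the digest could be computed once and reused; `cached_sha256` is a hypothetical helper (the repo's actual helper is `calculate_sha256` in modules/util.py), not the project's implementation.

```python
# Hypothetical cached checkpoint-hash helper (assumption, not Fooocus code).
# Caching by path avoids re-reading multi-gigabyte checkpoint files on every
# generation, which is what the TODO in async_worker.py is about.
import hashlib
from functools import lru_cache


@lru_cache(maxsize=None)
def cached_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, reading it in chunks."""
    sha = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            sha.update(chunk)
    return sha.hexdigest()


# Mirrors the usage in the diff: only the first 10 hex characters are kept.
# base_model_hash = cached_sha256(base_model_path)[0:10]
```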
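For background on the metadata.py hunks: the A1111-style parser collapses `generation_params` into the single parameter line written into the image. The sketch below illustrates that serialization; the `quote()` helper here is an assumption modeled on the A1111 convention of JSON-quoting values that contain commas or newlines, and may differ from the helper actually imported in modules/metadata.py.

```python
# Illustrative serialization of A1111-style generation parameters
# (values below are made up; quote() is an assumed stand-in).
import json


def quote(value):
    text = str(value)
    if ',' in text or '\n' in text or '"' in text:
        return json.dumps(text, ensure_ascii=False)
    return text


generation_params = {
    'Steps': 30,
    'Sampler': 'DPM++ 2M Karras',
    'Seed': 12345,
    'Size': '1152x896',
    'Lora hashes': 'detail: a1b2c3d4e5',
}

# Same join expression as in the diff: bare flags (k == v) are emitted as-is,
# everything else as "Key: value", and None values are dropped.
generation_params_text = ', '.join(
    [k if k == v else f'{k}: {quote(v)}'
     for k, v in generation_params.items() if v is not None])

print(generation_params_text)
# Steps: 30, Sampler: DPM++ 2M Karras, Seed: 12345, Size: 1152x896, Lora hashes: detail: a1b2c3d4e5
```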
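The util.py change is purely idiomatic (`is None` instead of `== None`). For reference, a small usage sketch of `get_files_from_folder`; the folder paths are made up for illustration.

```python
# Hypothetical usage of modules.util.get_files_from_folder: extensions are
# compared case-insensitively against the file suffix, and name_filter is a
# substring test against the file name without its extension.
from modules.util import get_files_from_folder

# All .safetensors/.ckpt files below an assumed checkpoints folder.
checkpoints = get_files_from_folder('models/checkpoints', ['.safetensors', '.ckpt'])

# Only files whose stem contains "xl".
xl_checkpoints = get_files_from_folder('models/checkpoints', ['.safetensors'], name_filter='xl')
```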
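The webui.py change only flips `show_progress` to True on the metadata import handler so the user gets feedback while the image is parsed. Below is a self-contained sketch of the Gradio `.click(...).then(...)` chaining pattern used there, with illustrative placeholder components rather than the actual Fooocus layout.

```python
# Minimal Gradio sketch of chained event handlers (placeholder components,
# not the real Fooocus UI). The first handler shows a progress indicator,
# the follow-up re-sorts the style selection silently.
import gradio as gr


def import_metadata(image, is_generating):
    # ...parse metadata from the image and return updated control values...
    return gr.update()


def sort_styles(selections):
    return sorted(selections)


with gr.Blocks() as demo:
    image = gr.Image()
    styles = gr.CheckboxGroup(choices=['cinematic', 'sharp'], value=['sharp'])
    state = gr.State(False)
    button = gr.Button('Import metadata')

    button.click(import_metadata, inputs=[image, state], outputs=[styles],
                 queue=False, show_progress=True) \
        .then(sort_styles, inputs=styles, outputs=styles,
              queue=False, show_progress=False)
```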