diff --git a/launch.py b/launch.py
index b0ff7f1a..0cfde8fb 100644
--- a/launch.py
+++ b/launch.py
@@ -77,7 +77,6 @@
 prepare_environment()
 build_launcher()
 args = ini_args()
-
 if args.gpu_device_id is not None:
     os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id)
     print("Set device to:", args.gpu_device_id)
diff --git a/webui.py b/webui.py
index e444da86..819f69cd 100644
--- a/webui.py
+++ b/webui.py
@@ -632,15 +632,18 @@ with shared.gradio_root:
             preset_content = modules.config.try_get_preset_content(preset) if preset != 'initial' else {}
             preset_prepared = modules.meta_parser.parse_meta_from_preset(preset_content)
 
-            default_model = preset_prepared['base_model']
-            previous_default_models = preset_prepared['previous_default_models']
-            checkpoint_downloads = preset_prepared['checkpoint_downloads']
-            embeddings_downloads = preset_prepared['embeddings_downloads']
-            lora_downloads = preset_prepared['lora_downloads']
+            default_model = preset_prepared.get('base_model')
+            previous_default_models = preset_prepared.get('previous_default_models', [])
+            checkpoint_downloads = preset_prepared.get('checkpoint_downloads', {})
+            embeddings_downloads = preset_prepared.get('embeddings_downloads', {})
+            lora_downloads = preset_prepared.get('lora_downloads', {})
 
             preset_prepared['base_model'], preset_prepared['lora_downloads'] = launch.download_models(
                 default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads)
 
+            if 'prompt' in preset_prepared and preset_prepared.get('prompt') == '':
+                del preset_prepared['prompt']
+
             return modules.meta_parser.load_parameter_button_click(json.dumps(preset_prepared), is_generating)
 
         preset_selection.change(preset_selection_change, inputs=[preset_selection, state_is_generating],
                                 outputs=load_data_outputs, queue=False, show_progress=True)
\ No newline at end of file