diff --git a/readme.md b/readme.md
index 09944172..8d951193 100644
--- a/readme.md
+++ b/readme.md
@@ -106,6 +106,8 @@ Note that the minimal requirement is **4GB Nvidia GPU memory (4GB VRAM)** and **
 
 Please open an issue if you use similar devices but still cannot achieve acceptable performances.
 
+See also the common problems and troubleshoots [here](troubleshoot.md).
+
 ### Colab
 
 (Last tested - 2023 Nov 15)
diff --git a/webui.py b/webui.py
index 0b9700c3..ae89ef93 100644
--- a/webui.py
+++ b/webui.py
@@ -443,12 +443,13 @@ with shared.gradio_root:
         model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls,
                             queue=False, show_progress=False)
 
-        performance_selection.change(lambda x: [gr.update(interactive=x != 'Extreme Speed')] * 11,
+        performance_selection.change(lambda x: [gr.update(interactive=x != 'Extreme Speed')] * 11 +
+                                               [gr.update(visible=x != 'Extreme Speed')] * 1,
                                      inputs=performance_selection,
                                      outputs=[
                                          guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive,
                                          adm_scaler_negative, refiner_switch, refiner_model, sampler_name,
-                                         scheduler_name, adaptive_cfg, refiner_swap_method
+                                         scheduler_name, adaptive_cfg, refiner_swap_method, negative_prompt
                                      ], queue=False, show_progress=False)
 
         advanced_checkbox.change(lambda x: gr.update(visible=x), advanced_checkbox, advanced_column,
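
For context, the `webui.py` hunk relies on Gradio's positional mapping between the list a `.change()` handler returns and its `outputs` list: the lambda now returns 11 interactivity updates plus one visibility update, and `negative_prompt` is appended to `outputs` so the extra update lands on that component. Below is a minimal, self-contained sketch of that pattern, not the actual Fooocus UI; the components (`performance`, `sharpness`, `negative_prompt`) and the choice strings are placeholders used only for illustration.

```python
# Minimal sketch of the Gradio pattern used in the hunk: a .change() handler
# returns one gr.update(...) per component listed in `outputs`, so the list of
# updates and the list of output components must stay aligned positionally.
# Component names here are illustrative placeholders, not Fooocus internals.
import gradio as gr

with gr.Blocks() as demo:
    performance = gr.Radio(['Speed', 'Quality', 'Extreme Speed'], value='Speed',
                           label='Performance')
    sharpness = gr.Slider(0, 30, value=2, label='Sharpness')   # stays visible, toggles interactivity
    negative_prompt = gr.Textbox(label='Negative Prompt')      # toggles visibility instead

    # Return the updates in the same order as `outputs`:
    # interactivity updates first, then the visibility update.
    performance.change(lambda x: [gr.update(interactive=x != 'Extreme Speed')] * 1 +
                                 [gr.update(visible=x != 'Extreme Speed')] * 1,
                       inputs=performance,
                       outputs=[sharpness, negative_prompt],
                       queue=False, show_progress=False)

demo.launch()
```

Keeping the `* 1` multiplier on the new visibility update mirrors the existing `* 11` style, which presumably makes it easy to hide additional components later by bumping the count and appending them to `outputs`.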