From 10e2db67d61a10626b92fdade4f4e72ff726fa45 Mon Sep 17 00:00:00 2001
From: lvmin
Date: Fri, 13 Oct 2023 04:53:02 -0700
Subject: [PATCH] log

---
 modules/async_worker.py |  6 ------
 modules/patch.py        | 14 ++++++++++++++
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index e1d771dd..d647c0a1 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -482,12 +482,6 @@ def worker():
             print(f'Preparation time: {preparation_time:.2f} seconds')
 
             outputs.append(['preview', (13, 'Moving model to GPU ...', None)])
-            execution_start_time = time.perf_counter()
-            fcbh.model_management.load_models_gpu([pipeline.final_unet])
-            moving_time = time.perf_counter() - execution_start_time
-            print(f'Moving model to GPU: {moving_time:.2f} seconds')
-
-            outputs.append(['preview', (13, 'Starting tasks ...', None)])
 
             def callback(step, x0, x, total_steps, y):
                 done_steps = current_task_id * steps + step
diff --git a/modules/patch.py b/modules/patch.py
index bd92956d..996c2529 100644
--- a/modules/patch.py
+++ b/modules/patch.py
@@ -1,4 +1,5 @@
 import torch
+import time
 import fcbh.model_base
 import fcbh.ldm.modules.diffusionmodules.openaimodel
 import fcbh.samplers
@@ -469,6 +470,15 @@ def patched_get_autocast_device(dev):
         return 'cpu'
 
 
+def patched_load_models_gpu(*args, **kwargs):
+    execution_start_time = time.perf_counter()
+    y = fcbh.model_management.load_models_gpu_origin(*args, **kwargs)
+    moving_time = time.perf_counter() - execution_start_time
+    if moving_time > 0.1:
+        print(f'[Fooocus Model Management] Moving model(s) has taken {moving_time:.2f} seconds')
+    return y
+
+
 def patch_all():
     if not fcbh.model_management.DISABLE_SMART_MEMORY:
         vram_inadequate = fcbh.model_management.total_vram < 20 * 1024
@@ -481,6 +491,10 @@ def patch_all():
         args_manager.args.disable_smart_memory = True
         fcbh.cli_args.args.disable_smart_memory = True
 
+    if not hasattr(fcbh.model_management, 'load_models_gpu_origin'):
+        fcbh.model_management.load_models_gpu_origin = fcbh.model_management.load_models_gpu
+
+    fcbh.model_management.load_models_gpu = patched_load_models_gpu
     fcbh.model_management.get_autocast_device = patched_get_autocast_device
     fcbh.samplers.SAMPLER_NAMES += ['dpmpp_fooocus_2m_sde_inpaint_seamless']
     fcbh.model_management.text_encoder_device = text_encoder_device_patched