diff --git a/fooocus_version.py b/fooocus_version.py
index 4d0c90e0..7f2135fb 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '1.0.19'
+version = '1.0.20'
diff --git a/modules/async_worker.py b/modules/async_worker.py
new file mode 100644
index 00000000..ded27cb6
--- /dev/null
+++ b/modules/async_worker.py
@@ -0,0 +1,77 @@
+import threading
+
+
+buffer = []
+outputs = []
+
+
+def worker():
+    global buffer, outputs
+
+    import time
+    import random
+    import modules.default_pipeline as pipeline
+    import modules.path
+
+    from PIL import Image
+    from modules.sdxl_styles import apply_style, aspect_ratios
+    from modules.util import generate_temp_filename
+
+    def handler(task):
+        prompt, negative_prompt, style_selction, performance_selction, \
+            aspect_ratios_selction, image_number, image_seed, base_model_name, refiner_model_name, \
+            l1, w1, l2, w2, l3, w3, l4, w4, l5, w5 = task
+
+        loras = [(l1, w1), (l2, w2), (l3, w3), (l4, w4), (l5, w5)]
+
+        pipeline.refresh_base_model(base_model_name)
+        pipeline.refresh_refiner_model(refiner_model_name)
+        pipeline.refresh_loras(loras)
+
+        p_txt, n_txt = apply_style(style_selction, prompt, negative_prompt)
+
+        if performance_selction == 'Speed':
+            steps = 30
+            switch = 20
+        else:
+            steps = 60
+            switch = 40
+
+        width, height = aspect_ratios[aspect_ratios_selction]
+
+        results = []
+        seed = image_seed
+        if not isinstance(seed, int) or seed < 0 or seed > 65535:
+            seed = random.randint(1, 65535)
+
+        all_steps = steps * image_number
+
+        def callback(step, x0, x, total_steps, y):
+            done_steps = i * steps + step
+            outputs.append(['preview', (
+                int(100.0 * float(done_steps) / float(all_steps)),
+                f'Step {step}/{total_steps} in the {i}-th Sampling',
+                y)])
+
+        for i in range(image_number):
+            imgs = pipeline.process(p_txt, n_txt, steps, switch, width, height, seed, callback=callback)
+
+            for x in imgs:
+                local_temp_filename = generate_temp_filename(folder=modules.path.temp_outputs_path, extension='png')
+                Image.fromarray(x).save(local_temp_filename)
+
+            seed += 1
+            results += imgs
+
+        outputs.append(['results', results])
+        return
+
+    while True:
+        time.sleep(0.01)
+        if len(buffer) > 0:
+            task = buffer.pop(0)
+            handler(task)
+    pass
+
+
+threading.Thread(target=worker, daemon=True).start()
diff --git a/modules/core.py b/modules/core.py
index 6abd3216..24e354af 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -1,6 +1,5 @@
 import os
 import random
-import cv2
 import einops
 import torch
 import numpy as np
@@ -8,12 +7,11 @@ import numpy as np
 import comfy.model_management
 import comfy.utils
 
-from comfy.sd import load_checkpoint_guess_config, load_lora_for_models
+from comfy.sd import load_checkpoint_guess_config
 from nodes import VAEDecode, EmptyLatentImage, CLIPTextEncode
 from comfy.sample import prepare_mask, broadcast_cond, load_additional_models, cleanup_additional_models
 from modules.samplers_advanced import KSampler, KSamplerWithRefiner
 from modules.adm_patch import patch_negative_adm
-from modules.cv2win32 import show_preview
 
 
 patch_negative_adm()
@@ -86,11 +84,7 @@ def get_previewer(device, latent_format):
             x_sample = taesd.decoder(torch.nn.functional.avg_pool2d(x0, kernel_size=(2, 2))).detach() * 255.0
             x_sample = einops.rearrange(x_sample, 'b c h w -> b h w c')
             x_sample = x_sample.cpu().numpy().clip(0, 255).astype(np.uint8)
-            for i, s in enumerate(x_sample):
-                if i > 0:
-                    show_preview(f'cv2_preview_{i}', s, title=f'Preview Image {i}, step = [{step}/{total_steps}')
-                else:
-                    show_preview(f'cv2_preview_{i}', s, title=f'Preview Image, step = {step}/{total_steps}')
+            return x_sample[0]
 
     taesd.preview = preview_function
 
@@ -126,10 +120,11 @@ def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sa
     pbar = comfy.utils.ProgressBar(steps)
 
     def callback(step, x0, x, total_steps):
-        if callback_function is not None:
-            callback_function(step, x0, x, total_steps)
+        y = None
         if previewer and step % 3 == 0:
-            previewer.preview(x0, step, total_steps)
+            y = previewer.preview(x0, step, total_steps)
+        if callback_function is not None:
+            callback_function(step, x0, x, total_steps, y)
         pbar.update_absolute(step + 1, total_steps, None)
 
     sigmas = None
@@ -197,10 +192,11 @@ def ksampler_with_refiner(model, positive, negative, refiner, refiner_positive,
     pbar = comfy.utils.ProgressBar(steps)
 
    def callback(step, x0, x, total_steps):
-        if callback_function is not None:
-            callback_function(step, x0, x, total_steps)
+        y = None
         if previewer and step % 3 == 0:
-            previewer.preview(x0, step, total_steps)
+            y = previewer.preview(x0, step, total_steps)
+        if callback_function is not None:
+            callback_function(step, x0, x, total_steps, y)
         pbar.update_absolute(step + 1, total_steps, None)
 
     sigmas = None
diff --git a/modules/cv2win32.py b/modules/cv2win32.py
deleted file mode 100644
index 6a76cfdd..00000000
--- a/modules/cv2win32.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import threading
-import cv2
-import os
-
-
-buffer = []
-
-
-def worker():
-    global buffer
-    try:
-        while True:
-            cv2.waitKey(50)
-            if len(buffer) > 0:
-                task = buffer.pop(0)
-                if task is None:
-                    cv2.destroyAllWindows()
-                else:
-                    flag, img, title = task
-                    cv2.imshow(flag, img)
-                    cv2.setWindowTitle(flag, title)
-                    cv2.setWindowProperty(flag, cv2.WND_PROP_TOPMOST, 1)
-    except Exception as e:
-        print('Failed to open preview window. You are not using a local device with GUI support.')
-        print(e)
-    pass
-
-
-def save_image(path, img):
-    os.makedirs(os.path.dirname(path), exist_ok=True)
-    cv2.imwrite(path, img[..., ::-1].copy())
-    print(f'Image saved: {path}')
-
-
-def show_preview(flag, img, title='preview'):
-    buffer.append((flag, img[..., ::-1].copy(), title))
-
-
-def close_all_preview():
-    buffer.append(None)
-
-
-threading.Thread(target=worker, daemon=True).start()
diff --git a/modules/html.py b/modules/html.py
new file mode 100644
index 00000000..0e031cc9
--- /dev/null
+++ b/modules/html.py
@@ -0,0 +1,84 @@
+css = '''
+.loader-container {
+    display: flex; /* Use flex to align items horizontally */
+    align-items: center; /* Center items vertically within the container */
+    white-space: nowrap; /* Prevent line breaks within the container */
+}
+
+.loader {
+    border: 8px solid #f3f3f3; /* Light grey */
+    border-top: 8px solid #3498db; /* Blue */
+    border-radius: 50%;
+    width: 30px;
+    height: 30px;
+    animation: spin 2s linear infinite;
+}
+
+@keyframes spin {
+    0% { transform: rotate(0deg); }
+    100% { transform: rotate(360deg); }
+}
+
+/* Style the progress bar */
+progress {
+    appearance: none; /* Remove default styling */
+    height: 20px; /* Set the height of the progress bar */
+    border-radius: 5px; /* Round the corners of the progress bar */
+    background-color: #f3f3f3; /* Light grey background */
+    width: 100%;
+}
+
+/* Style the progress bar container */
+.progress-container {
+    margin-left: 20px;
+    margin-right: 20px;
+    flex-grow: 1; /* Allow the progress container to take up remaining space */
+}
+
+/* Set the color of the progress bar fill */
+progress::-webkit-progress-value {
+    background-color: #3498db; /* Blue color for the fill */
+}
+
+progress::-moz-progress-bar {
+    background-color: #3498db; /* Blue color for the fill in Firefox */
+}
+
+/* Style the text on the progress bar */
+progress::after {
+    content: attr(value '%'); /* Display the progress value followed by '%' */
+    position: absolute;
+    top: 50%;
+    left: 50%;
+    transform: translate(-50%, -50%);
+    color: white; /* Set text color */
+    font-size: 14px; /* Set font size */
+}
+
+/* Style other texts */
+.loader-container > span {
+    margin-left: 5px; /* Add spacing between the progress bar and the text */
+}
+
+.progress-bar > .generating {
+    display: none !important;
+}
+
+.progress-bar{
+    height: 30px !important;
+}
+
+'''
+progress_html = '''
+<div class="loader-container">
+  <div class="loader"></div>
+  <div class="progress-container">
+    <progress value="*number*" max="100"></progress>
+  </div>
+  <span>*text*</span>
+</div>
+'''
+
+
+def make_progress_html(number, text):
+    return progress_html.replace('*number*', str(number)).replace('*text*', text)
diff --git a/requirements_versions.txt b/requirements_versions.txt
index 06962832..54264ccc 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -8,8 +8,8 @@ Pillow==9.2.0
 scipy==1.9.3
 tqdm==4.64.1
 psutil==5.9.5
-opencv-python==4.7.0.72
 numpy==1.23.5
 pytorch_lightning==1.9.4
 omegaconf==2.2.3
 gradio==3.39.0
+pygit2==1.12.2
diff --git a/update_log.md b/update_log.md
index 0f150375..b8da9040 100644
--- a/update_log.md
+++ b/update_log.md
@@ -1,3 +1,9 @@
+### 1.0.20
+
+* Re-write UI to use async codes: (1) for faster start, and (2) for better live preview.
+* Removed opencv dependency
+* Plan to support Linux soon
+
 ### 1.0.19
 
 * Unlock to allow changing model.
diff --git a/webui.py b/webui.py
index f9bc4a8c..a1e90c0d 100644
--- a/webui.py
+++ b/webui.py
@@ -1,65 +1,49 @@
 import gradio as gr
+import sys
+import time
 import modules.path
-import random
 import fooocus_version
-import modules.default_pipeline as pipeline
+import modules.html
+import modules.async_worker as worker
 
-from modules.sdxl_styles import apply_style, style_keys, aspect_ratios
-from modules.cv2win32 import close_all_preview, save_image
-from modules.util import generate_temp_filename
+from modules.sdxl_styles import style_keys, aspect_ratios
 
 
-def generate_clicked(prompt, negative_prompt, style_selction, performance_selction,
-                     aspect_ratios_selction, image_number, image_seed, base_model_name, refiner_model_name,
-                     l1, w1, l2, w2, l3, w3, l4, w4, l5, w5, progress=gr.Progress()):
+def generate_clicked(*args):
+    yield gr.update(interactive=False), \
+        gr.update(visible=True, value=modules.html.make_progress_html(1, 'Processing text encoding ...')), \
+        gr.update(visible=True, value=None), \
+        gr.update(visible=False)
 
-    loras = [(l1, w1), (l2, w2), (l3, w3), (l4, w4), (l5, w5)]
+    worker.buffer.append(list(args))
+    finished = False
 
-    pipeline.refresh_base_model(base_model_name)
-    pipeline.refresh_refiner_model(refiner_model_name)
-    pipeline.refresh_loras(loras)
-
-    p_txt, n_txt = apply_style(style_selction, prompt, negative_prompt)
-
-    if performance_selction == 'Speed':
-        steps = 30
-        switch = 20
-    else:
-        steps = 60
-        switch = 40
-
-    width, height = aspect_ratios[aspect_ratios_selction]
-
-    results = []
-    seed = image_seed
-    if not isinstance(seed, int) or seed < 0 or seed > 65535:
-        seed = random.randint(1, 65535)
-
-    all_steps = steps * image_number
-
-    def callback(step, x0, x, total_steps):
-        done_steps = i * steps + step
-        progress(float(done_steps) / float(all_steps), f'Step {step}/{total_steps} in the {i}-th Sampling')
-
-    for i in range(image_number):
-        imgs = pipeline.process(p_txt, n_txt, steps, switch, width, height, seed, callback=callback)
-
-        for x in imgs:
-            local_temp_filename = generate_temp_filename(folder=modules.path.temp_outputs_path, extension='png')
-            save_image(local_temp_filename, x)
-
-        seed += 1
-        results += imgs
-
-    close_all_preview()
-    return results
+    while not finished:
+        time.sleep(0.01)
+        if len(worker.outputs) > 0:
+            flag, product = worker.outputs.pop(0)
+            if flag == 'preview':
+                percentage, title, image = product
+                yield gr.update(interactive=False), \
+                    gr.update(visible=True, value=modules.html.make_progress_html(percentage, title)), \
+                    gr.update(visible=True, value=image) if image is not None else gr.update(), \
+                    gr.update(visible=False)
+            if flag == 'results':
+                yield gr.update(interactive=True), \
+                    gr.update(visible=False), \
+                    gr.update(visible=False), \
+                    gr.update(visible=True, value=product)
+                finished = True
+    return
 
 
-block = gr.Blocks(title='Fooocus ' + fooocus_version.version).queue()
+block = gr.Blocks(title='Fooocus ' + fooocus_version.version, css=modules.html.css).queue()
 with block:
     with gr.Row():
         with gr.Column():
-            gallery = gr.Gallery(label='Gallery', show_label=False, object_fit='contain', height=720)
+            progress_window = gr.Image(label='Preview', show_label=True, height=640, visible=False)
+            progress_html = gr.HTML(value=modules.html.make_progress_html(32, 'Progress 32%'), visible=False, elem_id='progress-bar', elem_classes='progress-bar')
+            gallery = gr.Gallery(label='Gallery', show_label=False, object_fit='contain', height=720, visible=True)
             with gr.Row():
                 with gr.Column(scale=0.85):
                     prompt = gr.Textbox(show_label=False, placeholder="Type prompt here.", container=False, autofocus=True)
@@ -107,6 +91,6 @@ with block:
             performance_selction, aspect_ratios_selction, image_number, image_seed
         ]
         ctrls += [base_model, refiner_model] + lora_ctrls
-        run_button.click(fn=generate_clicked, inputs=ctrls, outputs=[gallery])
+        run_button.click(fn=generate_clicked, inputs=ctrls, outputs=[run_button, progress_html, progress_window, gallery])
 
-block.launch(inbrowser=True)
+block.launch(inbrowser=True, server_name='0.0.0.0' if '--listen' in sys.argv else None)