diff --git a/a.png b/a.png
new file mode 100644
index 00000000..18b8182d
Binary files /dev/null and b/a.png differ
diff --git a/launch.py b/launch.py
index 030daeac..7178b952 100644
--- a/launch.py
+++ b/launch.py
@@ -18,7 +18,7 @@ def prepare_environment():
 
     xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
 
-    comfy_repo = os.environ.get('COMFY_REPO', "https://github.com/comfyanonymous/ComfyUI.git")
+    comfy_repo = os.environ.get('COMFY_REPO', "https://github.com/lllyasviel/ComfyUI-Embedded.git")
     comfy_commit_hash = os.environ.get('COMFY_COMMIT_HASH', "5ac96897e9782805cd5e8fe85bd98ad03eae2b6f")
 
     commit = commit_hash()
diff --git a/modules/sd.py b/modules/sd.py
new file mode 100644
index 00000000..a329f8ef
--- /dev/null
+++ b/modules/sd.py
@@ -0,0 +1,55 @@
+# One-shot SDXL text-to-image script: loads the base checkpoint, encodes
+# fixed prompts, samples 25 steps, VAE-decodes, and writes the result to a.png.
+import os
+import random
+
+import cv2
+import torch
+import numpy as np
+
+from comfy.sd import load_checkpoint_guess_config
+from nodes import VAEDecode, KSamplerAdvanced, EmptyLatentImage, CLIPTextEncode
+from modules.path import modelfile_path
+
+
+xl_base_filename = os.path.join(modelfile_path, 'sd_xl_base_1.0.safetensors')
+xl_refiner_filename = os.path.join(modelfile_path, 'sd_xl_refiner_1.0.safetensors')
+
+xl_base, xl_base_clip, xl_base_vae, xl_base_clipvision = load_checkpoint_guess_config(xl_base_filename)
+del xl_base_clipvision  # nothing below references CLIP-Vision; free it immediately
+
+opCLIPTextEncode = CLIPTextEncode()
+opEmptyLatentImage = EmptyLatentImage()
+opKSamplerAdvanced = KSamplerAdvanced()
+opVAEDecode = VAEDecode()
+
+with torch.no_grad():
+    positive_conditions = opCLIPTextEncode.encode(clip=xl_base_clip, text='a handsome man in forest')[0]
+    negative_conditions = opCLIPTextEncode.encode(clip=xl_base_clip, text='bad, ugly')[0]
+
+    initial_latent_image = opEmptyLatentImage.generate(width=1024, height=1024, batch_size=1)[0]
+
+    samples = opKSamplerAdvanced.sample(
+        add_noise="enable",
+        # random.randint is inclusive on BOTH ends, so the bound must be
+        # 2 ** 64 - 1: 2 ** 64 itself does not fit in an unsigned 64-bit seed.
+        noise_seed=random.randint(1, 2 ** 64 - 1),
+        steps=25,
+        cfg=9,
+        sampler_name="euler",
+        scheduler="normal",
+        start_at_step=0,
+        end_at_step=25,
+        return_with_leftover_noise="enable",
+        model=xl_base,
+        positive=positive_conditions,
+        negative=negative_conditions,
+        latent_image=initial_latent_image,
+    )[0]
+
+    vae_decoded = opVAEDecode.decode(samples=samples, vae=xl_base_vae)[0]
+
+    for image in vae_decoded:
+        i = 255. * image.cpu().numpy()
+        img = np.clip(i, 0, 255).astype(np.uint8)
+        cv2.imwrite('a.png', img[:, :, ::-1])  # reverse channel order: RGB -> BGR for OpenCV