lvmin committed 2023-08-10 06:20:01 -07:00
parent 9820dac837
commit c051604808
2 changed files with 44 additions and 32 deletions

BIN  a.png  (binary file not shown; size 1.5 MiB before and after)


@@ -1,50 +1,62 @@
 import os
 import random
 import torch
 import numpy as np
 
 from comfy.sd import load_checkpoint_guess_config
 from nodes import VAEDecode, KSamplerAdvanced, EmptyLatentImage, CLIPTextEncode
 from modules.path import modelfile_path
 
 xl_base_filename = os.path.join(modelfile_path, 'sd_xl_base_1.0.safetensors')
 xl_refiner_filename = os.path.join(modelfile_path, 'sd_xl_refiner_1.0.safetensors')
 
-xl_base, xl_base_clip, xl_base_vae, xl_base_clipvision = load_checkpoint_guess_config(xl_base_filename)
-del xl_base_clipvision
-
 opCLIPTextEncode = CLIPTextEncode()
 opEmptyLatentImage = EmptyLatentImage()
 opKSamplerAdvanced = KSamplerAdvanced()
 opVAEDecode = VAEDecode()
 
-with torch.no_grad():
-    positive_conditions = opCLIPTextEncode.encode(clip=xl_base_clip, text='a handsome man in forest')[0]
-    negative_conditions = opCLIPTextEncode.encode(clip=xl_base_clip, text='bad, ugly')[0]
-    initial_latent_image = opEmptyLatentImage.generate(width=1024, height=1024, batch_size=1)[0]
+
+class StableDiffusionModel:
+    def __init__(self, unet, vae, clip, clip_vision):
+        self.unet = unet
+        self.vae = vae
+        self.clip = clip
+        self.clip_vision = clip_vision
 
-    samples = opKSamplerAdvanced.sample(
-        add_noise="enable",
-        noise_seed=random.randint(1, 2 ** 64),
-        steps=25,
-        cfg=9,
-        sampler_name="euler",
-        scheduler="normal",
-        start_at_step=0,
-        end_at_step=25,
-        return_with_leftover_noise="enable",
-        model=xl_base,
-        positive=positive_conditions,
-        negative=negative_conditions,
-        latent_image=initial_latent_image,
+
+@torch.no_grad()
+def load_model(ckpt_filename):
+    unet, clip, vae, clip_vision = load_checkpoint_guess_config(ckpt_filename)
+    return StableDiffusionModel(unet=unet, clip=clip, vae=vae, clip_vision=clip_vision)
+
+
+@torch.no_grad()
+def encode_prompt_condition(clip, prompt):
+    return opCLIPTextEncode.encode(clip=clip, text=prompt)[0]
+
+
+@torch.no_grad()
+def decode_vae(vae, latent_image):
+    return opVAEDecode.decode(samples=latent_image, vae=vae)[0]
+
+
+@torch.no_grad()
+def ksample(model, positive_condition, negative_condition, latent_image, add_noise=True, noise_seed=None, steps=25, cfg=9,
+            sampler_name='euler_ancestral', scheduler='normal', start_at_step=None, end_at_step=None,
+            return_with_leftover_noise=False):
+    return opKSamplerAdvanced.sample(
+        add_noise='enable' if add_noise else 'disable',
+        noise_seed=noise_seed if isinstance(noise_seed, int) else random.randint(1, 2 ** 64),
+        steps=steps,
+        cfg=cfg,
+        sampler_name=sampler_name,
+        scheduler=scheduler,
+        start_at_step=0 if start_at_step is None else start_at_step,
+        end_at_step=steps if end_at_step is None else end_at_step,
+        return_with_leftover_noise='enable' if return_with_leftover_noise else 'disable',
+        model=model,
+        positive=positive_condition,
+        negative=negative_condition,
+        latent_image=latent_image,
     )[0]
-
-    vae_decoded = opVAEDecode.decode(samples=samples, vae=xl_base_vae)[0]
-
-    for image in vae_decoded:
-        i = 255. * image.cpu().numpy()
-        img = np.clip(i, 0, 255).astype(np.uint8)
-        import cv2
-        cv2.imwrite('a.png', img[:, :, ::-1])
+
+@torch.no_grad()
+def image_to_numpy(x):
+    return [np.clip(255. * y.cpu().numpy(), 0, 255).astype(np.uint8) for y in x]
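
For orientation, a minimal usage sketch (not part of the commit) showing how the new helpers compose to reproduce the removed inline script. It assumes the same Fooocus/ComfyUI environment as the file above; since load_checkpoint_guess_config returns the UNet first, the sampler's model argument becomes xl_base.unet, and cv2 is imported at top level rather than inside the loop.

    import cv2

    # Load the SDXL base checkpoint into the new wrapper class.
    xl_base = load_model(xl_base_filename)

    # Encode the same prompts the removed script hard-coded.
    positive = encode_prompt_condition(clip=xl_base.clip, prompt='a handsome man in forest')
    negative = encode_prompt_condition(clip=xl_base.clip, prompt='bad, ugly')

    # Empty 1024x1024 latent, batch size 1, as in the old script.
    initial_latent = opEmptyLatentImage.generate(width=1024, height=1024, batch_size=1)[0]

    # Same sampler settings the old call hard-coded; ksample now defaults
    # end_at_step to steps and draws a random seed when noise_seed is None.
    samples = ksample(model=xl_base.unet,
                      positive_condition=positive,
                      negative_condition=negative,
                      latent_image=initial_latent,
                      steps=25, cfg=9,
                      sampler_name='euler', scheduler='normal',
                      return_with_leftover_noise=True)

    # Decode latents to images; image_to_numpy yields uint8 RGB arrays,
    # so channels are reversed to BGR for cv2, as in the removed code.
    decoded = decode_vae(vae=xl_base.vae, latent_image=samples)
    for img in image_to_numpy(decoded):
        cv2.imwrite('a.png', img[:, :, ::-1])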