lvmin 2023-08-10 05:43:30 -07:00
parent 3cac92a303
commit fbd3a3ed50
1 changed file with 24 additions and 31 deletions


@@ -1,19 +1,12 @@
 import os
 import random
 import torch
 from comfy.sd import load_checkpoint_guess_config
-from comfy.model_management import unload_model
-from nodes import (
-    VAEDecode,
-    KSamplerAdvanced,
-    EmptyLatentImage,
-    SaveImage,
-    CLIPTextEncode,
-)
+from nodes import VAEDecode, KSamplerAdvanced, EmptyLatentImage, SaveImage, CLIPTextEncode
 from modules.path import modelfile_path
 xl_base_filename = os.path.join(modelfile_path, 'sd_xl_base_1.0.safetensors')
 xl_refiner_filename = os.path.join(modelfile_path, 'sd_xl_refiner_1.0.safetensors')
@@ -25,28 +18,28 @@ opEmptyLatentImage = EmptyLatentImage()
 opKSamplerAdvanced = KSamplerAdvanced()
 opVAEDecode = VAEDecode()
-positive_conditions = opCLIPTextEncode.encode(clip=xl_base_clip, text='a handsome man in forest')[0]
-negative_conditions = opCLIPTextEncode.encode(clip=xl_base_clip, text='bad, ugly')[0]
+with torch.no_grad():
+    positive_conditions = opCLIPTextEncode.encode(clip=xl_base_clip, text='a handsome man in forest')[0]
+    negative_conditions = opCLIPTextEncode.encode(clip=xl_base_clip, text='bad, ugly')[0]
-initial_latent_image = opEmptyLatentImage.generate(width=1024, height=1024, batch_size=1)[0]
+    initial_latent_image = opEmptyLatentImage.generate(width=1024, height=1024, batch_size=1)[0]
-samples = opKSamplerAdvanced.sample(
-    add_noise="enable",
-    noise_seed=random.randint(1, 2 ** 64),
-    steps=25,
-    cfg=9,
-    sampler_name="euler",
-    scheduler="normal",
-    start_at_step=0,
-    end_at_step=25,
-    return_with_leftover_noise="enable",
-    model=xl_base,
-    positive=positive_conditions,
-    negative=negative_conditions,
-    latent_image=initial_latent_image,
-)[0]
-unload_model()
+    samples = opKSamplerAdvanced.sample(
+        add_noise="enable",
+        noise_seed=random.randint(1, 2 ** 64),
+        steps=25,
+        cfg=9,
+        sampler_name="euler",
+        scheduler="normal",
+        start_at_step=0,
+        end_at_step=25,
+        return_with_leftover_noise="enable",
+        model=xl_base,
+        positive=positive_conditions,
+        negative=negative_conditions,
+        latent_image=initial_latent_image,
+    )[0]
-vae_decoded = opVAEDecode.decode(samples=samples, vae=xl_base_vae)[0]
+    vae_decoded = opVAEDecode.decode(samples=samples, vae=xl_base_vae)[0]
-a = 0
+    a = 0
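
The substantive change in this hunk is that prompt encoding, sampling, and VAE decoding now run under torch.no_grad(), so autograd does not record the forward pass and the intermediate activations needed for backpropagation are never kept, which lowers memory use during inference. Below is a minimal sketch of that pattern in plain PyTorch, using a hypothetical nn.Linear stand-in rather than the ComfyUI ops from the diff:

import torch
import torch.nn as nn

model = nn.Linear(16, 4)   # stand-in for the SDXL model loaded above
x = torch.randn(1, 16)

y = model(x)               # ordinary call: autograd tracks the computation
print(y.requires_grad)     # True

with torch.no_grad():      # inference-only: no graph is recorded
    y = model(x)
print(y.requires_grad)     # False

The same reasoning applies to the KSamplerAdvanced and VAEDecode calls above, which never need gradients in this script.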