chore: cleanup pid code
This commit is contained in:
parent 21f4767c65
commit 031b1f8b11
@@ -869,7 +869,7 @@ def worker():
                 task.yields.append(['finish', task.results])
             finally:
                 if pid in modules.patch.patch_settings:
-                    del modules.patch.patch_settings[os.getpid()]
+                    del modules.patch.patch_settings[pid]
                 pass
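The pattern throughout this commit: patch_settings is a dict keyed by process id, so each worker process reads and cleans up only its own entry, and calling os.getpid() on every access just repeats the same lookup. A minimal sketch of that lifecycle, assuming a PatchSettings holder like the one in this codebase (the worker body here is hypothetical):

import os

patch_settings = {}  # per-process settings, keyed by os.getpid()

class PatchSettings:
    def __init__(self, sharpness=2.0, adaptive_cfg=7.0):
        self.sharpness = sharpness
        self.adaptive_cfg = adaptive_cfg

def worker_task():
    pid = os.getpid()                      # fetch the pid once
    patch_settings[pid] = PatchSettings()
    try:
        return patch_settings[pid].sharpness   # reuse the cached pid
    finally:
        if pid in patch_settings:          # drop only this process's entry
            del patch_settings[pid]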
@@ -210,12 +210,13 @@ class BrownianTreeNoiseSamplerPatched:


 def compute_cfg(uncond, cond, cfg_scale, t):
-    mimic_cfg = float(patch_settings[os.getpid()].adaptive_cfg)
+    pid = os.getpid()
+    mimic_cfg = float(patch_settings[pid].adaptive_cfg)
     real_cfg = float(cfg_scale)

     real_eps = uncond + real_cfg * (cond - uncond)

-    if cfg_scale > patch_settings[os.getpid()].adaptive_cfg:
+    if cfg_scale > patch_settings[pid].adaptive_cfg:
         mimicked_eps = uncond + mimic_cfg * (cond - uncond)
         return real_eps * t + mimicked_eps * (1 - t)
     else:
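What compute_cfg does around the pid change: when the requested cfg_scale exceeds the adaptive threshold, it blends the epsilon predicted at the real scale with one predicted at the mimicked (adaptive) scale, weighted by t. A standalone numeric check, with adaptive_cfg passed as a parameter instead of the per-pid settings lookup (the else branch returning real_eps is an assumption, since the hunk cuts off there):

import torch

def compute_cfg_sketch(uncond, cond, cfg_scale, t, adaptive_cfg=7.0):
    real_eps = uncond + float(cfg_scale) * (cond - uncond)
    if cfg_scale > adaptive_cfg:
        mimicked_eps = uncond + float(adaptive_cfg) * (cond - uncond)
        return real_eps * t + mimicked_eps * (1 - t)
    return real_eps  # assumed fall-through when no mimicking is needed

uncond, cond = torch.zeros(1), torch.ones(1)
# real eps at scale 12 is 12.0, mimicked eps at scale 7 is 7.0;
# at t=0.5 the blend lands halfway between them
print(compute_cfg_sketch(uncond, cond, cfg_scale=12.0, t=0.5))  # tensor([9.5000])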
@@ -223,11 +224,13 @@ def compute_cfg(uncond, cond, cfg_scale, t):


 def patched_sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options=None, seed=None):
+    pid = os.getpid()
+
     if math.isclose(cond_scale, 1.0) and not model_options.get("disable_cfg1_optimization", False):
         final_x0 = calc_cond_uncond_batch(model, cond, None, x, timestep, model_options)[0]

-        if patch_settings[os.getpid()].eps_record is not None:
-            patch_settings[os.getpid()].eps_record = ((x - final_x0) / timestep).cpu()
+        if patch_settings[pid].eps_record is not None:
+            patch_settings[pid].eps_record = ((x - final_x0) / timestep).cpu()

         return final_x0

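The cfg1 shortcut this function starts with is safe because at cond_scale == 1.0 the CFG combination collapses to the conditional prediction alone, so the unconditional batch never has to be computed. A quick check of that identity:

import torch

x0_uncond = torch.randn(4)
x0_cond = torch.randn(4)
cond_scale = 1.0

mixed = x0_uncond + cond_scale * (x0_cond - x0_uncond)  # standard CFG mix
assert torch.allclose(mixed, x0_cond)  # identical to the cond result alone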
@@ -236,16 +239,16 @@ def patched_sampling_function(model, x, timestep, uncond, cond, cond_scale, mode
     positive_eps = x - positive_x0
     negative_eps = x - negative_x0

-    alpha = 0.001 * patch_settings[os.getpid()].sharpness * patch_settings[os.getpid()].global_diffusion_progress
+    alpha = 0.001 * patch_settings[pid].sharpness * patch_settings[pid].global_diffusion_progress

     positive_eps_degraded = anisotropic.adaptive_anisotropic_filter(x=positive_eps, g=positive_x0)
     positive_eps_degraded_weighted = positive_eps_degraded * alpha + positive_eps * (1.0 - alpha)

     final_eps = compute_cfg(uncond=negative_eps, cond=positive_eps_degraded_weighted,
-                            cfg_scale=cond_scale, t=patch_settings[os.getpid()].global_diffusion_progress)
+                            cfg_scale=cond_scale, t=patch_settings[pid].global_diffusion_progress)

-    if patch_settings[os.getpid()].eps_record is not None:
-        patch_settings[os.getpid()].eps_record = (final_eps / timestep).cpu()
+    if patch_settings[pid].eps_record is not None:
+        patch_settings[pid].eps_record = (final_eps / timestep).cpu()

     return x - final_eps
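The alpha blend here mixes a smoothed copy of the positive epsilon back into the raw one; alpha = 0.001 * sharpness * progress stays tiny early in sampling and grows as diffusion progresses. A sketch of just the blend, with a stand-in 3x3 mean filter since the real smoothing lives in anisotropic.adaptive_anisotropic_filter:

import torch
import torch.nn.functional as F

def blend_degraded_eps(positive_eps, sharpness, progress):
    alpha = 0.001 * sharpness * progress  # e.g. 0.001 at sharpness=2, progress=0.5
    # stand-in mean filter; the codebase uses an adaptive anisotropic
    # filter guided by positive_x0 instead
    padded = F.pad(positive_eps, (1, 1, 1, 1), mode='replicate')
    degraded = F.avg_pool2d(padded, kernel_size=3, stride=1)
    return degraded * alpha + positive_eps * (1.0 - alpha)

eps = torch.randn(1, 4, 8, 8)
print(blend_degraded_eps(eps, sharpness=2.0, progress=0.5).shape)  # (1, 4, 8, 8)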
@@ -265,13 +268,14 @@ def sdxl_encode_adm_patched(self, **kwargs):
     height = kwargs.get("height", 1024)
     target_width = width
     target_height = height
+    pid = os.getpid()

     if kwargs.get("prompt_type", "") == "negative":
-        width = float(width) * patch_settings[os.getpid()].negative_adm_scale
-        height = float(height) * patch_settings[os.getpid()].negative_adm_scale
+        width = float(width) * patch_settings[pid].negative_adm_scale
+        height = float(height) * patch_settings[pid].negative_adm_scale
     elif kwargs.get("prompt_type", "") == "positive":
-        width = float(width) * patch_settings[os.getpid()].positive_adm_scale
-        height = float(height) * patch_settings[os.getpid()].positive_adm_scale
+        width = float(width) * patch_settings[pid].positive_adm_scale
+        height = float(height) * patch_settings[pid].positive_adm_scale

     def embedder(number_list):
         h = self.embedder(torch.tensor(number_list, dtype=torch.float32))
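sdxl_encode_adm_patched feeds SDXL's size-conditioning embeddings different width/height values per prompt type: the negative prompt sees a scaled-down size and the positive prompt a scaled-up one, which biases generations toward the positive conditioning. A toy version of just the scaling branch (the 0.8 and 1.5 defaults are illustrative assumptions, not read from this diff):

def scaled_adm_size(width, height, prompt_type,
                    negative_adm_scale=0.8, positive_adm_scale=1.5):
    # mirrors the if/elif above with the per-pid settings inlined
    if prompt_type == "negative":
        return float(width) * negative_adm_scale, float(height) * negative_adm_scale
    if prompt_type == "positive":
        return float(width) * positive_adm_scale, float(height) * positive_adm_scale
    return float(width), float(height)

print(scaled_adm_size(1024, 1024, "positive"))  # (1536.0, 1536.0)
print(scaled_adm_size(1024, 1024, "negative"))  # (819.2, 819.2)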
@@ -335,6 +339,7 @@ def timed_adm(y, timesteps):
 def patched_cldm_forward(self, x, hint, timesteps, context, y=None, **kwargs):
     t_emb = ldm_patched.ldm.modules.diffusionmodules.openaimodel.timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(x.dtype)
     emb = self.time_embed(t_emb)
+    pid = os.getpid()

     guided_hint = self.input_hint_block(hint, emb, context)
@@ -360,10 +365,10 @@ def patched_cldm_forward(self, x, hint, timesteps, context, y=None, **kwargs):
     h = self.middle_block(h, emb, context)
     outs.append(self.middle_block_out(h, emb, context))

-    if patch_settings[os.getpid()].controlnet_softness > 0:
+    if patch_settings[pid].controlnet_softness > 0:
         for i in range(10):
             k = 1.0 - float(i) / 9.0
-            outs[i] = outs[i] * (1.0 - patch_settings[os.getpid()].controlnet_softness * k)
+            outs[i] = outs[i] * (1.0 - patch_settings[pid].controlnet_softness * k)

     return outs
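The softness loop scales the ten ControlNet residuals with a linearly decaying factor: k runs from 1.0 at outs[0] down to 0.0 at outs[9], so shallow outputs are damped the most and the final one passes through unchanged. The weights, printed for an illustrative softness value:

controlnet_softness = 0.25  # illustrative; 0 disables the damping entirely

for i in range(10):
    k = 1.0 - float(i) / 9.0
    print(f"outs[{i}] *= {1.0 - controlnet_softness * k:.3f}")
# outs[0] *= 0.750, outs[1] *= 0.778, ..., outs[9] *= 1.000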