suffix mode
parent 9e9821b3ed
commit d50714b261
@@ -1 +1 @@
-version = '1.0.61'
+version = '1.0.62'
@@ -17,6 +17,7 @@ def worker():

 from modules.sdxl_styles import apply_style_negative, apply_style_positive, aspect_ratios
 from modules.private_logger import log
+from modules.expansion import safe_str

 try:
     async_gradio_app = shared.gradio_root
@@ -39,6 +40,9 @@ def worker():

 outputs.append(['preview', (1, 'Initializing ...', None)])

+prompt = safe_str(prompt)
+negative_prompt = safe_str(negative_prompt)
+
 seed = image_seed
 max_seed = int(1024 * 1024 * 1024)
 if not isinstance(seed, int):
@@ -59,8 +63,7 @@ def worker():
 n_txt = apply_style_negative(style_selction, negative_prompt)
 n_cond = pipeline.process_prompt(n_txt)
 outputs.append(['preview', (9, 'Encoding positive text ...', None)])
-p_txt_a, p_txt_b = apply_style_positive(style_selction, prompt)
-p_txt = p_txt_a + p_txt_b
+p_txt = apply_style_positive(style_selction, prompt)
 p_cond = pipeline.process_prompt(p_txt)

 for i in range(image_number):
@@ -78,11 +81,11 @@ def worker():
 outputs.append(['preview', (5, f'Preparing positive text #{i + 1} ...', None)])
 current_seed = seed + i

-p_txt_a, p_txt_b = apply_style_positive(style_selction, prompt)
-p_txt_e = pipeline.expand_txt(p_txt_a, current_seed)
-print(f'Expanded prompt: \n\n{p_txt_e}\n\n')
+suffix = pipeline.expansion(prompt, current_seed)
+print(f'[Prompt Expansion] New suffix: {suffix}')

-p_txt = p_txt_e + p_txt_b
+p_txt = apply_style_positive(style_selction, prompt)
+p_txt = safe_str(p_txt) + suffix

 tasks.append(dict(
     prompt=prompt,
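The hunk above is the core of suffix mode: the expansion model is now asked for a suffix derived from the raw user prompt, and that suffix is appended after the styled prompt instead of being spliced into the middle of the style template. A minimal sketch of the new composition order; the names here (expand, build_prompt, the example template) are illustrative stand-ins, not repo code:

# Minimal sketch (not repo code) of the new suffix-mode composition in the worker loop.
# `expand` stands in for pipeline.expansion (the GPT-2 based FooocusExpansion call),
# and str.strip() stands in for safe_str.

def apply_style_positive(style_template, txt):
    # After this commit the style helper returns one finished string, not an (a, b) pair.
    return style_template.replace('{prompt}', txt)

def expand(prompt, seed):
    # Stand-in for the language-model expansion; returns only a generated suffix.
    return ', cinematic lighting, highly detailed'

def build_prompt(prompt, style_template, seed):
    suffix = expand(prompt, seed)                          # suffix comes from the raw user prompt
    p_txt = apply_style_positive(style_template, prompt)   # style wraps the user prompt
    return p_txt.strip() + suffix                          # suffix is appended last

print(build_prompt('a cat', 'photo of {prompt}, 4k', seed=123))
# -> photo of a cat, 4k, cinematic lighting, highly detailed

Because the suffix is the very last thing appended, the print statement in the hunk can show it verbatim, which is what the changelog below means by making debugging easier.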
@@ -103,11 +103,7 @@ refresh_base_model(modules.path.default_base_model_name)
 refresh_refiner_model(modules.path.default_refiner_model_name)
 refresh_loras([(modules.path.default_lora_name, 0.5), ('None', 0.5), ('None', 0.5), ('None', 0.5), ('None', 0.5)])

-expansion_model = FooocusExpansion()
-
-
-def expand_txt(*args, **kwargs):
-    return expansion_model(*args, **kwargs)
+expansion = FooocusExpansion()


 def process_prompt(text):
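Design note on the hunk above: because FooocusExpansion defines __call__, the instance itself is callable, so the module can expose it directly and drop the expand_txt wrapper. A tiny illustrative sketch with simplified names:

# Illustrative only: a callable instance replaces the module-level wrapper function.

class Expansion:                      # simplified stand-in for FooocusExpansion
    def __call__(self, prompt, seed):
        return f'{prompt} [suffix for seed {seed}]'

expansion = Expansion()
print(expansion('a cat', 42))         # callers invoke pipeline.expansion(prompt, seed) directly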
@@ -33,10 +33,9 @@ class FooocusExpansion:
     def __call__(self, prompt, seed):
         seed = int(seed)
         set_seed(seed)
-
-        prompt = safe_str(prompt) + fooocus_magic_split[seed % len(fooocus_magic_split)]
-
+        origin = safe_str(prompt)
+        prompt = origin + fooocus_magic_split[seed % len(fooocus_magic_split)]
         response = self.pipe(prompt, max_length=len(prompt) + 256)
-        result = response[0]['generated_text']
+        result = response[0]['generated_text'][len(origin):]
         result = safe_str(result)
         return result
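In the rewritten __call__ above, the generator echoes the prompt it was given and then continues it, so slicing the generated text at len(origin) keeps only the newly generated part (the magic-split token plus the continuation). A tiny sketch of that idea with a fake generator in place of the real GPT-2 pipeline:

# Illustrative sketch (not repo code) of the suffix extraction in FooocusExpansion.__call__.

def fake_generate(prompt):
    # Stand-in for self.pipe(...)[0]['generated_text']; like GPT-2 text generation,
    # the output starts with the prompt and then continues it.
    return prompt + 'dramatic light, sharp focus'

origin = 'a cat'                      # safe_str(prompt) in the real code
prompt = origin + ', '                # plus the magic split token picked by the seed
generated = fake_generate(prompt)
suffix = generated[len(origin):]      # keep only what the model added (suffix mode)
print(suffix)                         # -> , dramatic light, sharp focus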
@@ -961,10 +961,7 @@ aspect_ratios = {str(v[0]) + '×' + str(v[1]): v for k, v in SD_XL_BASE_RATIOS.i

 def apply_style_positive(style, txt):
     p, n = styles.get(style, default_style)
-    ps = p.split('{prompt}')
-    if len(ps) != 2:
-        return txt, ''
-    return ps[0] + txt, ps[1]
+    return p.replace('{prompt}', txt)


 def apply_style_negative(style, txt):
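The style helper above used to return a (prefix + txt, suffix) pair that callers joined themselves; it now returns one finished string via str.replace. An illustrative side-by-side using a made-up template (not one of the repo's styles):

# Illustrative contrast (not repo code) between the old pair-returning helper and the new one.

style = 'cinematic photo of {prompt}, 35mm, film grain'     # made-up example template

def apply_style_positive_old(p, txt):
    ps = p.split('{prompt}')
    if len(ps) != 2:
        return txt, ''
    return ps[0] + txt, ps[1]          # caller had to join the two halves itself

def apply_style_positive_new(p, txt):
    return p.replace('{prompt}', txt)  # one finished string, regardless of placeholder count

a, b = apply_style_positive_old(style, 'a lighthouse at dusk')
print(a + b)
print(apply_style_positive_new(style, 'a lighthouse at dusk'))
# both print: cinematic photo of a lighthouse at dusk, 35mm, film grain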
@@ -1,3 +1,7 @@
+### 1.0.62
+
+* Change prompt expansion to suffix mode for better balance of semantic and style (and debugging).
+
 ### 1.0.60

 * Tune the balance between style and Prompt Expansion.