Merge branch 'main_upstream'

Manuel Schmid 2023-12-23 16:40:20 +01:00
commit fc909a3a28
GPG Key ID: 32C4F7569B40B84B
7 changed files with 229 additions and 9 deletions

View File

@@ -1 +1 @@
-version = '2.1.853'
+version = '2.1.855'

View File

@@ -291,9 +291,10 @@ def worker():
             inpaint_image = HWC3(inpaint_image)
             if isinstance(inpaint_image, np.ndarray) and isinstance(inpaint_mask, np.ndarray) \
                     and (np.any(inpaint_mask > 127) or len(outpaint_selections) > 0):
+                progressbar(async_task, 1, 'Downloading upscale models ...')
+                modules.config.downloading_upscale_model()
                 if inpaint_parameterized:
                     progressbar(async_task, 1, 'Downloading inpainter ...')
-                    modules.config.downloading_upscale_model()
                     inpaint_head_model_path, inpaint_patch_model_path = modules.config.downloading_inpaint_models(
                         advanced_parameters.inpaint_engine)
                     base_model_additional_loras += [(inpaint_patch_model_path, 1.0)]
@@ -411,8 +412,8 @@ def worker():
                 uc=None,
                 positive_top_k=len(positive_basic_workloads),
                 negative_top_k=len(negative_basic_workloads),
-                log_positive_prompt='; '.join([task_prompt] + task_extra_positive_prompts),
-                log_negative_prompt='; '.join([task_negative_prompt] + task_extra_negative_prompts),
+                log_positive_prompt='\n'.join([task_prompt] + task_extra_positive_prompts),
+                log_negative_prompt='\n'.join([task_negative_prompt] + task_extra_negative_prompts),
             ))

         if use_expansion:
@@ -793,9 +794,9 @@ def worker():
                     ('Scheduler', scheduler_name),
                     ('Seed', task['task_seed']),
                 ]
-                for n, w in loras:
+                for li, (n, w) in enumerate(loras):
                     if n != 'None':
-                        d.append((f'LoRA', f'{n} : {w}'))
+                        d.append((f'LoRA {li + 1}', f'{n} : {w}'))
                 d.append(('Version', 'v' + fooocus_version.version))
                 log(x, d)
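For context, a minimal, self-contained sketch of what the enumerate-based loop changes in the logged metadata; the `loras` values below are illustrative, not taken from the commit:

```python
# Illustrative (name, weight) pairs; in the worker these come from the task's LoRA slots.
loras = [('sd_xl_offset_example-lora_1.0.safetensors', 0.1), ('None', 1.0)]

d = []
for li, (n, w) in enumerate(loras):
    if n != 'None':
        # Each active LoRA now gets a numbered key ('LoRA 1', 'LoRA 2', ...) instead of
        # the repeated key 'LoRA', so the metadata can be mapped back to LoRA slots.
        d.append((f'LoRA {li + 1}', f'{n} : {w}'))

print(d)  # [('LoRA 1', 'sd_xl_offset_example-lora_1.0.safetensors : 0.1')]
```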

modules/meta_parser.py (new file, 144 lines)
View File

@@ -0,0 +1,144 @@
import json

import gradio as gr

import modules.config


def load_parameter_button_click(raw_prompt_txt):
    loaded_parameter_dict = json.loads(raw_prompt_txt)
    assert isinstance(loaded_parameter_dict, dict)

    results = [True, 1]

    try:
        h = loaded_parameter_dict.get('Prompt', None)
        assert isinstance(h, str)
        results.append(h)
    except:
        results.append(gr.update())

    try:
        h = loaded_parameter_dict.get('Negative Prompt', None)
        assert isinstance(h, str)
        results.append(h)
    except:
        results.append(gr.update())

    try:
        h = loaded_parameter_dict.get('Styles', None)
        h = eval(h)
        assert isinstance(h, list)
        results.append(h)
    except:
        results.append(gr.update())

    try:
        h = loaded_parameter_dict.get('Performance', None)
        assert isinstance(h, str)
        results.append(h)
    except:
        results.append(gr.update())

    try:
        h = loaded_parameter_dict.get('Resolution', None)
        width, height = eval(h)
        formatted = modules.config.add_ratio(f'{width}*{height}')
        if formatted in modules.config.available_aspect_ratios:
            results.append(formatted)
            results.append(-1)
            results.append(-1)
        else:
            results.append(gr.update())
            results.append(width)
            results.append(height)
    except:
        results.append(gr.update())
        results.append(gr.update())
        results.append(gr.update())

    try:
        h = loaded_parameter_dict.get('Sharpness', None)
        assert h is not None
        h = float(h)
        results.append(h)
    except:
        results.append(gr.update())

    try:
        h = loaded_parameter_dict.get('Guidance Scale', None)
        assert h is not None
        h = float(h)
        results.append(h)
    except:
        results.append(gr.update())

    try:
        h = loaded_parameter_dict.get('ADM Guidance', None)
        p, n, e = eval(h)
        results.append(float(p))
        results.append(float(n))
        results.append(float(e))
    except:
        results.append(gr.update())
        results.append(gr.update())
        results.append(gr.update())

    try:
        h = loaded_parameter_dict.get('Base Model', None)
        assert isinstance(h, str)
        results.append(h)
    except:
        results.append(gr.update())

    try:
        h = loaded_parameter_dict.get('Refiner Model', None)
        assert isinstance(h, str)
        results.append(h)
    except:
        results.append(gr.update())

    try:
        h = loaded_parameter_dict.get('Refiner Switch', None)
        assert h is not None
        h = float(h)
        results.append(h)
    except:
        results.append(gr.update())

    try:
        h = loaded_parameter_dict.get('Sampler', None)
        assert isinstance(h, str)
        results.append(h)
    except:
        results.append(gr.update())

    try:
        h = loaded_parameter_dict.get('Scheduler', None)
        assert isinstance(h, str)
        results.append(h)
    except:
        results.append(gr.update())

    try:
        h = loaded_parameter_dict.get('Seed', None)
        assert h is not None
        h = int(h)
        results.append(False)
        results.append(h)
    except:
        results.append(gr.update())
        results.append(gr.update())

    results.append(gr.update(visible=True))
    results.append(gr.update(visible=False))

    for i in range(1, 6):
        try:
            n, w = loaded_parameter_dict.get(f'LoRA {i}').split(' : ')
            w = float(w)
            results.append(n)
            results.append(w)
        except:
            results.append(gr.update())
            results.append(gr.update())

    return results
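Every block above follows the same contract: read one key from the pasted JSON (the same JSON that the log page's Copy to Clipboard button produces), and append either the converted value or a no-op `gr.update()` so the corresponding control is left unchanged; the resulting flat list has to line up one-to-one with the `outputs` wired to `load_parameter_button` in webui.py. A minimal standalone sketch of that per-key pattern, using a hypothetical `append_float` helper that is not part of the module:

```python
import json

import gradio as gr


def append_float(d, key, results):
    # Hypothetical helper mirroring the per-key pattern above: append the parsed
    # float, or a no-op gr.update() when the key is missing or malformed.
    try:
        h = d.get(key, None)
        assert h is not None
        results.append(float(h))
    except Exception:
        results.append(gr.update())


params = json.loads('{"Sharpness": "2.0", "Guidance Scale": "7"}')  # illustrative payload
results = []
append_float(params, 'Sharpness', results)       # appends 2.0
append_float(params, 'Guidance Scale', results)  # appends 7.0
append_float(params, 'Refiner Switch', results)  # key missing -> appends gr.update()
```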

View File

@@ -1,6 +1,8 @@
 import os
 import args_manager
 import modules.config
+import json
+import urllib.parse

 from PIL import Image
 from modules.util import generate_temp_filename
@@ -36,10 +38,22 @@ def log(img, dic):
         ".image-container img { height: auto; max-width: 512px; display: block; padding-right:10px; } "
         ".image-container div { text-align: center; padding: 4px; } "
         "hr { border-color: gray; } "
+        "button { background-color: black; color: white; border: 1px solid grey; border-radius: 5px; padding: 5px 10px; text-align: center; display: inline-block; font-size: 16px; cursor: pointer; }"
+        "button:hover {background-color: grey; color: black;}"
         "</style>"
     )

-    begin_part = f"<html><head><title>Fooocus Log {date_string}</title>{css_styles}</head><body><p>Fooocus Log {date_string} (private)</p>\n<p>All images are clean, without any hidden data/meta, and safe to share with others.</p><!--fooocus-log-split-->\n\n"
+    js = (
+        "<script>"
+        "function to_clipboard(txt) { "
+        "txt = decodeURIComponent(txt);"
+        "navigator.clipboard.writeText(txt);"
+        "alert('Copied to Clipboard!\\nPaste to prompt area to load parameters.\\nCurrent clipboard content is:\\n\\n' + txt);"
+        "}"
+        "</script>"
+    )
+
+    begin_part = f"<html><head><title>Fooocus Log {date_string}</title>{css_styles}</head><body>{js}<p>Fooocus Log {date_string} (private)</p>\n<p>All images are clean, without any hidden data/meta, and safe to share with others.</p><!--fooocus-log-split-->\n\n"
     end_part = f'\n<!--fooocus-log-split--></body></html>'

     middle_part = log_cache.get(html_name, "")
@@ -57,8 +71,13 @@ def log(img, dic):
     item += f"<td><a href=\"{only_name}\" target=\"_blank\"><img src='{only_name}' onerror=\"this.closest('.image-container').style.display='none';\" loading='lazy'></img></a><div>{only_name}</div></td>"
     item += "<td><table class='metadata'>"
     for key, value in dic:
-        item += f"<tr><td class='key'>{key}</td><td class='value'>{value}</td></tr>\n"
+        value_txt = str(value).replace('\n', ' </br> ')
+        item += f"<tr><td class='key'>{key}</td><td class='value'>{value_txt}</td></tr>\n"
     item += "</table>"
+
+    js_txt = urllib.parse.quote(json.dumps({k: v for k, v in dic}, indent=0), safe='')
+    item += f"</br><button onclick=\"to_clipboard('{js_txt}')\">Copy to Clipboard</button>"
+
     item += "</td>"
     item += "</tr></table></div>\n\n"

View File

@@ -28,6 +28,8 @@ Fooocus has simplified the installation. Between pressing "download" and generat
 `[1]` David Holz, 2019.

+**Recently many fake websites exist on Google when you search “fooocus”. Do not trust those; here is the only official source of Fooocus.**
+
 ## [Installing Fooocus](#download)

 # Moving from Midjourney to Fooocus

View File

@@ -1,3 +1,10 @@
+**(2023 Dec 21) Hi all, the feature updating of Fooocus will be paused for about two or three weeks because we have some other workloads. See you soon and we will come back in mid or late Jan. However, you may still see updates if other collaborators are fixing bugs or solving problems.**
+
+# 2.1.854
+
+* Add a button to copy parameters to clipboard in log.
+* Allow users to load parameters directly by pasting parameters to prompt.
+
 # 2.1.853

 * Add Marc K3nt3L's styles. Thanks [Marc K3nt3L](https://github.com/K3nt3L)!

View File

@@ -1,6 +1,7 @@
 import gradio as gr
 import random
 import os
+import json
 import time
 import shared
 import modules.config
@@ -12,6 +13,7 @@ import modules.flags as flags
 import modules.gradio_hijack as grh
 import modules.advanced_parameters as advanced_parameters
 import modules.style_sorter as style_sorter
+import modules.meta_parser
 import args_manager
 import copy
@@ -103,7 +105,7 @@ with shared.gradio_root:
                                     elem_id='final_gallery')
                 with gr.Row(elem_classes='type_row'):
                     with gr.Column(scale=17):
-                        prompt = gr.Textbox(show_label=False, placeholder="Type prompt here.", elem_id='positive_prompt',
+                        prompt = gr.Textbox(show_label=False, placeholder="Type prompt here or paste parameters.", elem_id='positive_prompt',
                                             container=False, autofocus=True, elem_classes='type_row', lines=1024)

                         default_prompt = modules.config.default_prompt
@@ -112,6 +114,7 @@ with shared.gradio_root:
                     with gr.Column(scale=3, min_width=0):
                         generate_button = gr.Button(label="Generate", value="Generate", elem_classes='type_row', elem_id='generate_button', visible=True)
+                        load_parameter_button = gr.Button(label="Load Parameters", value="Load Parameters", elem_classes='type_row', elem_id='load_parameter_button', visible=False)
                         skip_button = gr.Button(label="Skip", value="Skip", elem_classes='type_row_half', visible=False)
                         stop_button = gr.Button(label="Stop", value="Stop", elem_classes='type_row_half', elem_id='stop_button', visible=False)
@@ -552,6 +555,50 @@ with shared.gradio_root:
         ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt]
         ctrls += ip_ctrls

+        def parse_meta(raw_prompt_txt):
+            loaded_json = None
+            try:
+                if '{' in raw_prompt_txt:
+                    if '}' in raw_prompt_txt:
+                        if ':' in raw_prompt_txt:
+                            loaded_json = json.loads(raw_prompt_txt)
+                            assert isinstance(loaded_json, dict)
+            except:
+                loaded_json = None
+
+            if loaded_json is None:
+                return gr.update(), gr.update(visible=True), gr.update(visible=False)
+
+            return json.dumps(loaded_json), gr.update(visible=False), gr.update(visible=True)
+
+        prompt.input(parse_meta, inputs=prompt, outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False)
+
+        load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=prompt, outputs=[
+            advanced_checkbox,
+            image_number,
+            prompt,
+            negative_prompt,
+            style_selections,
+            performance_selection,
+            aspect_ratios_selection,
+            overwrite_width,
+            overwrite_height,
+            sharpness,
+            guidance_scale,
+            adm_scaler_positive,
+            adm_scaler_negative,
+            adm_scaler_end,
+            base_model,
+            refiner_model,
+            refiner_switch,
+            sampler_name,
+            scheduler_name,
+            seed_random,
+            image_seed,
+            generate_button,
+            load_parameter_button
+        ] + lora_ctrls, queue=False, show_progress=False)
+
         generate_button.click(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=True, interactive=True), gr.update(visible=False), []), outputs=[stop_button, skip_button, generate_button, gallery]) \
             .then(fn=refresh_seed, inputs=[seed_random, image_seed], outputs=image_seed) \
             .then(advanced_parameters.set_all_advanced_parameters, inputs=adps) \
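Taken together, the wiring works like this: typing into the prompt box triggers `parse_meta`, which swaps the Generate button for Load Parameters only when the text parses as a JSON object; clicking Load Parameters then feeds that JSON to `load_parameter_button_click`, whose flat result list must line up one-to-one with the outputs above (the 23 listed components plus `lora_ctrls`). A self-contained sketch of the detection step; the JSON string is an illustrative example of what the log page's Copy to Clipboard button produces, not output captured from the app:

```python
import json

# Illustrative clipboard payload; real logs carry more keys (Styles, Performance,
# Resolution, ADM Guidance, Base Model, Refiner Model, LoRA 1..5, ...).
pasted = '{"Prompt": "a photo of a cat", "Sharpness": "2.0", "Seed": "12345"}'
plain = 'a photo of a cat'


def looks_like_parameters(raw_prompt_txt):
    # Standalone restatement of parse_meta's detection logic, minus the gr.update()
    # return values that toggle the Generate / Load Parameters buttons.
    try:
        if '{' in raw_prompt_txt and '}' in raw_prompt_txt and ':' in raw_prompt_txt:
            return isinstance(json.loads(raw_prompt_txt), dict)
    except json.JSONDecodeError:
        pass
    return False


print(looks_like_parameters(pasted))  # True  -> show the Load Parameters button
print(looks_like_parameters(plain))   # False -> keep the Generate button
```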