diff --git a/launch.py b/launch.py
index e333e287..937c0cdb 100644
--- a/launch.py
+++ b/launch.py
@@ -98,7 +98,8 @@ if config.temp_path_cleanup_on_launch:
     else:
         print(f"[Cleanup] Failed to delete content of temp dir.")
 
-def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads):
+
+def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads, vae_downloads):
     for file_name, url in vae_approx_filenames:
         load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name)
 
@@ -130,12 +131,14 @@ def download_models(default_model, previous_default_models, checkpoint_downloads
         load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name)
     for file_name, url in lora_downloads.items():
         load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name)
+    for file_name, url in vae_downloads.items():
+        load_file_from_url(url=url, model_dir=config.path_vae, file_name=file_name)
 
     return default_model, checkpoint_downloads
 
 
 config.default_base_model_name, config.checkpoint_downloads = download_models(
     config.default_base_model_name, config.previous_default_models, config.checkpoint_downloads,
-    config.embeddings_downloads, config.lora_downloads)
+    config.embeddings_downloads, config.lora_downloads, config.vae_downloads)
 
 from webui import *
diff --git a/modules/config.py b/modules/config.py
index da061aef..6470f601 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -445,6 +445,12 @@ embeddings_downloads = get_config_item_or_set_default(
     validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()),
     expected_type=dict
 )
+vae_downloads = get_config_item_or_set_default(
+    key='vae_downloads',
+    default_value={},
+    validator=lambda x: isinstance(x, dict) and all(isinstance(k, str) and isinstance(v, str) for k, v in x.items()),
+    expected_type=dict
+)
 available_aspect_ratios = get_config_item_or_set_default(
     key='available_aspect_ratios',
     default_value=modules.flags.sdxl_aspect_ratios,
diff --git a/presets/.gitignore b/presets/.gitignore
index 27e74136..e39511a9 100644
--- a/presets/.gitignore
+++ b/presets/.gitignore
@@ -3,5 +3,6 @@
 !default.json
 !lcm.json
 !playground_v2.5.json
+!pony_v6.json
 !realistic.json
 !sai.json
\ No newline at end of file
diff --git a/presets/pony_v6.json b/presets/pony_v6.json
new file mode 100644
index 00000000..f28064b8
--- /dev/null
+++ b/presets/pony_v6.json
@@ -0,0 +1,53 @@
+{
+    "default_model": "ponyDiffusionV6XL.safetensors",
+    "default_refiner": "None",
+    "default_refiner_switch": 0.5,
+    "default_vae": "ponyDiffusionV6XL_vae.safetensors",
+    "default_loras": [
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ],
+        [
+            true,
+            "None",
+            1.0
+        ]
+    ],
+    "default_cfg_scale": 7.0,
+    "default_sample_sharpness": 2.0,
+    "default_sampler": "dpmpp_2m_sde_gpu",
+    "default_scheduler": "karras",
+    "default_performance": "Speed",
+    "default_prompt": "",
+    "default_prompt_negative": "",
+    "default_styles": [
+        "Fooocus Pony"
+    ],
+    "default_aspect_ratio": "896*1152",
+    "default_overwrite_step": -1,
+    "checkpoint_downloads": {
+        "ponyDiffusionV6XL.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/ponyDiffusionV6XL.safetensors"
+    },
+    "embeddings_downloads": {},
+    "lora_downloads": {},
+    "vae_downloads": {
+        "ponyDiffusionV6XL_vae.safetensors": "https://huggingface.co/mashb1t/fav_models/resolve/main/fav/ponyDiffusionV6XL_vae.safetensors"
+    }
+}
diff --git a/sdxl_styles/samples/fooocus_pony.jpg b/sdxl_styles/samples/fooocus_pony.jpg
new file mode 100644
index 00000000..b34e5c76
Binary files /dev/null and b/sdxl_styles/samples/fooocus_pony.jpg differ
diff --git a/sdxl_styles/sdxl_styles_fooocus.json b/sdxl_styles/sdxl_styles_fooocus.json
index 33337214..c589f686 100644
--- a/sdxl_styles/sdxl_styles_fooocus.json
+++ b/sdxl_styles/sdxl_styles_fooocus.json
@@ -30,5 +30,10 @@
         "name": "Fooocus Cinematic",
         "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
         "negative_prompt": "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured"
+    },
+    {
+        "name": "Fooocus Pony",
+        "prompt": "score_9, score_8_up, score_7_up, {prompt}",
+        "negative_prompt": "score_6, score_5, score_4"
     }
 ]
diff --git a/webui.py b/webui.py
index 9bb6351d..315dcfcf 100644
--- a/webui.py
+++ b/webui.py
@@ -886,9 +886,11 @@ with shared.gradio_root:
             checkpoint_downloads = preset_prepared.get('checkpoint_downloads', {})
             embeddings_downloads = preset_prepared.get('embeddings_downloads', {})
             lora_downloads = preset_prepared.get('lora_downloads', {})
+            vae_downloads = preset_prepared.get('vae_downloads', {})
 
-            preset_prepared['base_model'], preset_prepared['lora_downloads'] = launch.download_models(
-                default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads)
+            preset_prepared['base_model'], preset_prepared['checkpoint_downloads'] = launch.download_models(
+                default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads,
+                vae_downloads)
 
             if 'prompt' in preset_prepared and preset_prepared.get('prompt') == '':
                 del preset_prepared['prompt']