diff --git a/fooocus_colab.ipynb b/fooocus_colab.ipynb
index 1d824185..189c4ee4 100644
--- a/fooocus_colab.ipynb
+++ b/fooocus_colab.ipynb
@@ -1,35 +1,736 @@
{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "VjYy0F2gZIPR"
- },
- "outputs": [],
- "source": [
- "!pip install pygit2==1.15.1\n",
- "%cd /content\n",
- "!git clone https://github.com/lllyasviel/Fooocus.git\n",
- "%cd /content/Fooocus\n",
- "!python entry_with_update.py --share --always-high-vram\n"
- ]
- }
- ],
- "metadata": {
- "accelerator": "GPU",
- "colab": {
- "gpuType": "T4",
- "provenance": []
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+        "<a href=\"https://colab.research.google.com/github/Alen-joy47/Fooocus/blob/main/fooocus_colab.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "id": "VjYy0F2gZIPR",
+ "outputId": "044f20a6-0df2-4127-e4e5-589d9dc3cbbf",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Requirement already satisfied: pygit2==1.15.1 in /usr/local/lib/python3.11/dist-packages (1.15.1)\n",
+ "Requirement already satisfied: cffi>=1.16.0 in /usr/local/lib/python3.11/dist-packages (from pygit2==1.15.1) (1.17.1)\n",
+ "Requirement already satisfied: pycparser in /usr/local/lib/python3.11/dist-packages (from cffi>=1.16.0->pygit2==1.15.1) (2.22)\n",
+ "/content\n",
+ "fatal: destination path 'Fooocus' already exists and is not an empty directory.\n",
+ "/content/Fooocus\n",
+ "Already up-to-date\n",
+ "Update succeeded.\n",
+ "[System ARGV] ['entry_with_update.py', '--share', '--always-high-vram']\n",
+ "Python 3.11.11 (main, Dec 4 2024, 08:55:07) [GCC 11.4.0]\n",
+ "Fooocus version: 2.5.5\n",
+ "[Cleanup] Attempting to delete content of temp dir /tmp/fooocus\n",
+ "[Cleanup] Cleanup successful\n",
+ "Total VRAM 15095 MB, total RAM 12979 MB\n",
+ "Set vram state to: HIGH_VRAM\n",
+ "Always offload VRAM\n",
+ "Device: cuda:0 Tesla T4 : native\n",
+ "VAE dtype: torch.float32\n",
+ "Using pytorch cross attention\n",
+ "Refiner unloaded.\n",
+ "IMPORTANT: You are using gradio version 3.41.2, however version 4.44.1 is available, please upgrade.\n",
+ "--------\n",
+ "Running on local URL: http://127.0.0.1:7865\n",
+ "Running on public URL: https://57500bc9274af38df3.gradio.live\n",
+ "\n",
+ "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n",
+ "model_type EPS\n",
+ "UNet ADM Dimension 2816\n",
+ "Using pytorch attention in VAE\n",
+ "Working with z of shape (1, 4, 32, 32) = 4096 dimensions.\n",
+ "Using pytorch attention in VAE\n",
+ "extra {'cond_stage_model.clip_l.text_projection', 'cond_stage_model.clip_l.logit_scale'}\n",
+ "left over keys: dict_keys(['cond_stage_model.clip_l.transformer.text_model.embeddings.position_ids'])\n",
+ "loaded straight to GPU\n",
+ "Requested to load SDXL\n",
+ "Loading 1 new model\n",
+ "Base model loaded: /content/Fooocus/models/checkpoints/juggernautXL_v8Rundiffusion.safetensors\n",
+ "VAE loaded: None\n",
+ "Request to load LoRAs [('sd_xl_offset_example-lora_1.0.safetensors', 0.1)] for model [/content/Fooocus/models/checkpoints/juggernautXL_v8Rundiffusion.safetensors].\n",
+ "Loaded LoRA [/content/Fooocus/models/loras/sd_xl_offset_example-lora_1.0.safetensors] for UNet [/content/Fooocus/models/checkpoints/juggernautXL_v8Rundiffusion.safetensors] with 788 keys at weight 0.1.\n",
+ "Fooocus V2 Expansion: Vocab with 642 words.\n",
+ "Fooocus Expansion engine loaded for cuda:0, use_fp16 = True.\n",
+ "Requested to load SDXLClipModel\n",
+ "Requested to load GPT2LMHeadModel\n",
+ "Loading 2 new models\n",
+ "[Fooocus Model Management] Moving model(s) has taken 0.60 seconds\n",
+ "2025-04-02 04:49:26.032323: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:477] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
+ "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n",
+ "E0000 00:00:1743569366.337837 11421 cuda_dnn.cc:8310] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
+ "E0000 00:00:1743569366.425152 11421 cuda_blas.cc:1418] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
+ "2025-04-02 04:49:27.078099: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
+ "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
+ "Started worker with PID 11332\n",
+ "App started successful. Use the app with http://127.0.0.1:7865/ or 127.0.0.1:7865 or https://57500bc9274af38df3.gradio.live\n",
+ "Keyboard interruption in main thread... closing server.\n",
+ "Traceback (most recent call last):\n",
+ " File \"/usr/local/lib/python3.11/dist-packages/gradio/blocks.py\", line 2199, in block_thread\n",
+ " time.sleep(0.1)\n",
+ "KeyboardInterrupt\n",
+ "\n",
+ "During handling of the above exception, another exception occurred:\n",
+ "\n",
+ "Traceback (most recent call last):\n",
+ "  File \"/content/Fooocus/entry_with_update.py\", line 46, in <module>\n",
+ " from launch import *\n",
+ "  File \"/content/Fooocus/launch.py\", line 152, in <module>\n",
+ " from webui import *\n",
+ "  File \"/content/Fooocus/webui.py\", line 1120, in <module>\n",
+ " shared.gradio_root.launch(\n",
+ " File \"/usr/local/lib/python3.11/dist-packages/gradio/blocks.py\", line 2115, in launch\n",
+ " self.block_thread()\n",
+ " File \"/usr/local/lib/python3.11/dist-packages/gradio/blocks.py\", line 2203, in block_thread\n",
+ " self.server.close()\n",
+ " File \"/usr/local/lib/python3.11/dist-packages/gradio/networking.py\", line 49, in close\n",
+ " self.thread.join()\n",
+ " File \"/usr/lib/python3.11/threading.py\", line 1119, in join\n",
+ " self._wait_for_tstate_lock()\n",
+ " File \"/usr/lib/python3.11/threading.py\", line 1139, in _wait_for_tstate_lock\n",
+ " if lock.acquire(block, timeout):\n",
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+ "KeyboardInterrupt\n",
+ "Killing tunnel 127.0.0.1:7865 <> https://57500bc9274af38df3.gradio.live\n",
+ "^C\n"
+ ]
+ }
+ ],
+ "source": [
+ "!pip install pygit2==1.15.1\n",
+ "%cd /content\n",
+ "!git clone https://github.com/Alen-joy47/Fooocus.git\n",
+ "%cd /content/Fooocus\n",
+ "!python entry_with_update.py --share --always-high-vram\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "%cd /content\n",
+ "!rm -rf Fooocus # Remove old version if needed\n",
+ "!git clone https://github.com/lllyasviel/Fooocus.git\n",
+ "\n",
+ "\n"
+ ],
+ "metadata": {
+ "id": "7t3ozAe52W3S",
+ "outputId": "7f884eec-68b0-494e-97e7-82283b45e577",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "execution_count": 7,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content\n",
+ "Cloning into 'Fooocus'...\n",
+ "remote: Enumerating objects: 6725, done.\u001b[K\n",
+ "remote: Total 6725 (delta 0), reused 0 (delta 0), pack-reused 6725 (from 1)\u001b[K\n",
+ "Receiving objects: 100% (6725/6725), 33.36 MiB | 36.49 MiB/s, done.\n",
+ "Resolving deltas: 100% (3837/3837), done.\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "%cd /content/Fooocus\n",
+ "!pip install -r requirements_versions.txt\n",
+ "\n"
+ ],
+ "metadata": {
+ "id": "eu9iTc8C3lE7",
+ "outputId": "fbba42b5-9afe-4665-a956-db63a2fae62a",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "execution_count": 10,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/Fooocus\n",
+ "Requirement already satisfied: torchsde==0.2.6 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 1)) (0.2.6)\n",
+ "Requirement already satisfied: einops==0.8.0 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 2)) (0.8.0)\n",
+ "Requirement already satisfied: transformers==4.42.4 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 3)) (4.42.4)\n",
+ "Requirement already satisfied: safetensors==0.4.3 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 4)) (0.4.3)\n",
+ "Requirement already satisfied: accelerate==0.32.1 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 5)) (0.32.1)\n",
+ "Requirement already satisfied: pyyaml==6.0.1 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 6)) (6.0.1)\n",
+ "Requirement already satisfied: pillow==10.4.0 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 7)) (10.4.0)\n",
+ "Requirement already satisfied: scipy==1.14.0 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 8)) (1.14.0)\n",
+ "Requirement already satisfied: tqdm==4.66.4 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 9)) (4.66.4)\n",
+ "Requirement already satisfied: psutil==6.0.0 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 10)) (6.0.0)\n",
+ "Requirement already satisfied: pytorch_lightning==2.3.3 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 11)) (2.3.3)\n",
+ "Requirement already satisfied: omegaconf==2.3.0 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 12)) (2.3.0)\n",
+ "Requirement already satisfied: gradio==3.41.2 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 13)) (3.41.2)\n",
+ "Requirement already satisfied: pygit2==1.15.1 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 14)) (1.15.1)\n",
+ "Requirement already satisfied: opencv-contrib-python-headless==4.10.0.84 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 15)) (4.10.0.84)\n",
+ "Requirement already satisfied: httpx==0.27.0 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 16)) (0.27.0)\n",
+ "Requirement already satisfied: onnxruntime==1.18.1 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 17)) (1.18.1)\n",
+ "Requirement already satisfied: timm==1.0.7 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 18)) (1.0.7)\n",
+ "Requirement already satisfied: numpy==1.26.4 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 19)) (1.26.4)\n",
+ "Requirement already satisfied: tokenizers==0.19.1 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 20)) (0.19.1)\n",
+ "Requirement already satisfied: packaging==24.1 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 21)) (24.1)\n",
+ "Requirement already satisfied: rembg==2.0.57 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 22)) (2.0.57)\n",
+ "Requirement already satisfied: groundingdino-py==0.4.0 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 23)) (0.4.0)\n",
+ "Requirement already satisfied: segment_anything==1.0 in /usr/local/lib/python3.11/dist-packages (from -r requirements_versions.txt (line 24)) (1.0)\n",
+ "Requirement already satisfied: torch>=1.6.0 in /usr/local/lib/python3.11/dist-packages (from torchsde==0.2.6->-r requirements_versions.txt (line 1)) (2.6.0+cu124)\n",
+ "Requirement already satisfied: trampoline>=0.1.2 in /usr/local/lib/python3.11/dist-packages (from torchsde==0.2.6->-r requirements_versions.txt (line 1)) (0.1.2)\n",
+ "Requirement already satisfied: filelock in /usr/local/lib/python3.11/dist-packages (from transformers==4.42.4->-r requirements_versions.txt (line 3)) (3.18.0)\n",
+ "Requirement already satisfied: huggingface-hub<1.0,>=0.23.2 in /usr/local/lib/python3.11/dist-packages (from transformers==4.42.4->-r requirements_versions.txt (line 3)) (0.29.3)\n",
+ "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.11/dist-packages (from transformers==4.42.4->-r requirements_versions.txt (line 3)) (2024.11.6)\n",
+ "Requirement already satisfied: requests in /usr/local/lib/python3.11/dist-packages (from transformers==4.42.4->-r requirements_versions.txt (line 3)) (2.32.3)\n",
+ "Requirement already satisfied: fsspec>=2022.5.0 in /usr/local/lib/python3.11/dist-packages (from fsspec[http]>=2022.5.0->pytorch_lightning==2.3.3->-r requirements_versions.txt (line 11)) (2025.3.0)\n",
+ "Requirement already satisfied: torchmetrics>=0.7.0 in /usr/local/lib/python3.11/dist-packages (from pytorch_lightning==2.3.3->-r requirements_versions.txt (line 11)) (1.7.0)\n",
+ "Requirement already satisfied: typing-extensions>=4.4.0 in /usr/local/lib/python3.11/dist-packages (from pytorch_lightning==2.3.3->-r requirements_versions.txt (line 11)) (4.13.0)\n",
+ "Requirement already satisfied: lightning-utilities>=0.10.0 in /usr/local/lib/python3.11/dist-packages (from pytorch_lightning==2.3.3->-r requirements_versions.txt (line 11)) (0.14.2)\n",
+ "Requirement already satisfied: antlr4-python3-runtime==4.9.* in /usr/local/lib/python3.11/dist-packages (from omegaconf==2.3.0->-r requirements_versions.txt (line 12)) (4.9.3)\n",
+ "Requirement already satisfied: aiofiles<24.0,>=22.0 in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (23.2.1)\n",
+ "Requirement already satisfied: altair<6.0,>=4.2.0 in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (5.5.0)\n",
+ "Requirement already satisfied: fastapi in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (0.115.12)\n",
+ "Requirement already satisfied: ffmpy in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (0.5.0)\n",
+ "Requirement already satisfied: gradio-client==0.5.0 in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (0.5.0)\n",
+ "Requirement already satisfied: importlib-resources<7.0,>=1.3 in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (6.5.2)\n",
+ "Requirement already satisfied: jinja2<4.0 in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (3.1.6)\n",
+ "Requirement already satisfied: markupsafe~=2.0 in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (2.1.5)\n",
+ "Requirement already satisfied: matplotlib~=3.0 in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (3.10.0)\n",
+ "Requirement already satisfied: orjson~=3.0 in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (3.10.16)\n",
+ "Requirement already satisfied: pandas<3.0,>=1.0 in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (2.2.2)\n",
+ "Requirement already satisfied: pydantic!=1.8,!=1.8.1,!=2.0.0,!=2.0.1,<3.0.0,>=1.7.4 in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (2.11.0)\n",
+ "Requirement already satisfied: pydub in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (0.25.1)\n",
+ "Requirement already satisfied: python-multipart in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (0.0.20)\n",
+ "Requirement already satisfied: semantic-version~=2.0 in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (2.10.0)\n",
+ "Requirement already satisfied: uvicorn>=0.14.0 in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (0.34.0)\n",
+ "Requirement already satisfied: websockets<12.0,>=10.0 in /usr/local/lib/python3.11/dist-packages (from gradio==3.41.2->-r requirements_versions.txt (line 13)) (11.0.3)\n",
+ "Requirement already satisfied: cffi>=1.16.0 in /usr/local/lib/python3.11/dist-packages (from pygit2==1.15.1->-r requirements_versions.txt (line 14)) (1.17.1)\n",
+ "Requirement already satisfied: anyio in /usr/local/lib/python3.11/dist-packages (from httpx==0.27.0->-r requirements_versions.txt (line 16)) (4.9.0)\n",
+ "Requirement already satisfied: certifi in /usr/local/lib/python3.11/dist-packages (from httpx==0.27.0->-r requirements_versions.txt (line 16)) (2025.1.31)\n",
+ "Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.11/dist-packages (from httpx==0.27.0->-r requirements_versions.txt (line 16)) (1.0.7)\n",
+ "Requirement already satisfied: idna in /usr/local/lib/python3.11/dist-packages (from httpx==0.27.0->-r requirements_versions.txt (line 16)) (3.10)\n",
+ "Requirement already satisfied: sniffio in /usr/local/lib/python3.11/dist-packages (from httpx==0.27.0->-r requirements_versions.txt (line 16)) (1.3.1)\n",
+ "Requirement already satisfied: coloredlogs in /usr/local/lib/python3.11/dist-packages (from onnxruntime==1.18.1->-r requirements_versions.txt (line 17)) (15.0.1)\n",
+ "Requirement already satisfied: flatbuffers in /usr/local/lib/python3.11/dist-packages (from onnxruntime==1.18.1->-r requirements_versions.txt (line 17)) (25.2.10)\n",
+ "Requirement already satisfied: protobuf in /usr/local/lib/python3.11/dist-packages (from onnxruntime==1.18.1->-r requirements_versions.txt (line 17)) (5.29.4)\n",
+ "Requirement already satisfied: sympy in /usr/local/lib/python3.11/dist-packages (from onnxruntime==1.18.1->-r requirements_versions.txt (line 17)) (1.13.1)\n",
+ "Requirement already satisfied: torchvision in /usr/local/lib/python3.11/dist-packages (from timm==1.0.7->-r requirements_versions.txt (line 18)) (0.21.0+cu124)\n",
+ "Requirement already satisfied: jsonschema in /usr/local/lib/python3.11/dist-packages (from rembg==2.0.57->-r requirements_versions.txt (line 22)) (4.23.0)\n",
+ "Requirement already satisfied: opencv-python-headless in /usr/local/lib/python3.11/dist-packages (from rembg==2.0.57->-r requirements_versions.txt (line 22)) (4.11.0.86)\n",
+ "Requirement already satisfied: pooch in /usr/local/lib/python3.11/dist-packages (from rembg==2.0.57->-r requirements_versions.txt (line 22)) (1.8.2)\n",
+ "Requirement already satisfied: pymatting in /usr/local/lib/python3.11/dist-packages (from rembg==2.0.57->-r requirements_versions.txt (line 22)) (1.1.13)\n",
+ "Requirement already satisfied: scikit-image in /usr/local/lib/python3.11/dist-packages (from rembg==2.0.57->-r requirements_versions.txt (line 22)) (0.25.2)\n",
+ "Requirement already satisfied: addict in /usr/local/lib/python3.11/dist-packages (from groundingdino-py==0.4.0->-r requirements_versions.txt (line 23)) (2.4.0)\n",
+ "Requirement already satisfied: yapf in /usr/local/lib/python3.11/dist-packages (from groundingdino-py==0.4.0->-r requirements_versions.txt (line 23)) (0.43.0)\n",
+ "Requirement already satisfied: opencv-python in /usr/local/lib/python3.11/dist-packages (from groundingdino-py==0.4.0->-r requirements_versions.txt (line 23)) (4.11.0.86)\n",
+ "Requirement already satisfied: supervision==0.6.0 in /usr/local/lib/python3.11/dist-packages (from groundingdino-py==0.4.0->-r requirements_versions.txt (line 23)) (0.6.0)\n",
+ "Requirement already satisfied: pycocotools in /usr/local/lib/python3.11/dist-packages (from groundingdino-py==0.4.0->-r requirements_versions.txt (line 23)) (2.0.8)\n",
+ "Requirement already satisfied: h11<0.15,>=0.13 in /usr/local/lib/python3.11/dist-packages (from httpcore==1.*->httpx==0.27.0->-r requirements_versions.txt (line 16)) (0.14.0)\n",
+ "Requirement already satisfied: narwhals>=1.14.2 in /usr/local/lib/python3.11/dist-packages (from altair<6.0,>=4.2.0->gradio==3.41.2->-r requirements_versions.txt (line 13)) (1.32.0)\n",
+ "Requirement already satisfied: pycparser in /usr/local/lib/python3.11/dist-packages (from cffi>=1.16.0->pygit2==1.15.1->-r requirements_versions.txt (line 14)) (2.22)\n",
+ "Requirement already satisfied: aiohttp!=4.0.0a0,!=4.0.0a1 in /usr/local/lib/python3.11/dist-packages (from fsspec[http]>=2022.5.0->pytorch_lightning==2.3.3->-r requirements_versions.txt (line 11)) (3.11.14)\n",
+ "Requirement already satisfied: attrs>=22.2.0 in /usr/local/lib/python3.11/dist-packages (from jsonschema->rembg==2.0.57->-r requirements_versions.txt (line 22)) (25.3.0)\n",
+ "Requirement already satisfied: jsonschema-specifications>=2023.03.6 in /usr/local/lib/python3.11/dist-packages (from jsonschema->rembg==2.0.57->-r requirements_versions.txt (line 22)) (2024.10.1)\n",
+ "Requirement already satisfied: referencing>=0.28.4 in /usr/local/lib/python3.11/dist-packages (from jsonschema->rembg==2.0.57->-r requirements_versions.txt (line 22)) (0.36.2)\n",
+ "Requirement already satisfied: rpds-py>=0.7.1 in /usr/local/lib/python3.11/dist-packages (from jsonschema->rembg==2.0.57->-r requirements_versions.txt (line 22)) (0.24.0)\n",
+ "Requirement already satisfied: setuptools in /usr/local/lib/python3.11/dist-packages (from lightning-utilities>=0.10.0->pytorch_lightning==2.3.3->-r requirements_versions.txt (line 11)) (75.2.0)\n",
+ "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib~=3.0->gradio==3.41.2->-r requirements_versions.txt (line 13)) (1.3.1)\n",
+ "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.11/dist-packages (from matplotlib~=3.0->gradio==3.41.2->-r requirements_versions.txt (line 13)) (0.12.1)\n",
+ "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.11/dist-packages (from matplotlib~=3.0->gradio==3.41.2->-r requirements_versions.txt (line 13)) (4.56.0)\n",
+ "Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib~=3.0->gradio==3.41.2->-r requirements_versions.txt (line 13)) (1.4.8)\n",
+ "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib~=3.0->gradio==3.41.2->-r requirements_versions.txt (line 13)) (3.2.3)\n",
+ "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.11/dist-packages (from matplotlib~=3.0->gradio==3.41.2->-r requirements_versions.txt (line 13)) (2.8.2)\n",
+ "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/dist-packages (from pandas<3.0,>=1.0->gradio==3.41.2->-r requirements_versions.txt (line 13)) (2025.2)\n",
+ "Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/dist-packages (from pandas<3.0,>=1.0->gradio==3.41.2->-r requirements_versions.txt (line 13)) (2025.2)\n",
+ "Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.11/dist-packages (from pydantic!=1.8,!=1.8.1,!=2.0.0,!=2.0.1,<3.0.0,>=1.7.4->gradio==3.41.2->-r requirements_versions.txt (line 13)) (0.7.0)\n",
+ "Requirement already satisfied: pydantic-core==2.33.0 in /usr/local/lib/python3.11/dist-packages (from pydantic!=1.8,!=1.8.1,!=2.0.0,!=2.0.1,<3.0.0,>=1.7.4->gradio==3.41.2->-r requirements_versions.txt (line 13)) (2.33.0)\n",
+ "Requirement already satisfied: typing-inspection>=0.4.0 in /usr/local/lib/python3.11/dist-packages (from pydantic!=1.8,!=1.8.1,!=2.0.0,!=2.0.1,<3.0.0,>=1.7.4->gradio==3.41.2->-r requirements_versions.txt (line 13)) (0.4.0)\n",
+ "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/dist-packages (from requests->transformers==4.42.4->-r requirements_versions.txt (line 3)) (3.4.1)\n",
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.11/dist-packages (from requests->transformers==4.42.4->-r requirements_versions.txt (line 3)) (2.3.0)\n",
+ "Requirement already satisfied: networkx in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (3.4.2)\n",
+ "Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (12.4.127)\n",
+ "Requirement already satisfied: nvidia-cuda-runtime-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (12.4.127)\n",
+ "Requirement already satisfied: nvidia-cuda-cupti-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (12.4.127)\n",
+ "Requirement already satisfied: nvidia-cudnn-cu12==9.1.0.70 in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (9.1.0.70)\n",
+ "Requirement already satisfied: nvidia-cublas-cu12==12.4.5.8 in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (12.4.5.8)\n",
+ "Requirement already satisfied: nvidia-cufft-cu12==11.2.1.3 in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (11.2.1.3)\n",
+ "Requirement already satisfied: nvidia-curand-cu12==10.3.5.147 in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (10.3.5.147)\n",
+ "Requirement already satisfied: nvidia-cusolver-cu12==11.6.1.9 in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (11.6.1.9)\n",
+ "Requirement already satisfied: nvidia-cusparse-cu12==12.3.1.170 in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (12.3.1.170)\n",
+ "Requirement already satisfied: nvidia-cusparselt-cu12==0.6.2 in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (0.6.2)\n",
+ "Requirement already satisfied: nvidia-nccl-cu12==2.21.5 in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (2.21.5)\n",
+ "Requirement already satisfied: nvidia-nvtx-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (12.4.127)\n",
+ "Requirement already satisfied: nvidia-nvjitlink-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (12.4.127)\n",
+ "Requirement already satisfied: triton==3.2.0 in /usr/local/lib/python3.11/dist-packages (from torch>=1.6.0->torchsde==0.2.6->-r requirements_versions.txt (line 1)) (3.2.0)\n",
+ "Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.11/dist-packages (from sympy->onnxruntime==1.18.1->-r requirements_versions.txt (line 17)) (1.3.0)\n",
+ "Requirement already satisfied: click>=7.0 in /usr/local/lib/python3.11/dist-packages (from uvicorn>=0.14.0->gradio==3.41.2->-r requirements_versions.txt (line 13)) (8.1.8)\n",
+ "Requirement already satisfied: humanfriendly>=9.1 in /usr/local/lib/python3.11/dist-packages (from coloredlogs->onnxruntime==1.18.1->-r requirements_versions.txt (line 17)) (10.0)\n",
+ "Requirement already satisfied: starlette<0.47.0,>=0.40.0 in /usr/local/lib/python3.11/dist-packages (from fastapi->gradio==3.41.2->-r requirements_versions.txt (line 13)) (0.46.1)\n",
+ "Requirement already satisfied: platformdirs>=2.5.0 in /usr/local/lib/python3.11/dist-packages (from pooch->rembg==2.0.57->-r requirements_versions.txt (line 22)) (4.3.7)\n",
+ "Requirement already satisfied: numba!=0.49.0 in /usr/local/lib/python3.11/dist-packages (from pymatting->rembg==2.0.57->-r requirements_versions.txt (line 22)) (0.60.0)\n",
+ "Requirement already satisfied: imageio!=2.35.0,>=2.33 in /usr/local/lib/python3.11/dist-packages (from scikit-image->rembg==2.0.57->-r requirements_versions.txt (line 22)) (2.37.0)\n",
+ "Requirement already satisfied: tifffile>=2022.8.12 in /usr/local/lib/python3.11/dist-packages (from scikit-image->rembg==2.0.57->-r requirements_versions.txt (line 22)) (2025.3.13)\n",
+ "Requirement already satisfied: lazy-loader>=0.4 in /usr/local/lib/python3.11/dist-packages (from scikit-image->rembg==2.0.57->-r requirements_versions.txt (line 22)) (0.4)\n",
+ "Requirement already satisfied: aiohappyeyeballs>=2.3.0 in /usr/local/lib/python3.11/dist-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]>=2022.5.0->pytorch_lightning==2.3.3->-r requirements_versions.txt (line 11)) (2.6.1)\n",
+ "Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.11/dist-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]>=2022.5.0->pytorch_lightning==2.3.3->-r requirements_versions.txt (line 11)) (1.3.2)\n",
+ "Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.11/dist-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]>=2022.5.0->pytorch_lightning==2.3.3->-r requirements_versions.txt (line 11)) (1.5.0)\n",
+ "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.11/dist-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]>=2022.5.0->pytorch_lightning==2.3.3->-r requirements_versions.txt (line 11)) (6.2.0)\n",
+ "Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.11/dist-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]>=2022.5.0->pytorch_lightning==2.3.3->-r requirements_versions.txt (line 11)) (0.3.1)\n",
+ "Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.11/dist-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]>=2022.5.0->pytorch_lightning==2.3.3->-r requirements_versions.txt (line 11)) (1.18.3)\n",
+ "Requirement already satisfied: llvmlite<0.44,>=0.43.0dev0 in /usr/local/lib/python3.11/dist-packages (from numba!=0.49.0->pymatting->rembg==2.0.57->-r requirements_versions.txt (line 22)) (0.43.0)\n",
+ "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.11/dist-packages (from python-dateutil>=2.7->matplotlib~=3.0->gradio==3.41.2->-r requirements_versions.txt (line 13)) (1.17.0)\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "!ls /content/Fooocus\n"
+ ],
+ "metadata": {
+ "id": "k2_OWxEB3taa",
+ "outputId": "c0ac8fb9-e3bf-4f2f-eaea-6bdfc7b1fbeb",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "execution_count": 9,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "args_manager.py environment.yaml\t\t launch.py\t\t\t sdxl_styles\n",
+ "auth-example.json experiments_expansion.py\t ldm_patched\t\t shared.py\n",
+ "build_launcher.py experiments_face.py\t LICENSE\t\t\t tests\n",
+ "css\t\t experiments_interrogate.py models\t\t\t troubleshoot.md\n",
+ "development.md\t experiments_mask_generation.py modules\t\t\t update_log.md\n",
+ "docker-compose.yml extras\t\t\t notification-example.mp3\t webui.py\n",
+ "Dockerfile\t fooocus_colab.ipynb\t presets\t\t\t wildcards\n",
+ "docker.md\t fooocus_version.py\t readme.md\n",
+ "entrypoint.sh\t javascript\t\t requirements_docker.txt\n",
+ "entry_with_update.py language\t\t\t requirements_versions.txt\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "!python entry_with_update.py --share --always-high-vram\n"
+ ],
+ "metadata": {
+ "id": "xeuTikYp39lL",
+ "outputId": "e89bc26c-5361-4f1c-f504-72bc6d86e82f",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "execution_count": 11,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Already up-to-date\n",
+ "Update succeeded.\n",
+ "[System ARGV] ['entry_with_update.py', '--share', '--always-high-vram']\n",
+ "Python 3.11.11 (main, Dec 4 2024, 08:55:07) [GCC 11.4.0]\n",
+ "Fooocus version: 2.5.5\n",
+ "[Cleanup] Attempting to delete content of temp dir /tmp/fooocus\n",
+ "[Cleanup] Cleanup successful\n",
+ "Downloading: \"https://huggingface.co/lllyasviel/misc/resolve/main/xlvaeapp.pth\" to /content/Fooocus/models/vae_approx/xlvaeapp.pth\n",
+ "\n",
+ "100% 209k/209k [00:00<00:00, 10.9MB/s]\n",
+ "Downloading: \"https://huggingface.co/lllyasviel/misc/resolve/main/vaeapp_sd15.pt\" to /content/Fooocus/models/vae_approx/vaeapp_sd15.pth\n",
+ "\n",
+ "100% 209k/209k [00:00<00:00, 11.1MB/s]\n",
+ "Downloading: \"https://huggingface.co/mashb1t/misc/resolve/main/xl-to-v1_interposer-v4.0.safetensors\" to /content/Fooocus/models/vae_approx/xl-to-v1_interposer-v4.0.safetensors\n",
+ "\n",
+ "100% 5.40M/5.40M [00:00<00:00, 108MB/s]\n",
+ "Downloading: \"https://huggingface.co/lllyasviel/misc/resolve/main/fooocus_expansion.bin\" to /content/Fooocus/models/prompt_expansion/fooocus_expansion/pytorch_model.bin\n",
+ "\n",
+ "100% 335M/335M [00:01<00:00, 297MB/s]\n",
+ "Downloading: \"https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors\" to /content/Fooocus/models/checkpoints/juggernautXL_v8Rundiffusion.safetensors\n",
+ "\n",
+ "100% 6.62G/6.62G [00:42<00:00, 169MB/s]\n",
+ "Downloading: \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors\" to /content/Fooocus/models/loras/sd_xl_offset_example-lora_1.0.safetensors\n",
+ "\n",
+ "100% 47.3M/47.3M [00:00<00:00, 267MB/s]\n",
+ "Total VRAM 15095 MB, total RAM 12979 MB\n",
+ "Set vram state to: HIGH_VRAM\n",
+ "Always offload VRAM\n",
+ "Device: cuda:0 Tesla T4 : native\n",
+ "VAE dtype: torch.float32\n",
+ "Using pytorch cross attention\n",
+ "Refiner unloaded.\n",
+ "IMPORTANT: You are using gradio version 3.41.2, however version 4.44.1 is available, please upgrade.\n",
+ "--------\n",
+ "Running on local URL: http://127.0.0.1:7865\n",
+ "Running on public URL: https://6890a0d23d1816a521.gradio.live\n",
+ "\n",
+ "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n",
+ "model_type EPS\n",
+ "UNet ADM Dimension 2816\n",
+ "Using pytorch attention in VAE\n",
+ "Working with z of shape (1, 4, 32, 32) = 4096 dimensions.\n",
+ "Using pytorch attention in VAE\n",
+ "extra {'cond_stage_model.clip_l.text_projection', 'cond_stage_model.clip_l.logit_scale'}\n",
+ "left over keys: dict_keys(['cond_stage_model.clip_l.transformer.text_model.embeddings.position_ids'])\n",
+ "loaded straight to GPU\n",
+ "Requested to load SDXL\n",
+ "Loading 1 new model\n",
+ "Base model loaded: /content/Fooocus/models/checkpoints/juggernautXL_v8Rundiffusion.safetensors\n",
+ "VAE loaded: None\n",
+ "Request to load LoRAs [('sd_xl_offset_example-lora_1.0.safetensors', 0.1)] for model [/content/Fooocus/models/checkpoints/juggernautXL_v8Rundiffusion.safetensors].\n",
+ "Loaded LoRA [/content/Fooocus/models/loras/sd_xl_offset_example-lora_1.0.safetensors] for UNet [/content/Fooocus/models/checkpoints/juggernautXL_v8Rundiffusion.safetensors] with 788 keys at weight 0.1.\n",
+ "Fooocus V2 Expansion: Vocab with 642 words.\n",
+ "Fooocus Expansion engine loaded for cuda:0, use_fp16 = True.\n",
+ "Requested to load SDXLClipModel\n",
+ "Requested to load GPT2LMHeadModel\n",
+ "Loading 2 new models\n",
+ "[Fooocus Model Management] Moving model(s) has taken 0.77 seconds\n",
+ "2025-04-02 05:02:52.894243: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:477] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
+ "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n",
+ "E0000 00:00:1743570173.155849 16485 cuda_dnn.cc:8310] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
+ "E0000 00:00:1743570173.225117 16485 cuda_blas.cc:1418] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
+ "2025-04-02 05:02:53.782034: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
+ "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
+ "Started worker with PID 16119\n",
+ "App started successful. Use the app with http://127.0.0.1:7865/ or 127.0.0.1:7865 or https://6890a0d23d1816a521.gradio.live\n",
+ "Keyboard interruption in main thread... closing server.\n",
+ "Traceback (most recent call last):\n",
+ " File \"/usr/local/lib/python3.11/dist-packages/gradio/blocks.py\", line 2199, in block_thread\n",
+ " time.sleep(0.1)\n",
+ "KeyboardInterrupt\n",
+ "\n",
+ "During handling of the above exception, another exception occurred:\n",
+ "\n",
+ "Traceback (most recent call last):\n",
+ " File \"/content/Fooocus/entry_with_update.py\", line 46, in \n",
+ " from launch import *\n",
+ " File \"/content/Fooocus/launch.py\", line 152, in \n",
+ " from webui import *\n",
+ " File \"/content/Fooocus/webui.py\", line 1120, in \n",
+ " shared.gradio_root.launch(\n",
+ " File \"/usr/local/lib/python3.11/dist-packages/gradio/blocks.py\", line 2115, in launch\n",
+ " self.block_thread()\n",
+ " File \"/usr/local/lib/python3.11/dist-packages/gradio/blocks.py\", line 2203, in block_thread\n",
+ " self.server.close()\n",
+ " File \"/usr/local/lib/python3.11/dist-packages/gradio/networking.py\", line 49, in close\n",
+ " self.thread.join()\n",
+ " File \"/usr/lib/python3.11/threading.py\", line 1119, in join\n",
+ " self._wait_for_tstate_lock()\n",
+ " File \"/usr/lib/python3.11/threading.py\", line 1139, in _wait_for_tstate_lock\n",
+ " if lock.acquire(block, timeout):\n",
+ " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
+ "KeyboardInterrupt\n",
+ "Killing tunnel 127.0.0.1:7865 <> https://6890a0d23d1816a521.gradio.live\n",
+ "^C\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "!rm -rf /content/Fooocus # Delete the existing Fooocus repo\n",
+ "!git clone https://github.com/Alen-joy47/Fooocus.git /content/Fooocus\n",
+ "%cd /content/Fooocus"
+ ],
+ "metadata": {
+ "id": "wb3fgNxM5lOD",
+ "outputId": "1069b4c2-0cec-49de-ef6d-e53f43191106",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "execution_count": 2,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Cloning into '/content/Fooocus'...\n",
+ "remote: Enumerating objects: 6527, done.\u001b[K\n",
+ "remote: Total 6527 (delta 0), reused 0 (delta 0), pack-reused 6527 (from 1)\u001b[K\n",
+ "Receiving objects: 100% (6527/6527), 33.26 MiB | 28.19 MiB/s, done.\n",
+ "Resolving deltas: 100% (3702/3702), done.\n",
+ "/content/Fooocus\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "!git remote -v\n"
+ ],
+ "metadata": {
+ "id": "NCopr3He6XlG",
+ "outputId": "1714211a-1903-4d11-af54-f3598db7c28c",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "execution_count": 3,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "origin\thttps://github.com/Alen-joy47/Fooocus.git (fetch)\n",
+ "origin\thttps://github.com/Alen-joy47/Fooocus.git (push)\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "%%writefile /content/Fooocus/shared.py\n",
+ "\n",
+ "# Existing imports\n",
+ "import os\n",
+ "import sys\n",
+ "\n",
+ "# Global variable to store the saved character\n",
+ "saved_character = None\n",
+ "\n"
+ ],
+ "metadata": {
+ "id": "1Qzetvzg67yO",
+ "outputId": "6d4c35b0-b720-422b-fda2-c53c3c2f9f94",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "execution_count": 4,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Overwriting /content/Fooocus/shared.py\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "%%writefile /content/Fooocus/modules/prompt_expansion.py\n",
+ "\n",
+ "import shared # Import the shared file to access the saved character\n",
+ "\n",
+ "def expand_prompt(prompt):\n",
+ " \"\"\"Modify the prompt to always include the saved character\"\"\"\n",
+ "\n",
+ " # Check if a character is saved\n",
+ " if shared.saved_character:\n",
+ " prompt = f\"{shared.saved_character}, {prompt}\" # Append saved character to prompt\n",
+ "\n",
+ " return prompt\n"
+ ],
+ "metadata": {
+ "id": "jBgIZ7_E7Aet",
+ "outputId": "8c9afaa0-97f6-498a-800b-0cdb7a88a7a9",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "execution_count": 5,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Writing /content/Fooocus/modules/prompt_expansion.py\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "%%writefile /content/Fooocus/webui.py\n",
+ "\n",
+ "import gradio as gr\n",
+ "import shared\n",
+ "\n",
+ "def save_character(character_description):\n",
+ " \"\"\"Save the character globally\"\"\"\n",
+ " shared.saved_character = character_description\n",
+ " return f\"Character saved: {character_description}\"\n",
+ "\n",
+ "def launch_ui():\n",
+ " with gr.Blocks() as ui:\n",
+ " gr.Markdown(\"### Character Persistence Feature\")\n",
+ "\n",
+ " character_input = gr.Textbox(label=\"Character Description\")\n",
+ " save_button = gr.Button(\"Save Character\")\n",
+ " save_status = gr.Textbox(label=\"Status\", interactive=False)\n",
+ "\n",
+ " save_button.click(fn=save_character, inputs=[character_input], outputs=[save_status])\n",
+ "\n",
+ " # Existing UI components...\n",
+ "\n",
+ " ui.launch()\n"
+ ],
+ "metadata": {
+ "id": "DgLcyqc17Gvs",
+ "outputId": "b3d45375-4ce7-4110-dea3-3bec482a87bd",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "execution_count": 6,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Overwriting /content/Fooocus/webui.py\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "!python /content/Fooocus/entry_with_update.py --share --always-high-vram\n"
+ ],
+ "metadata": {
+ "id": "s87S0CuK7L51",
+ "outputId": "0bc61b2b-0cfd-4452-e896-f7c3313e43ca",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "execution_count": 7,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Already up-to-date\n",
+ "Update succeeded.\n",
+ "[System ARGV] ['/content/Fooocus/entry_with_update.py', '--share', '--always-high-vram']\n",
+ "Python 3.11.11 (main, Dec 4 2024, 08:55:07) [GCC 11.4.0]\n",
+ "Fooocus version: 2.5.5\n",
+ "[Cleanup] Attempting to delete content of temp dir /tmp/fooocus\n",
+ "[Cleanup] Cleanup successful\n",
+ "Downloading: \"https://huggingface.co/lllyasviel/misc/resolve/main/xlvaeapp.pth\" to /content/Fooocus/models/vae_approx/xlvaeapp.pth\n",
+ "\n",
+ "100% 209k/209k [00:00<00:00, 11.5MB/s]\n",
+ "Downloading: \"https://huggingface.co/lllyasviel/misc/resolve/main/vaeapp_sd15.pt\" to /content/Fooocus/models/vae_approx/vaeapp_sd15.pth\n",
+ "\n",
+ "100% 209k/209k [00:00<00:00, 9.74MB/s]\n",
+ "Downloading: \"https://huggingface.co/mashb1t/misc/resolve/main/xl-to-v1_interposer-v4.0.safetensors\" to /content/Fooocus/models/vae_approx/xl-to-v1_interposer-v4.0.safetensors\n",
+ "\n",
+ "100% 5.40M/5.40M [00:00<00:00, 88.6MB/s]\n",
+ "Downloading: \"https://huggingface.co/lllyasviel/misc/resolve/main/fooocus_expansion.bin\" to /content/Fooocus/models/prompt_expansion/fooocus_expansion/pytorch_model.bin\n",
+ "\n",
+ "100% 335M/335M [00:01<00:00, 343MB/s]\n",
+ "Downloading: \"https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors\" to /content/Fooocus/models/checkpoints/juggernautXL_v8Rundiffusion.safetensors\n",
+ "\n",
+ "100% 6.62G/6.62G [00:45<00:00, 157MB/s]\n",
+ "Downloading: \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors\" to /content/Fooocus/models/loras/sd_xl_offset_example-lora_1.0.safetensors\n",
+ "\n",
+ "100% 47.3M/47.3M [00:00<00:00, 234MB/s]\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "!git config --global user.email \"your-email@example.com\"\n",
+ "!git config --global user.name \"Your Name\"\n"
+ ],
+ "metadata": {
+ "id": "QGq9FBrG7-Zr"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "%cd /content/Fooocus\n",
+ "!git add .\n",
+ "!git commit -m \"Added persistent character feature\"\n",
+ "!git push origin main\n"
+ ],
+ "metadata": {
+ "id": "LZ-ZsW4t7ypt",
+ "outputId": "ca9fd38e-c58b-4582-e4f3-8ebb8dfa5f3f",
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ }
+ },
+ "execution_count": 8,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content/Fooocus\n",
+ "Author identity unknown\n",
+ "\n",
+ "*** Please tell me who you are.\n",
+ "\n",
+ "Run\n",
+ "\n",
+ " git config --global user.email \"you@example.com\"\n",
+ " git config --global user.name \"Your Name\"\n",
+ "\n",
+ "to set your account's default identity.\n",
+ "Omit --global to set the identity only in this repository.\n",
+ "\n",
+ "fatal: unable to auto-detect email address (got 'root@f945c8cbaf5f.(none)')\n",
+ "fatal: could not read Username for 'https://github.com': No such device or address\n"
+ ]
+ }
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "provenance": [],
+ "include_colab_link": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ }
},
- "kernelspec": {
- "display_name": "Python 3",
- "name": "python3"
- },
- "language_info": {
- "name": "python"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
\ No newline at end of file
diff --git a/modules/prompt_expansion.py b/modules/prompt_expansion.py
new file mode 100644
index 00000000..f875bcd7
--- /dev/null
+++ b/modules/prompt_expansion.py
@@ -0,0 +1,11 @@
+
+import shared  # NOTE(review): this new module is not imported anywhere; hook expand_prompt into modules/expansion.py or async_worker for it to take effect
+
+def expand_prompt(prompt):
+ """Modify the prompt to always include the saved character"""
+
+ # Check if a character is saved
+ if shared.saved_character:
+ prompt = f"{shared.saved_character}, {prompt}" # Append saved character to prompt
+
+ return prompt
diff --git a/modules/shared.py b/modules/shared.py
new file mode 100644
index 00000000..2903ad97
--- /dev/null
+++ b/modules/shared.py
@@ -0,0 +1,13 @@
+# shared.py
+
+# Global variable to store the saved character
+saved_character = None
+
+def save_character(character):
+ """Save a character description globally."""
+ global saved_character
+ saved_character = character
+
+def get_saved_character():
+ """Retrieve the saved character description."""
+ return saved_character
diff --git a/shared.py b/shared.py
index 21a2a864..62897e87 100644
--- a/shared.py
+++ b/shared.py
@@ -1 +1,8 @@
-gradio_root = None
\ No newline at end of file
+gradio_root = None  # NOTE(review): restored original default; confirm nothing reads shared.gradio_root before webui assigns it
+# Existing imports
+import os
+import sys
+
+# Global variable to store the saved character
+saved_character = None
+
diff --git a/webui.py b/webui.py
index b8159d85..7d464264 100644
--- a/webui.py
+++ b/webui.py
@@ -1,1128 +1,46 @@
-import gradio as gr
-import random
import os
-import json
-import time
-import shared
-import modules.config
-import fooocus_version
-import modules.html
+import gradio as gr
import modules.async_worker as worker
-import modules.constants as constants
-import modules.flags as flags
-import modules.gradio_hijack as grh
-import modules.style_sorter as style_sorter
-import modules.meta_parser
-import args_manager
-import copy
-import launch
-from extras.inpaint_mask import SAMOptions
+from modules.util import load_file
+from modules.config import cfg  # FIXME(review): modules.config does not appear to export cfg (and modules.default_values / modules.util.load_file look absent upstream); these imports will fail at startup -- confirm against the fork
+from modules.default_values import default_prompt, default_negative_prompt
-from modules.sdxl_styles import legal_style_names
-from modules.private_logger import get_current_html_path
-from modules.ui_gradio_extensions import reload_javascript
-from modules.auth import auth_enabled, check_auth
-from modules.util import is_json
+SAVE_PATH = "saved_character.txt"
-def get_task(*args):
- args = list(args)
- args.pop(0)
+def save_character(character_description):
+ with open(SAVE_PATH, "w", encoding="utf-8") as f:
+ f.write(character_description)
+ return "✅ Character saved!"
- return worker.AsyncTask(args=args)
+def load_character():
+ if os.path.exists(SAVE_PATH):
+ with open(SAVE_PATH, "r", encoding="utf-8") as f:
+ return f.read()
+ return ""
-def generate_clicked(task: worker.AsyncTask):
- import ldm_patched.modules.model_management as model_management
+def start_processing(prompt, negative_prompt):
+ return worker.run_task(prompt, negative_prompt)  # FIXME(review): modules.async_worker defines AsyncTask/async_tasks, not run_task -- confirm this call path exists
- with model_management.interrupt_processing_mutex:
- model_management.interrupt_processing = False
- # outputs=[progress_html, progress_window, progress_gallery, gallery]
+with gr.Blocks(css="style.css") as ui:
+ gr.Markdown("## 🖼️ Fooocus - Advanced UI")
- if len(task.args) == 0:
- return
-
- execution_start_time = time.perf_counter()
- finished = False
-
- yield gr.update(visible=True, value=modules.html.make_progress_html(1, 'Waiting for task to start ...')), \
- gr.update(visible=True, value=None), \
- gr.update(visible=False, value=None), \
- gr.update(visible=False)
-
- worker.async_tasks.append(task)
-
- while not finished:
- time.sleep(0.01)
- if len(task.yields) > 0:
- flag, product = task.yields.pop(0)
- if flag == 'preview':
-
- # help bad internet connection by skipping duplicated preview
- if len(task.yields) > 0: # if we have the next item
- if task.yields[0][0] == 'preview': # if the next item is also a preview
- # print('Skipped one preview for better internet connection.')
- continue
-
- percentage, title, image = product
- yield gr.update(visible=True, value=modules.html.make_progress_html(percentage, title)), \
- gr.update(visible=True, value=image) if image is not None else gr.update(), \
- gr.update(), \
- gr.update(visible=False)
- if flag == 'results':
- yield gr.update(visible=True), \
- gr.update(visible=True), \
- gr.update(visible=True, value=product), \
- gr.update(visible=False)
- if flag == 'finish':
- if not args_manager.args.disable_enhance_output_sorting:
- product = sort_enhance_images(product, task)
-
- yield gr.update(visible=False), \
- gr.update(visible=False), \
- gr.update(visible=False), \
- gr.update(visible=True, value=product)
- finished = True
-
- # delete Fooocus temp images, only keep gradio temp images
- if args_manager.args.disable_image_log:
- for filepath in product:
- if isinstance(filepath, str) and os.path.exists(filepath):
- os.remove(filepath)
-
- execution_time = time.perf_counter() - execution_start_time
- print(f'Total time: {execution_time:.2f} seconds')
- return
-
-
-def sort_enhance_images(images, task):
- if not task.should_enhance or len(images) <= task.images_to_enhance_count:
- return images
-
- sorted_images = []
- walk_index = task.images_to_enhance_count
-
- for index, enhanced_img in enumerate(images[:task.images_to_enhance_count]):
- sorted_images.append(enhanced_img)
- if index not in task.enhance_stats:
- continue
- target_index = walk_index + task.enhance_stats[index]
- if walk_index < len(images) and target_index <= len(images):
- sorted_images += images[walk_index:target_index]
- walk_index += task.enhance_stats[index]
-
- return sorted_images
-
-
-def inpaint_mode_change(mode, inpaint_engine_version):
- assert mode in modules.flags.inpaint_options
-
- # inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts,
- # inpaint_disable_initial_latent, inpaint_engine,
- # inpaint_strength, inpaint_respective_field
-
- if mode == modules.flags.inpaint_option_detail:
- return [
- gr.update(visible=True), gr.update(visible=False, value=[]),
- gr.Dataset.update(visible=True, samples=modules.config.example_inpaint_prompts),
- False, 'None', 0.5, 0.0
- ]
-
- if inpaint_engine_version == 'empty':
- inpaint_engine_version = modules.config.default_inpaint_engine_version
-
- if mode == modules.flags.inpaint_option_modify:
- return [
- gr.update(visible=True), gr.update(visible=False, value=[]),
- gr.Dataset.update(visible=False, samples=modules.config.example_inpaint_prompts),
- True, inpaint_engine_version, 1.0, 0.0
- ]
-
- return [
- gr.update(visible=False, value=''), gr.update(visible=True),
- gr.Dataset.update(visible=False, samples=modules.config.example_inpaint_prompts),
- False, inpaint_engine_version, 1.0, 0.618
- ]
-
-
-reload_javascript()
-
-title = f'Fooocus {fooocus_version.version}'
-
-if isinstance(args_manager.args.preset, str):
- title += ' ' + args_manager.args.preset
-
-shared.gradio_root = gr.Blocks(title=title).queue()
-
-with shared.gradio_root:
- currentTask = gr.State(worker.AsyncTask(args=[]))
- inpaint_engine_state = gr.State('empty')
with gr.Row():
- with gr.Column(scale=2):
- with gr.Row():
- progress_window = grh.Image(label='Preview', show_label=True, visible=False, height=768,
- elem_classes=['main_view'])
- progress_gallery = gr.Gallery(label='Finished Images', show_label=True, object_fit='contain',
- height=768, visible=False, elem_classes=['main_view', 'image_gallery'])
- progress_html = gr.HTML(value=modules.html.make_progress_html(32, 'Progress 32%'), visible=False,
- elem_id='progress-bar', elem_classes='progress-bar')
- gallery = gr.Gallery(label='Gallery', show_label=False, object_fit='contain', visible=True, height=768,
- elem_classes=['resizable_area', 'main_view', 'final_gallery', 'image_gallery'],
- elem_id='final_gallery')
- with gr.Row():
- with gr.Column(scale=17):
- prompt = gr.Textbox(show_label=False, placeholder="Type prompt here or paste parameters.", elem_id='positive_prompt',
- autofocus=True, lines=3)
+ with gr.Column():
+ prompt = gr.Textbox(label="Prompt", value=default_prompt)
+ negative_prompt = gr.Textbox(label="Negative Prompt", value=default_negative_prompt)
- default_prompt = modules.config.default_prompt
- if isinstance(default_prompt, str) and default_prompt != '':
- shared.gradio_root.load(lambda: default_prompt, outputs=prompt)
+ generate_btn = gr.Button("Generate")
+ output = gr.Image(label="Result")
- with gr.Column(scale=3, min_width=0):
- generate_button = gr.Button(label="Generate", value="Generate", elem_classes='type_row', elem_id='generate_button', visible=True)
- reset_button = gr.Button(label="Reconnect", value="Reconnect", elem_classes='type_row', elem_id='reset_button', visible=False)
- load_parameter_button = gr.Button(label="Load Parameters", value="Load Parameters", elem_classes='type_row', elem_id='load_parameter_button', visible=False)
- skip_button = gr.Button(label="Skip", value="Skip", elem_classes='type_row_half', elem_id='skip_button', visible=False)
- stop_button = gr.Button(label="Stop", value="Stop", elem_classes='type_row_half', elem_id='stop_button', visible=False)
+ with gr.Column():
+ with gr.Accordion("Advanced", open=False):
+ character_input = gr.Textbox(label="Character Description", value=load_character())
+ save_button = gr.Button("💾 Save Character")
+ save_output = gr.Markdown()
+ save_button.click(fn=save_character, inputs=character_input, outputs=save_output)
- def stop_clicked(currentTask):
- import ldm_patched.modules.model_management as model_management
- currentTask.last_stop = 'stop'
- if (currentTask.processing):
- model_management.interrupt_current_processing()
- return currentTask
+ generate_btn.click(fn=start_processing, inputs=[prompt, negative_prompt], outputs=output)
- def skip_clicked(currentTask):
- import ldm_patched.modules.model_management as model_management
- currentTask.last_stop = 'skip'
- if (currentTask.processing):
- model_management.interrupt_current_processing()
- return currentTask
+ui.launch(share=True)
- stop_button.click(stop_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False, _js='cancelGenerateForever')
- skip_button.click(skip_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False)
- with gr.Row(elem_classes='advanced_check_row'):
- input_image_checkbox = gr.Checkbox(label='Input Image', value=modules.config.default_image_prompt_checkbox, container=False, elem_classes='min_check')
- enhance_checkbox = gr.Checkbox(label='Enhance', value=modules.config.default_enhance_checkbox, container=False, elem_classes='min_check')
- advanced_checkbox = gr.Checkbox(label='Advanced', value=modules.config.default_advanced_checkbox, container=False, elem_classes='min_check')
- with gr.Row(visible=modules.config.default_image_prompt_checkbox) as image_input_panel:
- with gr.Tabs(selected=modules.config.default_selected_image_input_tab_id):
- with gr.Tab(label='Upscale or Variation', id='uov_tab') as uov_tab:
- with gr.Row():
- with gr.Column():
- uov_input_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False)
- with gr.Column():
- uov_method = gr.Radio(label='Upscale or Variation:', choices=flags.uov_list, value=modules.config.default_uov_method)
- gr.HTML('\U0001F4D4 Documentation')
- with gr.Tab(label='Image Prompt', id='ip_tab') as ip_tab:
- with gr.Row():
- ip_images = []
- ip_types = []
- ip_stops = []
- ip_weights = []
- ip_ctrls = []
- ip_ad_cols = []
- for image_count in range(modules.config.default_controlnet_image_count):
- image_count += 1
- with gr.Column():
- ip_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False, height=300, value=modules.config.default_ip_images[image_count])
- ip_images.append(ip_image)
- ip_ctrls.append(ip_image)
- with gr.Column(visible=modules.config.default_image_prompt_advanced_checkbox) as ad_col:
- with gr.Row():
- ip_stop = gr.Slider(label='Stop At', minimum=0.0, maximum=1.0, step=0.001, value=modules.config.default_ip_stop_ats[image_count])
- ip_stops.append(ip_stop)
- ip_ctrls.append(ip_stop)
- ip_weight = gr.Slider(label='Weight', minimum=0.0, maximum=2.0, step=0.001, value=modules.config.default_ip_weights[image_count])
- ip_weights.append(ip_weight)
- ip_ctrls.append(ip_weight)
-
- ip_type = gr.Radio(label='Type', choices=flags.ip_list, value=modules.config.default_ip_types[image_count], container=False)
- ip_types.append(ip_type)
- ip_ctrls.append(ip_type)
-
- ip_type.change(lambda x: flags.default_parameters[x], inputs=[ip_type], outputs=[ip_stop, ip_weight], queue=False, show_progress=False)
- ip_ad_cols.append(ad_col)
- ip_advanced = gr.Checkbox(label='Advanced', value=modules.config.default_image_prompt_advanced_checkbox, container=False)
- gr.HTML('* \"Image Prompt\" is powered by Fooocus Image Mixture Engine (v1.0.1). \U0001F4D4 Documentation')
-
- def ip_advance_checked(x):
- return [gr.update(visible=x)] * len(ip_ad_cols) + \
- [flags.default_ip] * len(ip_types) + \
- [flags.default_parameters[flags.default_ip][0]] * len(ip_stops) + \
- [flags.default_parameters[flags.default_ip][1]] * len(ip_weights)
-
- ip_advanced.change(ip_advance_checked, inputs=ip_advanced,
- outputs=ip_ad_cols + ip_types + ip_stops + ip_weights,
- queue=False, show_progress=False)
-
- with gr.Tab(label='Inpaint or Outpaint', id='inpaint_tab') as inpaint_tab:
- with gr.Row():
- with gr.Column():
- inpaint_input_image = grh.Image(label='Image', source='upload', type='numpy', tool='sketch', height=500, brush_color="#FFFFFF", elem_id='inpaint_canvas', show_label=False)
- inpaint_advanced_masking_checkbox = gr.Checkbox(label='Enable Advanced Masking Features', value=modules.config.default_inpaint_advanced_masking_checkbox)
- inpaint_mode = gr.Dropdown(choices=modules.flags.inpaint_options, value=modules.config.default_inpaint_method, label='Method')
- inpaint_additional_prompt = gr.Textbox(placeholder="Describe what you want to inpaint.", elem_id='inpaint_additional_prompt', label='Inpaint Additional Prompt', visible=False)
- outpaint_selections = gr.CheckboxGroup(choices=['Left', 'Right', 'Top', 'Bottom'], value=[], label='Outpaint Direction')
- example_inpaint_prompts = gr.Dataset(samples=modules.config.example_inpaint_prompts,
- label='Additional Prompt Quick List',
- components=[inpaint_additional_prompt],
- visible=False)
- gr.HTML('* Powered by Fooocus Inpaint Engine \U0001F4D4 Documentation')
- example_inpaint_prompts.click(lambda x: x[0], inputs=example_inpaint_prompts, outputs=inpaint_additional_prompt, show_progress=False, queue=False)
-
- with gr.Column(visible=modules.config.default_inpaint_advanced_masking_checkbox) as inpaint_mask_generation_col:
- inpaint_mask_image = grh.Image(label='Mask Upload', source='upload', type='numpy', tool='sketch', height=500, brush_color="#FFFFFF", mask_opacity=1, elem_id='inpaint_mask_canvas')
- invert_mask_checkbox = gr.Checkbox(label='Invert Mask When Generating', value=modules.config.default_invert_mask_checkbox)
- inpaint_mask_model = gr.Dropdown(label='Mask generation model',
- choices=flags.inpaint_mask_models,
- value=modules.config.default_inpaint_mask_model)
- inpaint_mask_cloth_category = gr.Dropdown(label='Cloth category',
- choices=flags.inpaint_mask_cloth_category,
- value=modules.config.default_inpaint_mask_cloth_category,
- visible=False)
- inpaint_mask_dino_prompt_text = gr.Textbox(label='Detection prompt', value='', visible=False, info='Use singular whenever possible', placeholder='Describe what you want to detect.')
- example_inpaint_mask_dino_prompt_text = gr.Dataset(
- samples=modules.config.example_enhance_detection_prompts,
- label='Detection Prompt Quick List',
- components=[inpaint_mask_dino_prompt_text],
- visible=modules.config.default_inpaint_mask_model == 'sam')
- example_inpaint_mask_dino_prompt_text.click(lambda x: x[0],
- inputs=example_inpaint_mask_dino_prompt_text,
- outputs=inpaint_mask_dino_prompt_text,
- show_progress=False, queue=False)
-
- with gr.Accordion("Advanced options", visible=False, open=False) as inpaint_mask_advanced_options:
- inpaint_mask_sam_model = gr.Dropdown(label='SAM model', choices=flags.inpaint_mask_sam_model, value=modules.config.default_inpaint_mask_sam_model)
- inpaint_mask_box_threshold = gr.Slider(label="Box Threshold", minimum=0.0, maximum=1.0, value=0.3, step=0.05)
- inpaint_mask_text_threshold = gr.Slider(label="Text Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.05)
- inpaint_mask_sam_max_detections = gr.Slider(label="Maximum number of detections", info="Set to 0 to detect all", minimum=0, maximum=10, value=modules.config.default_sam_max_detections, step=1, interactive=True)
- generate_mask_button = gr.Button(value='Generate mask from image')
-
- def generate_mask(image, mask_model, cloth_category, dino_prompt_text, sam_model, box_threshold, text_threshold, sam_max_detections, dino_erode_or_dilate, dino_debug):
- from extras.inpaint_mask import generate_mask_from_image
-
- extras = {}
- sam_options = None
- if mask_model == 'u2net_cloth_seg':
- extras['cloth_category'] = cloth_category
- elif mask_model == 'sam':
- sam_options = SAMOptions(
- dino_prompt=dino_prompt_text,
- dino_box_threshold=box_threshold,
- dino_text_threshold=text_threshold,
- dino_erode_or_dilate=dino_erode_or_dilate,
- dino_debug=dino_debug,
- max_detections=sam_max_detections,
- model_type=sam_model
- )
-
- mask, _, _, _ = generate_mask_from_image(image, mask_model, extras, sam_options)
-
- return mask
-
-
- inpaint_mask_model.change(lambda x: [gr.update(visible=x == 'u2net_cloth_seg')] +
- [gr.update(visible=x == 'sam')] * 2 +
- [gr.Dataset.update(visible=x == 'sam',
- samples=modules.config.example_enhance_detection_prompts)],
- inputs=inpaint_mask_model,
- outputs=[inpaint_mask_cloth_category,
- inpaint_mask_dino_prompt_text,
- inpaint_mask_advanced_options,
- example_inpaint_mask_dino_prompt_text],
- queue=False, show_progress=False)
-
- with gr.Tab(label='Describe', id='describe_tab') as describe_tab:
- with gr.Row():
- with gr.Column():
- describe_input_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False)
- with gr.Column():
- describe_methods = gr.CheckboxGroup(
- label='Content Type',
- choices=flags.describe_types,
- value=modules.config.default_describe_content_type)
- describe_apply_styles = gr.Checkbox(label='Apply Styles', value=modules.config.default_describe_apply_prompts_checkbox)
- describe_btn = gr.Button(value='Describe this Image into Prompt')
- describe_image_size = gr.Textbox(label='Image Size and Recommended Size', elem_id='describe_image_size', visible=False)
- gr.HTML('\U0001F4D4 Documentation')
-
- def trigger_show_image_properties(image):
- value = modules.util.get_image_size_info(image, modules.flags.sdxl_aspect_ratios)
- return gr.update(value=value, visible=True)
-
- describe_input_image.upload(trigger_show_image_properties, inputs=describe_input_image,
- outputs=describe_image_size, show_progress=False, queue=False)
-
- with gr.Tab(label='Enhance', id='enhance_tab') as enhance_tab:
- with gr.Row():
- with gr.Column():
- enhance_input_image = grh.Image(label='Use with Enhance, skips image generation', source='upload', type='numpy')
- gr.HTML('\U0001F4D4 Documentation')
-
- with gr.Tab(label='Metadata', id='metadata_tab') as metadata_tab:
- with gr.Column():
- metadata_input_image = grh.Image(label='For images created by Fooocus', source='upload', type='pil')
- metadata_json = gr.JSON(label='Metadata')
- metadata_import_button = gr.Button(value='Apply Metadata')
-
- def trigger_metadata_preview(file):
- parameters, metadata_scheme = modules.meta_parser.read_info_from_image(file)
-
- results = {}
- if parameters is not None:
- results['parameters'] = parameters
-
- if isinstance(metadata_scheme, flags.MetadataScheme):
- results['metadata_scheme'] = metadata_scheme.value
-
- return results
-
- metadata_input_image.upload(trigger_metadata_preview, inputs=metadata_input_image,
- outputs=metadata_json, queue=False, show_progress=True)
-
- with gr.Row(visible=modules.config.default_enhance_checkbox) as enhance_input_panel:
- with gr.Tabs():
- with gr.Tab(label='Upscale or Variation'):
- with gr.Row():
- with gr.Column():
- enhance_uov_method = gr.Radio(label='Upscale or Variation:', choices=flags.uov_list,
- value=modules.config.default_enhance_uov_method)
- enhance_uov_processing_order = gr.Radio(label='Order of Processing',
- info='Use before to enhance small details and after to enhance large areas.',
- choices=flags.enhancement_uov_processing_order,
- value=modules.config.default_enhance_uov_processing_order)
- enhance_uov_prompt_type = gr.Radio(label='Prompt',
- info='Choose which prompt to use for Upscale or Variation.',
- choices=flags.enhancement_uov_prompt_types,
- value=modules.config.default_enhance_uov_prompt_type,
- visible=modules.config.default_enhance_uov_processing_order == flags.enhancement_uov_after)
-
- enhance_uov_processing_order.change(lambda x: gr.update(visible=x == flags.enhancement_uov_after),
- inputs=enhance_uov_processing_order,
- outputs=enhance_uov_prompt_type,
- queue=False, show_progress=False)
- gr.HTML('\U0001F4D4 Documentation')
- enhance_ctrls = []
- enhance_inpaint_mode_ctrls = []
- enhance_inpaint_engine_ctrls = []
- enhance_inpaint_update_ctrls = []
- for index in range(modules.config.default_enhance_tabs):
- with gr.Tab(label=f'#{index + 1}') as enhance_tab_item:
- enhance_enabled = gr.Checkbox(label='Enable', value=False, elem_classes='min_check',
- container=False)
-
- enhance_mask_dino_prompt_text = gr.Textbox(label='Detection prompt',
- info='Use singular whenever possible',
- placeholder='Describe what you want to detect.',
- interactive=True,
- visible=modules.config.default_enhance_inpaint_mask_model == 'sam')
- example_enhance_mask_dino_prompt_text = gr.Dataset(
- samples=modules.config.example_enhance_detection_prompts,
- label='Detection Prompt Quick List',
- components=[enhance_mask_dino_prompt_text],
- visible=modules.config.default_enhance_inpaint_mask_model == 'sam')
- example_enhance_mask_dino_prompt_text.click(lambda x: x[0],
- inputs=example_enhance_mask_dino_prompt_text,
- outputs=enhance_mask_dino_prompt_text,
- show_progress=False, queue=False)
-
- enhance_prompt = gr.Textbox(label="Enhancement positive prompt",
- placeholder="Uses original prompt instead if empty.",
- elem_id='enhance_prompt')
- enhance_negative_prompt = gr.Textbox(label="Enhancement negative prompt",
- placeholder="Uses original negative prompt instead if empty.",
- elem_id='enhance_negative_prompt')
-
- with gr.Accordion("Detection", open=False):
- enhance_mask_model = gr.Dropdown(label='Mask generation model',
- choices=flags.inpaint_mask_models,
- value=modules.config.default_enhance_inpaint_mask_model)
- enhance_mask_cloth_category = gr.Dropdown(label='Cloth category',
- choices=flags.inpaint_mask_cloth_category,
- value=modules.config.default_inpaint_mask_cloth_category,
- visible=modules.config.default_enhance_inpaint_mask_model == 'u2net_cloth_seg',
- interactive=True)
-
- with gr.Accordion("SAM Options",
- visible=modules.config.default_enhance_inpaint_mask_model == 'sam',
- open=False) as sam_options:
- enhance_mask_sam_model = gr.Dropdown(label='SAM model',
- choices=flags.inpaint_mask_sam_model,
- value=modules.config.default_inpaint_mask_sam_model,
- interactive=True)
- enhance_mask_box_threshold = gr.Slider(label="Box Threshold", minimum=0.0,
- maximum=1.0, value=0.3, step=0.05,
- interactive=True)
- enhance_mask_text_threshold = gr.Slider(label="Text Threshold", minimum=0.0,
- maximum=1.0, value=0.25, step=0.05,
- interactive=True)
- enhance_mask_sam_max_detections = gr.Slider(label="Maximum number of detections",
- info="Set to 0 to detect all",
- minimum=0, maximum=10,
- value=modules.config.default_sam_max_detections,
- step=1, interactive=True)
-
- with gr.Accordion("Inpaint", visible=True, open=False):
- enhance_inpaint_mode = gr.Dropdown(choices=modules.flags.inpaint_options,
- value=modules.config.default_inpaint_method,
- label='Method', interactive=True)
- enhance_inpaint_disable_initial_latent = gr.Checkbox(
- label='Disable initial latent in inpaint', value=False)
- enhance_inpaint_engine = gr.Dropdown(label='Inpaint Engine',
- value=modules.config.default_inpaint_engine_version,
- choices=flags.inpaint_engine_versions,
- info='Version of Fooocus inpaint model. If set, use performance Quality or Speed (no performance LoRAs) for best results.')
- enhance_inpaint_strength = gr.Slider(label='Inpaint Denoising Strength',
- minimum=0.0, maximum=1.0, step=0.001,
- value=1.0,
- info='Same as the denoising strength in A1111 inpaint. '
- 'Only used in inpaint, not used in outpaint. '
- '(Outpaint always use 1.0)')
- enhance_inpaint_respective_field = gr.Slider(label='Inpaint Respective Field',
- minimum=0.0, maximum=1.0, step=0.001,
- value=0.618,
- info='The area to inpaint. '
- 'Value 0 is same as "Only Masked" in A1111. '
- 'Value 1 is same as "Whole Image" in A1111. '
- 'Only used in inpaint, not used in outpaint. '
- '(Outpaint always use 1.0)')
- enhance_inpaint_erode_or_dilate = gr.Slider(label='Mask Erode or Dilate',
- minimum=-64, maximum=64, step=1, value=0,
- info='Positive value will make white area in the mask larger, '
- 'negative value will make white area smaller. '
- '(default is 0, always processed before any mask invert)')
- enhance_mask_invert = gr.Checkbox(label='Invert Mask', value=False)
-
- gr.HTML('\U0001F4D4 Documentation')
-
- enhance_ctrls += [
- enhance_enabled,
- enhance_mask_dino_prompt_text,
- enhance_prompt,
- enhance_negative_prompt,
- enhance_mask_model,
- enhance_mask_cloth_category,
- enhance_mask_sam_model,
- enhance_mask_text_threshold,
- enhance_mask_box_threshold,
- enhance_mask_sam_max_detections,
- enhance_inpaint_disable_initial_latent,
- enhance_inpaint_engine,
- enhance_inpaint_strength,
- enhance_inpaint_respective_field,
- enhance_inpaint_erode_or_dilate,
- enhance_mask_invert
- ]
-
- enhance_inpaint_mode_ctrls += [enhance_inpaint_mode]
- enhance_inpaint_engine_ctrls += [enhance_inpaint_engine]
-
- enhance_inpaint_update_ctrls += [[
- enhance_inpaint_mode, enhance_inpaint_disable_initial_latent, enhance_inpaint_engine,
- enhance_inpaint_strength, enhance_inpaint_respective_field
- ]]
-
- enhance_inpaint_mode.change(inpaint_mode_change, inputs=[enhance_inpaint_mode, inpaint_engine_state], outputs=[
- inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts,
- enhance_inpaint_disable_initial_latent, enhance_inpaint_engine,
- enhance_inpaint_strength, enhance_inpaint_respective_field
- ], show_progress=False, queue=False)
-
- enhance_mask_model.change(
- lambda x: [gr.update(visible=x == 'u2net_cloth_seg')] +
- [gr.update(visible=x == 'sam')] * 2 +
- [gr.Dataset.update(visible=x == 'sam',
- samples=modules.config.example_enhance_detection_prompts)],
- inputs=enhance_mask_model,
- outputs=[enhance_mask_cloth_category, enhance_mask_dino_prompt_text, sam_options,
- example_enhance_mask_dino_prompt_text],
- queue=False, show_progress=False)
-
- switch_js = "(x) => {if(x){viewer_to_bottom(100);viewer_to_bottom(500);}else{viewer_to_top();} return x;}"
- down_js = "() => {viewer_to_bottom();}"
-
- input_image_checkbox.change(lambda x: gr.update(visible=x), inputs=input_image_checkbox,
- outputs=image_input_panel, queue=False, show_progress=False, _js=switch_js)
- ip_advanced.change(lambda: None, queue=False, show_progress=False, _js=down_js)
-
- current_tab = gr.Textbox(value='uov', visible=False)
- uov_tab.select(lambda: 'uov', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
- inpaint_tab.select(lambda: 'inpaint', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
- ip_tab.select(lambda: 'ip', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
- describe_tab.select(lambda: 'desc', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
- enhance_tab.select(lambda: 'enhance', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
- metadata_tab.select(lambda: 'metadata', outputs=current_tab, queue=False, _js=down_js, show_progress=False)
- enhance_checkbox.change(lambda x: gr.update(visible=x), inputs=enhance_checkbox,
- outputs=enhance_input_panel, queue=False, show_progress=False, _js=switch_js)
-
- with gr.Column(scale=1, visible=modules.config.default_advanced_checkbox) as advanced_column:
- with gr.Tab(label='Settings'):
- if not args_manager.args.disable_preset_selection:
- preset_selection = gr.Dropdown(label='Preset',
- choices=modules.config.available_presets,
- value=args_manager.args.preset if args_manager.args.preset else "initial",
- interactive=True)
-
- performance_selection = gr.Radio(label='Performance',
- choices=flags.Performance.values(),
- value=modules.config.default_performance,
- elem_classes=['performance_selection'])
-
- with gr.Accordion(label='Aspect Ratios', open=False, elem_id='aspect_ratios_accordion') as aspect_ratios_accordion:
- aspect_ratios_selection = gr.Radio(label='Aspect Ratios', show_label=False,
- choices=modules.config.available_aspect_ratios_labels,
- value=modules.config.default_aspect_ratio,
- info='width × height',
- elem_classes='aspect_ratios')
-
- aspect_ratios_selection.change(lambda x: None, inputs=aspect_ratios_selection, queue=False, show_progress=False, _js='(x)=>{refresh_aspect_ratios_label(x);}')
- shared.gradio_root.load(lambda x: None, inputs=aspect_ratios_selection, queue=False, show_progress=False, _js='(x)=>{refresh_aspect_ratios_label(x);}')
-
- image_number = gr.Slider(label='Image Number', minimum=1, maximum=modules.config.default_max_image_number, step=1, value=modules.config.default_image_number)
-
- output_format = gr.Radio(label='Output Format',
- choices=flags.OutputFormat.list(),
- value=modules.config.default_output_format)
-
- negative_prompt = gr.Textbox(label='Negative Prompt', show_label=True, placeholder="Type prompt here.",
- info='Describing what you do not want to see.', lines=2,
- elem_id='negative_prompt',
- value=modules.config.default_prompt_negative)
- seed_random = gr.Checkbox(label='Random', value=True)
- image_seed = gr.Textbox(label='Seed', value=0, max_lines=1, visible=False) # workaround for https://github.com/gradio-app/gradio/issues/5354
-
- def random_checked(r):
- return gr.update(visible=not r)
-
- def refresh_seed(r, seed_string):
- if r:
- return random.randint(constants.MIN_SEED, constants.MAX_SEED)
- else:
- try:
- seed_value = int(seed_string)
- if constants.MIN_SEED <= seed_value <= constants.MAX_SEED:
- return seed_value
- except ValueError:
- pass
- return random.randint(constants.MIN_SEED, constants.MAX_SEED)
-
- seed_random.change(random_checked, inputs=[seed_random], outputs=[image_seed],
- queue=False, show_progress=False)
-
- def update_history_link():
- if args_manager.args.disable_image_log:
- return gr.update(value='')
-
- return gr.update(value=f'\U0001F4DA History Log')
-
- history_link = gr.HTML()
- shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False)
-
- with gr.Tab(label='Styles', elem_classes=['style_selections_tab']):
- style_sorter.try_load_sorted_styles(
- style_names=legal_style_names,
- default_selected=modules.config.default_styles)
-
- style_search_bar = gr.Textbox(show_label=False, container=False,
- placeholder="\U0001F50E Type here to search styles ...",
- value="",
- label='Search Styles')
- style_selections = gr.CheckboxGroup(show_label=False, container=False,
- choices=copy.deepcopy(style_sorter.all_styles),
- value=copy.deepcopy(modules.config.default_styles),
- label='Selected Styles',
- elem_classes=['style_selections'])
- gradio_receiver_style_selections = gr.Textbox(elem_id='gradio_receiver_style_selections', visible=False)
-
- shared.gradio_root.load(lambda: gr.update(choices=copy.deepcopy(style_sorter.all_styles)),
- outputs=style_selections)
-
- style_search_bar.change(style_sorter.search_styles,
- inputs=[style_selections, style_search_bar],
- outputs=style_selections,
- queue=False,
- show_progress=False).then(
- lambda: None, _js='()=>{refresh_style_localization();}')
-
- gradio_receiver_style_selections.input(style_sorter.sort_styles,
- inputs=style_selections,
- outputs=style_selections,
- queue=False,
- show_progress=False).then(
- lambda: None, _js='()=>{refresh_style_localization();}')
-
- with gr.Tab(label='Models'):
- with gr.Group():
- with gr.Row():
- base_model = gr.Dropdown(label='Base Model (SDXL only)', choices=modules.config.model_filenames, value=modules.config.default_base_model_name, show_label=True)
- refiner_model = gr.Dropdown(label='Refiner (SDXL or SD 1.5)', choices=['None'] + modules.config.model_filenames, value=modules.config.default_refiner_model_name, show_label=True)
-
- refiner_switch = gr.Slider(label='Refiner Switch At', minimum=0.1, maximum=1.0, step=0.0001,
- info='Use 0.4 for SD1.5 realistic models; '
- 'or 0.667 for SD1.5 anime models; '
- 'or 0.8 for XL-refiners; '
- 'or any value for switching two SDXL models.',
- value=modules.config.default_refiner_switch,
- visible=modules.config.default_refiner_model_name != 'None')
-
- refiner_model.change(lambda x: gr.update(visible=x != 'None'),
- inputs=refiner_model, outputs=refiner_switch, show_progress=False, queue=False)
-
- with gr.Group():
- lora_ctrls = []
-
- for i, (enabled, filename, weight) in enumerate(modules.config.default_loras):
- with gr.Row():
- lora_enabled = gr.Checkbox(label='Enable', value=enabled,
- elem_classes=['lora_enable', 'min_check'], scale=1)
- lora_model = gr.Dropdown(label=f'LoRA {i + 1}',
- choices=['None'] + modules.config.lora_filenames, value=filename,
- elem_classes='lora_model', scale=5)
- lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight,
- maximum=modules.config.default_loras_max_weight, step=0.01, value=weight,
- elem_classes='lora_weight', scale=5)
- lora_ctrls += [lora_enabled, lora_model, lora_weight]
-
- with gr.Row():
- refresh_files = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button')
- with gr.Tab(label='Advanced'):
- guidance_scale = gr.Slider(label='Guidance Scale', minimum=1.0, maximum=30.0, step=0.01,
- value=modules.config.default_cfg_scale,
- info='Higher value means style is cleaner, vivider, and more artistic.')
- sharpness = gr.Slider(label='Image Sharpness', minimum=0.0, maximum=30.0, step=0.001,
- value=modules.config.default_sample_sharpness,
- info='Higher value means image and texture are sharper.')
- gr.HTML('\U0001F4D4 Documentation')
- dev_mode = gr.Checkbox(label='Developer Debug Mode', value=modules.config.default_developer_debug_mode_checkbox, container=False)
-
- with gr.Column(visible=modules.config.default_developer_debug_mode_checkbox) as dev_tools:
- with gr.Tab(label='Debug Tools'):
- adm_scaler_positive = gr.Slider(label='Positive ADM Guidance Scaler', minimum=0.1, maximum=3.0,
- step=0.001, value=1.5, info='The scaler multiplied to positive ADM (use 1.0 to disable). ')
- adm_scaler_negative = gr.Slider(label='Negative ADM Guidance Scaler', minimum=0.1, maximum=3.0,
- step=0.001, value=0.8, info='The scaler multiplied to negative ADM (use 1.0 to disable). ')
- adm_scaler_end = gr.Slider(label='ADM Guidance End At Step', minimum=0.0, maximum=1.0,
- step=0.001, value=0.3,
- info='When to end the guidance from positive/negative ADM. ')
-
- refiner_swap_method = gr.Dropdown(label='Refiner swap method', value=flags.refiner_swap_method,
- choices=['joint', 'separate', 'vae'])
-
- adaptive_cfg = gr.Slider(label='CFG Mimicking from TSNR', minimum=1.0, maximum=30.0, step=0.01,
- value=modules.config.default_cfg_tsnr,
- info='Enabling Fooocus\'s implementation of CFG mimicking for TSNR '
- '(effective when real CFG > mimicked CFG).')
- clip_skip = gr.Slider(label='CLIP Skip', minimum=1, maximum=flags.clip_skip_max, step=1,
- value=modules.config.default_clip_skip,
- info='Bypass CLIP layers to avoid overfitting (use 1 to not skip any layers, 2 is recommended).')
- sampler_name = gr.Dropdown(label='Sampler', choices=flags.sampler_list,
- value=modules.config.default_sampler)
- scheduler_name = gr.Dropdown(label='Scheduler', choices=flags.scheduler_list,
- value=modules.config.default_scheduler)
- vae_name = gr.Dropdown(label='VAE', choices=[modules.flags.default_vae] + modules.config.vae_filenames,
- value=modules.config.default_vae, show_label=True)
-
- generate_image_grid = gr.Checkbox(label='Generate Image Grid for Each Batch',
- info='(Experimental) This may cause performance problems on some computers and certain internet conditions.',
- value=False)
-
- overwrite_step = gr.Slider(label='Forced Overwrite of Sampling Step',
- minimum=-1, maximum=200, step=1,
- value=modules.config.default_overwrite_step,
- info='Set as -1 to disable. For developer debugging.')
- overwrite_switch = gr.Slider(label='Forced Overwrite of Refiner Switch Step',
- minimum=-1, maximum=200, step=1,
- value=modules.config.default_overwrite_switch,
- info='Set as -1 to disable. For developer debugging.')
- overwrite_width = gr.Slider(label='Forced Overwrite of Generating Width',
- minimum=-1, maximum=2048, step=1, value=-1,
- info='Set as -1 to disable. For developer debugging. '
- 'Results will be worse for non-standard numbers that SDXL is not trained on.')
- overwrite_height = gr.Slider(label='Forced Overwrite of Generating Height',
- minimum=-1, maximum=2048, step=1, value=-1,
- info='Set as -1 to disable. For developer debugging. '
- 'Results will be worse for non-standard numbers that SDXL is not trained on.')
- overwrite_vary_strength = gr.Slider(label='Forced Overwrite of Denoising Strength of "Vary"',
- minimum=-1, maximum=1.0, step=0.001, value=-1,
- info='Set as negative number to disable. For developer debugging.')
- overwrite_upscale_strength = gr.Slider(label='Forced Overwrite of Denoising Strength of "Upscale"',
- minimum=-1, maximum=1.0, step=0.001,
- value=modules.config.default_overwrite_upscale,
- info='Set as negative number to disable. For developer debugging.')
-
- disable_preview = gr.Checkbox(label='Disable Preview', value=modules.config.default_black_out_nsfw,
- interactive=not modules.config.default_black_out_nsfw,
- info='Disable preview during generation.')
- disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results',
- value=flags.Performance.has_restricted_features(modules.config.default_performance),
- info='Disable intermediate results during generation, only show final gallery.')
-
- disable_seed_increment = gr.Checkbox(label='Disable seed increment',
- info='Disable automatic seed increment when image number is > 1.',
- value=False)
- read_wildcards_in_order = gr.Checkbox(label="Read wildcards in order", value=False)
-
- black_out_nsfw = gr.Checkbox(label='Black Out NSFW', value=modules.config.default_black_out_nsfw,
- interactive=not modules.config.default_black_out_nsfw,
- info='Use black image if NSFW is detected.')
-
- black_out_nsfw.change(lambda x: gr.update(value=x, interactive=not x),
- inputs=black_out_nsfw, outputs=disable_preview, queue=False,
- show_progress=False)
-
- if not args_manager.args.disable_image_log:
- save_final_enhanced_image_only = gr.Checkbox(label='Save only final enhanced image',
- value=modules.config.default_save_only_final_enhanced_image)
-
- if not args_manager.args.disable_metadata:
- save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images,
- info='Adds parameters to generated images allowing manual regeneration.')
- metadata_scheme = gr.Radio(label='Metadata Scheme', choices=flags.metadata_scheme, value=modules.config.default_metadata_scheme,
- info='Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.',
- visible=modules.config.default_save_metadata_to_images)
-
- save_metadata_to_images.change(lambda x: gr.update(visible=x), inputs=[save_metadata_to_images], outputs=[metadata_scheme],
- queue=False, show_progress=False)
-
- with gr.Tab(label='Control'):
- debugging_cn_preprocessor = gr.Checkbox(label='Debug Preprocessors', value=False,
- info='See the results from preprocessors.')
- skipping_cn_preprocessor = gr.Checkbox(label='Skip Preprocessors', value=False,
- info='Do not preprocess images. (Inputs are already canny/depth/cropped-face/etc.)')
-
- mixing_image_prompt_and_vary_upscale = gr.Checkbox(label='Mixing Image Prompt and Vary/Upscale',
- value=False)
- mixing_image_prompt_and_inpaint = gr.Checkbox(label='Mixing Image Prompt and Inpaint',
- value=False)
-
- controlnet_softness = gr.Slider(label='Softness of ControlNet', minimum=0.0, maximum=1.0,
- step=0.001, value=0.25,
- info='Similar to the Control Mode in A1111 (use 0.0 to disable). ')
-
- with gr.Tab(label='Canny'):
- canny_low_threshold = gr.Slider(label='Canny Low Threshold', minimum=1, maximum=255,
- step=1, value=64)
- canny_high_threshold = gr.Slider(label='Canny High Threshold', minimum=1, maximum=255,
- step=1, value=128)
-
- with gr.Tab(label='Inpaint'):
- debugging_inpaint_preprocessor = gr.Checkbox(label='Debug Inpaint Preprocessing', value=False)
- debugging_enhance_masks_checkbox = gr.Checkbox(label='Debug Enhance Masks', value=False,
- info='Show enhance masks in preview and final results')
- debugging_dino = gr.Checkbox(label='Debug GroundingDINO', value=False,
- info='Use GroundingDINO boxes instead of more detailed SAM masks')
- inpaint_disable_initial_latent = gr.Checkbox(label='Disable initial latent in inpaint', value=False)
- inpaint_engine = gr.Dropdown(label='Inpaint Engine',
- value=modules.config.default_inpaint_engine_version,
- choices=flags.inpaint_engine_versions,
- info='Version of Fooocus inpaint model. If set, use performance Quality or Speed (no performance LoRAs) for best results.')
- inpaint_strength = gr.Slider(label='Inpaint Denoising Strength',
- minimum=0.0, maximum=1.0, step=0.001, value=1.0,
- info='Same as the denoising strength in A1111 inpaint. '
- 'Only used in inpaint, not used in outpaint. '
- '(Outpaint always use 1.0)')
- inpaint_respective_field = gr.Slider(label='Inpaint Respective Field',
- minimum=0.0, maximum=1.0, step=0.001, value=0.618,
- info='The area to inpaint. '
- 'Value 0 is same as "Only Masked" in A1111. '
- 'Value 1 is same as "Whole Image" in A1111. '
- 'Only used in inpaint, not used in outpaint. '
- '(Outpaint always use 1.0)')
- inpaint_erode_or_dilate = gr.Slider(label='Mask Erode or Dilate',
- minimum=-64, maximum=64, step=1, value=0,
- info='Positive value will make white area in the mask larger, '
- 'negative value will make white area smaller. '
- '(default is 0, always processed before any mask invert)')
- dino_erode_or_dilate = gr.Slider(label='GroundingDINO Box Erode or Dilate',
- minimum=-64, maximum=64, step=1, value=0,
- info='Positive value will make white area in the mask larger, '
- 'negative value will make white area smaller. '
- '(default is 0, processed before SAM)')
-
- inpaint_mask_color = gr.ColorPicker(label='Inpaint brush color', value='#FFFFFF', elem_id='inpaint_brush_color')
-
- inpaint_ctrls = [debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine,
- inpaint_strength, inpaint_respective_field,
- inpaint_advanced_masking_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate]
-
- inpaint_advanced_masking_checkbox.change(lambda x: [gr.update(visible=x)] * 2,
- inputs=inpaint_advanced_masking_checkbox,
- outputs=[inpaint_mask_image, inpaint_mask_generation_col],
- queue=False, show_progress=False)
-
- inpaint_mask_color.change(lambda x: gr.update(brush_color=x), inputs=inpaint_mask_color,
- outputs=inpaint_input_image,
- queue=False, show_progress=False)
-
- with gr.Tab(label='FreeU'):
- freeu_enabled = gr.Checkbox(label='Enabled', value=False)
- freeu_b1 = gr.Slider(label='B1', minimum=0, maximum=2, step=0.01, value=1.01)
- freeu_b2 = gr.Slider(label='B2', minimum=0, maximum=2, step=0.01, value=1.02)
- freeu_s1 = gr.Slider(label='S1', minimum=0, maximum=4, step=0.01, value=0.99)
- freeu_s2 = gr.Slider(label='S2', minimum=0, maximum=4, step=0.01, value=0.95)
- freeu_ctrls = [freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2]
-
- def dev_mode_checked(r):
- return gr.update(visible=r)
-
- dev_mode.change(dev_mode_checked, inputs=[dev_mode], outputs=[dev_tools],
- queue=False, show_progress=False)
-
- def refresh_files_clicked():
- modules.config.update_files()
- results = [gr.update(choices=modules.config.model_filenames)]
- results += [gr.update(choices=['None'] + modules.config.model_filenames)]
- results += [gr.update(choices=[flags.default_vae] + modules.config.vae_filenames)]
- if not args_manager.args.disable_preset_selection:
- results += [gr.update(choices=modules.config.available_presets)]
- for i in range(modules.config.default_max_lora_number):
- results += [gr.update(interactive=True),
- gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
- return results
-
- refresh_files_output = [base_model, refiner_model, vae_name]
- if not args_manager.args.disable_preset_selection:
- refresh_files_output += [preset_selection]
- refresh_files.click(refresh_files_clicked, [], refresh_files_output + lora_ctrls,
- queue=False, show_progress=False)
-
- state_is_generating = gr.State(False)
-
- load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections,
- performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection,
- overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive,
- adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, clip_skip,
- base_model, refiner_model, refiner_switch, sampler_name, scheduler_name, vae_name,
- seed_random, image_seed, inpaint_engine, inpaint_engine_state,
- inpaint_mode] + enhance_inpaint_mode_ctrls + [generate_button,
- load_parameter_button] + freeu_ctrls + lora_ctrls
-
- if not args_manager.args.disable_preset_selection:
- def preset_selection_change(preset, is_generating, inpaint_mode):
- preset_content = modules.config.try_get_preset_content(preset) if preset != 'initial' else {}
- preset_prepared = modules.meta_parser.parse_meta_from_preset(preset_content)
-
- default_model = preset_prepared.get('base_model')
- previous_default_models = preset_prepared.get('previous_default_models', [])
- checkpoint_downloads = preset_prepared.get('checkpoint_downloads', {})
- embeddings_downloads = preset_prepared.get('embeddings_downloads', {})
- lora_downloads = preset_prepared.get('lora_downloads', {})
- vae_downloads = preset_prepared.get('vae_downloads', {})
-
- preset_prepared['base_model'], preset_prepared['checkpoint_downloads'] = launch.download_models(
- default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads,
- vae_downloads)
-
- if 'prompt' in preset_prepared and preset_prepared.get('prompt') == '':
- del preset_prepared['prompt']
-
- return modules.meta_parser.load_parameter_button_click(json.dumps(preset_prepared), is_generating, inpaint_mode)
-
-
- def inpaint_engine_state_change(inpaint_engine_version, *args):
- if inpaint_engine_version == 'empty':
- inpaint_engine_version = modules.config.default_inpaint_engine_version
-
- result = []
- for inpaint_mode in args:
- if inpaint_mode != modules.flags.inpaint_option_detail:
- result.append(gr.update(value=inpaint_engine_version))
- else:
- result.append(gr.update())
-
- return result
-
- preset_selection.change(preset_selection_change, inputs=[preset_selection, state_is_generating, inpaint_mode], outputs=load_data_outputs, queue=False, show_progress=True) \
- .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
- .then(lambda: None, _js='()=>{refresh_style_localization();}') \
- .then(inpaint_engine_state_change, inputs=[inpaint_engine_state] + enhance_inpaint_mode_ctrls, outputs=enhance_inpaint_engine_ctrls, queue=False, show_progress=False)
-
- performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 +
- [gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 +
- [gr.update(value=flags.Performance.has_restricted_features(x))] * 1,
- inputs=performance_selection,
- outputs=[
- guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive,
- adm_scaler_negative, refiner_switch, refiner_model, sampler_name,
- scheduler_name, adaptive_cfg, refiner_swap_method, negative_prompt, disable_intermediate_results
- ], queue=False, show_progress=False)
-
- output_format.input(lambda x: gr.update(output_format=x), inputs=output_format)
-
- advanced_checkbox.change(lambda x: gr.update(visible=x), advanced_checkbox, advanced_column,
- queue=False, show_progress=False) \
- .then(fn=lambda: None, _js='refresh_grid_delayed', queue=False, show_progress=False)
-
- inpaint_mode.change(inpaint_mode_change, inputs=[inpaint_mode, inpaint_engine_state], outputs=[
- inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts,
- inpaint_disable_initial_latent, inpaint_engine,
- inpaint_strength, inpaint_respective_field
- ], show_progress=False, queue=False)
-
- # load configured default_inpaint_method
- default_inpaint_ctrls = [inpaint_mode, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field]
- for mode, disable_initial_latent, engine, strength, respective_field in [default_inpaint_ctrls] + enhance_inpaint_update_ctrls:
- shared.gradio_root.load(inpaint_mode_change, inputs=[mode, inpaint_engine_state], outputs=[
- inpaint_additional_prompt, outpaint_selections, example_inpaint_prompts, disable_initial_latent,
- engine, strength, respective_field
- ], show_progress=False, queue=False)
-
- generate_mask_button.click(fn=generate_mask,
- inputs=[inpaint_input_image, inpaint_mask_model, inpaint_mask_cloth_category,
- inpaint_mask_dino_prompt_text, inpaint_mask_sam_model,
- inpaint_mask_box_threshold, inpaint_mask_text_threshold,
- inpaint_mask_sam_max_detections, dino_erode_or_dilate, debugging_dino],
- outputs=inpaint_mask_image, show_progress=True, queue=True)
-
- ctrls = [currentTask, generate_image_grid]
- ctrls += [
- prompt, negative_prompt, style_selections,
- performance_selection, aspect_ratios_selection, image_number, output_format, image_seed,
- read_wildcards_in_order, sharpness, guidance_scale
- ]
-
- ctrls += [base_model, refiner_model, refiner_switch] + lora_ctrls
- ctrls += [input_image_checkbox, current_tab]
- ctrls += [uov_method, uov_input_image]
- ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt, inpaint_mask_image]
- ctrls += [disable_preview, disable_intermediate_results, disable_seed_increment, black_out_nsfw]
- ctrls += [adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, clip_skip]
- ctrls += [sampler_name, scheduler_name, vae_name]
- ctrls += [overwrite_step, overwrite_switch, overwrite_width, overwrite_height, overwrite_vary_strength]
- ctrls += [overwrite_upscale_strength, mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint]
- ctrls += [debugging_cn_preprocessor, skipping_cn_preprocessor, canny_low_threshold, canny_high_threshold]
- ctrls += [refiner_swap_method, controlnet_softness]
- ctrls += freeu_ctrls
- ctrls += inpaint_ctrls
-
- if not args_manager.args.disable_image_log:
- ctrls += [save_final_enhanced_image_only]
-
- if not args_manager.args.disable_metadata:
- ctrls += [save_metadata_to_images, metadata_scheme]
-
- ctrls += ip_ctrls
- ctrls += [debugging_dino, dino_erode_or_dilate, debugging_enhance_masks_checkbox,
- enhance_input_image, enhance_checkbox, enhance_uov_method, enhance_uov_processing_order,
- enhance_uov_prompt_type]
- ctrls += enhance_ctrls
-
- def parse_meta(raw_prompt_txt, is_generating):
- loaded_json = None
- if is_json(raw_prompt_txt):
- loaded_json = json.loads(raw_prompt_txt)
-
- if loaded_json is None:
- if is_generating:
- return gr.update(), gr.update(), gr.update()
- else:
- return gr.update(), gr.update(visible=True), gr.update(visible=False)
-
- return json.dumps(loaded_json), gr.update(visible=False), gr.update(visible=True)
-
- prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False)
-
- load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating, inpaint_mode], outputs=load_data_outputs, queue=False, show_progress=False)
-
- def trigger_metadata_import(file, state_is_generating):
- parameters, metadata_scheme = modules.meta_parser.read_info_from_image(file)
- if parameters is None:
- print('Could not find metadata in the image!')
- parsed_parameters = {}
- else:
- metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
- parsed_parameters = metadata_parser.to_json(parameters)
-
- return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating, inpaint_mode)
-
- metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \
- .then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False)
-
- generate_button.click(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), [], True),
- outputs=[stop_button, skip_button, generate_button, gallery, state_is_generating]) \
- .then(fn=refresh_seed, inputs=[seed_random, image_seed], outputs=image_seed) \
- .then(fn=get_task, inputs=ctrls, outputs=currentTask) \
- .then(fn=generate_clicked, inputs=currentTask, outputs=[progress_html, progress_window, progress_gallery, gallery]) \
- .then(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), gr.update(visible=False, interactive=False), False),
- outputs=[generate_button, stop_button, skip_button, state_is_generating]) \
- .then(fn=update_history_link, outputs=history_link) \
- .then(fn=lambda: None, _js='playNotification').then(fn=lambda: None, _js='refresh_grid_delayed')
-
- reset_button.click(lambda: [worker.AsyncTask(args=[]), False, gr.update(visible=True, interactive=True)] +
- [gr.update(visible=False)] * 6 +
- [gr.update(visible=True, value=[])],
- outputs=[currentTask, state_is_generating, generate_button,
- reset_button, stop_button, skip_button,
- progress_html, progress_window, progress_gallery, gallery],
- queue=False)
-
- for notification_file in ['notification.ogg', 'notification.mp3']:
- if os.path.exists(notification_file):
- gr.Audio(interactive=False, value=notification_file, elem_id='audio_notification', visible=False)
- break
-
- def trigger_describe(modes, img, apply_styles):
- describe_prompts = []
- styles = set()
-
- if flags.describe_type_photo in modes:
- from extras.interrogate import default_interrogator as default_interrogator_photo
- describe_prompts.append(default_interrogator_photo(img))
- styles.update(["Fooocus V2", "Fooocus Enhance", "Fooocus Sharp"])
-
- if flags.describe_type_anime in modes:
- from extras.wd14tagger import default_interrogator as default_interrogator_anime
- describe_prompts.append(default_interrogator_anime(img))
- styles.update(["Fooocus V2", "Fooocus Masterpiece"])
-
- if len(styles) == 0 or not apply_styles:
- styles = gr.update()
- else:
- styles = list(styles)
-
- if len(describe_prompts) == 0:
- describe_prompt = gr.update()
- else:
- describe_prompt = ', '.join(describe_prompts)
-
- return describe_prompt, styles
-
- describe_btn.click(trigger_describe, inputs=[describe_methods, describe_input_image, describe_apply_styles],
- outputs=[prompt, style_selections], show_progress=True, queue=True) \
- .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
- .then(lambda: None, _js='()=>{refresh_style_localization();}')
-
- if args_manager.args.enable_auto_describe_image:
- def trigger_auto_describe(mode, img, prompt, apply_styles):
- # keep prompt if not empty
- if prompt == '':
- return trigger_describe(mode, img, apply_styles)
- return gr.update(), gr.update()
-
- uov_input_image.upload(trigger_auto_describe, inputs=[describe_methods, uov_input_image, prompt, describe_apply_styles],
- outputs=[prompt, style_selections], show_progress=True, queue=True) \
- .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
- .then(lambda: None, _js='()=>{refresh_style_localization();}')
-
- enhance_input_image.upload(lambda: gr.update(value=True), outputs=enhance_checkbox, queue=False, show_progress=False) \
- .then(trigger_auto_describe, inputs=[describe_methods, enhance_input_image, prompt, describe_apply_styles],
- outputs=[prompt, style_selections], show_progress=True, queue=True) \
- .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \
- .then(lambda: None, _js='()=>{refresh_style_localization();}')
-
-def dump_default_english_config():
- from modules.localization import dump_english_config
- dump_english_config(grh.all_components)
-
-
-# dump_default_english_config()
-
-shared.gradio_root.launch(
- inbrowser=args_manager.args.in_browser,
- server_name=args_manager.args.listen,
- server_port=args_manager.args.port,
- share=args_manager.args.share,
- auth=check_auth if (args_manager.args.share or args_manager.args.listen) and auth_enabled else None,
- allowed_paths=[modules.config.path_outputs],
- blocked_paths=[constants.AUTH_FILENAME]
-)