From f8ca04a4061a0dabb420c5c271cfd115f88169cd Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 19 Feb 2024 15:22:10 +0100
Subject: [PATCH] feat: add early return for prompt expansion when no new
 tokens should be added

closes https://github.com/lllyasviel/Fooocus/issues/2278, also removes comma
at the end added before tokenizer
---
 extras/expansion.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/extras/expansion.py b/extras/expansion.py
index c1b59b8a..34c1ee8d 100644
--- a/extras/expansion.py
+++ b/extras/expansion.py
@@ -112,6 +112,9 @@ class FooocusExpansion:
         max_token_length = 75 * int(math.ceil(float(current_token_length) / 75.0))
         max_new_tokens = max_token_length - current_token_length
 
+        if max_new_tokens == 0:
+            return prompt[:-1]
+
         # https://huggingface.co/blog/introducing-csearch
         # https://huggingface.co/docs/transformers/generation_strategies
         features = self.model.generate(**tokenized_kwargs,
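
Note (not part of the patch): the early return covers the case where the tokenized prompt
already ends exactly on a 75-token chunk boundary, so there is no room left for the GPT-2
expansion model to add tokens. In that case generation is skipped and the single trailing
comma that was appended before tokenizing is stripped again. Below is a minimal, standalone
sketch of that arithmetic, assuming the names expand_or_return and current_token_length as
illustrative placeholders rather than the actual FooocusExpansion.__call__ signature:

    import math

    def expand_or_return(prompt: str, current_token_length: int) -> str:
        """Illustrative sketch of the early-return logic in this patch.

        Assumes `prompt` already has a single ',' appended before tokenizing
        (as described in the commit message) and `current_token_length` is
        the tokenized length of that string.
        """
        # Round the current length up to the next multiple of 75 (the chunk size
        # used by the expansion step) and compute how many tokens may be added.
        max_token_length = 75 * int(math.ceil(float(current_token_length) / 75.0))
        max_new_tokens = max_token_length - current_token_length

        if max_new_tokens == 0:
            # Prompt already fills a chunk exactly: skip generation and drop
            # the trailing comma that was appended before tokenizing.
            return prompt[:-1]

        # ...otherwise the expansion model would generate up to `max_new_tokens`
        # additional tokens here (omitted in this sketch).
        return prompt

    print(expand_or_return("a photo of a cat,", 75))  # boundary case: "a photo of a cat"
    print(expand_or_return("a photo of a cat,", 10))  # would proceed to expansion

The boundary case is what issue #2278 reports: without the early return, generation was still
attempted with max_new_tokens == 0 and the appended comma remained in the output.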