From a16b451fd76c6055b8393a861d8d95193e21d776 Mon Sep 17 00:00:00 2001
From: lllyasviel
Date: Thu, 26 Oct 2023 09:51:10 -0700
Subject: [PATCH] adjust context length (#795)

* adjust context length

* Update sdxl_styles_fooocus.json
---
 fooocus_version.py   | 2 +-
 modules/expansion.py | 6 ++++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/fooocus_version.py b/fooocus_version.py
index 199193ac..023bd3a1 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.1.747'
+version = '2.1.748'
diff --git a/modules/expansion.py b/modules/expansion.py
index 5a2fbaf0..a5ea1aa2 100644
--- a/modules/expansion.py
+++ b/modules/expansion.py
@@ -1,4 +1,5 @@
 import torch
+import math

 import fcbh.model_management as model_management
 from transformers.generation.logits_process import LogitsProcessorList
@@ -91,7 +92,8 @@ class FooocusExpansion:
         tokenized_kwargs.data['attention_mask'] = tokenized_kwargs.data['attention_mask'].to(self.patcher.load_device)

         current_token_length = int(tokenized_kwargs.data['input_ids'].shape[1])
-        max_token_length = 77 + 77 * int(float(current_token_length) / 77.0)
+        max_token_length = 75 * int(math.ceil(float(current_token_length) / 75.0))
+        max_new_tokens = max_token_length - current_token_length

         logits_processor = LogitsProcessorList([self.logits_processor])

@@ -99,7 +101,7 @@ class FooocusExpansion:
         # https://huggingface.co/docs/transformers/generation_strategies
         features = self.model.generate(**tokenized_kwargs,
                                        num_beams=1,
-                                       max_new_tokens=max_token_length - current_token_length,
+                                       max_new_tokens=max_new_tokens,
                                        do_sample=True,
                                        logits_processor=logits_processor)
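
Note on the change (an explanatory sketch, not part of the patch): the old
expression floored the prompt's token count to a multiple of 77 and then added
one 77-token chunk, so a prompt sitting just under a chunk boundary received
almost no expansion budget. The new expression instead rounds the count up to
the next multiple of 75, presumably CLIP's 77-token context minus the BOS/EOS
specials; that reading of the constant is an inference, not stated in the
patch. A minimal comparison of the two formulas:

    import math

    def old_budget(current_token_length: int) -> int:
        # Pre-patch: floor to a multiple of 77, then add one 77-token chunk.
        max_token_length = 77 + 77 * int(float(current_token_length) / 77.0)
        return max_token_length - current_token_length

    def new_budget(current_token_length: int) -> int:
        # Post-patch: round up to the next multiple of 75.
        max_token_length = 75 * int(math.ceil(float(current_token_length) / 75.0))
        return max_token_length - current_token_length

    for n in (20, 75, 76, 150):
        print(n, old_budget(n), new_budget(n))
    # 20  -> 57 55
    # 75  ->  2  0
    # 76  ->  1 74
    # 150 ->  4  0

One arithmetic consequence: when the prompt length is already an exact
multiple of 75, max_new_tokens comes out to 0, so generate() is given no
budget for additional tokens.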