memory : fix broken batch splits for recurrent cache

Splits producing more than one ubatch per batch for recurrent models
were broken with #14512: the completeness check ran inside the ubatch
split loop, so it triggered as soon as the first ubatch left any tokens
unused.

This fixes it by moving the completeness check to after the ubatch split loop.
Francis Couture-Harpin 2025-07-07 21:19:12 -04:00
parent e1a7059053
commit 2ff3354c33
1 changed file with 6 additions and 2 deletions


@@ -377,14 +377,18 @@ llama_memory_context_ptr llama_memory_recurrent::init_batch(llama_batch_allocr &
                 ubatch = balloc.split_equal(n_ubatch, false);
             }
 
-            if (balloc.get_n_used() < balloc.get_n_tokens()) {
-                // failed to find a suitable split
+            if (ubatch.n_tokens == 0) {
                 break;
             }
 
             ubatches.push_back(std::move(ubatch)); // NOLINT
         }
 
+        if (balloc.get_n_used() < balloc.get_n_tokens()) {
+            // failed to find a suitable split
+            break;
+        }
+
         if (!prepare(ubatches)) {
             break;
         }
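
For illustration, below is a minimal, self-contained sketch of the control-flow bug. It is not the real llama.cpp code: toy_balloc is a hypothetical stand-in for llama_batch_allocr that hands out token counts instead of llama_ubatch objects, and its split_equal is a simplification that always succeeds; only the get_n_used()/get_n_tokens() accessors mirror names from the diff above. With the completeness check inside the loop, any batch that needs more than one ubatch hits the "failed to find a suitable split" branch right after the first split; with the check after the loop, all ubatches are collected first and the check only rejects genuinely incomplete splits.

// sketch only, assuming a simplified allocator; not the llama.cpp API
#include <algorithm>
#include <cstdio>
#include <vector>

struct toy_balloc {
    int n_tokens;    // total tokens in the batch
    int n_used = 0;  // tokens handed out by splits so far

    int get_n_used()   const { return n_used; }
    int get_n_tokens() const { return n_tokens; }

    // size of the next ubatch, up to n_ubatch tokens; 0 once exhausted
    int split_equal(int n_ubatch) {
        const int n = std::min(n_ubatch, n_tokens - n_used);
        n_used += n;
        return n;
    }
};

int main() {
    const int n_batch  = 10; // needs 3 ubatches: 4 + 4 + 2
    const int n_ubatch = 4;

    // broken order (check inside the loop): after the first split only
    // 4 of 10 tokens are used, so the "failed split" branch fires and
    // no ubatch is ever collected
    {
        toy_balloc balloc{n_batch};
        std::vector<int> ubatches;
        while (true) {
            const int n = balloc.split_equal(n_ubatch);
            if (balloc.get_n_used() < balloc.get_n_tokens()) {
                break; // taken on the very first iteration
            }
            if (n == 0) break; // toy-only guard so the loop terminates
            ubatches.push_back(n);
        }
        printf("check inside loop: %zu ubatches\n", ubatches.size()); // 0
    }

    // fixed order (check after the loop): collect ubatches until the
    // allocator is exhausted, then verify that every token was used
    {
        toy_balloc balloc{n_batch};
        std::vector<int> ubatches;
        while (true) {
            const int n = balloc.split_equal(n_ubatch);
            if (n == 0) break;
            ubatches.push_back(n);
        }
        const bool complete = balloc.get_n_used() == balloc.get_n_tokens();
        printf("check after loop:  %zu ubatches, complete: %d\n",
               ubatches.size(), complete); // 3 ubatches, complete: 1
    }

    return 0;
}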