From 97e1de457c71358fd2fc2fe4c99cd7cb490af78b Mon Sep 17 00:00:00 2001
From: Ryan Mangeno <160974989+ryan-mangeno@users.noreply.github.com>
Date: Fri, 10 Oct 2025 11:47:23 -0400
Subject: [PATCH] Update src/llama-model.cpp
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Sigbjørn Skjæret
---
 src/llama-model.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index a657a7cc71..8cb8bbe857 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -19483,7 +19483,7 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params,
         case LLM_ARCH_NOMIC_BERT_MOE:
         case LLM_ARCH_NEO_BERT:
         case LLM_ARCH_WAVTOKENIZER_DEC:
-        //case LLM_ARCH_MODERN_BERT: // TODO: disabled until cachless SWA logic is fixed [TAG_NO_CACHE_ISWA]
+        //case LLM_ARCH_MODERN_BERT: // TODO: disabled until cacheless SWA logic is fixed [TAG_NO_CACHE_ISWA]
         //case LLM_ARCH_GEMMA_EMBEDDING: // TODO: disabled until the cacheless SWA logic is fixed [TAG_NO_CACHE_ISWA]
         case LLM_ARCH_DREAM:
         case LLM_ARCH_LLADA: