From bd5de6bab9d144cb6db862eac704e99ae875523e Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 2 Jan 2026 16:00:25 +0200
Subject: [PATCH] cont : fix

---
 src/llama-context.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llama-context.cpp b/src/llama-context.cpp
index 2bce5c3485..e505ef40c2 100644
--- a/src/llama-context.cpp
+++ b/src/llama-context.cpp
@@ -320,7 +320,7 @@ llama_context::llama_context(

     reserve();

-    if (cparams.flash_attn) {
+    if (!cparams.flash_attn) {
         if (ggml_is_quantized(params.type_v)) {
             throw std::runtime_error("quantized V cache was requested, but this requires Flash Attention");
         }
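
A minimal standalone sketch of the logic being fixed, not part of the patch itself: the quantized V cache path requires Flash Attention, so the validation error must fire when flash_attn is disabled, which is the opposite of the original condition. The Params/CParams structs below are hypothetical stand-ins for the fields visible in the diff context.

    // Illustration only; assumes simplified stand-ins for llama.cpp's
    // actual cparams/params structures.
    #include <stdexcept>

    struct CParams { bool flash_attn; };
    struct Params  { bool type_v_is_quantized; };

    void validate_v_cache(const CParams & cparams, const Params & params) {
        // After the fix: reject a quantized V cache only when Flash
        // Attention is OFF, since that path depends on it. Before the
        // fix, the check (wrongly) triggered when it was ON.
        if (!cparams.flash_attn) {
            if (params.type_v_is_quantized) {
                throw std::runtime_error(
                    "quantized V cache was requested, but this requires Flash Attention");
            }
        }
    }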