graph : fix nkvo offload with FA (#19105)

Georgi Gerganov 2026-01-26 20:18:34 +02:00 committed by GitHub
parent 142cbe2ac6
commit 8f80d1b254
2 changed files with 5 additions and 7 deletions

src/llama-context.cpp

@@ -2173,13 +2173,6 @@ llm_graph_cb llama_context::graph_get_cb() const {
             ggml_set_name(cur, name);
         }
 
-        if (!cparams.offload_kqv) {
-            if (strcmp(name, "kqv_merged_cont") == 0) {
-                // all nodes between the KV store and the attention output are run on the CPU
-                ggml_backend_sched_set_tensor_backend(sched.get(), cur, backend_cpu);
-            }
-        }
-
         // norm may be automatically assigned to the backend of the previous layer, increasing data transfer between backends
         // FIXME: fix in ggml_backend_sched
         const bool full_offload = model.n_gpu_layers() > model.hparams.n_layer;
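Note: with flash attention enabled, the attention output is the fused ggml_flash_attn_ext node, which is named via cb(cur, LLAMA_TENSOR_NAME_FATTN, il) rather than "kqv_merged_cont", so this name-based check never matched on the FA path and the no-KV-offload (-nkvo) CPU pin was never applied there. The pin moves into build_attn_mha below, at the point where the FA node is created.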

src/llama-graph.cpp

@@ -1630,6 +1630,11 @@ ggml_tensor * llm_graph_context::build_attn_mha(
                 hparams.attn_soft_cap ? hparams.f_attn_logit_softcapping : 0.0f);
 
         cb(cur, LLAMA_TENSOR_NAME_FATTN, il);
 
+        if (!cparams.offload_kqv) {
+            // all nodes between the KV store and the attention output are run on the CPU
+            ggml_backend_sched_set_tensor_backend(sched, cur, backend_cpu);
+        }
+
         ggml_flash_attn_ext_add_sinks(cur, sinks);
         ggml_flash_attn_ext_set_prec (cur, GGML_PREC_F32);
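For context, ggml_backend_sched_set_tensor_backend pre-assigns a graph node to a specific backend before the scheduler assigns backends and splits the graph; this is the mechanism that keeps the nodes between the KV store and the attention output on the CPU when KV offload is disabled. Below is a minimal, self-contained sketch of that pinning pattern, not llama.cpp's code: it uses a single CPU backend and a placeholder mul_mat node, and it assumes a recent ggml header layout (the ggml_backend_sched_new signature has changed across ggml versions).

#include "ggml.h"
#include "ggml-backend.h"
#include "ggml-cpu.h"  // ggml_backend_cpu_init; older ggml versions declare it in ggml-backend.h

int main(void) {
    // build a small graph without allocating tensor data - the scheduler allocates it later
    struct ggml_init_params ip = {
        /*.mem_size   =*/ ggml_tensor_overhead()*8 + ggml_graph_overhead(),
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };
    struct ggml_context * ctx = ggml_init(ip);

    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 4);
    struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 4);
    struct ggml_tensor * c = ggml_mul_mat(ctx, a, b);  // stand-in for the attention output node

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, c);

    ggml_backend_t cpu = ggml_backend_cpu_init();
    ggml_backend_t backends[1] = { cpu };

    // NULL bufts -> default buffer type per backend; the trailing bool arguments
    // (parallel, op_offload) follow a recent ggml signature and may differ in older trees
    ggml_backend_sched_t sched = ggml_backend_sched_new(backends, NULL, 1, GGML_DEFAULT_GRAPH_SIZE, false, true);

    // the pattern used by this commit: pin a specific node to a backend *before*
    // the scheduler splits the graph; with a single backend this is a no-op, but
    // with e.g. CPU+GPU it forces this node (and a graph split) onto the CPU
    ggml_backend_sched_set_tensor_backend(sched, c, cpu);

    ggml_backend_sched_alloc_graph(sched, gf);   // inputs are left uninitialized; this only demonstrates scheduling
    ggml_backend_sched_graph_compute(sched, gf);

    ggml_backend_sched_free(sched);
    ggml_backend_free(cpu);
    ggml_free(ctx);
    return 0;
}

The pin must happen between graph construction and ggml_backend_sched_alloc_graph, which is why the commit applies it in build_attn_mha right after the FA node is created, instead of relying on a node-name match in a callback that the FA path never triggers.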