From 9da3dcd75313b4c492b1b3b0e189ecc70906a195 Mon Sep 17 00:00:00 2001
From: Daniel Bevenius
Date: Wed, 21 Jan 2026 18:31:34 +0100
Subject: [PATCH] llama : clarify nemotron-h.cpp comment about RoPE [no ci]
 (#18997)

This commit removes the mention of RoPE in the comment for the Q and K
computation as RoPE is not applied.
---
 src/models/nemotron-h.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/models/nemotron-h.cpp b/src/models/nemotron-h.cpp
index eb135e63f1..079c730ac2 100644
--- a/src/models/nemotron-h.cpp
+++ b/src/models/nemotron-h.cpp
@@ -67,7 +67,7 @@ ggml_tensor * llm_build_nemotron_h::build_attention_layer(ggml_tensor *
                                                            const llama_model & model,
                                                            const int64_t n_embd_head,
                                                            const int il) {
-    // compute Q and K and (optionally) RoPE them
+    // compute Q and K
     ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
     cb(Qcur, "Qcur", il);
     if (model.layers[il].bq) {