llama : clarify nemotron-h.cpp comment about RoPE [no ci] (#18997)

This commit removes the mention of RoPE from the comment for the Q and K
computation, as RoPE is not applied there.
This commit is contained in:
Daniel Bevenius 2026-01-21 18:31:34 +01:00 committed by GitHub
parent bd544c94a3
commit 9da3dcd753
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
1 changed file with 1 addition and 1 deletion

View File

@ -67,7 +67,7 @@ ggml_tensor * llm_build_nemotron_h::build_attention_layer(ggml_tensor *
const llama_model & model,
const int64_t n_embd_head,
const int il) {
// compute Q and K and (optionally) RoPE them
// compute Q and K
ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
cb(Qcur, "Qcur", il);
if (model.layers[il].bq) {