From 00c361fe53e5fc105a077f90a0a22d4c60936ffe Mon Sep 17 00:00:00 2001
From: "Gilad S." <7817232+giladgd@users.noreply.github.com>
Date: Mon, 1 Dec 2025 22:21:13 +0200
Subject: [PATCH] fix: llama arch implementation (#17665)

---
 src/llama-model.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 584efbf3c8..c46ee3707f 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -626,6 +626,8 @@ void llama_model::load_hparams(llama_model_loader & ml) {
     switch (arch) {
         case LLM_ARCH_LLAMA:
             {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
                 if (hparams.n_expert == 8) {
                     switch (hparams.n_layer) {
                         case 32: type = LLM_TYPE_8x7B; break;
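
Editor's context (not part of the patch): LLM_KV_ATTENTION_LAYERNORM_RMS_EPS resolves, via the arch prefix, to the GGUF metadata key "llama.attention.layer_norm_rms_epsilon", and hparams.f_norm_rms_eps is the epsilon used inside RMSNorm. Below is a minimal standalone C++ sketch of the RMSNorm formula that consumes this epsilon; rms_norm and its plain-vector signature are hypothetical illustrations, not llama.cpp's actual ggml kernel.

    // Sketch: RMSNorm over one row, using the epsilon the patch now loads.
    // Hypothetical helper for illustration; llama.cpp runs this inside ggml.
    #include <cmath>
    #include <cstdio>
    #include <vector>

    static void rms_norm(std::vector<float> & x, const std::vector<float> & w, float eps) {
        double sum_sq = 0.0;                  // accumulate mean of squares in double for stability
        for (float v : x) sum_sq += (double) v * v;
        const float scale = 1.0f / std::sqrt((float)(sum_sq / x.size()) + eps);
        for (size_t i = 0; i < x.size(); ++i) {
            x[i] = x[i] * scale * w[i];       // normalize, then apply the learned per-channel weight
        }
    }

    int main() {
        std::vector<float> x = {1.0f, -2.0f, 3.0f, -4.0f};
        std::vector<float> w(4, 1.0f);
        const float f_norm_rms_eps = 1e-5f;   // the value ml.get_key reads from the model file
        rms_norm(x, w, f_norm_rms_eps);
        for (float v : x) printf("% .6f\n", v);
        return 0;
    }

Reading the epsilon from the model file, rather than assuming a default, matters because Llama-family checkpoints differ here (for example, 1e-6 for the original LLaMA versus 1e-5 for Llama 2).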