From ae8d710c39886e12422fd564ecd0a278057cbdcd Mon Sep 17 00:00:00 2001
From: Yee Man Chan
Date: Thu, 22 Jan 2026 07:06:17 +0800
Subject: [PATCH] remove SSM_DT_B from constants.py and one comment line in
 llama-model.cpp

---
 gguf-py/gguf/constants.py | 1 -
 src/llama-model.cpp       | 1 -
 2 files changed, 2 deletions(-)

diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index 73e7bae6e1..a51b3d87d1 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -559,7 +559,6 @@ class MODEL_TENSOR(IntEnum):
     SSM_F_A = auto() # Kimi Linear
     SSM_F_B = auto() # Kimi Linear
     SSM_BETA = auto() # Kimi Linear
-    SSM_DT_B = auto() # Kimi Linear
     SSM_G_A = auto() # Kimi Linear
     SSM_G_B = auto() # Kimi Linear
     TIME_MIX_W0 = auto()
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 4ea23dca53..edf7108fd7 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -6917,7 +6917,6 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp_actual, n_embd}, TENSOR_NOT_REQUIRED);
                     layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_shexp_actual}, TENSOR_NOT_REQUIRED);

-                    // exp_probs_b (e_score_correction_bias in vLLM)
                     layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
                     if (!layer.ffn_exp_probs_b) {
                         layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "weight", i), {n_expert}, TENSOR_NOT_REQUIRED);