remove ssm_o_norm_b

Author: Yee Man Chan
Date:   2026-01-27 13:19:06 +08:00
parent f1525b3695
commit 0de4680bdf
2 changed files with 0 additions and 2 deletions

src/llama-model.cpp

@@ -6853,7 +6853,6 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 // o_norm (reusing SSM_NORM)
 layer.ssm_o_norm = create_tensor(tn(LLM_TENSOR_SSM_NORM, "weight", i), {n_embd_head_k_kda}, 0); // FusedRMSNormGated
-layer.ssm_o_norm_b = create_tensor(tn(LLM_TENSOR_SSM_NORM, "bias", i), {n_embd_head_k_kda}, TENSOR_NOT_REQUIRED);
 // o_proj
 layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_v_kda * n_head, n_embd}, 0);

src/llama-model.h

@@ -423,7 +423,6 @@ struct llama_layer {
 struct ggml_tensor * ssm_g_a = nullptr;
 struct ggml_tensor * ssm_g_b = nullptr;
 struct ggml_tensor * ssm_o_norm = nullptr;
-struct ggml_tensor * ssm_o_norm_b = nullptr;
 struct llama_layer_posnet posnet;
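
For context: the removed field was loaded with the TENSOR_NOT_REQUIRED flag, so it held nullptr whenever the model file carried no o_norm bias. Below is a minimal, self-contained sketch of that optional-tensor contract as implied by the diff; the types and the create_tensor here are stand-ins, not the actual llama.cpp loader code.

#include <cstdio>
#include <stdexcept>
#include <string>

// Stand-in for ggml's tensor type; illustrative only.
struct ggml_tensor { const char * name; };

enum tensor_flags { TENSOR_NOT_REQUIRED = 1 };

// Assumed contract, inferred from the diff: with TENSOR_NOT_REQUIRED the
// loader tolerates a tensor that is absent from the model file and
// returns nullptr; a missing required tensor fails the load instead.
static ggml_tensor * create_tensor(const char * name, bool present_in_file, int flags) {
    if (present_in_file) {
        static ggml_tensor t;
        t.name = name;
        return &t;
    }
    if (flags & TENSOR_NOT_REQUIRED) {
        return nullptr; // caller must null-check before building the graph
    }
    throw std::runtime_error(std::string("missing required tensor: ") + name);
}

int main() {
    // Mirrors the two loads around the removed line: the o_norm weight is
    // required, while the bias was optional (and its slot is dropped here).
    ggml_tensor * w = create_tensor("ssm_o_norm.weight", /*present=*/true,  0);
    ggml_tensor * b = create_tensor("ssm_o_norm.bias",   /*present=*/false, TENSOR_NOT_REQUIRED);
    std::printf("weight: %s, bias: %s\n", w->name, b ? b->name : "(none)");
    return 0;
}

Since a never-present optional tensor only ever yields a nullptr field, dropping ssm_o_norm_b from the loader and from struct llama_layer should be behaviour-preserving for existing checkpoints, provided no graph-building code still references the field.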