remove ssm_o_norm_b
This commit is contained in:
parent f1525b3695
commit 0de4680bdf
@@ -6853,7 +6853,6 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
     // o_norm (reusing SSM_NORM)
     layer.ssm_o_norm = create_tensor(tn(LLM_TENSOR_SSM_NORM, "weight", i), {n_embd_head_k_kda}, 0); // FusedRMSNormGated
-    layer.ssm_o_norm_b = create_tensor(tn(LLM_TENSOR_SSM_NORM, "bias", i), {n_embd_head_k_kda}, TENSOR_NOT_REQUIRED);

     // o_proj
     layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_v_kda * n_head, n_embd}, 0);
@@ -423,7 +423,6 @@ struct llama_layer {
     struct ggml_tensor * ssm_g_a = nullptr;
     struct ggml_tensor * ssm_g_b = nullptr;
     struct ggml_tensor * ssm_o_norm = nullptr;
-    struct ggml_tensor * ssm_o_norm_b = nullptr;

     struct llama_layer_posnet posnet;
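With the bias tensor removed, the output normalization for these layers is carried by the weight-only ssm_o_norm tensor (the FusedRMSNormGated comment suggests a gated RMS norm). Below is a minimal sketch, not the actual llama.cpp graph-build code, of how such a weight-only gated RMS norm could be expressed with plain ggml ops; the function name, the eps parameter, and the choice of silu as the gate activation are assumptions.

// Hypothetical sketch: weight-only gated RMS norm from plain ggml ops.
// Not the real graph code; names and the silu gate are assumptions.
static struct ggml_tensor * sketch_gated_rms_norm(
        struct ggml_context * ctx,
        struct ggml_tensor  * cur,    // per-head output to normalize
        struct ggml_tensor  * gate,   // gating branch
        struct ggml_tensor  * w_norm, // layer.ssm_o_norm (weight only, no bias)
        float                 eps) {
    cur = ggml_rms_norm(ctx, cur, eps);             // normalize; no bias term needed
    cur = ggml_mul(ctx, cur, w_norm);               // scale by the learned norm weight
    cur = ggml_mul(ctx, cur, ggml_silu(ctx, gate)); // apply the gate (assumed silu)
    return cur;
}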