#include "models.h"
|
|
|
|
#include "llama-memory-recurrent.h"
|
|
|
|

llm_build_qwen35::llm_build_qwen35(const llama_model & model, const llm_graph_params & params) :
    llm_build_delta_net_base(params), model(model) {
    const int64_t n_embd_head = hparams.n_embd_head_v();

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k());

    int sections[4];
    std::copy(std::begin(hparams.rope_sections), std::begin(hparams.rope_sections) + 4, sections);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);
    cb(inpL, "model.input_embed", -1);
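
    // Hybrid memory: recurrent (gated delta net) layers read/write a fixed-size
    // state cache, while full-attention layers use a regular KV cache. The hybrid
    // input exposes both; each layer picks one below via hparams.is_recurrent(il).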
    auto * inp = build_inp_mem_hybrid();

    ggml_tensor * inp_pos     = build_inp_pos();
    ggml_tensor * inp_out_ids = build_inp_out_ids();

    // Only process main transformer layers (skip MTP layers appended at the end)
    const int n_transformer_layers = n_layer - hparams.nextn_predict_layers;
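    // Example: with n_layer = 25 and nextn_predict_layers = 1 (hypothetical counts,
    // for illustration), layers 0..23 form the main stack and layer 24 carries the
    // MTP head weights.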

    for (int il = 0; il < n_transformer_layers; ++il) {
        ggml_tensor * inpSA = inpL;

        cur = build_norm(inpL, model.layers[il].attn_norm, nullptr, LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        ggml_build_forward_expand(gf, cur);

        // Determine layer type and build appropriate attention mechanism
        if (hparams.is_recurrent(il)) {
            // Linear attention layer (gated delta net)
            cur = build_layer_attn_linear(inp->get_recr(), cur, il);
        } else {
            // Full attention layer
            cur = build_layer_attn(inp->get_attn(), cur, inp_pos, sections, il);
        }

        // For the last main layer, process BOTH filtered and unfiltered paths:
        // - Unfiltered: saved for MTP head (needs all batch tokens for attention KV cache)
        // - Filtered:   used for main model logits (only output tokens)
        if (il == n_transformer_layers - 1 && inp_out_ids) {
            // First: compute full layer output without filtering (for MTP)
            ggml_tensor * full_residual  = ggml_add(ctx0, cur, inpSA);
            ggml_tensor * full_ffn_res   = full_residual;
            ggml_tensor * full_post_norm = build_norm(full_residual, model.layers[il].attn_post_norm, nullptr, LLM_NORM_RMS, il);
            ggml_tensor * full_ffn       = build_layer_ffn(full_post_norm, il);

            mtp_inp_hidden = ggml_add(ctx0, full_ffn, full_ffn_res);
            mtp_inp_hidden = build_cvec(mtp_inp_hidden, il);
            cb(mtp_inp_hidden, "mtp_inp_hidden", il);

            // Second: filter for main model logits
            cur  = ggml_get_rows(ctx0, mtp_inp_hidden, inp_out_ids);
            inpL = cur;
        } else {
            // Residual connection
            cur = ggml_add(ctx0, cur, inpSA);
            cb(cur, "attn_residual", il);

            ggml_tensor * ffn_residual = cur;

            ggml_tensor * attn_post_norm = build_norm(cur, model.layers[il].attn_post_norm, nullptr, LLM_NORM_RMS, il);
            cb(attn_post_norm, "attn_post_norm", il);

            cur = build_layer_ffn(attn_post_norm, il);
            cb(cur, "ffn_out", il);

            cur = ggml_add(ctx0, cur, ffn_residual);
            cb(cur, "post_ffn", il);

            cur = build_cvec(cur, il);
            cb(cur, "l_out", il);

            inpL = cur;
        }
    }

    cur = inpL;

    // Final norm
    cur = build_norm(cur, model.output_norm, nullptr, LLM_NORM_RMS, -1);
    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // LM head
    cur = build_lora_mm(model.output, cur);
    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);

    // Build MTP head if nextn_predict_layers > 0
    if (hparams.nextn_predict_layers > 0) {
        build_mtp_head(inp, inp_pos, sections);
    }
}

std::pair<ggml_tensor *, ggml_tensor *> llm_build_qwen35::build_qkvz(
        ggml_tensor * input,
        int il) {
    const int64_t n_seqs       = ubatch.n_seqs;
    const int64_t n_seq_tokens = ubatch.n_seq_tokens;

    ggml_tensor * qkv_mixed = build_lora_mm(model.layers[il].wqkv, input, model.layers[il].wqkv_s);
    qkv_mixed = ggml_reshape_3d(ctx0, qkv_mixed, qkv_mixed->ne[0], n_seq_tokens, n_seqs);
    cb(qkv_mixed, "linear_attn_qkv_mixed", il);
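
    // The fused projection packs Q, K and V along dim 0 of qkv_mixed:
    //   [ q: head_k_dim * n_k_heads | k: head_k_dim * n_k_heads | v: head_v_dim * n_v_heads ]
    // (the three parts are sliced apart after the causal conv in build_layer_attn_linear)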

    ggml_tensor * z = build_lora_mm(model.layers[il].wqkv_gate, input, model.layers[il].wqkv_gate_s);
    cb(z, "z", il);

    return { qkv_mixed, z };
}

ggml_tensor * llm_build_qwen35::build_norm_gated(
        ggml_tensor * input,
        ggml_tensor * weights,
        ggml_tensor * gate,
        int layer) {
    ggml_tensor * normalized = build_norm(input, weights, nullptr, LLM_NORM_RMS, layer);
    ggml_tensor * gated_silu = ggml_silu(ctx0, gate);

    return ggml_mul(ctx0, normalized, gated_silu);
}

ggml_tensor * llm_build_qwen35::build_layer_attn(
        llm_graph_input_attn_kv * inp,
        ggml_tensor * cur,
        ggml_tensor * inp_pos,
        int * sections,
        int il) {
    const int64_t n_embd_head = hparams.n_embd_head_v();
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k());

    // Order: joint QG projection, QG split, Q norm, KV projection, K norm, RoPE, attention

    // Qwen3Next uses a single Q projection that outputs query + gate
    ggml_tensor * Qcur_full = build_lora_mm(model.layers[il].wq, cur, model.layers[il].wq_s); // [ (n_embd_head * 2) * n_head, n_tokens ]
    cb(Qcur_full, "Qcur_full", il);
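
    // Qcur_full interleaves query and gate per head along dim 0:
    //   [ q_head0 | gate_head0 | q_head1 | gate_head1 | ... ], 2 * n_embd_head values per head.
    // The strided views below (row stride 2 * n_embd_head, gate at offset n_embd_head)
    // split it without copying.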

    ggml_tensor * Qcur = ggml_view_3d(ctx0, Qcur_full, n_embd_head, n_head, n_tokens,
            ggml_element_size(Qcur_full) * n_embd_head * 2,
            ggml_element_size(Qcur_full) * n_embd_head * 2 * n_head, 0);
    cb(Qcur, "Qcur_reshaped", il);

    // Apply Q normalization
    Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, nullptr, LLM_NORM_RMS, il);
    cb(Qcur, "Qcur_normed", il);

    ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur, model.layers[il].wk_s);
    cb(Kcur, "Kcur", il);

    ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur, model.layers[il].wv_s);
    cb(Vcur, "Vcur", il);

    // Apply K normalization
    Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
    Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, nullptr, LLM_NORM_RMS, il);
    cb(Kcur, "Kcur_normed", il);

    ggml_tensor * gate = ggml_view_3d(ctx0, Qcur_full, n_embd_head, n_head, n_tokens,
            ggml_element_size(Qcur_full) * n_embd_head * 2,
            ggml_element_size(Qcur_full) * n_embd_head * 2 * n_head,
            ggml_element_size(Qcur_full) * n_embd_head);
    gate = ggml_cont_2d(ctx0, gate, n_embd_head * n_head, n_tokens);
    cb(gate, "gate_reshaped", il);

    Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);

    // Apply M-RoPE: the rotary dimensions are partitioned according to `sections`
    // (taken from hparams.rope_sections above)
    Qcur = ggml_rope_multi(
            ctx0, Qcur, inp_pos, nullptr,
            n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale,
            ext_factor, attn_factor, beta_fast, beta_slow
    );

    Kcur = ggml_rope_multi(
            ctx0, Kcur, inp_pos, nullptr,
            n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale,
            ext_factor, attn_factor, beta_fast, beta_slow
    );

    cb(Qcur, "Qcur", il);
    cb(Kcur, "Kcur", il);
    cb(Vcur, "Vcur", il);

    // Attention computation
    const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f / sqrtf(float(n_embd_head)) : hparams.f_attention_scale;

    cur = build_attn(inp,
            nullptr, nullptr,
            Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
    cb(cur, "attn_pregate", il);

    ggml_tensor * gate_sigmoid = ggml_sigmoid(ctx0, gate);
    cb(gate_sigmoid, "gate_sigmoid", il);

    cur = ggml_mul(ctx0, cur, gate_sigmoid);
    cb(cur, "attn_gated", il);

    cur = build_lora_mm(model.layers[il].wo, cur, model.layers[il].wo_s);
    cb(cur, "attn_output", il);

    return cur;
}

ggml_tensor * llm_build_qwen35::build_layer_attn_linear(
        llm_graph_input_rs * inp,
        ggml_tensor * cur,
        int il) {
    const auto * mctx_cur = inp->mctx;

    const int64_t d_inner      = hparams.ssm_d_inner;
    const int64_t n_seqs       = ubatch.n_seqs;
    const int64_t head_k_dim   = hparams.ssm_d_state;
    const int64_t num_k_heads  = hparams.ssm_n_group;
    const int64_t num_v_heads  = hparams.ssm_dt_rank;
    const int64_t head_v_dim   = d_inner / num_v_heads;
    const int64_t n_seq_tokens = ubatch.n_seq_tokens;

    const auto kv_head = mctx_cur->get_head();

    GGML_ASSERT(n_seqs != 0);
    GGML_ASSERT(ubatch.equal_seqs());
    GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs);
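
    // Gated delta net pipeline: fused QKV/Z projections -> short causal conv over the
    // packed QKV stream -> per-head L2 norm of q/k -> delta-rule state update ->
    // gated RMS norm -> output projection.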

    // Input projections
    auto qkvz = build_qkvz(cur, il);
    ggml_tensor * qkv_mixed = qkvz.first;
    ggml_tensor * z         = qkvz.second;

    ggml_tensor * beta = build_lora_mm(model.layers[il].ssm_beta, cur, model.layers[il].ssm_beta_s);
    beta = ggml_reshape_4d(ctx0, beta, 1, num_v_heads, n_seq_tokens, n_seqs);
    cb(beta, "beta", il);

    beta = ggml_sigmoid(ctx0, beta);

    ggml_tensor * alpha = build_lora_mm(model.layers[il].ssm_alpha, cur, model.layers[il].ssm_alpha_s);
    alpha = ggml_reshape_3d(ctx0, alpha, num_v_heads, n_seq_tokens, n_seqs);
    cb(alpha, "alpha", il);

    ggml_tensor * alpha_biased   = ggml_add(ctx0, alpha, model.layers[il].ssm_dt);
    ggml_tensor * alpha_softplus = ggml_softplus(ctx0, alpha_biased);
    cb(alpha_softplus, "a_softplus", il);

    ggml_tensor * gate = ggml_mul(ctx0, alpha_softplus, model.layers[il].ssm_a); // ssm_a holds -A_log.exp(), so gate = -A_log.exp() * softplus(alpha + dt_bias)
    cb(gate, "gate", il);

    gate = ggml_reshape_4d(ctx0, gate, 1, num_v_heads, n_seq_tokens, n_seqs);
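
    // Conceptually (a sketch of the gated delta rule, not the exact fused kernel):
    //   S_t = a_t * S_{t-1} * (I - b_t * k_t k_t^T) + b_t * v_t k_t^T
    // where a_t = exp(gate) in (0, 1) is the per-head decay and b_t = beta
    // (already passed through the sigmoid above) is the update strength.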

    // Get the convolution and delta-net states from the cache
    ggml_tensor * conv_states_all = mctx_cur->get_r_l(il);
    ggml_tensor * ssm_states_all  = mctx_cur->get_s_l(il);

    // Build the convolution states tensor
    ggml_tensor * conv_states = build_rs(inp, conv_states_all, hparams.n_embd_r(), n_seqs);
    cb(conv_states, "conv_states", il);

    // Calculate convolution kernel size
    ggml_tensor * conv_kernel = model.layers[il].ssm_conv1d;
    const int64_t conv_kernel_size = conv_kernel->ne[0];
    const int64_t conv_channels    = d_inner + 2 * hparams.ssm_n_group * hparams.ssm_d_state;

    conv_states = ggml_reshape_3d(ctx0, conv_states, conv_kernel_size - 1, conv_channels, n_seqs);
    cb(conv_states, "conv_states_reshaped", il);

    qkv_mixed = ggml_transpose(ctx0, qkv_mixed);
    cb(qkv_mixed, "qkv_mixed_transposed", il);

    ggml_tensor * conv_input = ggml_concat(ctx0, conv_states, qkv_mixed, 0);
    cb(conv_input, "conv_input", il);
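
    // Rolling window: conv_input = [ cached last (conv_kernel_size - 1) columns | new tokens ].
    // E.g. with a kernel of size 4, the 3 most recent columns are carried across ubatches;
    // after the conv below, the tail of conv_input becomes the new cached state.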

    // Update convolution state cache
    // Extract the last (conv_kernel_size - 1) states from conv_input
    ggml_tensor * last_conv_states =
        ggml_view_3d(ctx0, conv_input, conv_kernel_size - 1, conv_channels, n_seqs, conv_input->nb[1],
                conv_input->nb[2], (conv_input->ne[0] - conv_states->ne[0]) * ggml_element_size(conv_input));
    cb(last_conv_states, "last_conv_states", il);

    ggml_tensor * state_update_target =
        ggml_view_1d(ctx0, conv_states_all, (conv_kernel_size - 1) * conv_channels * n_seqs,
                kv_head * (conv_kernel_size - 1) * conv_channels * ggml_element_size(conv_states_all));
    cb(state_update_target, "state_update_target", il);

    ggml_build_forward_expand(gf, ggml_cpy(ctx0, last_conv_states, state_update_target));

    ggml_tensor * state = build_rs(inp, ssm_states_all, hparams.n_embd_s(), n_seqs);
    state = ggml_reshape_4d(ctx0, state, head_v_dim, head_v_dim, num_v_heads, n_seqs);
    cb(state, "state_predelta", il);

    ggml_tensor * conv_output_proper = ggml_ssm_conv(ctx0, conv_input, conv_kernel);
    cb(conv_output_proper, "conv_output_raw", il);

    ggml_tensor * conv_output_silu = ggml_silu(ctx0, conv_output_proper);
    cb(conv_output_silu, "conv_output_silu", il);

    ggml_tensor * conv_qkv_mix = conv_output_silu;

    // Calculate the total conv dimension
    int64_t qkv_dim = head_k_dim * num_k_heads * 2 + head_v_dim * num_v_heads;
    int64_t nb1_qkv = ggml_row_size(conv_qkv_mix->type, qkv_dim);
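
    // Offsets of the convolved Q, K, V within each row (matches the packing in build_qkvz):
    //   q at 0, k at head_k_dim * num_k_heads, v at 2 * head_k_dim * num_k_heads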

    // Extract the convolved Q, K, V from conv_output
    ggml_tensor * q_conv = ggml_view_4d(ctx0, conv_qkv_mix, head_k_dim, num_k_heads, n_seq_tokens, n_seqs,
            ggml_row_size(conv_qkv_mix->type, head_k_dim),
            nb1_qkv,
            nb1_qkv * n_seq_tokens,
            0);

    ggml_tensor * k_conv = ggml_view_4d(ctx0, conv_qkv_mix, head_k_dim, num_k_heads, n_seq_tokens, n_seqs,
            ggml_row_size(conv_qkv_mix->type, head_k_dim),
            nb1_qkv,
            nb1_qkv * n_seq_tokens,
            head_k_dim * num_k_heads * ggml_element_size(conv_qkv_mix));

    ggml_tensor * v_conv = ggml_view_4d(ctx0, conv_qkv_mix, head_v_dim, num_v_heads, n_seq_tokens, n_seqs,
            ggml_row_size(conv_qkv_mix->type, head_v_dim),
            nb1_qkv,
            nb1_qkv * n_seq_tokens,
            ggml_row_size(conv_qkv_mix->type, 2 * head_k_dim * num_k_heads));

    cb(q_conv, "q_conv", il);
    cb(k_conv, "k_conv", il);
    cb(v_conv, "v_conv", il);

    const float eps_norm = hparams.f_norm_rms_eps;

    q_conv = ggml_l2_norm(ctx0, q_conv, eps_norm);
    k_conv = ggml_l2_norm(ctx0, k_conv, eps_norm);

    //q_conv = ggml_cont_4d(ctx0, q_conv, head_k_dim, num_k_heads, n_seq_tokens, n_seqs);
    //k_conv = ggml_cont_4d(ctx0, k_conv, head_k_dim, num_k_heads, n_seq_tokens, n_seqs);
    //v_conv = ggml_cont_4d(ctx0, v_conv, head_v_dim, num_v_heads, n_seq_tokens, n_seqs);

    // If the number of K heads differs from the number of V heads, repeat Q/K to match shapes.
    // Note: the explicit repeat is only needed when not using the fused GDN path.
    if (num_k_heads != num_v_heads && (!cparams.fused_gdn_ar || !cparams.fused_gdn_ch)) {
        GGML_ASSERT(num_v_heads % num_k_heads == 0);
        q_conv = ggml_repeat_4d(ctx0, q_conv, head_k_dim, num_v_heads, n_seq_tokens, n_seqs);
        k_conv = ggml_repeat_4d(ctx0, k_conv, head_k_dim, num_v_heads, n_seq_tokens, n_seqs);
    }

    cb(q_conv, "q_conv_predelta", il);
    cb(k_conv, "k_conv_predelta", il);
    cb(v_conv, "v_conv_predelta", il);

    auto attn_out = build_delta_net(q_conv, k_conv, v_conv, gate, beta, state, il);

    ggml_tensor * output    = attn_out.first;
    ggml_tensor * new_state = attn_out.second;
    cb(output, "attn_output", il);
    cb(new_state, "new_state", il);

    // Update the recurrent states
    ggml_build_forward_expand(gf,
            ggml_cpy(ctx0, new_state,
                    ggml_view_1d(ctx0, ssm_states_all, hparams.n_embd_s() * n_seqs,
                            kv_head * hparams.n_embd_s() * ggml_element_size(ssm_states_all))));

    // Reshape z to [head_v_dim, num_v_heads, n_seq_tokens, n_seqs] to match the attention output
    ggml_tensor * z_2d = ggml_reshape_4d(ctx0, z, head_v_dim, num_v_heads, n_seq_tokens, n_seqs);

    // Apply gated normalization: self.norm(core_attn_out, z)
    ggml_tensor * attn_out_norm = build_norm_gated(output, model.layers[il].ssm_norm, z_2d, il);

    // Flatten the heads: [head_v_dim, num_v_heads, n_seq_tokens, n_seqs] -> [head_v_dim * num_v_heads, n_seq_tokens, n_seqs]
    ggml_tensor * final_output = ggml_reshape_3d(ctx0, attn_out_norm, head_v_dim * num_v_heads, n_seq_tokens, n_seqs);
    cb(final_output, "final_output", il);

    // Output projection
    cur = build_lora_mm(model.layers[il].ssm_out, final_output, model.layers[il].ssm_out_s);
    cb(cur, "linear_attn_out", il);

    // Reshape back to [n_embd, n_tokens]
    cur = ggml_reshape_2d(ctx0, cur, n_embd, n_seq_tokens * n_seqs);

    return cur;
}

ggml_tensor * llm_build_qwen35::build_layer_ffn(ggml_tensor * cur, const int il) {
    // Qwen3.5 does not use MoE FFN
    GGML_ASSERT(model.layers[il].ffn_gate_inp == nullptr);

    cur = build_ffn(cur,
            model.layers[il].ffn_up,   NULL, model.layers[il].ffn_up_s,
            model.layers[il].ffn_gate, NULL, model.layers[il].ffn_gate_s,
            model.layers[il].ffn_down, NULL, model.layers[il].ffn_down_s,
            NULL,
            LLM_FFN_SILU, LLM_FFN_PAR, il);
    cb(cur, "ffn_out", il);

    return cur;
}

void llm_build_qwen35::build_mtp_head(
        llm_graph_input_mem_hybrid * inp,
        ggml_tensor * inp_pos,
        int * sections) {
    // MTP (Multi-Token Prediction) head for dense Qwen 3.5
    //
    // The MTP module takes the hidden state from the last main transformer layer
    // and uses the model's built-in MTP head to produce draft logits.
    //
    // MTP forward pass:
    //   1. sampled_token = argmax(main_logits)
    //   2. emb = embed_tokens(sampled_token)
    //   3. h_norm = RMSNorm(hidden_state, hnorm)
    //   4. e_norm = RMSNorm(emb, enorm)
    //   5. combined = eh_proj(concat(e_norm, h_norm))
    //   6. Standard self-attention (Q/K/V with Q/K norms + RoPE)
    //   7. Standard FFN (gate_proj + up_proj -> SiLU -> down_proj)
    //   8. logits = lm_head(RMSNorm(output, mtp_norm))

    const int     n_transformer_layers = n_layer - hparams.nextn_predict_layers;
    const int64_t n_embd_head          = hparams.n_embd_head_v();

    // Use unfiltered hidden state for MTP (needs all batch tokens for attention KV cache)
    ggml_tensor * hidden_state = mtp_inp_hidden ? mtp_inp_hidden : res->t_embd;
    GGML_ASSERT(hidden_state != nullptr);

    // Get logits for greedy token selection.
    // If no filtering occurred (generation), reuse main logits to avoid expensive lm_head recomputation.
    // If filtering occurred (prompt processing), recompute from the unfiltered hidden state.
    ggml_tensor * greedy_logits;
    if (!mtp_inp_hidden || mtp_inp_hidden == res->t_embd) {
        // No filtering - main logits already cover all tokens
        greedy_logits = res->t_logits;
    } else {
        // Filtered - recompute logits from the unfiltered hidden state
        ggml_tensor * full_normed = build_norm(hidden_state, model.output_norm, nullptr, LLM_NORM_RMS, -1);
        greedy_logits = build_lora_mm(model.output, full_normed);
    }

    ggml_tensor * greedy_tokens = ggml_argmax(ctx0, greedy_logits);
    cb(greedy_tokens, "mtp_greedy_tokens", -1);
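
    // Greedy selection happens inside the graph (argmax over the vocab dimension),
    // so drafting needs no host-side round-trip between the main head and the MTP head.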

    ggml_tensor * mtp_hidden = hidden_state;

    for (uint32_t k = 0; k < hparams.nextn_predict_layers; ++k) {
        const int il = n_transformer_layers + k;
        const auto & layer = model.layers[il];

        if (layer.nextn.eh_proj == nullptr) {
            continue;
        }

        // Step 1: Get the token embedding (shared with the main model)
        ggml_tensor * tok_embd = layer.nextn.embed_tokens ? layer.nextn.embed_tokens : model.tok_embd;
        ggml_tensor * emb = ggml_get_rows(ctx0, tok_embd, greedy_tokens);
        cb(emb, "mtp_token_embd", il);

        // Step 2: Normalize the hidden state and the embedding
        ggml_tensor * h_norm = build_norm(mtp_hidden, layer.nextn.hnorm, nullptr, LLM_NORM_RMS, il);
        cb(h_norm, "mtp_hnorm", il);

        ggml_tensor * e_norm = build_norm(emb, layer.nextn.enorm, nullptr, LLM_NORM_RMS, il);
        cb(e_norm, "mtp_enorm", il);

        // Step 3: Concatenate and project
        ggml_tensor * concat = ggml_concat(ctx0, e_norm, h_norm, 0); // [2*n_embd, n_tokens]
        cb(concat, "mtp_concat", il);

        ggml_tensor * cur = build_lora_mm(layer.nextn.eh_proj, concat);
        cb(cur, "mtp_projected", il);

        // Step 4: Full self-attention for the MTP head (same architecture as the main model's attention layers).
        // The MTP layer has its own KV cache (allocated because is_recurrent(il) = false).
        // We use the unfiltered hidden state (mtp_inp_hidden) so the token count matches inp_pos.
        {
            ggml_tensor * attn_residual = cur;

            cur = build_norm(cur, layer.attn_norm, nullptr, LLM_NORM_RMS, il);

            cur = build_layer_attn(inp->get_attn(), cur, inp_pos, sections, il);

            cur = ggml_add(ctx0, cur, attn_residual);
        }

        // Step 5: Post-attention norm + FFN
        {
            ggml_tensor * ffn_residual = cur;

            ggml_tensor * attn_post_norm = build_norm(cur, layer.attn_post_norm, nullptr, LLM_NORM_RMS, il);
            cb(attn_post_norm, "mtp_attn_post_norm", il);

            // Standard dense FFN (same as the main model FFN)
            cur = build_ffn(attn_post_norm,
                    layer.ffn_up,   NULL, layer.ffn_up_s,
                    layer.ffn_gate, NULL, layer.ffn_gate_s,
                    layer.ffn_down, NULL, layer.ffn_down_s,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "mtp_ffn_out", il);

            cur = ggml_add(ctx0, cur, ffn_residual);
            cb(cur, "mtp_post_ffn", il);
        }

        mtp_hidden = cur;

        // Step 6: Final norm + LM head for the draft logits
        ggml_tensor * mtp_normed;
        if (layer.nextn.shared_head_norm != nullptr) {
            mtp_normed = build_norm(mtp_hidden, layer.nextn.shared_head_norm, nullptr, LLM_NORM_RMS, il);
        } else {
            // Use the main model's output norm
            mtp_normed = build_norm(mtp_hidden, model.output_norm, nullptr, LLM_NORM_RMS, il);
        }
        cb(mtp_normed, "mtp_head_norm", il);

        ggml_tensor * lm_head = layer.nextn.shared_head_head ? layer.nextn.shared_head_head : model.output;

        // FastMTP: vocabulary trimming - only compute draft logits for the first K token ids
        // instead of the full 248K vocabulary. Most tokenizers order token ids by frequency,
        // so ids 0..K-1 cover ~95%+ of generated code tokens in practice.
        // This reduces the lm_head matmul from [4096, 248K] to [4096, 32K] (~8x faster).
        const int64_t mtp_vocab_size = std::min(lm_head->ne[1], (int64_t) 32768);
        ggml_tensor * lm_head_reduced = ggml_view_2d(ctx0, lm_head,
                lm_head->ne[0], mtp_vocab_size, lm_head->nb[1], 0);
        ggml_tensor * mtp_logits = build_lora_mm(lm_head_reduced, mtp_normed);
        cb(mtp_logits, "mtp_logits", il);
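
        // Note: with the trimmed head, a draft can never propose a token id >= mtp_vocab_size;
        // if the target token falls outside that range, the draft simply mismatches and is
        // rejected during verification.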

        // Store MTP outputs in the graph result
        res->t_embd_mtp   = mtp_hidden;
        res->t_logits_mtp = mtp_logits;

        // For recursive MTP (multiple layers), feed the greedy tokens forward
        if (k + 1 < hparams.nextn_predict_layers) {
            greedy_tokens = ggml_argmax(ctx0, mtp_logits);
            cb(greedy_tokens, "mtp_greedy_next", il);
        }

        ggml_build_forward_expand(gf, mtp_logits);
    }
}