moved const llama_model & model; around to follow the qwen3next format and see if it can pass the -Wunused-private-field error

Yee Man Chan 2026-01-11 21:44:54 +08:00
parent 6ae66fc40d
commit 93afbedc96
2 changed files with 4 additions and 2 deletions
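
For context, here is a minimal sketch (hypothetical stub names, not taken from this patch) of what Clang's -Wunused-private-field diagnostic flags and how it is usually silenced; initializing a member in the constructor's initializer list generally does not count as a "use" on its own.

// sketch.cpp -- compile with: clang++ -std=c++17 -Wunused-private-field -c sketch.cpp
struct model_stub { int n_layer = 0; };            // stand-in for llama_model

class builder_warns {
public:
    explicit builder_warns(const model_stub & m) : model(m) {}
private:
    // Only written in the initializer list and never read afterwards:
    // Clang typically reports "private field 'model' is not used".
    const model_stub & model;
};

class builder_ok {
public:
    explicit builder_ok(const model_stub & m) : model(m) {}
    int layers() const { return model.n_layer; }   // reading the field silences the warning
private:
    const model_stub & model;                      // [[maybe_unused]] would also silence it
};

Whether moving the declaration within the private section affects the diagnostic is exactly what this commit is testing; the sketch above only shows the conditions under which the warning normally fires.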


@@ -3,7 +3,8 @@
 #define CHUNK_SIZE 64
-llm_build_kimi_linear::llm_build_kimi_linear(const llama_model & model, const llm_graph_params & params) : llm_graph_context_mamba(params), model(model) {
+llm_build_kimi_linear::llm_build_kimi_linear(const llama_model & model, const llm_graph_params & params) :
+    llm_graph_context_mamba(params), model(model) {
     ggml_tensor * cur;
     ggml_tensor * inpL;


@@ -287,7 +287,6 @@ struct llm_build_jamba : public llm_graph_context_mamba {
 struct llm_build_kimi_linear : public llm_graph_context_mamba {
     llm_build_kimi_linear(const llama_model & model, const llm_graph_params & params);
 private:
-    const llama_model & model;
     ggml_tensor * build_kda_autoregressive(
         ggml_tensor * q,
         ggml_tensor * k,
@@ -308,6 +307,8 @@ private:
         ggml_tensor * identity,
         ggml_tensor * diag_mask,
         int il);
+    const llama_model & model;
 };
 struct llm_build_lfm2 : public llm_graph_context {