memory : handle kv_unified for hybrid models

Francis Couture-Harpin 2025-08-03 00:45:47 -04:00
parent 5c0eb5ef54
commit e549515cb3
3 changed files with 4 additions and 1 deletion

View File

@@ -25,6 +25,7 @@ llama_memory_hybrid::llama_memory_hybrid(
     /* common */
     uint32_t n_seq_max,
     bool offload,
+    bool unified,
     /* layer filters */
     layer_filter_cb && filter_attn,
     layer_filter_cb && filter_recr) :
@@ -38,7 +39,7 @@ llama_memory_hybrid::llama_memory_hybrid(
     type_v,
     v_trans,
     offload,
-    1,
+    unified,
     kv_size,
     n_seq_max,
     n_pad,
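
The literal 1 replaced above sits in the argument list that the hybrid memory forwards to its attention-side cache. A minimal sketch of what that call site presumably looks like after this change, assuming the member name mem_attn, the llama_kv_cache_unified constructor order, and the trailing n_swa/swa_type arguments from the surrounding llama.cpp source (none of which appear in this diff):

// Sketch only: names outside this diff are assumptions about the surrounding code.
mem_attn(new llama_kv_cache_unified(
        model,
        filter_attn,
        type_k,
        type_v,
        v_trans,
        offload,
        unified,    // previously the literal 1, which always requested a unified KV buffer
        kv_size,
        n_seq_max,
        n_pad,
        n_swa,
        swa_type)),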

View File

@@ -39,6 +39,7 @@ public:
     /* common */
     uint32_t n_seq_max,
     bool offload,
+    bool unified,
     /* layer filters */
     layer_filter_cb && filter_attn = nullptr,
     layer_filter_cb && filter_recr = nullptr);

View File

@@ -17598,6 +17598,7 @@ llama_memory_i * llama_model::create_memory(const llama_memory_params & params,
     /* recurrent_kv_size */ std::max((uint32_t) 1, cparams.n_seq_max),
     /* n_seq_max */ cparams.n_seq_max,
     /* offload */ cparams.offload_kqv,
+    /* unified */ cparams.kv_unified,
     /* filter_attn */ (arch == LLM_ARCH_FALCON_H1) ? [&](int32_t) { return true; } : (llama_memory_hybrid::layer_filter_cb)nullptr,
     /* filter_recr */ (arch == LLM_ARCH_FALCON_H1) ? [&](int32_t) { return true; } : (llama_memory_hybrid::layer_filter_cb)nullptr);
 } else {
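
For callers, the new argument is driven by cparams.kv_unified, so the per-context unified-KV setting now also reaches hybrid (attention + recurrent) models such as Falcon-H1 instead of being forced on. A minimal usage sketch, assuming the public llama_context_params exposes a kv_unified field and using current llama.cpp API names (the model path is a placeholder):

// Sketch only: kv_unified on the public context params is an assumption here.
#include "llama.h"

int main() {
    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_model_load_from_file("hybrid-model.gguf", mparams);
    if (model == nullptr) {
        return 1;
    }

    llama_context_params cparams = llama_context_default_params();
    cparams.n_seq_max  = 4;
    cparams.kv_unified = false; // with this commit, hybrid models forward this to their
                                // attention KV cache instead of always using a unified buffer

    llama_context * ctx = llama_init_from_model(model, cparams);
    // ... decode as usual ...
    llama_free(ctx);
    llama_model_free(model);
    llama_backend_free();
    return 0;
}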