model : add Granite Hybrid nano types (#16896)
Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
parent 31c511a968
commit e58d585604
@@ -1898,7 +1898,8 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

                 switch (hparams.n_embd) {
-                    case 1536: type = LLM_TYPE_7B_A1B; break;
+                    case 768: type = LLM_TYPE_350M; break;
+                    case 1536: type = (hparams.n_embd == 2048 ? LLM_TYPE_7B_A1B : LLM_TYPE_1B); break;
                     case 2048: case 2560: type = LLM_TYPE_3B; break;
                     case 4096: type = LLM_TYPE_32B; break;
                     default: type = LLM_TYPE_UNKNOWN;
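For reference, a minimal standalone sketch of the n_embd → type table as it reads after this hunk (illustrative only, not llama.cpp code): the enum is a reduced copy of the LLM_TYPE_* values that appear in the diff, and the is_a1b flag is a hypothetical stand-in for the secondary condition the patched 1536 case evaluates to separate the existing 7B-A1B from the new 1B nano.

// standalone sketch, illustrative only -- not part of llama.cpp
#include <cstdint>
#include <cstdio>

// reduced stand-in for the llm_type values referenced in the hunk
enum llm_type {
    LLM_TYPE_UNKNOWN,
    LLM_TYPE_350M,
    LLM_TYPE_1B,
    LLM_TYPE_3B,
    LLM_TYPE_7B_A1B,
    LLM_TYPE_32B,
};

// post-patch mapping: the embedding width selects the reported model size;
// is_a1b is a hypothetical stand-in for the secondary check the patched
// 1536 case performs to tell the 7B-A1B apart from the new 1B nano
static llm_type granite_hybrid_type(uint32_t n_embd, bool is_a1b) {
    switch (n_embd) {
        case 768:  return LLM_TYPE_350M;                          // new: 350M nano
        case 1536: return is_a1b ? LLM_TYPE_7B_A1B : LLM_TYPE_1B; // new: 1B nano shares this width
        case 2048:
        case 2560: return LLM_TYPE_3B;
        case 4096: return LLM_TYPE_32B;
        default:   return LLM_TYPE_UNKNOWN;
    }
}

int main() {
    std::printf("n_embd=768         -> %d (LLM_TYPE_350M)\n",   (int) granite_hybrid_type(768,  false));
    std::printf("n_embd=1536, dense -> %d (LLM_TYPE_1B)\n",     (int) granite_hybrid_type(1536, false));
    std::printf("n_embd=1536, a1b   -> %d (LLM_TYPE_7B_A1B)\n", (int) granite_hybrid_type(1536, true));
    return 0;
}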