mtmd : add VAETKI vision encoder support
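
Adds the VAETKI vision encoder to mtmd: a new PROJECTOR_TYPE_VAETKI
projector, a clip_graph_vaetki builder that applies 2D M-RoPE to patch
tokens and a learned rotary phase (class_pos_emb) to the CLS token, and a
pixel-shuffle + GELU-MLP merger. Also fixes the Metaspace pre-normalization
in the converter and an n_layer -> type mapping in llama_model::load_hparams.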

parent 488cdee96f
commit b267aada03

@@ -7727,7 +7727,7 @@ class VaetkiModel(TextModel):
                     toktypes.append(gguf.TokenType.CONTROL)
                 else:
                     # pre-normalize user-defined spaces (Metaspace → space)
-                    token = token.replace("\xe2\x96\x81", " ")
+                    token = token.replace("\u2581", " ")
                     toktypes.append(gguf.TokenType.USER_DEFINED)
                 tokens.append(token)
             elif i in reverse_vocab:

@@ -1165,7 +1165,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {

                 switch (hparams.n_layer) {
                     case 24: type = LLM_TYPE_7B; break;
-                    case 48: type = LLM_TYPE_109B; break;
+                    case 48: type = LLM_TYPE_120B; break;
                     default: type = LLM_TYPE_UNKNOWN;
                 }
             } break;

@@ -26,6 +26,7 @@ add_library(mtmd
     models/qwen2vl.cpp
     models/qwen3vl.cpp
     models/siglip.cpp
+    models/vaetki.cpp
     models/whisper-enc.cpp
     models/mobilenetv5.cpp
     models/youtuvl.cpp

@@ -66,6 +66,7 @@

 #define TN_POS_EMBD       "%s.position_embd.weight"
 #define TN_CLASS_EMBD     "v.class_embd"
+#define TN_CLASS_POS_EMBD "v.class_pos_embd"
 #define TN_PATCH_EMBD     "v.patch_embd.weight" // not renamed with ".0" postfix for backward compat
 #define TN_PATCH_EMBD_1   "v.patch_embd.weight.1"
 #define TN_PATCH_BIAS     "v.patch_embd.bias"

@@ -233,6 +234,7 @@ enum projector_type {
     PROJECTOR_TYPE_LFM2A,
     PROJECTOR_TYPE_GLM4V,
     PROJECTOR_TYPE_YOUTUVL,
+    PROJECTOR_TYPE_VAETKI,
     PROJECTOR_TYPE_UNKNOWN,
 };

@@ -266,6 +268,7 @@ static std::map<projector_type, std::string> PROJECTOR_TYPE_NAMES = {
     { PROJECTOR_TYPE_LFM2A,   "lfm2a"},
     { PROJECTOR_TYPE_GLM4V,   "glm4v"},
     { PROJECTOR_TYPE_YOUTUVL, "youtuvl"},
+    { PROJECTOR_TYPE_VAETKI,  "vaetki"},
 };

 static projector_type clip_projector_type_from_string(const std::string & str) {

@@ -219,6 +219,7 @@ struct clip_model {

     // embeddings
     ggml_tensor * class_embedding    = nullptr;
+    ggml_tensor * class_pos_emb      = nullptr;
     ggml_tensor * patch_embeddings_0 = nullptr;
     ggml_tensor * patch_embeddings_1 = nullptr; // second Conv2D kernel when we decouple Conv3D along temporal dimension (Qwen2VL)
     ggml_tensor * patch_bias         = nullptr;

@@ -849,6 +849,10 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
             {
                 builder = std::make_unique<clip_graph_youtuvl>(ctx, img);
             } break;
+        case PROJECTOR_TYPE_VAETKI:
+            {
+                builder = std::make_unique<clip_graph_vaetki>(ctx, img);
+            } break;
         default:
             GGML_ABORT("missing cgraph builder");
     }

@@ -1192,6 +1196,13 @@ struct clip_model_loader {
                     hparams.set_limit_image_tokens(8, 4096);
                     hparams.set_warmup_n_tokens(46*46); // avoid OOM on warmup
                 } break;
+            case PROJECTOR_TYPE_VAETKI:
+                {
+                    hparams.rope_theta = 10000.0f;
+                    hparams.n_merge = 2;
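+                    // spatial merge defaults to 2x2; the GGUF key below may override it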
+                    get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.n_merge, false);
+                    hparams.set_warmup_n_tokens(40*40);
+                } break;
             case PROJECTOR_TYPE_LLAMA4:
                 {
                     hparams.rope_theta = 10000.0f;

@@ -1325,6 +1336,7 @@ struct clip_model_loader {
         };

         model.class_embedding = get_tensor(TN_CLASS_EMBD, false);
+        model.class_pos_emb   = get_tensor(TN_CLASS_POS_EMBD, false);

         model.pre_ln_w = get_tensor(string_format(TN_LN_PRE, prefix, "weight"), false);
         model.pre_ln_b = get_tensor(string_format(TN_LN_PRE, prefix, "bias"), false);

@@ -1540,6 +1552,15 @@ struct clip_model_loader {
                     model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight")); // merger.mlp.2
                     model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
                 } break;
+            case PROJECTOR_TYPE_VAETKI:
+                {
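+                    // merger MLP tensors: index 0 is the pre-merge norm, 1 and 3
+                    // are the linear layers around the GELU (index 2, the
+                    // activation, has no weights); see clip_graph_vaetki::build()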
+                    model.mm_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
+                    model.mm_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
+                    model.mm_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
+                    model.mm_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
+                    model.mm_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
+                    model.mm_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
+                } break;
             case PROJECTOR_TYPE_GLM4V:
                 {
                     model.projection = get_tensor(TN_MM_PROJECTOR);

@@ -2952,6 +2973,7 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str
         case PROJECTOR_TYPE_GLM_EDGE:
         case PROJECTOR_TYPE_GEMMA3:
         case PROJECTOR_TYPE_INTERNVL: // TODO @ngxson : support dynamic resolution
+        case PROJECTOR_TYPE_VAETKI:
            {
                 clip_image_u8 resized_image;
                 int sz = params.image_size;

@@ -3229,6 +3251,7 @@ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * im
         case PROJECTOR_TYPE_IDEFICS3:
         case PROJECTOR_TYPE_INTERNVL:
         case PROJECTOR_TYPE_LLAMA4:
+        case PROJECTOR_TYPE_VAETKI:
            {
                 // both X and Y are downscaled by the scale factor
                 int scale_factor = ctx->model.hparams.n_merge;

@@ -3496,6 +3519,31 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
                 }
             }

             set_input_i32("positions", positions);
         } break;
+        case PROJECTOR_TYPE_VAETKI:
+            {
+                const int merge_ratio = 2;
+                const int ipw = image_size_width  / patch_size;
+                const int iph = image_size_height / patch_size;
+
+                std::vector<int> positions(num_patches * 4);
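+                // layout: four planes of num_patches entries, [y, x, y, x] per
+                // patch; ggml_rope_multi consumes one plane per RoPE section, so
+                // each d_head/4 section sees alternating row/column coordinates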
+
+                int ptr = 0;
+                for (int y = 0; y < iph; y += merge_ratio) {
+                    for (int x = 0; x < ipw; x += merge_ratio) {
+                        for (int dy = 0; dy < 2; dy++) {
+                            for (int dx = 0; dx < 2; dx++) {
+                                positions[                  ptr] = y + dy;
+                                positions[    num_patches + ptr] = x + dx;
+                                positions[2 * num_patches + ptr] = y + dy;
+                                positions[3 * num_patches + ptr] = x + dx;
+                                ptr++;
+                            }
+                        }
+                    }
+                }
+
+                set_input_i32("positions", positions);
+            } break;
         case PROJECTOR_TYPE_QWEN25VL:

@@ -3756,6 +3804,7 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
         case PROJECTOR_TYPE_MUSIC_FLAMINGO:
             return ctx->model.mm_2_w->ne[1];
         case PROJECTOR_TYPE_INTERNVL:
+        case PROJECTOR_TYPE_VAETKI:
            return ctx->model.mm_3_w->ne[1];
         case PROJECTOR_TYPE_LLAMA4:
             return ctx->model.mm_model_proj->ne[1];

@@ -109,3 +109,8 @@ struct clip_graph_mobilenetv5 : clip_graph {
         ggml_tensor * inp,
         const mobilenetv5_block & block);
 };
+
+struct clip_graph_vaetki : clip_graph {
+    clip_graph_vaetki(clip_ctx * ctx, const clip_image_f32 & img) : clip_graph(ctx, img) {}
+    ggml_cgraph * build() override;
+};

@@ -0,0 +1,185 @@
+#include "models.h"
+
+ggml_cgraph * clip_graph_vaetki::build() {
+    GGML_ASSERT(model.class_embedding != nullptr);
+
+    const int batch_size       = 1;
+    const int n_pos            = n_patches + 1;
+    const int n_pos_patches    = n_patches;
+    const int num_position_ids = n_pos_patches * 4;
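+    // n_pos counts the CLS token; rope position ids cover only the patch
+    // tokens, since CLS uses the learned class_pos_emb phase instead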
+
+    norm_type norm_t = NORM_TYPE_NORMAL;
+    int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};
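+    // four equal M-RoPE sections, matching the four [y, x, y, x] position
+    // planes filled in clip_image_batch_encode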
+
+    ggml_tensor * inp = build_inp();
+
+    // add CLS token
+    inp = ggml_concat(ctx0, model.class_embedding, inp, 1);
+    cb(inp, "inp_with_cls", -1);
+
+    ggml_tensor * inpL = inp;
+
+    // position IDs for 2D RoPE (patch tokens only)
+    ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_position_ids);
+    ggml_set_name(positions, "positions");
+    ggml_set_input(positions);
+
+    // precompute CLS position embedding cos/sin
+    ggml_tensor * cls_cos = nullptr;
+    ggml_tensor * cls_sin = nullptr;
+    if (model.class_pos_emb) {
+        // class_pos_emb: [head_dim/2] -> concat to [head_dim]
+        ggml_tensor * cls_pos = ggml_concat(ctx0, model.class_pos_emb, model.class_pos_emb, 0);
+        cls_cos = ggml_cos(ctx0, cls_pos);
+        cls_sin = ggml_sin(ctx0, cls_pos);
+    }
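+    // computed once and reused in every layer: the CLS token has no grid
+    // position, so it is rotated by this single learned phase rather than
+    // by the per-position 2D RoPE applied to patch tokens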
+
+    if (model.pre_ln_w) {
+        inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
+        cb(inpL, "pre_ln", -1);
+    }
+
+    for (int il = 0; il < n_layer; il++) {
+        const auto & layer = model.layers[il];
+        ggml_tensor * cur = inpL;
+
+        cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
+        cb(cur, "ln1", il);
+
+        // self-attention with 2D RoPE
+        {
+            ggml_tensor * Qcur = ggml_mul_mat(ctx0, layer.q_w, cur);
+            if (layer.q_b) {
+                Qcur = ggml_add(ctx0, Qcur, layer.q_b);
+            }
+
+            ggml_tensor * Kcur = ggml_mul_mat(ctx0, layer.k_w, cur);
+            if (layer.k_b) {
+                Kcur = ggml_add(ctx0, Kcur, layer.k_b);
+            }
+
+            ggml_tensor * Vcur = ggml_mul_mat(ctx0, layer.v_w, cur);
+            if (layer.v_b) {
+                Vcur = ggml_add(ctx0, Vcur, layer.v_b);
+            }
+
+            Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
+            Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
+            Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);
+
+            cb(Qcur, "Qcur", il);
+            cb(Kcur, "Kcur", il);
+            cb(Vcur, "Vcur", il);
+
+            // split CLS and patch tokens for RoPE
+            ggml_tensor * Q_cls = ggml_view_3d(ctx0, Qcur, d_head, n_head, 1,
+                    ggml_row_size(Qcur->type, d_head),
+                    ggml_row_size(Qcur->type, d_head * n_head), 0);
+            ggml_tensor * K_cls = ggml_view_3d(ctx0, Kcur, d_head, n_head, 1,
+                    ggml_row_size(Kcur->type, d_head),
+                    ggml_row_size(Kcur->type, d_head * n_head), 0);
+
+            ggml_tensor * Q_patch = ggml_view_3d(ctx0, Qcur, d_head, n_head, n_pos_patches,
+                    ggml_row_size(Qcur->type, d_head),
+                    ggml_row_size(Qcur->type, d_head * n_head),
+                    ggml_row_size(Qcur->type, d_head * n_head));
+            ggml_tensor * K_patch = ggml_view_3d(ctx0, Kcur, d_head, n_head, n_pos_patches,
+                    ggml_row_size(Kcur->type, d_head),
+                    ggml_row_size(Kcur->type, d_head * n_head),
+                    ggml_row_size(Kcur->type, d_head * n_head));
+
+            // apply RoPE to CLS token using class_pos_emb
+            if (cls_cos && cls_sin) {
+                // rotate_half: split into two halves, negate second, swap order
+                ggml_tensor * Q_cls_1 = ggml_view_3d(ctx0, Q_cls, d_head/2, n_head, 1,
+                        ggml_row_size(Q_cls->type, d_head),
+                        ggml_row_size(Q_cls->type, d_head * n_head), 0);
+                ggml_tensor * Q_cls_2 = ggml_view_3d(ctx0, Q_cls, d_head/2, n_head, 1,
+                        ggml_row_size(Q_cls->type, d_head),
+                        ggml_row_size(Q_cls->type, d_head * n_head),
+                        ggml_row_size(Q_cls->type, d_head/2));
+                ggml_tensor * Q_cls_rot = ggml_concat(ctx0, ggml_neg(ctx0, Q_cls_2), Q_cls_1, 0);
+
+                ggml_tensor * K_cls_1 = ggml_view_3d(ctx0, K_cls, d_head/2, n_head, 1,
+                        ggml_row_size(K_cls->type, d_head),
+                        ggml_row_size(K_cls->type, d_head * n_head), 0);
+                ggml_tensor * K_cls_2 = ggml_view_3d(ctx0, K_cls, d_head/2, n_head, 1,
+                        ggml_row_size(K_cls->type, d_head),
+                        ggml_row_size(K_cls->type, d_head * n_head),
+                        ggml_row_size(K_cls->type, d_head/2));
+                ggml_tensor * K_cls_rot = ggml_concat(ctx0, ggml_neg(ctx0, K_cls_2), K_cls_1, 0);
+
+                // RoPE: x * cos + rotate_half(x) * sin
+                Q_cls = ggml_add(ctx0,
+                        ggml_mul(ctx0, Q_cls, cls_cos),
+                        ggml_mul(ctx0, Q_cls_rot, cls_sin));
+                K_cls = ggml_add(ctx0,
+                        ggml_mul(ctx0, K_cls, cls_cos),
+                        ggml_mul(ctx0, K_cls_rot, cls_sin));
+            }
+
+            // apply 2D RoPE to patch tokens
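+            // n_dims = d_head/2 rotated dims; freq_base 10000 matches the
+            // rope_theta set by the loader, and the trailing args are ggml's
+            // standard rope-scaling defaults (ext_factor 0, i.e. no YaRN)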
+            Q_patch = ggml_rope_multi(ctx0, Q_patch, positions, nullptr,
+                    d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
+            K_patch = ggml_rope_multi(ctx0, K_patch, positions, nullptr,
+                    d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
+
+            Qcur = ggml_concat(ctx0, Q_cls, Q_patch, 2);
+            Kcur = ggml_concat(ctx0, K_cls, K_patch, 2);
+
+            cb(Qcur, "Qcur_rope", il);
+            cb(Kcur, "Kcur_rope", il);
+
+            cur = build_attn(layer.o_w, layer.o_b,
+                    Qcur, Kcur, Vcur, nullptr, kq_scale, il);
+            cb(cur, "attn_out", il);
+        }
+
+        cur = ggml_add(ctx0, cur, inpL);
+        inpL = cur;
+        cb(cur, "ffn_inp", il);
+
+        cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
+        cb(cur, "ln2", il);
+
+        cur = build_ffn(cur,
+                layer.ff_up_w, layer.ff_up_b,
+                nullptr, nullptr,
+                layer.ff_down_w, layer.ff_down_b,
+                hparams.ffn_op, il);
+        cb(cur, "ffn_out", il);
+
+        cur = ggml_add(ctx0, inpL, cur);
+        cb(cur, "layer_out", il);
+
+        inpL = cur;
+    }
+
+    // remove CLS token
+    ggml_tensor * embeddings = ggml_view_2d(ctx0, inpL,
+            n_embd, n_pos_patches,
+            ggml_row_size(inpL->type, n_embd),
+            ggml_row_size(inpL->type, n_embd));
+    cb(embeddings, "patches_only", -1);
+
+    // merger
+    embeddings = build_norm(embeddings, model.mm_0_w, model.mm_0_b, NORM_TYPE_NORMAL, 1e-5, -1);
+    cb(embeddings, "merger_normed", -1);
+
+    // pixel shuffle
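+    // groups each scale_factor x scale_factor block of neighbouring patches
+    // into one token by stacking their channels; assumes patches arrive in
+    // 2x2 merge-block order, consistent with the position layout built in
+    // clip_image_batch_encode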
+    const int scale_factor = hparams.n_merge;
+    embeddings = ggml_reshape_3d(ctx0, embeddings, n_embd * scale_factor * scale_factor, n_pos_patches / (scale_factor * scale_factor), batch_size);
+    cb(embeddings, "merger_reshaped", -1);
+
+    embeddings = build_ffn(embeddings,
+            model.mm_1_w, model.mm_1_b,
+            nullptr, nullptr,
+            model.mm_3_w, model.mm_3_b,
+            FFN_GELU,
+            -1);
+    cb(embeddings, "merger_out", -1);
+
+    ggml_build_forward_expand(gf, embeddings);
+
+    return gf;
+}