Merge branch 'sf/deepseek-ocr' into sf/deepseek-ocr-cleanup

# Conflicts:
#	gguf-py/gguf/tensor_mapping.py
commit c73748ab5d
Saba Fallah, 2025-12-04 15:09:32 +01:00
4 changed files with 20 additions and 25 deletions

gguf-py/gguf/constants.py

@@ -1077,7 +1077,7 @@ TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
     MODEL_TENSOR.V_MM_GATE:        "mm.gate",
     MODEL_TENSOR.V_TOK_BOI:        "v.boi",
     MODEL_TENSOR.V_TOK_EOI:        "v.eoi",
-    # DeepSeek-OCR sam_model
+    # DeepSeek-OCR SAM
     MODEL_TENSOR.V_SAM_POS_EMBD:   "v.sam.pos_embd",
     MODEL_TENSOR.V_SAM_PATCH_EMBD: "v.sam.patch_embd",
     MODEL_TENSOR.V_SAM_PRE_NORM:   "v.sam.blk.{bid}.pre_ln",
@@ -1091,8 +1091,8 @@ TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
     MODEL_TENSOR.V_SAM_NECK:       "v.sam.neck.{bid}",
     MODEL_TENSOR.V_SAM_NET_2:      "v.sam.net_2",
     MODEL_TENSOR.V_SAM_NET_3:      "v.sam.net_3",
-    MODEL_TENSOR.V_ENC_EMBD_IMGNL: "model.image_newline", # Deepseek-OCR
-    MODEL_TENSOR.V_ENC_EMBD_VSEP:  "model.view_seperator", # Deepseek-OCR
+    MODEL_TENSOR.V_ENC_EMBD_IMGNL: "v.image_newline", # Deepseek-OCR
+    MODEL_TENSOR.V_ENC_EMBD_VSEP:  "v.view_seperator", # Deepseek-OCR
     # audio (mtmd)
     MODEL_TENSOR.A_ENC_EMBD_POS:   "a.position_embd",
     MODEL_TENSOR.A_ENC_CONV1D:     "a.conv1d.{bid}",
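
Background, not part of the commit: TENSOR_NAMES maps each MODEL_TENSOR to a GGUF name template; per-block entries carry a {bid} placeholder that gguf-py fills with the block index, so the rename in this hunk only changes the literal prefix of the two single-tensor entries (model.* becomes v.*, matching the other vision tensor names). A minimal sketch of the expansion, using names from the hunk above:

    # illustrative only: expand a templated tensor name for block 2
    name_tmpl = "v.sam.blk.{bid}.pre_ln"
    print(name_tmpl.format(bid=2))   # -> v.sam.blk.2.pre_ln

    # the renamed entries are plain strings with no placeholder:
    # "model.image_newline"  -> "v.image_newline"
    # "model.view_seperator" -> "v.view_seperator"  (spelling as in the source)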

gguf-py/gguf/tensor_mapping.py

@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 from typing import Sequence
 
 from .constants import MODEL_ARCH, MODEL_TENSOR, MODEL_TENSORS, TENSOR_NAMES

tools/mtmd/clip-impl.h

@@ -86,8 +86,8 @@
 #define TN_MVLM_PROJ_MLP    "mm.model.mlp.%d.%s"
 #define TN_MVLM_PROJ_BLOCK  "mm.model.mb_block.%d.block.%d.%s"
 #define TN_MVLM_PROJ_PEG    "mm.model.peg.%d.%s"
-#define TN_IMAGE_NEWLINE    "model.image_newline"
-#define TN_IMAGE_SEPERATOR  "model.view_seperator"
+#define TN_IMAGE_NEWLINE    "v.image_newline"
+#define TN_IMAGE_SEPERATOR  "v.view_seperator"
 #define TN_MM_INP_NORM      "mm.input_norm.weight"
 #define TN_MM_INP_NORM_B    "mm.input_norm.bias"
 #define TN_MM_INP_PROJ      "mm.input_projection.weight" // gemma3
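
Background, not part of the commit: the %d/%s placeholders in the TN_* macros above are expanded printf-style (via the string_format helper visible in the loader hunk below) before the tensor is looked up. Python's % operator uses the same conversion rules, so the expansion can be sketched as:

    # illustrative only: printf-style expansion of a TN_* pattern
    TN_MVLM_PROJ_MLP = "mm.model.mlp.%d.%s"
    print(TN_MVLM_PROJ_MLP % (0, "weight"))   # -> mm.model.mlp.0.weight
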
@@ -443,7 +443,6 @@ static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
 // debugging
 //
-
 static std::string to_ne_string(const ggml_tensor * t) {
     std::string str;
     for (int i = 0; i < GGML_MAX_DIMS; ++i) {

tools/mtmd/clip.cpp

@@ -3706,22 +3706,19 @@ struct clip_model_loader {
                     layer.ff_down_w = get_tensor(string_format(TN_SAM_FFN_DOWN, il, "weight"));
                     layer.ff_down_b = get_tensor(string_format(TN_SAM_FFN_DOWN, il, "bias"));
                 }
                 model.neck_0_w = get_tensor(string_format(TN_SAM_NECK, 0, "weight"));
                 model.neck_1_b = get_tensor(string_format(TN_SAM_NECK, 1, "bias"));
                 model.neck_1_w = get_tensor(string_format(TN_SAM_NECK, 1, "weight"));
                 model.neck_2_w = get_tensor(string_format(TN_SAM_NECK, 2, "weight"));
                 model.neck_3_b = get_tensor(string_format(TN_SAM_NECK, 3, "bias"));
                 model.neck_3_w = get_tensor(string_format(TN_SAM_NECK, 3, "weight"));
                 model.net_2 = get_tensor(string_format(TN_SAM_NET, 2, "weight"));
                 model.net_3 = get_tensor(string_format(TN_SAM_NET, 3, "weight"));
-            }
-            model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false);
-            model.view_seperator = get_tensor(TN_IMAGE_SEPERATOR, false);
-            model.fc_w = get_tensor(string_format(TN_MM_PROJECTOR, "weight"));
-            model.fc_b = get_tensor(string_format(TN_MM_PROJECTOR, "bias"));
-            break;
+                model.image_newline = get_tensor(TN_IMAGE_NEWLINE);
+                model.view_seperator = get_tensor(TN_IMAGE_SEPERATOR);
+                model.fc_w = get_tensor(string_format(TN_MM_PROJECTOR, "weight"));
+                model.fc_b = get_tensor(string_format(TN_MM_PROJECTOR, "bias"));
+            } break;
         default:
             GGML_ASSERT(false && "unknown projector type");
     }
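
Aside from switching to the renamed TN_* constants, note the second change in this hunk: get_tensor(TN_IMAGE_NEWLINE, false) becomes get_tensor(TN_IMAGE_NEWLINE). Assuming the loader's usual get_tensor(name, required = true) signature, the image-newline and view-separator tensors are now required for the DeepSeek-OCR projector rather than silently skipped when absent. A sketch of that optional-vs-required pattern (illustrative only; the real loader returns a ggml_tensor pointer):

    # hypothetical stand-in for the gguf tensor table
    tensors = {"v.image_newline": object()}

    def get_tensor(name, required=True):
        t = tensors.get(name)
        if t is None and required:
            raise RuntimeError("missing required tensor: " + name)
        return t   # None is tolerated only when required=False

    get_tensor("v.image_newline")            # present: fine either way
    get_tensor("v.view_seperator", False)    # absent but optional: returns None
    # get_tensor("v.view_seperator")         # absent and required: raises
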
@@ -5843,11 +5840,9 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
         case PROJECTOR_TYPE_VOXTRAL:
         case PROJECTOR_TYPE_JANUS_PRO:
         case PROJECTOR_TYPE_COGVLM:
-            {
-                // do nothing
-            } break;
         case PROJECTOR_TYPE_DEEPSEEKOCR:
             {
+                // do nothing
             } break;
         case PROJECTOR_TYPE_LLAMA4:
             {