Compare commits
11 Commits
dd77a8f89e
...
3c74a9d77d
| Author | SHA1 | Date |
|---|---|---|
|
|
3c74a9d77d | |
|
|
f2e77e90a8 | |
|
|
873c825611 | |
|
|
82764d8f40 | |
|
|
21a4933042 | |
|
|
1e9d771e2c | |
|
|
aa4695c5e5 | |
|
|
547765a93e | |
|
|
9e209c5aee | |
|
|
6313acbef0 | |
|
|
b47e0cf21a |
|
|
@ -4258,9 +4258,7 @@ class Qwen2VLVisionModel(MmprojModel):
|
|||
yield from super().modify_tensors(data_torch, name, bid)
|
||||
|
||||
|
||||
@ModelBase.register("Qwen2_5OmniModel")
|
||||
class Qwen25OmniModel(Qwen2VLVisionModel):
|
||||
has_vision_encoder = True
|
||||
class Qwen25AudioModel(MmprojModel):
|
||||
has_audio_encoder = True
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
|
|
@ -4276,12 +4274,6 @@ class Qwen25OmniModel(Qwen2VLVisionModel):
|
|||
self.gguf_writer.add_audio_num_mel_bins(self.hparams_audio["num_mel_bins"])
|
||||
self.gguf_writer.add_audio_attention_layernorm_eps(self.hparams_audio.get("layer_norm_eps", 1e-5))
|
||||
|
||||
def get_vision_config(self) -> dict[str, Any] | None:
|
||||
return self.global_config["thinker_config"].get("vision_config")
|
||||
|
||||
def get_audio_config(self) -> dict[str, Any] | None:
|
||||
return self.global_config["thinker_config"].get("audio_config")
|
||||
|
||||
def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
|
||||
# SinusoidsPositionEmbedding
|
||||
assert self.hparams_audio is not None
|
||||
|
|
@ -4312,7 +4304,32 @@ class Qwen25OmniModel(Qwen2VLVisionModel):
|
|||
# this tensor is left unused in transformers code
|
||||
# https://github.com/huggingface/transformers/blob/6e3063422c4b1c014aa60c32b9254fd2902f0f28/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py#L1809
|
||||
return
|
||||
yield from super().modify_tensors(data_torch, name, bid)
|
||||
yield from MmprojModel.modify_tensors(self, data_torch, name, bid)
|
||||
|
||||
return # skip other tensors
|
||||
|
||||
|
||||
@ModelBase.register("Qwen2_5OmniModel")
|
||||
class Qwen25OmniModel(Qwen2VLVisionModel, Qwen25AudioModel):
|
||||
has_audio_encoder = True
|
||||
has_vision_encoder = True
|
||||
|
||||
def get_vision_config(self) -> dict[str, Any] | None:
|
||||
return self.global_config["thinker_config"].get("vision_config")
|
||||
|
||||
def get_audio_config(self) -> dict[str, Any] | None:
|
||||
return self.global_config["thinker_config"].get("audio_config")
|
||||
|
||||
def set_gguf_parameters(self):
|
||||
super().set_gguf_parameters()
|
||||
self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN25O)
|
||||
|
||||
def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
|
||||
if "visual." in name:
|
||||
yield from Qwen2VLVisionModel.modify_tensors(self, data_torch, name, bid)
|
||||
elif "audio_tower." in name:
|
||||
yield from Qwen25AudioModel.modify_tensors(self, data_torch, name, bid)
|
||||
return # skip other tensors
|
||||
|
||||
|
||||
@ModelBase.register("InternVisionModel")
|
||||
|
|
@ -4684,12 +4701,20 @@ class Qwen3Model(Qwen2Model):
|
|||
if self.is_rerank:
|
||||
self.gguf_writer.add_pooling_type(gguf.PoolingType.RANK)
|
||||
self.gguf_writer.add_classifier_output_labels(["yes", "no"])
|
||||
self.gguf_writer.add_chat_template([{
|
||||
"name": "rerank",
|
||||
"template": "<|im_start|>system\nJudge whether the Document meets the requirements based on the Query and the Instruct provided. Note that the answer can only be \"yes\" or \"no\".<|im_end|>\n"
|
||||
"<|im_start|>user\n<Instruct>: Given a web search query, retrieve relevant passages that answer the query\n<Query>: {query}\n<Document>: {document}<|im_end|>\n"
|
||||
"<|im_start|>assistant\n<think>\n\n</think>\n\n"
|
||||
}])
|
||||
self.gguf_writer.add_chat_template([
|
||||
{
|
||||
"name": "rerank",
|
||||
"template": "<|im_start|>system\nJudge whether the Document meets the requirements based on the Query and the Instruct provided. Note that the answer can only be \"yes\" or \"no\".<|im_end|>\n"
|
||||
"<|im_start|>user\n<Instruct>: Given a web search query, retrieve relevant passages that answer the query\n<Query>: {query}\n<Document>: {document}<|im_end|>\n"
|
||||
"<|im_start|>assistant\n<think>\n\n</think>\n\n",
|
||||
},
|
||||
{
|
||||
"name": "rerank_instruct",
|
||||
"template": "<|im_start|>system\nJudge whether the Document meets the requirements based on the Query and the Instruct provided. Note that the answer can only be \"yes\" or \"no\".<|im_end|>\n"
|
||||
"<|im_start|>user\n<Instruct>: {instruction}\n<Query>: {query}\n<Document>: {document}<|im_end|>\n"
|
||||
"<|im_start|>assistant\n<think>\n\n</think>\n\n",
|
||||
},
|
||||
])
|
||||
|
||||
def _get_cls_out_tensor(self, data_torch: Tensor) -> Tensor:
|
||||
# extract "yes" and "no" tokens from the output lm_head tensor
|
||||
|
|
@ -4816,7 +4841,10 @@ class RND1Model(Qwen2MoeModel):
|
|||
class Qwen3VLVisionModel(MmprojModel):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
assert self.hparams_vision is not None
|
||||
if self.hparams_vision is None:
|
||||
logger.info("No vision config found, skipping vision tensor processing")
|
||||
return
|
||||
|
||||
# Compute image_size if not present
|
||||
if "image_size" not in self.hparams_vision:
|
||||
# For Qwen3VL/Qwen3VLMoe, compute from num_position_embeddings
|
||||
|
|
@ -4837,7 +4865,9 @@ class Qwen3VLVisionModel(MmprojModel):
|
|||
|
||||
def set_gguf_parameters(self):
|
||||
super().set_gguf_parameters()
|
||||
self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN3VL)
|
||||
# in case mixed modalities, the arch will be handled by subclass
|
||||
if not self.has_audio_encoder:
|
||||
self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN3VL)
|
||||
self.gguf_writer.add_vision_use_gelu(True)
|
||||
|
||||
if self.hparams_vision is not None:
|
||||
|
|
@ -4925,11 +4955,64 @@ class Qwen3VLVisionModel(MmprojModel):
|
|||
return
|
||||
|
||||
if name.startswith("visual."):
|
||||
yield from super().modify_tensors(data_torch, name, bid)
|
||||
return
|
||||
yield from MmprojModel.modify_tensors(self, data_torch, name, bid)
|
||||
return # skip other tensors
|
||||
|
||||
# Fall back to parent class for other tensors
|
||||
yield from super().modify_tensors(data_torch, name, bid)
|
||||
|
||||
@ModelBase.register("Qwen3OmniMoeForConditionalGeneration")
|
||||
class Qwen3OmniMmprojModel(Qwen3VLVisionModel, Qwen25AudioModel):
|
||||
has_audio_encoder = True
|
||||
has_vision_encoder = True
|
||||
|
||||
def get_vision_config(self) -> dict[str, Any] | None:
|
||||
if self.has_vision_encoder:
|
||||
return self.global_config["thinker_config"].get("vision_config")
|
||||
else:
|
||||
return None
|
||||
|
||||
def get_audio_config(self) -> dict[str, Any] | None:
|
||||
if self.has_audio_encoder:
|
||||
return self.global_config["thinker_config"].get("audio_config")
|
||||
else:
|
||||
return None
|
||||
|
||||
def set_gguf_parameters(self):
|
||||
if self.has_vision_encoder:
|
||||
Qwen3VLVisionModel.set_gguf_parameters(self)
|
||||
self.gguf_writer.add_clip_vision_projector_type(gguf.VisionProjectorType.QWEN3VL)
|
||||
if self.has_audio_encoder:
|
||||
Qwen25AudioModel.set_gguf_parameters(self)
|
||||
self.gguf_writer.add_clip_audio_projector_type(gguf.VisionProjectorType.QWEN3A)
|
||||
|
||||
def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
|
||||
if "visual." in name:
|
||||
if not self.has_vision_encoder:
|
||||
raise ValueError(f"Model does not have vision encoder, but found tensor {name}")
|
||||
# need to transform vision tensor naming, so that modify_tensors() logic can be used correctly
|
||||
name = name.replace("thinker.visual.", "model.visual.")
|
||||
if ".merger_list." in name:
|
||||
name = name.replace(".merger_list.", ".deepstack_merger_list.")
|
||||
name = name.replace(".ln_q", ".norm")
|
||||
name = name.replace(".mlp.0", ".linear_fc1")
|
||||
name = name.replace(".mlp.2", ".linear_fc2")
|
||||
elif ".merger." in name:
|
||||
name = name.replace(".ln_q", ".norm")
|
||||
name = name.replace(".mlp.0", ".linear_fc1")
|
||||
name = name.replace(".mlp.2", ".linear_fc2")
|
||||
yield from Qwen3VLVisionModel.modify_tensors(self, data_torch, name, bid)
|
||||
elif "audio_tower." in name:
|
||||
if not self.has_audio_encoder:
|
||||
raise ValueError(f"Model does not have audio encoder, but found tensor {name}")
|
||||
if "conv2d" in name and name.endswith(".bias"):
|
||||
# transform conv2d bias [n_embd] --> [1, 1, n_embd]
|
||||
data_torch = data_torch.unsqueeze(-1).unsqueeze(-1)
|
||||
yield from Qwen25AudioModel.modify_tensors(self, data_torch, name, bid)
|
||||
|
||||
|
||||
@ModelBase.register("Qwen3ASRForConditionalGeneration")
|
||||
class Qwen3ASRMmprojModel(Qwen3OmniMmprojModel):
|
||||
has_audio_encoder = True
|
||||
has_vision_encoder = False
|
||||
|
||||
|
||||
@ModelBase.register("Glm4vForConditionalGeneration", "Glm4vMoeForConditionalGeneration", "GlmOcrForConditionalGeneration")
|
||||
|
|
@ -4992,6 +5075,8 @@ class Step3VLVisionModel(MmprojModel):
|
|||
def tensor_force_quant(self, name, new_name, bid, n_dims):
|
||||
if ".position_embd." in new_name:
|
||||
return gguf.GGMLQuantizationType.F32
|
||||
if ("mm.0." in new_name or "mm.1." in new_name) and new_name.endswith(".weight"):
|
||||
return gguf.GGMLQuantizationType.F16 if self.ftype == gguf.LlamaFileType.MOSTLY_F16 else gguf.GGMLQuantizationType.F32
|
||||
return super().tensor_force_quant(name, new_name, bid, n_dims)
|
||||
|
||||
def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
|
||||
|
|
@ -5030,9 +5115,10 @@ class Qwen3VLTextModel(Qwen3Model):
|
|||
|
||||
def set_gguf_parameters(self):
|
||||
super().set_gguf_parameters()
|
||||
|
||||
# Handle MRoPE (Multi-axis Rotary Position Embedding) for Qwen3-VL
|
||||
vision_config = self.hparams.get("vision_config", {})
|
||||
if "thinker_config" in self.hparams:
|
||||
vision_config = self.hparams["thinker_config"].get("vision_config", {})
|
||||
else:
|
||||
vision_config = self.hparams.get("vision_config", {})
|
||||
deepstack_layer_num = len(vision_config.get("deepstack_visual_indexes", []))
|
||||
self.gguf_writer.add_num_deepstack_layers(deepstack_layer_num)
|
||||
|
||||
|
|
@ -5101,6 +5187,70 @@ class Qwen3VLMoeTextModel(Qwen3MoeModel):
|
|||
yield from super().modify_tensors(data_torch, name, bid)
|
||||
|
||||
|
||||
@ModelBase.register("Qwen3OmniMoeForConditionalGeneration")
|
||||
class Qwen3OmniMoeTextModel(Qwen3VLMoeTextModel):
|
||||
model_arch = gguf.MODEL_ARCH.QWEN3VLMOE
|
||||
|
||||
def set_vocab(self):
|
||||
super().set_vocab()
|
||||
# correct BOS/EOS tokens
|
||||
with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f:
|
||||
tokenizer_config = json.load(f)
|
||||
added_tokens = tokenizer_config.get("added_tokens_decoder", {})
|
||||
for token_id, data in added_tokens.items():
|
||||
if data.get("content") == "<|im_end|>":
|
||||
self.gguf_writer.add_bos_token_id(int(token_id))
|
||||
self.gguf_writer.add_eos_token_id(int(token_id))
|
||||
break
|
||||
|
||||
def set_gguf_parameters(self):
|
||||
super().set_gguf_parameters()
|
||||
self.gguf_writer.add_num_deepstack_layers(0)
|
||||
|
||||
def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
|
||||
# Skip vision and audio tensors - they go in the mmproj file
|
||||
if "visual." in name or "audio_tower." in name \
|
||||
or "talker." in name or "code2wav." in name:
|
||||
return
|
||||
|
||||
name = name.replace("thinker.", "")
|
||||
yield from super().modify_tensors(data_torch, name, bid)
|
||||
|
||||
|
||||
@ModelBase.register("Qwen3ASRForConditionalGeneration")
|
||||
class Qwen3ASRTextModel(Qwen3VLTextModel):
|
||||
model_arch = gguf.MODEL_ARCH.QWEN3VL
|
||||
|
||||
def set_gguf_parameters(self):
|
||||
super().set_gguf_parameters()
|
||||
self.gguf_writer.add_num_deepstack_layers(0)
|
||||
|
||||
def set_vocab(self):
|
||||
super().set_vocab()
|
||||
# fix chat template, use correct chatml format
|
||||
self.gguf_writer.add_chat_template("{% for message in messages %}{{'<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\\n' }}{% endif %}")
|
||||
# correct BOS/EOS tokens
|
||||
with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f:
|
||||
tokenizer_config = json.load(f)
|
||||
added_tokens = tokenizer_config.get("added_tokens_decoder", {})
|
||||
for token_id, data in added_tokens.items():
|
||||
if data.get("content") == "<|im_end|>":
|
||||
self.gguf_writer.add_bos_token_id(int(token_id))
|
||||
self.gguf_writer.add_eos_token_id(int(token_id))
|
||||
break
|
||||
|
||||
def modify_tensors(self, data_torch, name, bid):
|
||||
# qwen3-omni
|
||||
name = name.replace("thinker.", "")
|
||||
|
||||
# Skip vision and audio tensors - they go in the mmproj file
|
||||
if "visual." in name or "audio_tower." in name \
|
||||
or "talker." in name or "code2wav." in name:
|
||||
return
|
||||
|
||||
yield from super().modify_tensors(data_torch, name, bid)
|
||||
|
||||
|
||||
class _LinearAttentionVReorderBase(Qwen3NextModel):
|
||||
model_arch = gguf.MODEL_ARCH.QWEN3NEXT # overridden by subclasses
|
||||
"""reorders V heads from grouped to tiled order for ggml broadcast
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ Adding a model requires few steps:
|
|||
1. Convert the model to GGUF
|
||||
2. Define the model architecture in `llama.cpp`
|
||||
3. Build the GGML graph implementation
|
||||
4. Optional: Add multimodal encoder implementation
|
||||
|
||||
After following these steps, you can open PR.
|
||||
|
||||
|
|
@ -114,6 +115,21 @@ Some `ggml` backends do not support all operations. Backend implementations can
|
|||
|
||||
Note: to debug the inference graph: you can use [llama-eval-callback](/examples/eval-callback/).
|
||||
|
||||
### 4. Optional: Add multimodal encoder implementation
|
||||
|
||||
If the new model supports multimodal inputs, you will need to add a new encoder definition in `libmtmd`. You can find more information about llama.cpp's multimodal support in [the docs](../multimodal.md) and in the `tools/mtmd` source directory.
|
||||
|
||||
1. In the conversion script, make sure you add a subclass that extends `MmprojModel` or another class that inherits from the same base class.
|
||||
2. Add the encoder definition in `clip.cpp`.
|
||||
3. Implement the preprocessor in `mtmd.cpp`. In most cases, you can reuse an existing preprocessor.
|
||||
4. Implement the encoder GGML graph, either in a dedicated file if the model is truly different from existing ones, or by reusing an existing implementation (for example: siglip, pixtral, or qwen) and adding a model-specific projector.
|
||||
|
||||
Note:
|
||||
- Many multimodal encoders are based on models that are already supported. Make sure to read the existing encoder definitions in `tools/mtmd/models` before adding a new one. In `libmtmd`, it is generally better to extend an existing model than to duplicate code.
|
||||
- To debug the multimodal preprocessor and encoder, you can use [llama-mtmd-debug](tools/mtmd/debug/mtmd-debug.cpp).
|
||||
- Adding a model-specific API or CLI is an anti-pattern in `libmtmd`. The goal of `libmtmd` is to provide an easy-to-use, model-agnostic library for multimodal pipeline.
|
||||
- In most cases, `llama-mtmd-cli` should not be modified. If a model requires a specific prompt, either let the user provide it or bake it into the Jinja chat template.
|
||||
|
||||
## GGUF specification
|
||||
|
||||
https://github.com/ggml-org/ggml/blob/master/docs/gguf.md
|
||||
|
|
|
|||
|
|
@ -94,6 +94,11 @@ NOTE: some models may require large context window, for example: `-c 8192`
|
|||
# Moondream2 20250414 version
|
||||
(tool_name) -hf ggml-org/moondream2-20250414-GGUF
|
||||
|
||||
# Gemma 4
|
||||
(tool_name) -hf ggml-org/gemma-4-E2B-it-GGUF
|
||||
(tool_name) -hf ggml-org/gemma-4-E4B-it-GGUF
|
||||
(tool_name) -hf ggml-org/gemma-4-26B-A4B-it-GGUF
|
||||
(tool_name) -hf ggml-org/gemma-4-31B-it-GGUF
|
||||
```
|
||||
|
||||
**Audio models**:
|
||||
|
|
@ -118,6 +123,11 @@ NOTE: some models may require large context window, for example: `-c 8192`
|
|||
# Capabilities: audio input, vision input
|
||||
(tool_name) -hf ggml-org/Qwen2.5-Omni-3B-GGUF
|
||||
(tool_name) -hf ggml-org/Qwen2.5-Omni-7B-GGUF
|
||||
|
||||
# Gemma 4
|
||||
# Capabilities: audio input, vision input
|
||||
(tool_name) -hf ggml-org/gemma-4-E2B-it-GGUF
|
||||
(tool_name) -hf ggml-org/gemma-4-E4B-it-GGUF
|
||||
```
|
||||
|
||||
## Finding more models:
|
||||
|
|
|
|||
|
|
@ -134,8 +134,9 @@ static void ssm_conv_f32_cuda(const float * src0, const float * src1, const int
|
|||
switch (nc) {
|
||||
case 3: launch_kernel(std::integral_constant<int, 3>{}); break;
|
||||
case 4: launch_kernel(std::integral_constant<int, 4>{}); break;
|
||||
case 5: launch_kernel(std::integral_constant<int, 5>{}); break;
|
||||
case 9: launch_kernel(std::integral_constant<int, 9>{}); break;
|
||||
default: GGML_ABORT("Only support kernel sizes 3, 4, 9 right now.");
|
||||
default: GGML_ABORT("Only support kernel sizes 3, 4, 5, 9 right now.");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -488,7 +488,7 @@ static void dequantize_row_nvfp4_sycl(const void * vx, dst_t * y, const int64_t
|
|||
const int nb = k / QK_NVFP4;
|
||||
stream->parallel_for(
|
||||
sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)),
|
||||
[=](sycl::nd_item<3> item_ct1) {
|
||||
[=](sycl::nd_item<3> /*item_ct1*/) {
|
||||
dequantize_block_nvfp4(vx, y, k);
|
||||
});
|
||||
}
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@
|
|||
#define GGML_SYCL_DEQUANTIZE_HPP
|
||||
|
||||
#include "common.hpp"
|
||||
#include "convert.hpp"
|
||||
|
||||
typedef void (*dequantize_kernel_t)(const void * vx, const int64_t ib, const int iqs, dfloat2 & v);
|
||||
typedef void (*dequantize_kernel_t_reorder)(const void *d, const int64_t ib, const void *qs,
|
||||
|
|
|
|||
|
|
@ -355,7 +355,7 @@ static void acc_f32_sycl(const float *x, const float *y, float *dst,
|
|||
const int num_blocks = (n_elements + SYCL_ACC_BLOCK_SIZE - 1) / SYCL_ACC_BLOCK_SIZE;
|
||||
stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_ACC_BLOCK_SIZE),
|
||||
sycl::range<3>(1, 1, SYCL_ACC_BLOCK_SIZE)),
|
||||
[=](sycl::nd_item<3> item_ct1) {
|
||||
[=](sycl::nd_item<3> /*item_ct1*/) {
|
||||
acc_f32(x, y, dst, n_elements, ne10, ne11, ne12, ne13, s1, s2, s3, offset);
|
||||
});
|
||||
}
|
||||
|
|
|
|||
|
|
@ -176,14 +176,12 @@ static void launch_gated_delta_net(const float * q_d,
|
|||
const sycl::uint3 neqk1_magic = init_fastdiv_values(neqk1);
|
||||
const sycl::uint3 rq3_magic = init_fastdiv_values(rq3);
|
||||
|
||||
int cc = ggml_sycl_info().devices[ggml_sycl_get_device()].cc;
|
||||
|
||||
switch (S_v) {
|
||||
case 16:
|
||||
{
|
||||
constexpr int sv = 16;
|
||||
stream->parallel_for(sycl::nd_range<3>(grid_dims * block_dims, block_dims),
|
||||
[=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
|
||||
[=](sycl::nd_item<3> /*item_ct1*/) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
|
||||
gated_delta_net_sycl<sv, KDA>(q_d, k_d, v_d, g_d, b_d, s_d, dst_d, H, n_tokens,
|
||||
n_seqs, sq1, sq2, sq3, sv1, sv2, sv3, sb1, sb2,
|
||||
sb3, neqk1_magic, rq3_magic, scale);
|
||||
|
|
@ -194,7 +192,7 @@ static void launch_gated_delta_net(const float * q_d,
|
|||
{
|
||||
constexpr int sv = 32;
|
||||
stream->parallel_for(sycl::nd_range<3>(grid_dims * block_dims, block_dims),
|
||||
[=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
|
||||
[=](sycl::nd_item<3> /*item_ct1*/) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
|
||||
gated_delta_net_sycl<sv, KDA>(q_d, k_d, v_d, g_d, b_d, s_d, dst_d, H, n_tokens,
|
||||
n_seqs, sq1, sq2, sq3, sv1, sv2, sv3, sb1, sb2,
|
||||
sb3, neqk1_magic, rq3_magic, scale);
|
||||
|
|
@ -205,7 +203,7 @@ static void launch_gated_delta_net(const float * q_d,
|
|||
{
|
||||
constexpr int sv = 64;
|
||||
stream->parallel_for(sycl::nd_range<3>(grid_dims * block_dims, block_dims),
|
||||
[=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
|
||||
[=](sycl::nd_item<3> /*item_ct1*/) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
|
||||
gated_delta_net_sycl<sv, KDA>(
|
||||
q_d, k_d, v_d, g_d, b_d, s_d, dst_d, H, n_tokens, n_seqs, sq1, sq2,
|
||||
sq3, sv1, sv2, sv3, sb1, sb2, sb3, neqk1_magic, rq3_magic, scale);
|
||||
|
|
@ -217,7 +215,7 @@ static void launch_gated_delta_net(const float * q_d,
|
|||
{
|
||||
constexpr int sv = 128;
|
||||
stream->parallel_for(sycl::nd_range<3>(grid_dims * block_dims, block_dims),
|
||||
[=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
|
||||
[=](sycl::nd_item<3> /*item_ct1*/) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
|
||||
gated_delta_net_sycl<sv, KDA>(
|
||||
q_d, k_d, v_d, g_d, b_d, s_d, dst_d, H, n_tokens, n_seqs, sq1, sq2,
|
||||
sq3, sv1, sv2, sv3, sb1, sb2, sb3, neqk1_magic, rq3_magic, scale);
|
||||
|
|
|
|||
|
|
@ -4727,12 +4727,19 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g
|
|||
struct ggml_tensor * a = op->src[0];
|
||||
struct ggml_tensor * b = op->src[1];
|
||||
|
||||
// disable Q1_0 until implementation
|
||||
if (a->type == GGML_TYPE_Q1_0 || b->type == GGML_TYPE_Q1_0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (a->ne[3] != b->ne[3]) {
|
||||
return false;
|
||||
}
|
||||
|
||||
ggml_type src0_type = op->src[0]->type;
|
||||
|
||||
|
||||
|
||||
// TODO: The configuration below needs more work to be supported with oneDNN
|
||||
if (ggml_is_permuted(a) && !ggml_is_contiguous(a) &&
|
||||
a->ne[2] > 1 && a->ne[3] > 1 && src0_type == GGML_TYPE_F16) {
|
||||
|
|
|
|||
|
|
@ -272,7 +272,7 @@ static void upscale_f32_sycl(const float * x,
|
|||
sycl::nd_range<3>(
|
||||
sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_UPSCALE_BLOCK_SIZE),
|
||||
sycl::range<3>(1, 1, SYCL_UPSCALE_BLOCK_SIZE)),
|
||||
[=](sycl::nd_item<3> item_ct1) {
|
||||
[=](sycl::nd_item<3> /*item_ct1*/) {
|
||||
upscale_f32(x, dst, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3);
|
||||
});
|
||||
}
|
||||
|
|
@ -304,7 +304,7 @@ static void upscale_f32_bilinear_sycl(const float * x,
|
|||
sycl::nd_range<3>(
|
||||
sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_UPSCALE_BLOCK_SIZE),
|
||||
sycl::range<3>(1, 1, SYCL_UPSCALE_BLOCK_SIZE)),
|
||||
[=](sycl::nd_item<3> item_ct1) {
|
||||
[=](sycl::nd_item<3> /*item_ct1*/) {
|
||||
upscale_f32_bilinear_antialias(
|
||||
x, dst, nb00, nb01, nb02, nb03, ne00_src, ne01_src, ne10_dst, ne11_dst,
|
||||
ne12_dst, ne13_dst, sf0, sf1, sf2, sf3, pixel_offset);
|
||||
|
|
@ -314,7 +314,7 @@ static void upscale_f32_bilinear_sycl(const float * x,
|
|||
sycl::nd_range<3>(
|
||||
sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_UPSCALE_BLOCK_SIZE),
|
||||
sycl::range<3>(1, 1, SYCL_UPSCALE_BLOCK_SIZE)),
|
||||
[=](sycl::nd_item<3> item_ct1) {
|
||||
[=](sycl::nd_item<3> /*item_ct1*/) {
|
||||
upscale_f32_bilinear(
|
||||
x, dst, nb00, nb01, nb02, nb03, ne00_src, ne01_src, ne10_dst, ne11_dst, ne12_dst,
|
||||
ne13_dst, sf0, sf1, sf2, sf3, pixel_offset);
|
||||
|
|
@ -349,7 +349,7 @@ static void upscale_f32_bicubic_sycl(const float * x,
|
|||
sycl::nd_range<3>(
|
||||
sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_UPSCALE_BLOCK_SIZE),
|
||||
sycl::range<3>(1, 1, SYCL_UPSCALE_BLOCK_SIZE)),
|
||||
[=](sycl::nd_item<3> item_ct1) {
|
||||
[=](sycl::nd_item<3> /*item_ct1*/) {
|
||||
upscale_f32_bicubic(
|
||||
x, dst, nb00, nb01, nb02, nb03, ne00_src, ne01_src, ne10_dst, ne11_dst,
|
||||
ne12_dst, ne13_dst, sf0, sf1, sf2, sf3, pixel_offset);
|
||||
|
|
|
|||
|
|
@ -798,6 +798,8 @@ class MODEL_TENSOR(IntEnum):
|
|||
A_ENC_INP_PROJ = auto() # gemma4
|
||||
A_ENC_CONV1D = auto()
|
||||
A_ENC_CONV1D_NORM = auto() # gemma3n
|
||||
A_ENC_CONV2D = auto()
|
||||
A_ENC_CONV_OUT = auto()
|
||||
A_PRE_NORM = auto()
|
||||
A_POST_NORM = auto()
|
||||
A_ENC_LAYER_PRE_NORM = auto() # gemma3n
|
||||
|
|
@ -1280,6 +1282,8 @@ TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
|
|||
MODEL_TENSOR.A_ENC_EMBD_TO_LOGITS: "a.embd_to_logits",
|
||||
MODEL_TENSOR.A_ENC_INP_PROJ: "a.input_projection",
|
||||
MODEL_TENSOR.A_ENC_CONV1D: "a.conv1d.{bid}",
|
||||
MODEL_TENSOR.A_ENC_CONV2D: "a.conv2d.{bid}",
|
||||
MODEL_TENSOR.A_ENC_CONV_OUT: "a.conv_out",
|
||||
MODEL_TENSOR.A_ENC_CONV1D_NORM: "a.conv1d.{bid}.norm",
|
||||
MODEL_TENSOR.A_PRE_NORM: "a.pre_ln",
|
||||
MODEL_TENSOR.A_POST_NORM: "a.post_ln",
|
||||
|
|
@ -1426,6 +1430,8 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
|
|||
MODEL_TENSOR.A_ENC_EMBD_TO_LOGITS,
|
||||
MODEL_TENSOR.A_ENC_INP_PROJ,
|
||||
MODEL_TENSOR.A_ENC_CONV1D,
|
||||
MODEL_TENSOR.A_ENC_CONV2D,
|
||||
MODEL_TENSOR.A_ENC_CONV_OUT,
|
||||
MODEL_TENSOR.A_ENC_CONV1D_NORM,
|
||||
MODEL_TENSOR.A_PRE_NORM,
|
||||
MODEL_TENSOR.A_POST_NORM,
|
||||
|
|
@ -4112,6 +4118,7 @@ class VisionProjectorType:
|
|||
ULTRAVOX = "ultravox"
|
||||
INTERNVL = "internvl"
|
||||
QWEN2A = "qwen2a" # audio
|
||||
QWEN3A = "qwen3a" # audio
|
||||
GLMA = "glma" # audio
|
||||
QWEN25O = "qwen2.5o" # omni
|
||||
VOXTRAL = "voxtral"
|
||||
|
|
|
|||
|
|
@ -1892,6 +1892,14 @@ class TensorNameMap:
|
|||
"conformer.subsample_conv_projection.input_proj_linear", # gemma4
|
||||
),
|
||||
|
||||
MODEL_TENSOR.A_ENC_CONV2D: (
|
||||
"audio_tower.conv2d{bid}", # qwen3omni
|
||||
),
|
||||
|
||||
MODEL_TENSOR.A_ENC_CONV_OUT: (
|
||||
"audio_tower.conv_out", # qwen3omni
|
||||
),
|
||||
|
||||
MODEL_TENSOR.A_PRE_NORM: (),
|
||||
|
||||
MODEL_TENSOR.A_POST_NORM: (
|
||||
|
|
@ -2042,7 +2050,8 @@ class TensorNameMap:
|
|||
|
||||
MODEL_TENSOR.A_MMPROJ: (
|
||||
"audio.multi_modal_projector.linear_{bid}", # ultravox, meralion
|
||||
"audio_adapter.model.{bid}" # lfm2
|
||||
"audio_adapter.model.{bid}", # lfm2
|
||||
"audio_tower.proj{bid}", # qwen3omni
|
||||
),
|
||||
|
||||
MODEL_TENSOR.A_MMPROJ_FC: (
|
||||
|
|
|
|||
|
|
@ -88,6 +88,11 @@ static gguf_context_ptr get_gguf_ctx(const llm_arch arch, const bool moe) {
|
|||
uint32_t n_layer = 2;
|
||||
if (arch == LLM_ARCH_LLAMA4) {
|
||||
n_layer = 4; // hparams.n_no_rope_layer_step is hard-coded to 4
|
||||
} else if (arch == LLM_ARCH_GEMMA4) {
|
||||
n_embd = 128;
|
||||
n_head = 2;
|
||||
n_ff = 192;
|
||||
n_layer = 5; // need at least 5 for swa_pattern (every 5th is full_attention)
|
||||
} else if (arch == LLM_ARCH_GEMMA3N) {
|
||||
n_embd = 64;
|
||||
n_head = 1;
|
||||
|
|
@ -169,7 +174,15 @@ static gguf_context_ptr get_gguf_ctx(const llm_arch arch, const bool moe) {
|
|||
ms.add_kv(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, uint32_t(8));
|
||||
ms.add_kv(LLM_KV_ATTENTION_SLIDING_WINDOW, n_ctx/8);
|
||||
|
||||
if (arch == LLM_ARCH_MIMO2 || arch == LLM_ARCH_STEP35) {
|
||||
if (arch == LLM_ARCH_GEMMA4) {
|
||||
ms.add_kv(LLM_KV_EMBEDDING_LENGTH_PER_LAYER, n_embd/2);
|
||||
ms.add_kv(LLM_KV_ATTENTION_SHARED_KV_LAYERS, uint32_t(0));
|
||||
ms.add_kv(LLM_KV_ATTENTION_KEY_LENGTH_SWA, n_embd_head);
|
||||
ms.add_kv(LLM_KV_ATTENTION_VALUE_LENGTH_SWA, n_embd_head);
|
||||
ms.add_kv(LLM_KV_ROPE_FREQ_BASE_SWA, 10000.0f);
|
||||
// SWA pattern: every 5th layer is full attention (matches E2B layer_types)
|
||||
ms.add_kv(LLM_KV_ATTENTION_SLIDING_WINDOW_PATTERN, uint32_t(5));
|
||||
} else if (arch == LLM_ARCH_MIMO2 || arch == LLM_ARCH_STEP35) {
|
||||
std::vector<uint32_t> pattern;
|
||||
pattern.reserve(n_layer);
|
||||
for (uint32_t il = 0; il < n_layer; il++) {
|
||||
|
|
@ -429,6 +442,9 @@ static int save_models(const llm_arch target_arch, const size_t seed, const ggml
|
|||
if (target_arch != LLM_ARCH_UNKNOWN && arch != target_arch) {
|
||||
continue;
|
||||
}
|
||||
if (arch == LLM_ARCH_GEMMA4) {
|
||||
continue; // FIXME: ISWA KV cache initialization needs more fixture params
|
||||
}
|
||||
for (bool moe : {false, true}) {
|
||||
if (moe && !moe_implemented(arch)) {
|
||||
continue;
|
||||
|
|
@ -510,6 +526,9 @@ static int test_backends(const llm_arch target_arch, const size_t seed, const gg
|
|||
if (target_arch != LLM_ARCH_UNKNOWN && arch != target_arch) {
|
||||
continue;
|
||||
}
|
||||
if (arch == LLM_ARCH_GEMMA4) {
|
||||
continue; // FIXME: ISWA KV cache initialization needs more fixture params
|
||||
}
|
||||
|
||||
const bool encode = arch == LLM_ARCH_T5 || arch == LLM_ARCH_DREAM || arch == LLM_ARCH_LLADA || arch == LLM_ARCH_LLADA_MOE || arch == LLM_ARCH_RND1;
|
||||
for (bool moe : {false, true}) {
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ add_library(mtmd
|
|||
models/cogvlm.cpp
|
||||
models/conformer.cpp
|
||||
models/dotsocr.cpp
|
||||
models/gemma4a.cpp
|
||||
models/gemma4v.cpp
|
||||
models/glm4v.cpp
|
||||
models/hunyuanocr.cpp
|
||||
|
|
@ -32,6 +33,7 @@ add_library(mtmd
|
|||
models/pixtral.cpp
|
||||
models/qwen2vl.cpp
|
||||
models/qwen3vl.cpp
|
||||
models/qwen3a.cpp
|
||||
models/step3vl.cpp
|
||||
models/siglip.cpp
|
||||
models/whisper-enc.cpp
|
||||
|
|
|
|||
|
|
@ -135,6 +135,8 @@
|
|||
|
||||
// ultravox
|
||||
#define TN_CONV1D "a.conv1d.%d.%s"
|
||||
#define TN_CONV2D "a.conv2d.%d.%s"
|
||||
#define TN_CONV_OUT "a.conv_out.%s"
|
||||
#define TN_MM_AUDIO_MLP "mm.a.mlp.%d.%s"
|
||||
#define TN_MM_AUDIO_FC "mm.a.fc.%s" // fully connected layer
|
||||
#define TN_MM_NORM_PRE "mm.a.norm_pre.%s"
|
||||
|
|
@ -181,6 +183,21 @@
|
|||
#define TN_CONV_PW1 "%s.blk.%d.conv_pw1.%s"
|
||||
#define TN_CONV_PW2 "%s.blk.%d.conv_pw2.%s"
|
||||
|
||||
// gemma4 audio conformer
|
||||
#define TN_A_MM_INP_PROJ "mm.a.input_projection.%s"
|
||||
#define TN_A_MM_SOFT_EMB_N "mm.a.soft_emb_norm.%s"
|
||||
#define TN_A_INP_PROJ "a.input_projection.%s"
|
||||
#define TN_A_CONV1D "a.conv1d.%d.%s"
|
||||
#define TN_A_CONV1D_NORM "a.conv1d.%d.norm.%s"
|
||||
#define TN_A_OUT_PROJ "a.pre_encode.out.%s"
|
||||
#define TN_A_ATTN_PRE_NORM "%s.blk.%d.attn_pre_norm.%s"
|
||||
#define TN_A_ATTN_POST_NORM "%s.blk.%d.attn_post_norm.%s"
|
||||
#define TN_A_ATTN_K_REL "%s.blk.%d.attn_k_rel.%s"
|
||||
#define TN_A_PER_DIM_SCALE "%s.blk.%d.per_dim_scale.%s"
|
||||
#define TN_A_PER_DIM_K_SCALE "%s.blk.%d.per_dim_k_scale.%s"
|
||||
#define TN_A_FFN_POST_NORM "%s.blk.%d.ffn_post_norm.%s"
|
||||
#define TN_A_FFN_POST_NORM_1 "%s.blk.%d.ffn_post_norm_1.%s"
|
||||
|
||||
// mobilenetv5 (gemma3n) definitions
|
||||
#define TN_MNV5_STEM_CONV "v.conv_stem.conv.weight"
|
||||
#define TN_MNV5_STEM_BIAS "v.conv_stem.conv.bias"
|
||||
|
|
@ -256,6 +273,7 @@ enum projector_type {
|
|||
PROJECTOR_TYPE_INTERNVL,
|
||||
PROJECTOR_TYPE_LLAMA4,
|
||||
PROJECTOR_TYPE_QWEN2A,
|
||||
PROJECTOR_TYPE_QWEN3A,
|
||||
PROJECTOR_TYPE_GLMA,
|
||||
PROJECTOR_TYPE_QWEN25O, // will be replaced by QWEN2A or QWEN25VL depending on clip_ctx
|
||||
PROJECTOR_TYPE_VOXTRAL,
|
||||
|
|
@ -300,6 +318,7 @@ static std::map<projector_type, std::string> PROJECTOR_TYPE_NAMES = {
|
|||
{ PROJECTOR_TYPE_INTERNVL, "internvl"},
|
||||
{ PROJECTOR_TYPE_LLAMA4, "llama4"},
|
||||
{ PROJECTOR_TYPE_QWEN2A, "qwen2a"},
|
||||
{ PROJECTOR_TYPE_QWEN3A, "qwen3a"},
|
||||
{ PROJECTOR_TYPE_GLMA, "glma"},
|
||||
{ PROJECTOR_TYPE_QWEN25O, "qwen2.5o"},
|
||||
{ PROJECTOR_TYPE_VOXTRAL, "voxtral"},
|
||||
|
|
|
|||
|
|
@ -217,6 +217,13 @@ struct clip_layer {
|
|||
ggml_tensor * conv_pw2_w = nullptr;
|
||||
ggml_tensor * conv_pw2_b = nullptr;
|
||||
|
||||
// gemma4 audio conformer per-layer
|
||||
ggml_tensor * attn_pre_norm_w = nullptr;
|
||||
ggml_tensor * attn_k_rel_w = nullptr;
|
||||
ggml_tensor * per_dim_scale_w = nullptr;
|
||||
ggml_tensor * per_dim_k_scale_w = nullptr;
|
||||
ggml_tensor * ff_post_norm_1_w = nullptr;
|
||||
|
||||
bool has_deepstack() const {
|
||||
return deepstack_fc1_w != nullptr;
|
||||
}
|
||||
|
|
@ -406,10 +413,20 @@ struct clip_model {
|
|||
ggml_tensor * conv1d_1_b = nullptr;
|
||||
ggml_tensor * conv1d_2_w = nullptr;
|
||||
ggml_tensor * conv1d_2_b = nullptr;
|
||||
ggml_tensor * conv_out_w = nullptr;
|
||||
ggml_tensor * conv_out_b = nullptr;
|
||||
ggml_tensor * mm_norm_pre_w = nullptr;
|
||||
ggml_tensor * mm_norm_pre_b = nullptr;
|
||||
ggml_tensor * mm_norm_mid_w = nullptr;
|
||||
|
||||
// qwen3a
|
||||
ggml_tensor * conv2d_1_w = nullptr;
|
||||
ggml_tensor * conv2d_1_b = nullptr;
|
||||
ggml_tensor * conv2d_2_w = nullptr;
|
||||
ggml_tensor * conv2d_2_b = nullptr;
|
||||
ggml_tensor * conv2d_3_w = nullptr;
|
||||
ggml_tensor * conv2d_3_b = nullptr;
|
||||
|
||||
// cogvlm
|
||||
ggml_tensor * mm_post_fc_norm_w = nullptr;
|
||||
ggml_tensor * mm_post_fc_norm_b = nullptr;
|
||||
|
|
@ -459,6 +476,15 @@ struct clip_model {
|
|||
};
|
||||
std::map<std::string, clamp_info> clamp_info_map;
|
||||
|
||||
// gemma4 audio conformer
|
||||
std::array<ggml_tensor *, 2> sscp_conv_w = {nullptr};
|
||||
std::array<ggml_tensor *, 2> sscp_conv_b = {nullptr};
|
||||
std::array<ggml_tensor *, 2> sscp_norm_w = {nullptr};
|
||||
ggml_tensor * sscp_inp_proj_w = nullptr;
|
||||
ggml_tensor * sscp_inp_proj_b = nullptr;
|
||||
ggml_tensor * audio_out_proj_w = nullptr;
|
||||
ggml_tensor * audio_out_proj_b = nullptr;
|
||||
|
||||
bool audio_has_avgpool() const {
|
||||
return proj_type == PROJECTOR_TYPE_QWEN2A
|
||||
|| proj_type == PROJECTOR_TYPE_VOXTRAL
|
||||
|
|
|
|||
|
|
@ -931,10 +931,18 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
|
|||
{
|
||||
builder = std::make_unique<clip_graph_conformer>(ctx, img);
|
||||
} break;
|
||||
case PROJECTOR_TYPE_GEMMA4A:
|
||||
{
|
||||
builder = std::make_unique<clip_graph_gemma4a>(ctx, img);
|
||||
} break;
|
||||
case PROJECTOR_TYPE_GLM4V:
|
||||
{
|
||||
builder = std::make_unique<clip_graph_glm4v>(ctx, img);
|
||||
} break;
|
||||
case PROJECTOR_TYPE_QWEN3A:
|
||||
{
|
||||
builder = std::make_unique<clip_graph_qwen3a>(ctx, img);
|
||||
} break;
|
||||
case PROJECTOR_TYPE_YOUTUVL:
|
||||
{
|
||||
builder = std::make_unique<clip_graph_youtuvl>(ctx, img);
|
||||
|
|
@ -1398,6 +1406,7 @@ struct clip_model_loader {
|
|||
} break;
|
||||
case PROJECTOR_TYPE_ULTRAVOX:
|
||||
case PROJECTOR_TYPE_QWEN2A:
|
||||
case PROJECTOR_TYPE_QWEN3A:
|
||||
case PROJECTOR_TYPE_GLMA:
|
||||
case PROJECTOR_TYPE_VOXTRAL:
|
||||
case PROJECTOR_TYPE_MERALION:
|
||||
|
|
@ -1459,6 +1468,16 @@ struct clip_model_loader {
|
|||
hparams.audio_window_len = 400;
|
||||
hparams.audio_hop_len = 160;
|
||||
} break;
|
||||
case PROJECTOR_TYPE_GEMMA4A:
|
||||
{
|
||||
// Gemma4 feature_extraction_gemma4.py:
|
||||
// frame_length_ms=20 -> 320 samples, n_fft=512, hop=10ms -> 160
|
||||
hparams.audio_chunk_len = 0; // no fixed-length padding
|
||||
hparams.audio_sample_rate = 16000;
|
||||
hparams.audio_n_fft = 512;
|
||||
hparams.audio_window_len = 320; // 20ms frame (NOT 25ms/400)
|
||||
hparams.audio_hop_len = 160;
|
||||
} break;
|
||||
case PROJECTOR_TYPE_JANUS_PRO:
|
||||
{
|
||||
hparams.image_pad_color = {127, 127, 127};
|
||||
|
|
@ -1561,16 +1580,21 @@ struct clip_model_loader {
|
|||
}
|
||||
|
||||
// helper function
|
||||
std::unordered_set<std::string> loaded_tensor_names;
|
||||
auto get_tensor = [&](const std::string & name, bool required = true) {
|
||||
// Each tensor should only be loaded once; duplicates indicate a bug
|
||||
if (loaded_tensor_names.count(name)) {
|
||||
throw std::runtime_error(string_format("%s: tensor already loaded: %s\n", __func__, name.c_str()));
|
||||
}
|
||||
ggml_tensor * cur = ggml_get_tensor(ctx_meta.get(), name.c_str());
|
||||
if (!cur && required) {
|
||||
throw std::runtime_error(string_format("%s: unable to find tensor %s\n", __func__, name.c_str()));
|
||||
}
|
||||
if (cur) {
|
||||
tensors_to_load.push_back(cur);
|
||||
// add tensors to context
|
||||
ggml_tensor * data_tensor = ggml_dup_tensor(ctx_clip.ctx_data.get(), cur);
|
||||
ggml_set_name(data_tensor, cur->name);
|
||||
loaded_tensor_names.insert(name);
|
||||
cur = data_tensor;
|
||||
}
|
||||
return cur;
|
||||
|
|
@ -2053,6 +2077,20 @@ struct clip_model_loader {
|
|||
model.mm_fc_w = get_tensor(string_format(TN_MM_AUDIO_FC, "weight"));
|
||||
model.mm_fc_b = get_tensor(string_format(TN_MM_AUDIO_FC, "bias"));
|
||||
} break;
|
||||
case PROJECTOR_TYPE_QWEN3A:
|
||||
{
|
||||
model.conv2d_1_w = get_tensor(string_format(TN_CONV2D, 1, "weight"));
|
||||
model.conv2d_1_b = get_tensor(string_format(TN_CONV2D, 1, "bias"));
|
||||
model.conv2d_2_w = get_tensor(string_format(TN_CONV2D, 2, "weight"));
|
||||
model.conv2d_2_b = get_tensor(string_format(TN_CONV2D, 2, "bias"));
|
||||
model.conv2d_3_w = get_tensor(string_format(TN_CONV2D, 3, "weight"));
|
||||
model.conv2d_3_b = get_tensor(string_format(TN_CONV2D, 3, "bias"));
|
||||
model.conv_out_w = get_tensor(string_format(TN_CONV_OUT, "weight")); // no bias
|
||||
model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
|
||||
model.mm_1_b = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "bias"));
|
||||
model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
|
||||
model.mm_2_b = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "bias"));
|
||||
} break;
|
||||
case PROJECTOR_TYPE_VOXTRAL:
|
||||
{
|
||||
model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
|
||||
|
|
@ -2186,6 +2224,76 @@ struct clip_model_loader {
|
|||
model.mm_fc_w = get_tensor(string_format(TN_MM_PROJECTOR, "weight"));
|
||||
model.mm_fc_b = get_tensor(string_format(TN_MM_PROJECTOR, "bias"));
|
||||
} break;
|
||||
case PROJECTOR_TYPE_GEMMA4A:
|
||||
{
|
||||
for (int i = 0; i < 2; i++) {
|
||||
model.sscp_conv_w[i] = get_tensor(string_format(TN_A_CONV1D, i, "weight"));
|
||||
model.sscp_conv_b[i] = get_tensor(string_format(TN_A_CONV1D, i, "bias"), false);
|
||||
model.sscp_norm_w[i] = get_tensor(string_format(TN_A_CONV1D_NORM, i, "weight"), false);
|
||||
}
|
||||
model.sscp_inp_proj_w = get_tensor(string_format(TN_A_INP_PROJ, "weight"));
|
||||
model.sscp_inp_proj_b = get_tensor(string_format(TN_A_INP_PROJ, "bias"), false);
|
||||
model.audio_out_proj_w = get_tensor(string_format(TN_A_OUT_PROJ, "weight"), false);
|
||||
model.audio_out_proj_b = get_tensor(string_format(TN_A_OUT_PROJ, "bias"), false);
|
||||
// audio multimodal embedder (mm.a.* namespace, not mm.*)
|
||||
model.mm_soft_emb_norm_w = get_tensor(string_format(TN_A_MM_SOFT_EMB_N, "weight"), false);
|
||||
model.mm_input_proj_w = get_tensor(string_format(TN_A_MM_INP_PROJ, "weight"), false);
|
||||
|
||||
// Per-layer tensors NOT loaded by the generic loop above
|
||||
for (int il = 0; il < hparams.n_layer; ++il) {
|
||||
auto & layer = model.layers[il];
|
||||
|
||||
// Gemma4 audio conformer-specific tensors
|
||||
layer.ff_norm_w = get_tensor(string_format(TN_FFN_NORM, prefix, il, "weight"));
|
||||
layer.attn_pre_norm_w = get_tensor(string_format(TN_A_ATTN_PRE_NORM, prefix, il, "weight"), false);
|
||||
layer.per_dim_scale_w = get_tensor(string_format(TN_A_PER_DIM_SCALE, prefix, il, "weight"), false);
|
||||
layer.per_dim_k_scale_w = get_tensor(string_format(TN_A_PER_DIM_K_SCALE, prefix, il, "weight"), false);
|
||||
layer.attn_k_rel_w = get_tensor(string_format(TN_A_ATTN_K_REL, prefix, il, "weight"), false);
|
||||
|
||||
// Convolution module
|
||||
// Note: conv_norm / norm_conv are swapped in GGUF due to
|
||||
// upstream tensor_mapping.py, so we load them in reverse order
|
||||
layer.norm_conv_w = get_tensor(string_format(TN_CONV_NORM, prefix, il, "weight"), false);
|
||||
layer.norm_conv_b = get_tensor(string_format(TN_CONV_NORM, prefix, il, "bias"), false);
|
||||
layer.conv_pw1_w = get_tensor(string_format(TN_CONV_PW1, prefix, il, "weight"));
|
||||
layer.conv_pw1_b = get_tensor(string_format(TN_CONV_PW1, prefix, il, "bias"), false);
|
||||
layer.conv_dw_w = get_tensor(string_format(TN_CONV_DW, prefix, il, "weight"));
|
||||
layer.conv_dw_b = get_tensor(string_format(TN_CONV_DW, prefix, il, "bias"), false);
|
||||
layer.conv_norm_w = get_tensor(string_format(TN_NORM_CONV, prefix, il, "weight"), false);
|
||||
layer.conv_norm_b = get_tensor(string_format(TN_NORM_CONV, prefix, il, "bias"), false);
|
||||
layer.conv_pw2_w = get_tensor(string_format(TN_CONV_PW2, prefix, il, "weight"));
|
||||
layer.conv_pw2_b = get_tensor(string_format(TN_CONV_PW2, prefix, il, "bias"), false);
|
||||
|
||||
// FFN2 (second half-step)
|
||||
layer.ff_norm_1_w = get_tensor(string_format(TN_FFN_NORM_1, prefix, il, "weight"));
|
||||
layer.ff_up_1_w = get_tensor(string_format(TN_FFN_UP_1, prefix, il, "weight"));
|
||||
layer.ff_up_1_b = get_tensor(string_format(TN_FFN_UP_1, prefix, il, "bias"), false);
|
||||
layer.ff_down_1_w = get_tensor(string_format(TN_FFN_DOWN_1, prefix, il, "weight"));
|
||||
layer.ff_down_1_b = get_tensor(string_format(TN_FFN_DOWN_1, prefix, il, "bias"), false);
|
||||
layer.ff_post_norm_1_w = get_tensor(string_format(TN_A_FFN_POST_NORM_1, prefix, il, "weight"), false);
|
||||
}
|
||||
|
||||
// Load clamp info for ClippableLinear AFTER all tensors are loaded
|
||||
for (auto * tensor : tensors_to_load) {
|
||||
std::string name = tensor->name;
|
||||
if (string_ends_with(name, ".weight")) {
|
||||
std::string name_inp_max = name;
|
||||
std::string name_inp_min = name;
|
||||
std::string name_out_max = name;
|
||||
std::string name_out_min = name;
|
||||
string_replace_all(name_inp_max, ".weight", ".input_max");
|
||||
string_replace_all(name_inp_min, ".weight", ".input_min");
|
||||
string_replace_all(name_out_max, ".weight", ".output_max");
|
||||
string_replace_all(name_out_min, ".weight", ".output_min");
|
||||
model.clamp_info_map[name] = {
|
||||
get_scalar(name_inp_max, FLT_MAX),
|
||||
get_scalar(name_inp_min, -FLT_MAX),
|
||||
get_scalar(name_out_max, FLT_MAX),
|
||||
get_scalar(name_out_min, -FLT_MAX)
|
||||
};
|
||||
}
|
||||
}
|
||||
} break;
|
||||
case PROJECTOR_TYPE_LFM2A:
|
||||
{
|
||||
for (int i : {0, 2, 3, 5, 6}) {
|
||||
|
|
@ -2246,7 +2354,10 @@ struct clip_model_loader {
|
|||
ggml_backend_buffer_set_usage(ctx_clip.buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
|
||||
for (auto & t : tensors_to_load) {
|
||||
ggml_tensor * cur = ggml_get_tensor(ctx_clip.ctx_data.get(), t->name);
|
||||
const size_t offset = tensor_offset[t->name];
|
||||
GGML_ASSERT(cur && "tensor not found in ctx_data");
|
||||
auto it_off = tensor_offset.find(t->name);
|
||||
GGML_ASSERT(it_off != tensor_offset.end() && "no offset for tensor");
|
||||
const size_t offset = it_off->second;
|
||||
fin.seekg(offset, std::ios::beg);
|
||||
if (!fin) {
|
||||
throw std::runtime_error(string_format("%s: failed to seek for tensor %s\n", __func__, t->name));
|
||||
|
|
@ -2266,6 +2377,7 @@ struct clip_model_loader {
|
|||
|
||||
LOG_DBG("%s: loaded %zu tensors from %s\n", __func__, tensors_to_load.size(), fname.c_str());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
struct support_info_op {
|
||||
|
|
@ -2538,8 +2650,7 @@ struct clip_init_result clip_init(const char * fname, struct clip_context_params
|
|||
|
||||
// TODO: we don't support audio for Gemma 3N, but GGUF contains audio tensors
|
||||
// we can remove this check when we implement audio support for Gemma 3N
|
||||
skip_audio = ctx_vision->model.proj_type == PROJECTOR_TYPE_GEMMA3NV
|
||||
|| ctx_vision->model.proj_type == PROJECTOR_TYPE_GEMMA4V;
|
||||
skip_audio = ctx_vision->model.proj_type == PROJECTOR_TYPE_GEMMA3NV;
|
||||
}
|
||||
|
||||
if (loader.has_audio && !skip_audio) {
|
||||
|
|
@ -2856,6 +2967,15 @@ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * im
|
|||
n_patches /= 2;
|
||||
}
|
||||
} break;
|
||||
case PROJECTOR_TYPE_QWEN3A:
|
||||
{
|
||||
// 3x stride-2 conv2d: each step is floor((n-1)/2)+1
|
||||
int n = img->nx;
|
||||
n = (n - 1) / 2 + 1;
|
||||
n = (n - 1) / 2 + 1;
|
||||
n = (n - 1) / 2 + 1;
|
||||
n_patches = n;
|
||||
} break;
|
||||
case PROJECTOR_TYPE_GLMA:
|
||||
{
|
||||
n_patches = img->nx;
|
||||
|
|
@ -2893,6 +3013,16 @@ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * im
|
|||
{
|
||||
n_patches = ((((img->nx + 1) / 2) + 1) / 2 + 1) / 2;
|
||||
} break;
|
||||
case PROJECTOR_TYPE_GEMMA4A:
|
||||
{
|
||||
// Two Conv2D stride-2: O = floor((I + 2p - k) / s) + 1, p=1, k=3, s=2
|
||||
// O = floor((I - 1) / 2) + 1
|
||||
int n = img->nx;
|
||||
for (int i = 0; i < 2; i++) {
|
||||
n = (n - 1) / 2 + 1;
|
||||
}
|
||||
n_patches = n;
|
||||
} break;
|
||||
default:
|
||||
GGML_ABORT("unsupported projector type");
|
||||
}
|
||||
|
|
@ -3322,6 +3452,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
|
|||
case PROJECTOR_TYPE_INTERNVL:
|
||||
case PROJECTOR_TYPE_NEMOTRON_V2_VL:
|
||||
case PROJECTOR_TYPE_QWEN2A:
|
||||
case PROJECTOR_TYPE_QWEN3A:
|
||||
case PROJECTOR_TYPE_GLMA:
|
||||
case PROJECTOR_TYPE_ULTRAVOX:
|
||||
case PROJECTOR_TYPE_LFM2:
|
||||
|
|
@ -3352,6 +3483,56 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
|
|||
}
|
||||
set_input_i32("pos_w", pos_data);
|
||||
} break;
|
||||
case PROJECTOR_TYPE_GEMMA4A:
|
||||
{
|
||||
GGML_ASSERT(imgs.entries.size() == 1);
|
||||
const auto & img0 = imgs.entries.front();
|
||||
// Compute n_pos matching SSCP output: two stride-2 convs
|
||||
int n_pos = img0->nx;
|
||||
for (int i = 0; i < 2; i++) { n_pos = (n_pos - 1) / 2 + 1; }
|
||||
|
||||
// Chunked local attention: blocked causal mask and RPE
|
||||
const int chunk_size = 12;
|
||||
const int max_past = 12;
|
||||
const int context_size = chunk_size + max_past;
|
||||
const int num_blocks = (n_pos + chunk_size - 1) / chunk_size;
|
||||
|
||||
// Blocked causal attention mask: [context_size, chunk_size, num_blocks]
|
||||
{
|
||||
std::vector<float> mask(context_size * chunk_size * num_blocks, -1e9f);
|
||||
for (int b = 0; b < num_blocks; b++) {
|
||||
for (int q = 0; q < chunk_size; q++) {
|
||||
int gq = b * chunk_size + q;
|
||||
for (int k = 0; k < context_size; k++) {
|
||||
int gk = b * chunk_size - max_past + k;
|
||||
if (gq < n_pos && gk >= 0 && gk < n_pos && gk <= gq && (gq - gk) < max_past) {
|
||||
mask[k + q * context_size + b * context_size * chunk_size] = 0.0f;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
set_input_f32("kq_mask", mask);
|
||||
}
|
||||
|
||||
// Sinusoidal RPE: 13 positions [12, 11, ..., 0]
|
||||
{
|
||||
const int n_embd = ctx->model.hparams.n_embd;
|
||||
const int num_timescales = n_embd / 2;
|
||||
const float log_timescale_increment = logf(10000.0f) / std::max(num_timescales - 1, 1);
|
||||
const int rpe_len = max_past + 1;
|
||||
std::vector<float> pos_emb(n_embd * rpe_len, 0.0f);
|
||||
for (int p = 0; p < rpe_len; p++) {
|
||||
float position = (float)(max_past - p);
|
||||
for (int i = 0; i < num_timescales; i++) {
|
||||
float inv_ts = expf(-(float)i * log_timescale_increment);
|
||||
float scaled = position * inv_ts;
|
||||
pos_emb[p * n_embd + i] = sinf(scaled);
|
||||
pos_emb[p * n_embd + i + num_timescales] = cosf(scaled);
|
||||
}
|
||||
}
|
||||
set_input_f32("pos_emb", pos_emb);
|
||||
}
|
||||
} break;
|
||||
case PROJECTOR_TYPE_LFM2A:
|
||||
{
|
||||
GGML_ASSERT(imgs.entries.size() == 1);
|
||||
|
|
@ -3501,8 +3682,9 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
|
|||
return ctx->model.mm_model_proj->ne[1];
|
||||
case PROJECTOR_TYPE_QWEN2A:
|
||||
return ctx->model.mm_fc_w->ne[1];
|
||||
case PROJECTOR_TYPE_GLMA:
|
||||
case PROJECTOR_TYPE_QWEN3A:
|
||||
return ctx->model.mm_2_w->ne[1];
|
||||
case PROJECTOR_TYPE_GLMA:
|
||||
case PROJECTOR_TYPE_LFM2:
|
||||
case PROJECTOR_TYPE_KIMIVL:
|
||||
case PROJECTOR_TYPE_PADDLEOCR:
|
||||
|
|
@ -3516,6 +3698,8 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
|
|||
return ctx->model.mm_fc_w->ne[1];
|
||||
case PROJECTOR_TYPE_LFM2A:
|
||||
return ctx->model.position_embeddings->ne[0];
|
||||
case PROJECTOR_TYPE_GEMMA4A:
|
||||
return ctx->model.hparams.projection_dim;
|
||||
case PROJECTOR_TYPE_GLM4V:
|
||||
return ctx->model.mm_ffn_down_w->ne[1];
|
||||
default:
|
||||
|
|
@ -3552,6 +3736,7 @@ bool clip_has_whisper_encoder(const struct clip_ctx * ctx) {
|
|||
switch (ctx->proj_type()) {
|
||||
case PROJECTOR_TYPE_ULTRAVOX:
|
||||
case PROJECTOR_TYPE_QWEN2A:
|
||||
case PROJECTOR_TYPE_QWEN3A:
|
||||
case PROJECTOR_TYPE_GLMA:
|
||||
case PROJECTOR_TYPE_VOXTRAL:
|
||||
case PROJECTOR_TYPE_MERALION:
|
||||
|
|
|
|||
|
|
@ -0,0 +1,288 @@
|
|||
/**
|
||||
* Gemma 4 Audio Conformer Encoder (clip_graph_gemma4a)
|
||||
*
|
||||
* Architecture: Conformer with dual half-step FFN, full self-attention
|
||||
* with sinusoidal RPE, depthwise light conv, and output projection.
|
||||
*/
|
||||
|
||||
#include "models.h"
|
||||
#include <cmath>
|
||||
|
||||
// Build the ggml compute graph for the Gemma 4 audio conformer encoder.
// Pipeline: raw mel input -> 2x stride-2 subsampling conv2d (+per-channel
// LayerNorm, ReLU) -> input projection -> N conformer blocks (half-step FFN,
// chunked local self-attention with sinusoidal RPE, causal depthwise conv,
// half-step FFN, output norm) -> output projection -> audio mm embedder.
// Inputs set at encode time: "pos_emb" (blocked RPE table) and "kq_mask"
// (blocked causal attention mask) — filled in clip_image_batch_encode.
ggml_cgraph * clip_graph_gemma4a::build() {
    const float res_weight = 0.5f; // half-step residual weight for the two FFN sub-blocks
    const float norm_eps   = 1e-6f;

    // 1. Input: raw features, transposed so time ends up on ne[1]
    ggml_tensor * inp = build_inp_raw(1);
    auto * cur = ggml_cont(ctx0, ggml_transpose(ctx0, inp));

    // 2. Subsampling Conv2D (symmetric padding=1, matching PyTorch)
    {
        for (int i = 0; i < 2; i++) {
            cur = ggml_conv_2d(ctx0, model.sscp_conv_w[i], cur, 2, 2, 1, 1, 1, 1);
            if (model.sscp_conv_b[i]) {
                cur = ggml_add(ctx0, cur, model.sscp_conv_b[i]);
            }
            // nn.LayerNorm(channels): permute ch to ne[0], normalize, permute back
            if (model.sscp_norm_w[i]) {
                cur = ggml_cont(ctx0, ggml_permute(ctx0, cur, 1, 2, 0, 3));
                cur = ggml_norm(ctx0, cur, norm_eps);
                cur = ggml_mul(ctx0, cur, model.sscp_norm_w[i]);
                cur = ggml_cont(ctx0, ggml_permute(ctx0, cur, 2, 0, 1, 3));
            }
            cur = ggml_relu(ctx0, cur);
        }
        // Flatten [freq, time, ch, 1] -> [ch*freq, time]
        cur = ggml_cont(ctx0, ggml_permute(ctx0, cur, 1, 2, 0, 3));
        cur = ggml_reshape_2d(ctx0, cur, cur->ne[0] * cur->ne[1], cur->ne[2]);
        // project flattened conv features into the conformer embedding dim
        if (model.sscp_inp_proj_w) {
            cur = build_mm(model.sscp_inp_proj_w, cur);
            if (model.sscp_inp_proj_b) {
                cur = ggml_add(ctx0, cur, model.sscp_inp_proj_b);
            }
        }
    }

    const int64_t n_pos = cur->ne[1]; // sequence length after subsampling

    // Chunked local attention parameters
    // NOTE(review): C/P are hard-coded; assumed fixed by the model config — confirm
    const int64_t C  = 12;                    // chunk_size
    const int64_t P  = 12;                    // max_past_horizon (context_left - 1)
    const int64_t S  = C + P;                 // context_size = 24
    const int64_t R  = P + 1;                 // RPE positions = 13
    const int64_t B  = (n_pos + C - 1) / C;  // num_blocks
    const int64_t Np = B * C;                 // padded sequence length
    const int64_t pad_seq = Np - n_pos;       // right-padding to a whole number of chunks

    // Input tensors: blocked RPE and blocked attention mask
    ggml_tensor * pos_emb = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_head * d_head, R);
    ggml_set_name(pos_emb, "pos_emb");
    ggml_set_input(pos_emb);

    ggml_tensor * kq_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, S, C, B);
    ggml_set_name(kq_mask, "kq_mask");
    ggml_set_input(kq_mask);

    // 3. Conformer Blocks
    for (int il = 0; il < hparams.n_layer; il++) {
        const auto & layer = model.layers[il];
        auto * residual = cur;

        // FFN 1 (half-step): pre-norm -> SiLU FFN -> optional post-norm -> 0.5x residual
        if (layer.ff_norm_w && layer.ff_up_w && layer.ff_down_w) {
            cur = build_norm(cur, layer.ff_norm_w, nullptr, NORM_TYPE_RMS, norm_eps, il);
            cur = build_ffn(cur,
                layer.ff_up_w, nullptr, nullptr, nullptr,
                layer.ff_down_w, nullptr, FFN_SILU, il);
            if (layer.ff_post_norm_w) {
                cur = build_norm(cur, layer.ff_post_norm_w, nullptr, NORM_TYPE_RMS, norm_eps, il);
            }
            residual = ggml_add(ctx0, residual, ggml_scale(ctx0, cur, res_weight));
        }

        // Chunked local self-attention with RPE
        if (layer.q_w && layer.k_w && layer.v_w && layer.o_w) {
            // Q/K scaling constants; the /log(2) factors fold a base-2 softmax
            // convention into the scores — presumably matching the reference
            // implementation's exp2-based attention — TODO confirm
            const float q_scale = (1.0f / sqrtf((float)d_head)) / logf(2.0f);
            const float k_scale = logf(1.0f + expf(1.0f)) / logf(2.0f);
            const float softcap = 50.0f; // tanh soft-capping bound on logits

            // fall back to ln_1_w when the conformer-specific pre-norm is absent
            ggml_tensor * attn_norm_w = layer.attn_pre_norm_w ? layer.attn_pre_norm_w : layer.ln_1_w;
            cur = attn_norm_w
                ? build_norm(residual, attn_norm_w, nullptr, NORM_TYPE_RMS, norm_eps, il)
                : residual;

            ggml_tensor * Qcur = build_mm(layer.q_w, cur);
            ggml_tensor * Kcur = build_mm(layer.k_w, cur);
            ggml_tensor * Vcur = build_mm(layer.v_w, cur);

            // [n_embd, n_pos] -> [D, H, N]
            Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos);
            Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos);
            Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos);

            // Q/K scaling (optional learned per-dimension scales)
            Qcur = ggml_scale(ctx0, Qcur, q_scale);
            if (layer.per_dim_scale_w) {
                Qcur = ggml_mul(ctx0, Qcur, ggml_reshape_3d(ctx0, layer.per_dim_scale_w, d_head, 1, 1));
            }
            Kcur = ggml_scale(ctx0, Kcur, k_scale);
            if (layer.per_dim_k_scale_w) {
                Kcur = ggml_mul(ctx0, Kcur, ggml_reshape_3d(ctx0, layer.per_dim_k_scale_w, d_head, 1, 1));
            }

            // Q blocking: [D, H, N] -> pad to Np -> reshape [D, H, C, B]
            // ggml permute: ne[ax_i] = src->ne[i], so (0,3,1,2) sends H->3, C->1, B->2
            Qcur = ggml_pad(ctx0, Qcur, 0, 0, pad_seq, 0);            // [D, H, Np]
            Qcur = ggml_reshape_4d(ctx0, Qcur, d_head, n_head, C, B); // [D, H, C, B]
            Qcur = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 0, 3, 1, 2)); // [D, C, B, H]

            // K/V block context extraction via overlapping view:
            // Pad to S*B elements, roll right by P to create left-padding,
            // then view with stride C in the block dimension (overlapping windows).
            auto extract_blocks = [&](ggml_tensor * t) -> ggml_tensor * {
                // [D, H, N] -> pad to S*B -> roll right by P -> cont (materialize)
                const int64_t pad_kv = S * B - n_pos;
                t = ggml_pad(ctx0, t, 0, 0, pad_kv, 0); // [D, H, S*B]
                t = ggml_roll(ctx0, t, 0, 0, P, 0);     // left-pad by P
                t = ggml_cont(ctx0, t);                 // materialize roll (removes view offset)
                // Overlapping view: stride for B dim is C positions, not S
                // ne = [D, H, S, B], data_size = D*H*S*B*sizeof = source_nbytes (exact fit)
                // nb1=D*sizeof, nb2=D*H*sizeof, nb3=C*D*H*sizeof (overlap: C < S)
                t = ggml_view_4d(ctx0, t, d_head, n_head, S, B,
                                 t->nb[1], t->nb[2], C * t->nb[2], 0);
                t = ggml_cont(ctx0, t); // materialize overlapping windows
                return t;
            };

            ggml_tensor * Kblk = extract_blocks(Kcur);
            // [D, H, S, B] -> [D, S, B, H] via permute(0,3,1,2)
            Kblk = ggml_cont(ctx0, ggml_permute(ctx0, Kblk, 0, 3, 1, 2));

            ggml_tensor * Vblk = extract_blocks(Vcur);
            // [D, H, S, B] -> [S, D, B, H] via permute(1,3,0,2)
            Vblk = ggml_cont(ctx0, ggml_permute(ctx0, Vblk, 1, 3, 0, 2));

            // Content attention: Q @ K^T
            // Kblk=[D,S,B,H], Qcur=[D,C,B,H] -> mul_mat contracts on D -> [S,C,B,H]
            ggml_tensor * matrix_ac = ggml_mul_mat(ctx0, Kblk, Qcur);

            // Relative position attention (Transformer-XL style "bd" term)
            if (layer.attn_k_rel_w) {
                // RPE: [n_embd, R] -> project -> [D, H, R] -> [D, R, H]
                auto * p = ggml_mul_mat(ctx0, layer.attn_k_rel_w, pos_emb);
                p = ggml_reshape_3d(ctx0, p, d_head, n_head, R);
                p = ggml_cont(ctx0, ggml_permute(ctx0, p, 0, 2, 1, 3)); // [D, R, H]

                // Q_flat @ RPE^T: [D, C*B, H] @ [D, R, H] -> [R, C*B, H]
                auto * Q_flat = ggml_reshape_3d(ctx0, Qcur, d_head, C * B, n_head);
                auto * matrix_bd = ggml_mul_mat(ctx0, p, Q_flat);              // [R, C*B, H]
                matrix_bd = ggml_reshape_4d(ctx0, matrix_bd, R, C, B, n_head); // [R, C, B, H]

                // Blocked relative shift (appendix B of Transformer-XL):
                // pad each row to S+1, flatten, re-slice at stride C*S — this
                // realigns relative offsets to absolute key positions.
                {
                    matrix_bd = ggml_pad(ctx0, matrix_bd, S + 1 - R, 0, 0, 0); // [S+1, C, B, H]
                    matrix_bd = ggml_reshape_3d(ctx0, matrix_bd, (S + 1) * C, B, n_head);
                    matrix_bd = ggml_view_3d(ctx0, matrix_bd,
                                             C * S, B, n_head,
                                             matrix_bd->nb[1], matrix_bd->nb[2], 0);
                    matrix_bd = ggml_cont(ctx0, matrix_bd);                        // [C*S, B, H]
                    matrix_bd = ggml_reshape_4d(ctx0, matrix_bd, S, C, B, n_head); // [S, C, B, H]
                }

                matrix_ac = ggml_add(ctx0, matrix_ac, matrix_bd);
            }

            auto * scores = matrix_ac; // [S, C, B, H]

            // Softcap: softcap * tanh(scores / softcap)
            scores = ggml_scale(ctx0, scores, 1.0f / softcap);
            scores = ggml_tanh(ctx0, scores);
            scores = ggml_scale(ctx0, scores, softcap);

            // Blocked attention mask: [S, C, B] broadcasts over H
            scores = ggml_add(ctx0, scores, kq_mask);

            ggml_tensor * attn = ggml_soft_max(ctx0, scores);

            // attn @ V: [S,C,B,H] @ [S,D,B,H] -> [D,C,B,H]
            ggml_tensor * x = ggml_mul_mat(ctx0, Vblk, attn);

            // [D,C,B,H] -> [D,H,C,B] via permute(0,2,3,1) -> flatten -> trim padding
            x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 2, 3, 1));
            x = ggml_cont_2d(ctx0, x, d_head * n_head, C * B);
            if (pad_seq > 0) {
                x = ggml_view_2d(ctx0, x, d_head * n_head, n_pos, x->nb[1], 0);
                x = ggml_cont(ctx0, x);
            }

            x = build_mm(layer.o_w, x);
            if (layer.o_b) { x = ggml_add(ctx0, x, layer.o_b); }

            if (layer.attn_post_norm_w) {
                x = build_norm(x, layer.attn_post_norm_w, nullptr, NORM_TYPE_RMS, norm_eps, il);
            }
            residual = ggml_add(ctx0, residual, x); // full-weight residual (unlike FFN half-steps)
        }

        // Convolution Module: norm -> pointwise (GLU) -> causal depthwise -> norm -> SiLU -> pointwise
        if (layer.norm_conv_w && layer.conv_pw1_w && layer.conv_dw_w && layer.conv_pw2_w) {
            cur = build_norm(residual, layer.norm_conv_w, nullptr, NORM_TYPE_RMS, norm_eps, il);
            auto * x = build_mm(layer.conv_pw1_w, cur);

            // GLU: split channels in half, gate first half with sigmoid of second
            {
                int64_t d = x->ne[0] / 2;
                ggml_tensor * gate = ggml_sigmoid(ctx0,
                    ggml_cont(ctx0, ggml_view_2d(ctx0, x, d, x->ne[1], x->nb[1], d * x->nb[0])));
                x = ggml_mul(ctx0,
                    ggml_view_2d(ctx0, x, d, x->ne[1], x->nb[1], 0), gate);
                x = ggml_cont(ctx0, ggml_transpose(ctx0, x));
            }

            // Causal depthwise Conv1D via ggml_ssm_conv (pad+roll for left-only padding).
            // NOTE(review): the 4 here implies a depthwise kernel of size 5 — confirm
            x = ggml_pad(ctx0, x, 4, 0, 0, 0);
            x = ggml_roll(ctx0, x, 4, 0, 0, 0);
            x = ggml_ssm_conv(ctx0, x, layer.conv_dw_w);
            if (layer.conv_dw_b) {
                x = ggml_add(ctx0, x, layer.conv_dw_b);
            }

            if (layer.conv_norm_w) {
                x = ggml_rms_norm(ctx0, x, norm_eps);
                x = ggml_mul(ctx0, x, layer.conv_norm_w);
            }
            x = ggml_silu(ctx0, x);
            x = build_mm(layer.conv_pw2_w, x);
            residual = ggml_add(ctx0, residual, x);
        }

        // FFN 2 (half-step): mirrors FFN 1 with the *_1 tensor set
        if (layer.ff_norm_1_w && layer.ff_up_1_w && layer.ff_down_1_w) {
            cur = build_norm(residual, layer.ff_norm_1_w, nullptr, NORM_TYPE_RMS, norm_eps, il);
            cur = build_ffn(cur,
                layer.ff_up_1_w, nullptr, nullptr, nullptr,
                layer.ff_down_1_w, nullptr, FFN_SILU, il);
            if (layer.ff_post_norm_1_w) {
                cur = build_norm(cur, layer.ff_post_norm_1_w, nullptr, NORM_TYPE_RMS, norm_eps, il);
            }
            residual = ggml_add(ctx0, residual, ggml_scale(ctx0, cur, res_weight));
        }

        // Layer output norm (optional)
        cur = layer.ln_2_w
            ? build_norm(residual, layer.ln_2_w, nullptr, NORM_TYPE_RMS, norm_eps, il)
            : residual;

    }

    // 4. Output Projection
    if (model.audio_out_proj_w) {
        cur = build_mm(model.audio_out_proj_w, cur);
        if (model.audio_out_proj_b) {
            cur = ggml_add(ctx0, cur, model.audio_out_proj_b);
        }
    }

    // 5. Audio Multimodal Embedder: RMS norm (+ optional learned scale) then
    // projection into the language model's embedding space
    cur = ggml_rms_norm(ctx0, cur, norm_eps);
    if (model.mm_soft_emb_norm_w) {
        cur = ggml_mul(ctx0, cur, model.mm_soft_emb_norm_w);
    }
    if (model.mm_input_proj_w) {
        cur = build_mm(model.mm_input_proj_w, cur);
    }

    ggml_build_forward_expand(gf, cur);
    return gf;
}
|
||||
|
||||
// Matmul wrapper implementing ClippableLinear: when calibration clamp info was
// recorded for this weight, clamp the input before and the output after the
// matmul; otherwise behave exactly like a plain ggml_mul_mat.
ggml_tensor * clip_graph_gemma4a::build_mm(ggml_tensor * w, ggml_tensor * x) const {
    const auto entry = model.clamp_info_map.find(w->name);
    if (entry != model.clamp_info_map.end()) {
        const auto & info = entry->second;
        ggml_tensor * x_clamped = ggml_clamp(ctx0, x, info.inp_min, info.inp_max);
        ggml_tensor * y         = ggml_mul_mat(ctx0, w, x_clamped);
        return ggml_clamp(ctx0, y, info.out_min, info.out_max);
    }
    // no clamp info for this weight -> plain matmul
    return ggml_mul_mat(ctx0, w, x);
}
|
||||
|
|
@ -103,6 +103,12 @@ struct clip_graph_conformer : clip_graph {
|
|||
ggml_cgraph * build() override;
|
||||
};
|
||||
|
||||
// Graph builder for the Gemma 4 audio conformer encoder
struct clip_graph_gemma4a : clip_graph {
    clip_graph_gemma4a(clip_ctx * ctx, const clip_image_f32 & img) : clip_graph(ctx, img) {}
    ggml_cgraph * build() override;
    // matmul wrapper that applies per-weight input/output clamping (ClippableLinear)
    ggml_tensor * build_mm(ggml_tensor * w, ggml_tensor * x) const override;
};
|
||||
|
||||
struct clip_graph_glm4v : clip_graph {
|
||||
clip_graph_glm4v(clip_ctx * ctx, const clip_image_f32 & img) : clip_graph(ctx, img) {}
|
||||
ggml_cgraph * build() override;
|
||||
|
|
@ -146,6 +152,11 @@ struct clip_graph_mobilenetv5 : clip_graph {
|
|||
const mobilenetv5_block & block);
|
||||
};
|
||||
|
||||
// Graph builder for the Qwen3 audio encoder (conv2d subsampler + ViT + MLP projector)
struct clip_graph_qwen3a : clip_graph {
    clip_graph_qwen3a(clip_ctx * ctx, const clip_image_f32 & img) : clip_graph(ctx, img) {}
    ggml_cgraph * build() override;
};
|
||||
|
||||
struct clip_graph_kimik25 : clip_graph {
|
||||
clip_graph_kimik25(clip_ctx * ctx, const clip_image_f32 & img) : clip_graph(ctx, img) {}
|
||||
ggml_cgraph * build() override;
|
||||
|
|
|
|||
|
|
@ -0,0 +1,68 @@
|
|||
#include "models.h"
|
||||
|
||||
// Build the ggml compute graph for the Qwen3 audio encoder.
// Pipeline: mel input -> 3x stride-2 conv2d (GELU-erf) -> flatten + linear
// projection -> transformer (build_vit with learned position embeddings)
// -> 2-layer GELU-erf MLP projector into the language model embedding space.
ggml_cgraph * clip_graph_qwen3a::build() {
    ggml_tensor * inp = build_inp_raw(1);

    // conv2d block: each conv halves both time and mel dims (stride 2, pad 1)
    // TODO: do we need to split by chunks of n_window each like on transformers impl?
    {
        inp = ggml_conv_2d(ctx0, model.conv2d_1_w, inp, 2, 2, 1, 1, 1, 1);
        inp = ggml_add(ctx0, inp, model.conv2d_1_b);
        inp = ggml_gelu_erf(ctx0, inp);

        inp = ggml_conv_2d(ctx0, model.conv2d_2_w, inp, 2, 2, 1, 1, 1, 1);
        inp = ggml_add(ctx0, inp, model.conv2d_2_b);
        inp = ggml_gelu_erf(ctx0, inp);

        inp = ggml_conv_2d(ctx0, model.conv2d_3_w, inp, 2, 2, 1, 1, 1, 1);
        inp = ggml_add(ctx0, inp, model.conv2d_3_b);
        inp = ggml_gelu_erf(ctx0, inp);

        // inp [n_pos, n_mels/8, channels, 1] (W, H, C, N)
        cb(inp, "after_conv_blocks", -1);

        const int64_t n_pos_after_conv = inp->ne[0];
        const int64_t n_mel_after_conv = inp->ne[1]; // 128/8 = 16

        // flatten (mel, channel) into the feature dim, keep time on the other axis
        inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 0, 2, 3, 1));
        inp = ggml_reshape_2d(ctx0, inp, n_pos_after_conv, n_mel_after_conv * inp->ne[3]); // [n_pos, 7680]
        inp = ggml_cont(ctx0, ggml_transpose(ctx0, inp)); // [7680, n_pos]

        // project to n_embd
        inp = ggml_mul_mat(ctx0, model.conv_out_w, inp);
        if (model.conv_out_b) {
            inp = ggml_add(ctx0, inp, model.conv_out_b);
        }
        cb(inp, "after_conv_out", -1);
    }

    auto n_pos = inp->ne[1];

    // take the first n_pos rows of the learned position embedding table
    ggml_tensor * pos_embd_selected = ggml_view_2d(
        ctx0, model.position_embeddings,
        model.position_embeddings->ne[0], n_pos,
        model.position_embeddings->nb[1], 0
    );
    ggml_tensor * cur = build_vit(
        inp, n_pos,
        NORM_TYPE_NORMAL,
        hparams.ffn_op,
        pos_embd_selected,
        nullptr);

    cb(cur, "after_transformer", -1);

    // projector: 2-layer MLP with GELU-erf activation
    cur = build_ffn(cur,
        model.mm_1_w, model.mm_1_b,
        nullptr, nullptr,
        model.mm_2_w, model.mm_2_b,
        FFN_GELU_ERF,
        -1);

    cb(cur, "projected", -1);

    ggml_build_forward_expand(gf, cur);

    return gf;
}
|
||||
|
|
@ -8,6 +8,7 @@
|
|||
#include <vector>
|
||||
#include <fstream>
|
||||
#include <algorithm>
|
||||
#include <functional>
|
||||
|
||||
// some of the code here is copied from whisper.cpp
|
||||
|
||||
|
|
@ -37,23 +38,36 @@ void mtmd_audio_cache::fill_mel_filterbank_matrix(int n_mel,
|
|||
float fmin,
|
||||
float fmax,
|
||||
bool slaney_area_norm,
|
||||
float scale) {
|
||||
float scale,
|
||||
bool use_htk) {
|
||||
GGML_ASSERT(n_mel > 0 && n_fft > 1);
|
||||
if (fmax <= 0.0f) {
|
||||
fmax = 0.5f * sample_rate;
|
||||
}
|
||||
|
||||
// Slaney scale (matches librosa default)
|
||||
const double min_log_hz = 1000.0;
|
||||
const double lin_slope = 3 / 200.;
|
||||
const double min_log_mel = min_log_hz * lin_slope;
|
||||
const double log_step = log(6.4) / 27.0;
|
||||
auto hz_to_mel = [min_log_hz, lin_slope, log_step, min_log_mel](const double f_hz) -> double {
|
||||
return (f_hz < min_log_hz) ? f_hz * lin_slope : min_log_mel + log(f_hz / min_log_hz) / log_step;
|
||||
};
|
||||
auto mel_to_hz = [min_log_hz, lin_slope, log_step, min_log_mel](const double m) -> double {
|
||||
return (m < min_log_mel) ? m / lin_slope : min_log_hz * exp((m - min_log_mel) * log_step);
|
||||
};
|
||||
std::function<double(double)> hz_to_mel;
|
||||
std::function<double(double)> mel_to_hz;
|
||||
|
||||
if (use_htk) {
|
||||
hz_to_mel = [](const double f_hz) -> double {
|
||||
return 2595.0 * log10(1.0 + f_hz / 700.0);
|
||||
};
|
||||
mel_to_hz = [](const double m) -> double {
|
||||
return 700.0 * (pow(10.0, m / 2595.0) - 1.0);
|
||||
};
|
||||
} else {
|
||||
// Slaney scale (matches librosa default)
|
||||
const double min_log_hz = 1000.0;
|
||||
const double lin_slope = 3 / 200.;
|
||||
const double min_log_mel = min_log_hz * lin_slope;
|
||||
const double log_step = log(6.4) / 27.0;
|
||||
hz_to_mel = [min_log_hz, lin_slope, log_step, min_log_mel](const double f_hz) -> double {
|
||||
return (f_hz < min_log_hz) ? f_hz * lin_slope : min_log_mel + log(f_hz / min_log_hz) / log_step;
|
||||
};
|
||||
mel_to_hz = [min_log_hz, lin_slope, log_step, min_log_mel](const double m) -> double {
|
||||
return (m < min_log_mel) ? m / lin_slope : min_log_hz * exp((m - min_log_mel) * log_step);
|
||||
};
|
||||
}
|
||||
|
||||
// infer N_fft from n_fft_bins
|
||||
const double bin_hz_step = double(sample_rate) / double(n_fft);
|
||||
|
|
@ -257,10 +271,13 @@ struct filter_params {
|
|||
int32_t hann_window_size;
|
||||
int32_t hop_length;
|
||||
int32_t sample_rate;
|
||||
bool center_padding = false;
|
||||
float preemph = 0.f;
|
||||
bool no_padding = false;
|
||||
bool center_padding = false;
|
||||
float preemph = 0.f;
|
||||
bool use_natural_log = false;
|
||||
bool norm_per_feature = false;
|
||||
bool use_magnitude = false; // |X| instead of |X|^2
|
||||
float mel_floor = 5.960464477539063e-08f;
|
||||
};
|
||||
|
||||
static void log_mel_spectrogram_worker_thread(int ith,
|
||||
|
|
@ -301,10 +318,10 @@ static void log_mel_spectrogram_worker_thread(int ith,
|
|||
// FFT
|
||||
fft(cache, fft_in.data(), frame_size, fft_out.data());
|
||||
|
||||
// Calculate modulus^2 of complex numbers
|
||||
// Use pow(fft_out[2 * j + 0], 2) + pow(fft_out[2 * j + 1], 2) causes inference quality problem? Interesting.
|
||||
// Calculate modulus^2 (power) or modulus (magnitude)
|
||||
for (int j = 0; j < n_fft_bins; j++) {
|
||||
fft_out[j] = (fft_out[2 * j + 0] * fft_out[2 * j + 0] + fft_out[2 * j + 1] * fft_out[2 * j + 1]);
|
||||
float power = (fft_out[2 * j + 0] * fft_out[2 * j + 0] + fft_out[2 * j + 1] * fft_out[2 * j + 1]);
|
||||
fft_out[j] = params.use_magnitude ? sqrtf(power) : power;
|
||||
}
|
||||
|
||||
// mel spectrogram
|
||||
|
|
@ -324,9 +341,10 @@ static void log_mel_spectrogram_worker_thread(int ith,
|
|||
for (; k < n_fft_bins; k++) {
|
||||
sum += fft_out[k] * filters.data[j * n_fft_bins + k];
|
||||
}
|
||||
sum = std::max(sum, (double)params.mel_floor);
|
||||
sum = params.use_natural_log
|
||||
? log(sum + 5.960464477539063e-08)
|
||||
: log10(std::max(sum, 1e-10));
|
||||
? log(sum)
|
||||
: log10(sum);
|
||||
out.data[j * out.n_len + i] = sum;
|
||||
}
|
||||
}
|
||||
|
|
@ -360,7 +378,12 @@ static bool log_mel_spectrogram(
|
|||
|
||||
// Padding
|
||||
std::vector<float> samples_padded;
|
||||
if (params.center_padding) {
|
||||
if (params.no_padding) {
|
||||
// no padding, use samples as-is
|
||||
samples_padded = std::vector<float>(samples, samples + n_samples);
|
||||
samples = samples_padded.data();
|
||||
n_samples = samples_padded.size();
|
||||
} else if (params.center_padding) {
|
||||
const auto pad_amount = frame_size / 2;
|
||||
samples_padded = std::vector<float>(n_samples + 2 * pad_amount, 0);
|
||||
std::copy(samples, samples + n_samples, samples_padded.data() + pad_amount);
|
||||
|
|
@ -464,8 +487,8 @@ static bool log_mel_spectrogram(
|
|||
out.data[i * out.n_len + j] = 0.0;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// clamping and normalization
|
||||
} else if (!params.no_padding) {
|
||||
// Whisper-style clamping and normalization (NOT used by Gemma4)
|
||||
double mmax = -1e20;
|
||||
for (int i = 0; i < out.n_mel*out.n_len; i++) {
|
||||
if (out.data[i] > mmax) {
|
||||
|
|
@ -627,6 +650,87 @@ bool mtmd_audio_preprocessor_conformer::preprocess(const float *
|
|||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// mtmd_audio_preprocessor_gemma4a
|
||||
//
|
||||
|
||||
void mtmd_audio_preprocessor_gemma4a::initialize() {
|
||||
cache.fill_sin_cos_table(hparams.audio_n_fft);
|
||||
|
||||
// Standard periodic Hann window, zero-padded to FFT size
|
||||
cache.hann_window.assign(hparams.audio_n_fft, 0.0f);
|
||||
for (uint32_t i = 0; i < (uint32_t)hparams.audio_window_len; i++) {
|
||||
cache.hann_window[i] = 0.5f - 0.5f * cosf((2.0f * (float)M_PI * i) / hparams.audio_window_len);
|
||||
}
|
||||
|
||||
// HTK mel scale, no Slaney area normalization
|
||||
cache.fill_mel_filterbank_matrix(
|
||||
hparams.n_mel_bins, hparams.audio_n_fft, hparams.audio_sample_rate,
|
||||
0.0f, hparams.audio_sample_rate / 2.0f,
|
||||
/*slaney_area_norm=*/ false,
|
||||
/*scale=*/ 1.0f,
|
||||
/*use_htk=*/ true
|
||||
);
|
||||
}
|
||||
|
||||
bool mtmd_audio_preprocessor_gemma4a::preprocess(const float * samples,
|
||||
size_t n_samples,
|
||||
std::vector<mtmd_audio_mel> & output) {
|
||||
if (n_samples == 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
GGML_ASSERT(!cache.sin_vals.empty());
|
||||
GGML_ASSERT(!cache.cos_vals.empty());
|
||||
GGML_ASSERT(!cache.filters.data.empty());
|
||||
|
||||
filter_params params;
|
||||
params.n_mel = hparams.n_mel_bins;
|
||||
params.n_fft_bins = 1 + (hparams.audio_n_fft / 2);
|
||||
params.hann_window_size = hparams.audio_n_fft; // window is zero-padded to FFT size
|
||||
params.hop_length = hparams.audio_hop_len;
|
||||
params.sample_rate = hparams.audio_sample_rate;
|
||||
params.no_padding = true;
|
||||
params.center_padding = false;
|
||||
params.preemph = 0.0f;
|
||||
params.use_natural_log = true;
|
||||
params.use_magnitude = true;
|
||||
params.mel_floor = 0.001f;
|
||||
params.norm_per_feature = false;
|
||||
|
||||
// Split into 30-second chunks (model context limit, ~750 tokens each)
|
||||
const size_t chunk_samples = 30 * hparams.audio_sample_rate;
|
||||
for (size_t off = 0; off < n_samples; off += chunk_samples) {
|
||||
const float * chunk_ptr = samples + off;
|
||||
size_t chunk_len = std::min(chunk_samples, n_samples - off);
|
||||
|
||||
// Semicausal left-padding + right-padding to match PyTorch frame count
|
||||
const int pad_left = hparams.audio_window_len / 2;
|
||||
const int fft_size = hparams.audio_n_fft;
|
||||
const int hop = hparams.audio_hop_len;
|
||||
const int n_with_left = (int)chunk_len + pad_left;
|
||||
// PyTorch: unfold(size=frame_length+1, step=hop) on semicausal-padded waveform
|
||||
const int pt_frames = (n_with_left - (hparams.audio_window_len + 1)) / hop + 1;
|
||||
const int n_padded_needed = (pt_frames - 1) * hop + fft_size;
|
||||
const int total_pad = std::max((int)(n_padded_needed - (int)chunk_len), pad_left);
|
||||
std::vector<float> padded_samples(total_pad + chunk_len, 0.0f);
|
||||
std::copy(chunk_ptr, chunk_ptr + chunk_len, padded_samples.data() + pad_left);
|
||||
|
||||
mtmd_audio_mel out_chunk;
|
||||
bool ok = log_mel_spectrogram(padded_samples.data(), padded_samples.size(), 4, params, cache, out_chunk);
|
||||
if (!ok) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Trim to PyTorch frame count
|
||||
out_chunk.n_len = std::min(out_chunk.n_len, pt_frames);
|
||||
|
||||
output.push_back(std::move(out_chunk));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
// mtmd_audio_streaming_istft implementation
|
||||
//
|
||||
|
|
|
|||
|
|
@ -45,7 +45,8 @@ struct mtmd_audio_cache {
|
|||
float fmin = 0.0f, // e.g. 0.0
|
||||
float fmax = -1.0f, // e.g. sr/2; pass -1 for auto
|
||||
bool slaney_area_norm = true,
|
||||
float scale = 1.0f // optional extra scaling
|
||||
float scale = 1.0f,
|
||||
bool use_htk = false
|
||||
);
|
||||
};
|
||||
|
||||
|
|
@ -77,6 +78,15 @@ struct mtmd_audio_preprocessor_conformer : mtmd_audio_preprocessor {
|
|||
mtmd_audio_cache cache;
|
||||
};
|
||||
|
||||
struct mtmd_audio_preprocessor_gemma4a : mtmd_audio_preprocessor {
|
||||
mtmd_audio_preprocessor_gemma4a(const clip_ctx * ctx) : mtmd_audio_preprocessor(ctx) {}
|
||||
void initialize() override;
|
||||
bool preprocess(const float * samples, size_t n_samples, std::vector<mtmd_audio_mel> & output) override;
|
||||
|
||||
private:
|
||||
mtmd_audio_cache cache;
|
||||
};
|
||||
|
||||
//
|
||||
// streaming ISTFT - converts spectrogram frames back to audio one frame at a time
|
||||
//
|
||||
|
|
|
|||
|
|
@ -198,35 +198,38 @@ struct img_tool {
|
|||
private:
|
||||
// Bilinear resize function
|
||||
static void resize_bilinear(const clip_image_u8 & src, clip_image_u8 & dst, int target_width, int target_height) {
|
||||
GGML_ASSERT(src.nx >= 2 && src.ny >= 2);
|
||||
if (src.nx == 0 || src.ny == 0) { dst.nx = dst.ny = 0; dst.buf.clear(); return; }
|
||||
if (target_width <= 0) target_width = 1;
|
||||
if (target_height <= 0) target_height = 1;
|
||||
|
||||
dst.nx = target_width;
|
||||
dst.ny = target_height;
|
||||
dst.buf.resize(3 * target_width * target_height);
|
||||
|
||||
float x_ratio = static_cast<float>(src.nx - 1) / target_width;
|
||||
float y_ratio = static_cast<float>(src.ny - 1) / target_height;
|
||||
float x_ratio = target_width > 1 ? static_cast<float>(src.nx - 1) / (target_width - 1) : 0.0f;
|
||||
float y_ratio = target_height > 1 ? static_cast<float>(src.ny - 1) / (target_height - 1) : 0.0f;
|
||||
|
||||
for (int y = 0; y < target_height; y++) {
|
||||
for (int x = 0; x < target_width; x++) {
|
||||
float px = x_ratio * x;
|
||||
float py = y_ratio * y;
|
||||
int x_floor = std::min(static_cast<int>(px), src.nx - 2);
|
||||
int y_floor = std::min(static_cast<int>(py), src.ny - 2);
|
||||
float x_lerp = px - x_floor;
|
||||
float y_lerp = py - y_floor;
|
||||
for (int y = 0; y < target_height; ++y) {
|
||||
for (int x = 0; x < target_width; ++x) {
|
||||
float px = x * x_ratio;
|
||||
float py = y * y_ratio;
|
||||
|
||||
for (int c = 0; c < 3; c++) {
|
||||
float top = lerp(
|
||||
static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
|
||||
static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
|
||||
x_lerp
|
||||
);
|
||||
float bottom = lerp(
|
||||
static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
|
||||
static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
|
||||
x_lerp
|
||||
);
|
||||
dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(lerp(top, bottom, y_lerp));
|
||||
int x0 = std::min(static_cast<int>(px), src.nx - 1);
|
||||
int y0 = std::min(static_cast<int>(py), src.ny - 1);
|
||||
int x1 = std::min(x0 + 1, src.nx - 1);
|
||||
int y1 = std::min(y0 + 1, src.ny - 1);
|
||||
|
||||
float xf = px - x0;
|
||||
float yf = py - y0;
|
||||
|
||||
for (int c = 0; c < 3; ++c) {
|
||||
float top = lerp(static_cast<float>(src.buf[3 * (y0 * src.nx + x0) + c]),
|
||||
static_cast<float>(src.buf[3 * (y0 * src.nx + x1) + c]),
|
||||
xf);
|
||||
float bottom = lerp(static_cast<float>(src.buf[3 * (y1 * src.nx + x0) + c]),
|
||||
static_cast<float>(src.buf[3 * (y1 * src.nx + x1) + c]),
|
||||
xf);
|
||||
dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(lerp(top, bottom, yf));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -455,6 +455,7 @@ struct mtmd_context {
|
|||
// set preprocessor
|
||||
switch (proj) {
|
||||
case PROJECTOR_TYPE_QWEN2A:
|
||||
case PROJECTOR_TYPE_QWEN3A:
|
||||
case PROJECTOR_TYPE_QWEN25O:
|
||||
{
|
||||
// <|audio_bos|> ... (embeddings) ... <|audio_eos|>
|
||||
|
|
@ -484,6 +485,12 @@ struct mtmd_context {
|
|||
{
|
||||
audio_preproc = std::make_unique<mtmd_audio_preprocessor_conformer>(ctx_a);
|
||||
} break;
|
||||
case PROJECTOR_TYPE_GEMMA4A:
|
||||
{
|
||||
aud_beg = "<|audio>";
|
||||
aud_end = "<audio|>";
|
||||
audio_preproc = std::make_unique<mtmd_audio_preprocessor_gemma4a>(ctx_a);
|
||||
} break;
|
||||
default:
|
||||
throw std::runtime_error(string_format("%s: unexpected audio projector type %d\n", __func__, proj));
|
||||
}
|
||||
|
|
@ -1021,6 +1028,10 @@ bool mtmd_decode_use_non_causal(mtmd_context * ctx) {
|
|||
}
|
||||
|
||||
bool mtmd_decode_use_mrope(mtmd_context * ctx) {
|
||||
if (ctx->ctx_v == nullptr && ctx->proj_type_a() == PROJECTOR_TYPE_QWEN3A) {
|
||||
// qwen3-asr
|
||||
return true;
|
||||
}
|
||||
switch (ctx->proj_type_v()) {
|
||||
case PROJECTOR_TYPE_QWEN2VL:
|
||||
case PROJECTOR_TYPE_QWEN25VL:
|
||||
|
|
|
|||
|
|
@ -91,11 +91,13 @@ add_test_vision "ggml-org/LightOnOCR-1B-1025-GGUF:Q8_0"
|
|||
add_test_vision "ggml-org/DeepSeek-OCR-GGUF:Q8_0" -p "Free OCR." --chat-template deepseek-ocr
|
||||
add_test_vision "ggml-org/dots.ocr-GGUF:Q8_0" -p "OCR"
|
||||
add_test_vision "ggml-org/HunyuanOCR-GGUF:Q8_0" -p "OCR"
|
||||
add_test_vision "ggml-org/gemma-4-E2B-it-GGUF:Q8_0" --jinja
|
||||
|
||||
add_test_audio "ggml-org/ultravox-v0_5-llama-3_2-1b-GGUF:Q8_0"
|
||||
add_test_audio "ggml-org/Qwen2.5-Omni-3B-GGUF:Q4_K_M"
|
||||
add_test_audio "ggml-org/Voxtral-Mini-3B-2507-GGUF:Q4_K_M"
|
||||
add_test_audio "ggml-org/LFM2-Audio-1.5B-GGUF:Q8_0"
|
||||
add_test_audio "ggml-org/gemma-4-E2B-it-GGUF:Q8_0" --jinja
|
||||
|
||||
# to test the big models, run: ./tests.sh big
|
||||
if [ "$RUN_BIG_TESTS" = true ]; then
|
||||
|
|
|
|||
|
|
@ -1218,7 +1218,7 @@ button,1,"preview-code-btn",null,classes_1,{"opacity-50":disabled(),"!cursor-not
|
|||
|
||||
\`\`\`
|
||||
Upstream LLM error:
|
||||
`,LLM_ERROR_BLOCK_END="\n```\n",DEFAULT_AGENTIC_CONFIG={enabled:!0,maxTurns:100,maxToolPreviewLines:25},LEGACY_REASONING_TAGS={START:"<<<reasoning_content_start>>>"},LEGACY_AGENTIC_REGEX={COMPLETED_TOOL_CALL:/<<<AGENTIC_TOOL_CALL_START>>>\n<<<TOOL_NAME:(.+?)>>>\n<<<TOOL_ARGS_START>>>([\s\S]*?)<<<TOOL_ARGS_END>>>([\s\S]*?)<<<AGENTIC_TOOL_CALL_END>>>/g,REASONING_BLOCK:/<<<reasoning_content_start>>>[\s\S]*?<<<reasoning_content_end>>>/g,REASONING_EXTRACT:/<<<reasoning_content_start>>>([\s\S]*?)<<<reasoning_content_end>>>/,
|
||||
`,LLM_ERROR_BLOCK_END="\n```\n",DEFAULT_AGENTIC_CONFIG={enabled:!0,maxTurns:100,maxToolPreviewLines:25},REASONING_TAGS={START:"<think>",END:"</think>"},LEGACY_REASONING_TAGS={START:"<<<reasoning_content_start>>>"},LEGACY_AGENTIC_REGEX={COMPLETED_TOOL_CALL:/<<<AGENTIC_TOOL_CALL_START>>>\n<<<TOOL_NAME:(.+?)>>>\n<<<TOOL_ARGS_START>>>([\s\S]*?)<<<TOOL_ARGS_END>>>([\s\S]*?)<<<AGENTIC_TOOL_CALL_END>>>/g,REASONING_BLOCK:/<<<reasoning_content_start>>>[\s\S]*?<<<reasoning_content_end>>>/g,REASONING_EXTRACT:/<<<reasoning_content_start>>>([\s\S]*?)<<<reasoning_content_end>>>/,
|
||||
REASONING_OPEN:/<<<reasoning_content_start>>>[\s\S]*$/,AGENTIC_TOOL_CALL_OPEN:/\n*<<<AGENTIC_TOOL_CALL_START>>>[\s\S]*$/,HAS_LEGACY_MARKERS:/<<<(?:AGENTIC_TOOL_CALL_START|reasoning_content_start)>>>/},API_MODELS={LIST:"/v1/models",LOAD:"/models/load",UNLOAD:"/models/unload"},CORS_PROXY_ENDPOINT="/cors-proxy",ATTACHMENT_LABEL_PDF_FILE="PDF File",ATTACHMENT_LABEL_MCP_PROMPT="MCP Prompt",ATTACHMENT_LABEL_MCP_RESOURCE="MCP Resource",AUTO_SCROLL_INTERVAL=100,AUTO_SCROLL_AT_BOTTOM_THRESHOLD=10,DEFAULT_BINARY_DETECTION_OPTIONS={
|
||||
prefixLength:1024*10,suspiciousCharThresholdRatio:.15,maxAbsoluteNullBytes:2},DEFAULT_CACHE_TTL_MS=300*1e3,DEFAULT_CACHE_MAX_ENTRIES=100,MODEL_PROPS_CACHE_TTL_MS=600*1e3,MODEL_PROPS_CACHE_MAX_ENTRIES=50,MCP_RESOURCE_CACHE_MAX_ENTRIES=50,MCP_RESOURCE_CACHE_TTL_MS=300*1e3,MAX_INACTIVE_CONVERSATION_STATES=10,INACTIVE_CONVERSATION_STATE_MAX_AGE_MS=1800*1e3,INITIAL_FILE_SIZE=0,PROMPT_CONTENT_SEPARATOR=`
|
||||
|
||||
|
|
@ -6923,30 +6923,36 @@ getPromptProgressText,getLiveProcessingStats,getLiveGenerationStats,shouldShowDe
|
|||
er justify-between"><div class="flex items-center space-x-2"><!> <!></div> <div class="flex gap-2"><!> <!></div></div></div>'),root_8$c=from_html('<pre class="raw-output svelte-14103tf"> </pre>'),root_10$a=from_html('<div class="text-sm whitespace-pre-wrap"> </div>'),root_11$5=from_html('<div class="mt-4 w-full max-w-[48rem]"><div class="processing-container svelte-14103tf"><span class="processing-text svelte-14103tf"> </span></div></div>'),root_12$6=from_html('<div class="inline-flex flex-wrap \
|
||||
items-start gap-2 text-xs text-muted-foreground"><!> <!></div>'),root$P=from_html('<div role="group" aria-label="Assistant message with actions"><!> <!> <!> <div class="info my-6 grid gap-4 tabular-nums"><!></div> <!></div>');function ChatMessageAssistant($$anchor,$$props){push$1($$props,!0);let className=prop($$props,"class",3,""),isLastAssistantMessage=prop($$props,"isLastAssistantMessage",3,!1),toolMessages=prop($$props,"toolMessages",19,()=>[]),siblingInfo=prop($$props,"siblingInfo",3,null),
|
||||
textareaElement=prop($$props,"textareaElement",15);const editCtx=getMessageEditContext();let shouldBranchAfterEdit=state$1(!1);function handleEditKeydown(event2){event2.key===KeyboardKey.ENTER&&!event2.shiftKey&&!isIMEComposing(event2)?(event2.preventDefault(),editCtx.save()):event2.key===KeyboardKey.ESCAPE&&(event2.preventDefault(),editCtx.cancel())}const isAgentic=user_derived(()=>hasAgenticContent($$props.message,toolMessages())),hasReasoning=user_derived(()=>!!$$props.message.reasoningContent),
|
||||
processingState=useProcessingState();let currentConfig=user_derived(config$1),isRouter=user_derived(isRouterMode),showRawOutput=state$1(!1),activeStatsView=state$1(proxy(ChatMessageStatsView.GENERATION)),statsContainerEl=state$1(void 0);function getScrollParent(el){let parent=el.parentElement;for(;parent;){const style2=getComputedStyle(parent);if(/(auto|scroll)/.test(style2.overflowY))return parent;parent=parent.parentElement}return null}async function handleStatsViewChange(view){const el=get$4(
|
||||
statsContainerEl);if(!el){set$1(activeStatsView,view,!0);return}const scrollParent=getScrollParent(el);if(!scrollParent){set$1(activeStatsView,view,!0);return}const yBefore=el.getBoundingClientRect().top;set$1(activeStatsView,view,!0),await tick();const delta=el.getBoundingClientRect().top-yBefore;delta!==0&&(scrollParent.scrollTop+=delta),requestAnimationFrame(()=>{const drift=el.getBoundingClientRect().top-yBefore;Math.abs(drift)>1&&(scrollParent.scrollTop+=drift)})}let highlightAgenticTurns=user_derived(
|
||||
()=>get$4(isAgentic)&&(get$4(currentConfig).alwaysShowAgenticTurns||get$4(activeStatsView)===ChatMessageStatsView.SUMMARY)),displayedModel=user_derived(()=>$$props.message.model??null),isCurrentlyLoading=user_derived(isLoading),isStreaming=user_derived(isChatStreaming),hasNoContent=user_derived(()=>!$$props.message?.content?.trim()),isActivelyProcessing=user_derived(()=>get$4(isCurrentlyLoading)||get$4(isStreaming)),showProcessingInfoTop=user_derived(()=>$$props.message?.role===MessageRole.ASSISTANT&&
|
||||
get$4(isActivelyProcessing)&&get$4(hasNoContent)&&!get$4(isAgentic)&&isLastAssistantMessage()),showProcessingInfoBottom=user_derived(()=>$$props.message?.role===MessageRole.ASSISTANT&&get$4(isActivelyProcessing)&&(!get$4(hasNoContent)||get$4(isAgentic))&&isLastAssistantMessage());function handleCopyModel(){copyToClipboard(get$4(displayedModel)??"")}user_effect(()=>{editCtx.isEditing&&textareaElement()&&autoResizeTextarea(textareaElement())}),user_effect(()=>{(get$4(showProcessingInfoTop)||get$4(
|
||||
showProcessingInfoBottom))&&processingState.startMonitoring()});var div=root$P(),node2=child(div);{var consequent=$$anchor2=>{var div_1=root_1$t(),div_2=child(div_1),span=child(div_2),text2=child(span,!0);reset(span),reset(div_2),reset(div_1),template_effect($0=>set_text(text2,$0),[()=>processingState.getPromptProgressText()??processingState.getProcessingMessage()??"Processing..."]),transition(1,div_1,()=>fade),append($$anchor2,div_1)};if_block(node2,$$render=>{get$4(showProcessingInfoTop)&&$$render(
|
||||
consequent)})}var node_1=sibling(node2,2);{var consequent_1=$$anchor2=>{var div_3=root_2$v(),textarea=child(div_3);remove_textarea_child(textarea),textarea.__keydown=handleEditKeydown,textarea.__input=e=>{autoResizeTextarea(e.currentTarget),editCtx.setContent(e.currentTarget.value)},bind_this(textarea,$$value=>textareaElement($$value),()=>textareaElement());var div_4=sibling(textarea,2),div_5=child(div_4),node_2=child(div_5);Checkbox(node_2,{id:"branch-after-edit",onCheckedChange:checked=>set$1(
|
||||
shouldBranchAfterEdit,checked===!0),get checked(){return get$4(shouldBranchAfterEdit)},set checked($$value){set$1(shouldBranchAfterEdit,$$value,!0)}});var node_3=sibling(node_2,2);Label(node_3,{for:"branch-after-edit",class:"cursor-pointer text-sm text-muted-foreground",children:($$anchor3,$$slotProps)=>{next$1();var text_1=text$8("Branch conversation after edit");append($$anchor3,text_1)},$$slots:{default:!0}}),reset(div_5);var div_6=sibling(div_5,2),node_4=child(div_6);Button(node_4,{class:"h-\
|
||||
8 px-3",get onclick(){return editCtx.cancel},size:"sm",variant:"outline",children:($$anchor3,$$slotProps)=>{var fragment=root_4$d(),node_5=first_child(fragment);X(node_5,{class:"mr-1 h-3 w-3"}),next$1(),append($$anchor3,fragment)},$$slots:{default:!0}});var node_6=sibling(node_4,2);{let $0=user_derived(()=>!editCtx.editedContent?.trim());Button(node_6,{class:"h-8 px-3",get onclick(){return editCtx.save},get disabled(){return get$4($0)},size:"sm",children:($$anchor3,$$slotProps)=>{var fragment_1=root_5$i(),
|
||||
node_7=first_child(fragment_1);Check(node_7,{class:"mr-1 h-3 w-3"}),next$1(),append($$anchor3,fragment_1)},$$slots:{default:!0}})}reset(div_6),reset(div_4),reset(div_3),template_effect(()=>{set_value(textarea,editCtx.editedContent),set_class(textarea,1,`min-h-[50vh] w-full resize-y rounded-2xl px-3 py-2 text-sm ${INPUT_CLASSES??""}`,"svelte-14103tf")}),append($$anchor2,div_3)},alternate_2=$$anchor2=>{var fragment_2=comment$2(),node_8=first_child(fragment_2);{var consequent_3=$$anchor3=>{var fragment_3=comment$2(),
|
||||
node_9=first_child(fragment_3);{var consequent_2=$$anchor4=>{var pre=root_8$c(),text_2=child(pre,!0);reset(pre),template_effect(()=>set_text(text_2,$$props.messageContent||"")),append($$anchor4,pre)},alternate=$$anchor4=>{{let $0=user_derived(isChatStreaming);ChatMessageAgenticContent($$anchor4,{get message(){return $$props.message},get toolMessages(){return toolMessages()},get isStreaming(){return get$4($0)},get highlightTurns(){return get$4(highlightAgenticTurns)}})}};if_block(node_9,$$render=>{
|
||||
get$4(showRawOutput)?$$render(consequent_2):$$render(alternate,!1)})}append($$anchor3,fragment_3)},alternate_1=$$anchor3=>{var div_7=root_10$a(),text_3=child(div_7,!0);reset(div_7),template_effect(()=>set_text(text_3,$$props.messageContent)),append($$anchor3,div_7)};if_block(node_8,$$render=>{$$props.message.role===MessageRole.ASSISTANT?$$render(consequent_3):$$render(alternate_1,!1)},!0)}append($$anchor2,fragment_2)};if_block(node_1,$$render=>{editCtx.isEditing?$$render(consequent_1):$$render(alternate_2,
|
||||
!1)})}var node_10=sibling(node_1,2);{var consequent_4=$$anchor2=>{var div_8=root_11$5(),div_9=child(div_8),span_1=child(div_9),text_4=child(span_1,!0);reset(span_1),reset(div_9),reset(div_8),template_effect($0=>set_text(text_4,$0),[()=>processingState.getPromptProgressText()??processingState.getProcessingMessage()??"Processing..."]),transition(1,div_8,()=>fade),append($$anchor2,div_8)};if_block(node_10,$$render=>{get$4(showProcessingInfoBottom)&&$$render(consequent_4)})}var div_10=sibling(node_10,
|
||||
2),node_11=child(div_10);{var consequent_9=$$anchor2=>{var div_11=root_12$6(),node_12=child(div_11);{var consequent_5=$$anchor3=>{{let $0=user_derived(isLoading);ModelsSelector($$anchor3,{get currentModel(){return get$4(displayedModel)},get disabled(){return get$4($0)},onModelChange:async(modelId,modelName)=>(modelsStore.getModelStatus(modelId)!==ServerModelStatus.LOADED&&await modelsStore.loadModel(modelId),$$props.onRegenerate(modelName),!0)})}},alternate_3=$$anchor3=>{{let $0=user_derived(()=>get$4(
|
||||
displayedModel)||void 0);ModelBadge($$anchor3,{get model(){return get$4($0)},onclick:handleCopyModel})}};if_block(node_12,$$render=>{get$4(isRouter)?$$render(consequent_5):$$render(alternate_3,!1)})}var node_13=sibling(node_12,2);{var consequent_6=$$anchor3=>{const agentic=user_derived(()=>$$props.message.timings.agentic);{let $0=user_derived(()=>get$4(agentic)?get$4(agentic).llm.prompt_n:$$props.message.timings.prompt_n),$1=user_derived(()=>get$4(agentic)?get$4(agentic).llm.prompt_ms:$$props.message.
|
||||
timings.prompt_ms),$2=user_derived(()=>get$4(agentic)?get$4(agentic).llm.predicted_n:$$props.message.timings.predicted_n),$3=user_derived(()=>get$4(agentic)?get$4(agentic).llm.predicted_ms:$$props.message.timings.predicted_ms);ChatMessageStatistics($$anchor3,{get promptTokens(){return get$4($0)},get promptMs(){return get$4($1)},get predictedTokens(){return get$4($2)},get predictedMs(){return get$4($3)},get agenticTimings(){return get$4(agentic)},onActiveViewChange:handleStatsViewChange})}},alternate_4=$$anchor3=>{
|
||||
var fragment_8=comment$2(),node_14=first_child(fragment_8);{var consequent_8=$$anchor4=>{const liveStats=user_derived(()=>processingState.getLiveProcessingStats()),genStats=user_derived(()=>processingState.getLiveGenerationStats()),promptProgress=user_derived(()=>processingState.processingState?.promptProgress),isStillProcessingPrompt=user_derived(()=>get$4(promptProgress)&&get$4(promptProgress).processed<get$4(promptProgress).total);var fragment_9=comment$2(),node_15=first_child(fragment_9);{var consequent_7=$$anchor5=>{
|
||||
{let $0=user_derived(()=>!!get$4(isStillProcessingPrompt)),$1=user_derived(()=>get$4(liveStats)?.tokensProcessed),$2=user_derived(()=>get$4(liveStats)?.timeMs),$3=user_derived(()=>get$4(genStats)?.tokensGenerated),$4=user_derived(()=>get$4(genStats)?.timeMs);ChatMessageStatistics($$anchor5,{isLive:!0,get isProcessingPrompt(){return get$4($0)},get promptTokens(){return get$4($1)},get promptMs(){return get$4($2)},get predictedTokens(){return get$4($3)},get predictedMs(){return get$4($4)}})}};if_block(
|
||||
node_15,$$render=>{(get$4(liveStats)||get$4(genStats))&&$$render(consequent_7)})}append($$anchor4,fragment_9)};if_block(node_14,$$render=>{isLoading()&&get$4(currentConfig).showMessageStats&&$$render(consequent_8)},!0)}append($$anchor3,fragment_8)};if_block(node_13,$$render=>{get$4(currentConfig).showMessageStats&&$$props.message.timings&&$$props.message.timings.predicted_n&&$$props.message.timings.predicted_ms?$$render(consequent_6):$$render(alternate_4,!1)})}reset(div_11),bind_this(div_11,$$value=>set$1(
|
||||
statsContainerEl,$$value),()=>get$4(statsContainerEl)),append($$anchor2,div_11)};if_block(node_11,$$render=>{get$4(displayedModel)&&$$render(consequent_9)})}reset(div_10);var node_16=sibling(div_10,2);{var consequent_10=$$anchor2=>{{let $0=user_derived(()=>get$4(currentConfig).enableContinueGeneration&&!get$4(hasReasoning)?$$props.onContinue:void 0);ChatMessageActions($$anchor2,{get role(){return MessageRole.ASSISTANT},justify:"start",actionsPosition:"left",get siblingInfo(){return siblingInfo()},
|
||||
get showDeleteDialog(){return $$props.showDeleteDialog},get deletionInfo(){return $$props.deletionInfo},get onCopy(){return $$props.onCopy},get onEdit(){return $$props.onEdit},get onRegenerate(){return $$props.onRegenerate},get onContinue(){return get$4($0)},get onForkConversation(){return $$props.onForkConversation},get onDelete(){return $$props.onDelete},get onConfirmDelete(){return $$props.onConfirmDelete},get onNavigateToSibling(){return $$props.onNavigateToSibling},get onShowDeleteDialogChange(){
|
||||
return $$props.onShowDeleteDialogChange},get showRawOutputSwitch(){return get$4(currentConfig).showRawOutputSwitch},get rawOutputEnabled(){return get$4(showRawOutput)},onRawOutputToggle:enabled=>set$1(showRawOutput,enabled,!0)})}};if_block(node_16,$$render=>{$$props.message.timestamp&&!editCtx.isEditing&&$$render(consequent_10)})}reset(div),template_effect(()=>set_class(div,1,`text-md group w-full leading-7.5 ${className()??""}`,"svelte-14103tf")),append($$anchor,div),pop()}delegate(["keydown","\
|
||||
input"]);class AutoScrollController{#_autoScrollEnabled=state$1(!0);get _autoScrollEnabled(){return get$4(this.#_autoScrollEnabled)}set _autoScrollEnabled(value){set$1(this.#_autoScrollEnabled,value,!0)}#_userScrolledUp=state$1(!1);get _userScrolledUp(){return get$4(this.#_userScrolledUp)}set _userScrolledUp(value){set$1(this.#_userScrolledUp,value,!0)}#_lastScrollTop=state$1(0);get _lastScrollTop(){return get$4(this.#_lastScrollTop)}set _lastScrollTop(value){set$1(this.#_lastScrollTop,value,!0)}_scrollInterval;_scrollTimeout;_container;_disabled;_isColumnReverse;_mutationObserver=null;_rafPending=!1;_observerEnabled=!1;constructor(options={}){
|
||||
this._disabled=options.disabled??!1,this._isColumnReverse=options.isColumnReverse??!1}get autoScrollEnabled(){return this._autoScrollEnabled}get userScrolledUp(){return this._userScrolledUp}setContainer(container2){this._doStopObserving(),this._container=container2,this._observerEnabled&&container2&&!this._disabled&&this._doStartObserving()}setDisabled(disabled){this._disabled=disabled,disabled?(this._autoScrollEnabled=!1,this.stopInterval(),this._doStopObserving()):this._observerEnabled&&this._container&&
|
||||
!this._mutationObserver&&this._doStartObserving()}handleScroll(){if(this._disabled||!this._container)return;const{scrollTop,scrollHeight,clientHeight}=this._container;let distanceFromBottom,isScrollingUp;this._isColumnReverse?(distanceFromBottom=Math.abs(scrollTop),isScrollingUp=scrollTop<this._lastScrollTop):(distanceFromBottom=scrollHeight-clientHeight-scrollTop,isScrollingUp=scrollTop<this._lastScrollTop);const isAtBottom=distanceFromBottom<AUTO_SCROLL_AT_BOTTOM_THRESHOLD;isScrollingUp&&!isAtBottom?
|
||||
processingState=useProcessingState();let currentConfig=user_derived(config$1),isRouter=user_derived(isRouterMode),showRawOutput=state$1(!1),rawOutputContent=user_derived(()=>{const sections=deriveAgenticSections($$props.message,toolMessages(),[],!1),parts=[];for(const section of sections)switch(section.type){case AgenticSectionType.REASONING:case AgenticSectionType.REASONING_PENDING:parts.push(`${REASONING_TAGS.START}
|
||||
${section.content}
|
||||
${REASONING_TAGS.END}`);break;case AgenticSectionType.TEXT:parts.push(section.content);break;case AgenticSectionType.TOOL_CALL:case AgenticSectionType.TOOL_CALL_PENDING:case AgenticSectionType.TOOL_CALL_STREAMING:{const callObj={name:section.toolName};if(section.toolArgs)try{callObj.arguments=JSON.parse(section.toolArgs)}catch{callObj.arguments=section.toolArgs}parts.push(JSON.stringify(callObj,null,2)),section.toolResult&&parts.push(`[Tool Result]
|
||||
${section.toolResult}`);break}}return parts.join(`
|
||||
|
||||
|
||||
`)}),activeStatsView=state$1(proxy(ChatMessageStatsView.GENERATION)),statsContainerEl=state$1(void 0);function getScrollParent(el){let parent=el.parentElement;for(;parent;){const style2=getComputedStyle(parent);if(/(auto|scroll)/.test(style2.overflowY))return parent;parent=parent.parentElement}return null}async function handleStatsViewChange(view){const el=get$4(statsContainerEl);if(!el){set$1(activeStatsView,view,!0);return}const scrollParent=getScrollParent(el);if(!scrollParent){set$1(activeStatsView,
|
||||
view,!0);return}const yBefore=el.getBoundingClientRect().top;set$1(activeStatsView,view,!0),await tick();const delta=el.getBoundingClientRect().top-yBefore;delta!==0&&(scrollParent.scrollTop+=delta),requestAnimationFrame(()=>{const drift=el.getBoundingClientRect().top-yBefore;Math.abs(drift)>1&&(scrollParent.scrollTop+=drift)})}let highlightAgenticTurns=user_derived(()=>get$4(isAgentic)&&(get$4(currentConfig).alwaysShowAgenticTurns||get$4(activeStatsView)===ChatMessageStatsView.SUMMARY)),displayedModel=user_derived(
|
||||
()=>$$props.message.model??null),isCurrentlyLoading=user_derived(isLoading),isStreaming=user_derived(isChatStreaming),hasNoContent=user_derived(()=>!$$props.message?.content?.trim()),isActivelyProcessing=user_derived(()=>get$4(isCurrentlyLoading)||get$4(isStreaming)),showProcessingInfoTop=user_derived(()=>$$props.message?.role===MessageRole.ASSISTANT&&get$4(isActivelyProcessing)&&get$4(hasNoContent)&&!get$4(isAgentic)&&isLastAssistantMessage()),showProcessingInfoBottom=user_derived(()=>$$props.message?.
|
||||
role===MessageRole.ASSISTANT&&get$4(isActivelyProcessing)&&(!get$4(hasNoContent)||get$4(isAgentic))&&isLastAssistantMessage());function handleCopyModel(){copyToClipboard(get$4(displayedModel)??"")}user_effect(()=>{editCtx.isEditing&&textareaElement()&&autoResizeTextarea(textareaElement())}),user_effect(()=>{(get$4(showProcessingInfoTop)||get$4(showProcessingInfoBottom))&&processingState.startMonitoring()});var div=root$P(),node2=child(div);{var consequent=$$anchor2=>{var div_1=root_1$t(),div_2=child(
|
||||
div_1),span=child(div_2),text2=child(span,!0);reset(span),reset(div_2),reset(div_1),template_effect($0=>set_text(text2,$0),[()=>processingState.getPromptProgressText()??processingState.getProcessingMessage()??"Processing..."]),transition(1,div_1,()=>fade),append($$anchor2,div_1)};if_block(node2,$$render=>{get$4(showProcessingInfoTop)&&$$render(consequent)})}var node_1=sibling(node2,2);{var consequent_1=$$anchor2=>{var div_3=root_2$v(),textarea=child(div_3);remove_textarea_child(textarea),textarea.
|
||||
__keydown=handleEditKeydown,textarea.__input=e=>{autoResizeTextarea(e.currentTarget),editCtx.setContent(e.currentTarget.value)},bind_this(textarea,$$value=>textareaElement($$value),()=>textareaElement());var div_4=sibling(textarea,2),div_5=child(div_4),node_2=child(div_5);Checkbox(node_2,{id:"branch-after-edit",onCheckedChange:checked=>set$1(shouldBranchAfterEdit,checked===!0),get checked(){return get$4(shouldBranchAfterEdit)},set checked($$value){set$1(shouldBranchAfterEdit,$$value,!0)}});var node_3=sibling(
|
||||
node_2,2);Label(node_3,{for:"branch-after-edit",class:"cursor-pointer text-sm text-muted-foreground",children:($$anchor3,$$slotProps)=>{next$1();var text_1=text$8("Branch conversation after edit");append($$anchor3,text_1)},$$slots:{default:!0}}),reset(div_5);var div_6=sibling(div_5,2),node_4=child(div_6);Button(node_4,{class:"h-8 px-3",get onclick(){return editCtx.cancel},size:"sm",variant:"outline",children:($$anchor3,$$slotProps)=>{var fragment=root_4$d(),node_5=first_child(fragment);X(node_5,
|
||||
{class:"mr-1 h-3 w-3"}),next$1(),append($$anchor3,fragment)},$$slots:{default:!0}});var node_6=sibling(node_4,2);{let $0=user_derived(()=>!editCtx.editedContent?.trim());Button(node_6,{class:"h-8 px-3",get onclick(){return editCtx.save},get disabled(){return get$4($0)},size:"sm",children:($$anchor3,$$slotProps)=>{var fragment_1=root_5$i(),node_7=first_child(fragment_1);Check(node_7,{class:"mr-1 h-3 w-3"}),next$1(),append($$anchor3,fragment_1)},$$slots:{default:!0}})}reset(div_6),reset(div_4),reset(
|
||||
div_3),template_effect(()=>{set_value(textarea,editCtx.editedContent),set_class(textarea,1,`min-h-[50vh] w-full resize-y rounded-2xl px-3 py-2 text-sm ${INPUT_CLASSES??""}`,"svelte-14103tf")}),append($$anchor2,div_3)},alternate_2=$$anchor2=>{var fragment_2=comment$2(),node_8=first_child(fragment_2);{var consequent_3=$$anchor3=>{var fragment_3=comment$2(),node_9=first_child(fragment_3);{var consequent_2=$$anchor4=>{var pre=root_8$c(),text_2=child(pre,!0);reset(pre),template_effect(()=>set_text(text_2,
|
||||
get$4(rawOutputContent)||"")),append($$anchor4,pre)},alternate=$$anchor4=>{{let $0=user_derived(isChatStreaming);ChatMessageAgenticContent($$anchor4,{get message(){return $$props.message},get toolMessages(){return toolMessages()},get isStreaming(){return get$4($0)},get highlightTurns(){return get$4(highlightAgenticTurns)}})}};if_block(node_9,$$render=>{get$4(showRawOutput)?$$render(consequent_2):$$render(alternate,!1)})}append($$anchor3,fragment_3)},alternate_1=$$anchor3=>{var div_7=root_10$a(),
|
||||
text_3=child(div_7,!0);reset(div_7),template_effect(()=>set_text(text_3,$$props.messageContent)),append($$anchor3,div_7)};if_block(node_8,$$render=>{$$props.message.role===MessageRole.ASSISTANT?$$render(consequent_3):$$render(alternate_1,!1)},!0)}append($$anchor2,fragment_2)};if_block(node_1,$$render=>{editCtx.isEditing?$$render(consequent_1):$$render(alternate_2,!1)})}var node_10=sibling(node_1,2);{var consequent_4=$$anchor2=>{var div_8=root_11$5(),div_9=child(div_8),span_1=child(div_9),text_4=child(
|
||||
span_1,!0);reset(span_1),reset(div_9),reset(div_8),template_effect($0=>set_text(text_4,$0),[()=>processingState.getPromptProgressText()??processingState.getProcessingMessage()??"Processing..."]),transition(1,div_8,()=>fade),append($$anchor2,div_8)};if_block(node_10,$$render=>{get$4(showProcessingInfoBottom)&&$$render(consequent_4)})}var div_10=sibling(node_10,2),node_11=child(div_10);{var consequent_9=$$anchor2=>{var div_11=root_12$6(),node_12=child(div_11);{var consequent_5=$$anchor3=>{{let $0=user_derived(
|
||||
isLoading);ModelsSelector($$anchor3,{get currentModel(){return get$4(displayedModel)},get disabled(){return get$4($0)},onModelChange:async(modelId,modelName)=>(modelsStore.getModelStatus(modelId)!==ServerModelStatus.LOADED&&await modelsStore.loadModel(modelId),$$props.onRegenerate(modelName),!0)})}},alternate_3=$$anchor3=>{{let $0=user_derived(()=>get$4(displayedModel)||void 0);ModelBadge($$anchor3,{get model(){return get$4($0)},onclick:handleCopyModel})}};if_block(node_12,$$render=>{get$4(isRouter)?
|
||||
$$render(consequent_5):$$render(alternate_3,!1)})}var node_13=sibling(node_12,2);{var consequent_6=$$anchor3=>{const agentic=user_derived(()=>$$props.message.timings.agentic);{let $0=user_derived(()=>get$4(agentic)?get$4(agentic).llm.prompt_n:$$props.message.timings.prompt_n),$1=user_derived(()=>get$4(agentic)?get$4(agentic).llm.prompt_ms:$$props.message.timings.prompt_ms),$2=user_derived(()=>get$4(agentic)?get$4(agentic).llm.predicted_n:$$props.message.timings.predicted_n),$3=user_derived(()=>get$4(
|
||||
agentic)?get$4(agentic).llm.predicted_ms:$$props.message.timings.predicted_ms);ChatMessageStatistics($$anchor3,{get promptTokens(){return get$4($0)},get promptMs(){return get$4($1)},get predictedTokens(){return get$4($2)},get predictedMs(){return get$4($3)},get agenticTimings(){return get$4(agentic)},onActiveViewChange:handleStatsViewChange})}},alternate_4=$$anchor3=>{var fragment_8=comment$2(),node_14=first_child(fragment_8);{var consequent_8=$$anchor4=>{const liveStats=user_derived(()=>processingState.
|
||||
getLiveProcessingStats()),genStats=user_derived(()=>processingState.getLiveGenerationStats()),promptProgress=user_derived(()=>processingState.processingState?.promptProgress),isStillProcessingPrompt=user_derived(()=>get$4(promptProgress)&&get$4(promptProgress).processed<get$4(promptProgress).total);var fragment_9=comment$2(),node_15=first_child(fragment_9);{var consequent_7=$$anchor5=>{{let $0=user_derived(()=>!!get$4(isStillProcessingPrompt)),$1=user_derived(()=>get$4(liveStats)?.tokensProcessed),
|
||||
$2=user_derived(()=>get$4(liveStats)?.timeMs),$3=user_derived(()=>get$4(genStats)?.tokensGenerated),$4=user_derived(()=>get$4(genStats)?.timeMs);ChatMessageStatistics($$anchor5,{isLive:!0,get isProcessingPrompt(){return get$4($0)},get promptTokens(){return get$4($1)},get promptMs(){return get$4($2)},get predictedTokens(){return get$4($3)},get predictedMs(){return get$4($4)}})}};if_block(node_15,$$render=>{(get$4(liveStats)||get$4(genStats))&&$$render(consequent_7)})}append($$anchor4,fragment_9)};
|
||||
if_block(node_14,$$render=>{isLoading()&&get$4(currentConfig).showMessageStats&&$$render(consequent_8)},!0)}append($$anchor3,fragment_8)};if_block(node_13,$$render=>{get$4(currentConfig).showMessageStats&&$$props.message.timings&&$$props.message.timings.predicted_n&&$$props.message.timings.predicted_ms?$$render(consequent_6):$$render(alternate_4,!1)})}reset(div_11),bind_this(div_11,$$value=>set$1(statsContainerEl,$$value),()=>get$4(statsContainerEl)),append($$anchor2,div_11)};if_block(node_11,$$render=>{
|
||||
get$4(displayedModel)&&$$render(consequent_9)})}reset(div_10);var node_16=sibling(div_10,2);{var consequent_10=$$anchor2=>{{let $0=user_derived(()=>get$4(currentConfig).enableContinueGeneration&&!get$4(hasReasoning)?$$props.onContinue:void 0);ChatMessageActions($$anchor2,{get role(){return MessageRole.ASSISTANT},justify:"start",actionsPosition:"left",get siblingInfo(){return siblingInfo()},get showDeleteDialog(){return $$props.showDeleteDialog},get deletionInfo(){return $$props.deletionInfo},get onCopy(){
|
||||
return $$props.onCopy},get onEdit(){return $$props.onEdit},get onRegenerate(){return $$props.onRegenerate},get onContinue(){return get$4($0)},get onForkConversation(){return $$props.onForkConversation},get onDelete(){return $$props.onDelete},get onConfirmDelete(){return $$props.onConfirmDelete},get onNavigateToSibling(){return $$props.onNavigateToSibling},get onShowDeleteDialogChange(){return $$props.onShowDeleteDialogChange},get showRawOutputSwitch(){return get$4(currentConfig).showRawOutputSwitch},
|
||||
get rawOutputEnabled(){return get$4(showRawOutput)},onRawOutputToggle:enabled=>set$1(showRawOutput,enabled,!0)})}};if_block(node_16,$$render=>{$$props.message.timestamp&&!editCtx.isEditing&&$$render(consequent_10)})}reset(div),template_effect(()=>set_class(div,1,`text-md group w-full leading-7.5 ${className()??""}`,"svelte-14103tf")),append($$anchor,div),pop()}delegate(["keydown","input"]);class AutoScrollController{#_autoScrollEnabled=state$1(!0);get _autoScrollEnabled(){return get$4(this.#_autoScrollEnabled)}set _autoScrollEnabled(value){
|
||||
set$1(this.#_autoScrollEnabled,value,!0)}#_userScrolledUp=state$1(!1);get _userScrolledUp(){return get$4(this.#_userScrolledUp)}set _userScrolledUp(value){set$1(this.#_userScrolledUp,value,!0)}#_lastScrollTop=state$1(0);get _lastScrollTop(){return get$4(this.#_lastScrollTop)}set _lastScrollTop(value){set$1(this.#_lastScrollTop,value,!0)}_scrollInterval;_scrollTimeout;_container;_disabled;_isColumnReverse;_mutationObserver=null;_rafPending=!1;_observerEnabled=!1;constructor(options={}){this._disabled=
|
||||
options.disabled??!1,this._isColumnReverse=options.isColumnReverse??!1}get autoScrollEnabled(){return this._autoScrollEnabled}get userScrolledUp(){return this._userScrolledUp}setContainer(container2){this._doStopObserving(),this._container=container2,this._observerEnabled&&container2&&!this._disabled&&this._doStartObserving()}setDisabled(disabled){this._disabled=disabled,disabled?(this._autoScrollEnabled=!1,this.stopInterval(),this._doStopObserving()):this._observerEnabled&&this._container&&!this.
|
||||
_mutationObserver&&this._doStartObserving()}handleScroll(){if(this._disabled||!this._container)return;const{scrollTop,scrollHeight,clientHeight}=this._container;let distanceFromBottom,isScrollingUp;this._isColumnReverse?(distanceFromBottom=Math.abs(scrollTop),isScrollingUp=scrollTop<this._lastScrollTop):(distanceFromBottom=scrollHeight-clientHeight-scrollTop,isScrollingUp=scrollTop<this._lastScrollTop);const isAtBottom=distanceFromBottom<AUTO_SCROLL_AT_BOTTOM_THRESHOLD;isScrollingUp&&!isAtBottom?
|
||||
(this._userScrolledUp=!0,this._autoScrollEnabled=!1):isAtBottom&&this._userScrolledUp&&(this._userScrolledUp=!1,this._autoScrollEnabled=!0),this._scrollTimeout&&clearTimeout(this._scrollTimeout),this._scrollTimeout=setTimeout(()=>{isAtBottom&&(this._userScrolledUp=!1,this._autoScrollEnabled=!0)},AUTO_SCROLL_INTERVAL),this._lastScrollTop=scrollTop}scrollToBottom(behavior="smooth"){this._disabled||!this._container||(this._isColumnReverse?this._container.scrollTo({top:0,behavior}):this._container.scrollTo(
|
||||
{top:this._container.scrollHeight,behavior}))}enable(){this._disabled||(this._userScrolledUp=!1,this._autoScrollEnabled=!0)}startInterval(){this._disabled||this._scrollInterval||(this._scrollInterval=setInterval(()=>{this.scrollToBottom()},AUTO_SCROLL_INTERVAL))}stopInterval(){this._scrollInterval&&(clearInterval(this._scrollInterval),this._scrollInterval=void 0)}updateInterval(isStreaming){if(this._disabled){this.stopInterval();return}isStreaming&&this._autoScrollEnabled?this._scrollInterval||this.
|
||||
startInterval():this.stopInterval()}destroy(){this.stopInterval(),this._doStopObserving(),this._scrollTimeout&&(clearTimeout(this._scrollTimeout),this._scrollTimeout=void 0)}startObserving(){this._observerEnabled=!0,this._container&&!this._disabled&&!this._mutationObserver&&this._doStartObserving()}stopObserving(){this._observerEnabled=!1,this._doStopObserving()}_doStartObserving(){if(!this._container||this._mutationObserver)return;const isReverse=this._isColumnReverse;this._mutationObserver=new MutationObserver(
|
||||
|
|
|
|||
|
|
@ -2032,13 +2032,22 @@ server_tokens format_prompt_rerank(
|
|||
const struct llama_model * model,
|
||||
const struct llama_vocab * vocab,
|
||||
mtmd_context * mctx,
|
||||
const std::string & instruction,
|
||||
const std::string & query,
|
||||
const std::string & doc) {
|
||||
server_tokens result = {};
|
||||
|
||||
const char * rerank_prompt = llama_model_chat_template(model, "rerank");
|
||||
const char * rerank_prompt_instruct = llama_model_chat_template(model, "rerank_instruct");
|
||||
|
||||
if (rerank_prompt != nullptr) {
|
||||
if ( (rerank_prompt_instruct != nullptr) && !instruction.empty() ) {
|
||||
std::string prompt = rerank_prompt_instruct;
|
||||
string_replace_all(prompt, "{instruction}", instruction);
|
||||
string_replace_all(prompt, "{query}" , query);
|
||||
string_replace_all(prompt, "{document}" , doc );
|
||||
server_tokens tokens = tokenize_input_subprompt(vocab, mctx, prompt, false, true);
|
||||
result.push_back(tokens);
|
||||
} else if (rerank_prompt != nullptr) {
|
||||
std::string prompt = rerank_prompt;
|
||||
string_replace_all(prompt, "{query}" , query);
|
||||
string_replace_all(prompt, "{document}", doc );
|
||||
|
|
|
|||
|
|
@ -370,5 +370,6 @@ server_tokens format_prompt_rerank(
|
|||
const struct llama_model * model,
|
||||
const struct llama_vocab * vocab,
|
||||
mtmd_context * mctx,
|
||||
const std::string & instruction,
|
||||
const std::string & query,
|
||||
const std::string & doc);
|
||||
|
|
|
|||
|
|
@ -3924,6 +3924,15 @@ void server_routes::init_routes() {
|
|||
res->error(format_error_response("\"query\" must be provided", ERROR_TYPE_INVALID_REQUEST));
|
||||
return res;
|
||||
}
|
||||
|
||||
json instruction = "";
|
||||
if (body.count("instruction") == 1) {
|
||||
instruction = body.at("instruction");
|
||||
if (!instruction.is_string()) {
|
||||
res->error(format_error_response("\"instruction\" must be a string", ERROR_TYPE_INVALID_REQUEST));
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::string> documents = json_value(body, "documents",
|
||||
json_value(body, "texts", std::vector<std::string>()));
|
||||
|
|
@ -3941,7 +3950,7 @@ void server_routes::init_routes() {
|
|||
std::vector<server_task> tasks;
|
||||
tasks.reserve(documents.size());
|
||||
for (size_t i = 0; i < documents.size(); i++) {
|
||||
auto tmp = format_prompt_rerank(ctx_server.model, ctx_server.vocab, ctx_server.mctx, query, documents[i]);
|
||||
auto tmp = format_prompt_rerank(ctx_server.model, ctx_server.vocab, ctx_server.mctx, instruction, query, documents[i]);
|
||||
server_task task = server_task(SERVER_TASK_TYPE_RERANK);
|
||||
task.id = rd.get_new_id();
|
||||
task.tokens = std::move(tmp);
|
||||
|
|
|
|||
|
|
@ -9,7 +9,14 @@
|
|||
import { getMessageEditContext } from '$lib/contexts';
|
||||
import { useProcessingState } from '$lib/hooks/use-processing-state.svelte';
|
||||
import { isLoading, isChatStreaming } from '$lib/stores/chat.svelte';
|
||||
import { autoResizeTextarea, copyToClipboard, isIMEComposing } from '$lib/utils';
|
||||
import {
|
||||
autoResizeTextarea,
|
||||
copyToClipboard,
|
||||
isIMEComposing,
|
||||
deriveAgenticSections
|
||||
} from '$lib/utils';
|
||||
import { AgenticSectionType } from '$lib/enums';
|
||||
import { REASONING_TAGS } from '$lib/constants/agentic';
|
||||
import { tick } from 'svelte';
|
||||
import { fade } from 'svelte/transition';
|
||||
import { Check, X } from '@lucide/svelte';
|
||||
|
|
@ -95,6 +102,49 @@
|
|||
let currentConfig = $derived(config());
|
||||
let isRouter = $derived(isRouterMode());
|
||||
let showRawOutput = $state(false);
|
||||
|
||||
let rawOutputContent = $derived.by(() => {
|
||||
const sections = deriveAgenticSections(message, toolMessages, [], false);
|
||||
const parts: string[] = [];
|
||||
|
||||
for (const section of sections) {
|
||||
switch (section.type) {
|
||||
case AgenticSectionType.REASONING:
|
||||
case AgenticSectionType.REASONING_PENDING:
|
||||
parts.push(`${REASONING_TAGS.START}\n${section.content}\n${REASONING_TAGS.END}`);
|
||||
break;
|
||||
|
||||
case AgenticSectionType.TEXT:
|
||||
parts.push(section.content);
|
||||
break;
|
||||
|
||||
case AgenticSectionType.TOOL_CALL:
|
||||
case AgenticSectionType.TOOL_CALL_PENDING:
|
||||
case AgenticSectionType.TOOL_CALL_STREAMING: {
|
||||
const callObj: Record<string, unknown> = { name: section.toolName };
|
||||
|
||||
if (section.toolArgs) {
|
||||
try {
|
||||
callObj.arguments = JSON.parse(section.toolArgs);
|
||||
} catch {
|
||||
callObj.arguments = section.toolArgs;
|
||||
}
|
||||
}
|
||||
|
||||
parts.push(JSON.stringify(callObj, null, 2));
|
||||
|
||||
if (section.toolResult) {
|
||||
parts.push(`[Tool Result]\n${section.toolResult}`);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return parts.join('\n\n\n');
|
||||
});
|
||||
|
||||
let activeStatsView = $state<ChatMessageStatsView>(ChatMessageStatsView.GENERATION);
|
||||
let statsContainerEl: HTMLDivElement | undefined = $state();
|
||||
|
||||
|
|
@ -252,7 +302,7 @@
|
|||
</div>
|
||||
{:else if message.role === MessageRole.ASSISTANT}
|
||||
{#if showRawOutput}
|
||||
<pre class="raw-output">{messageContent || ''}</pre>
|
||||
<pre class="raw-output">{rawOutputContent || ''}</pre>
|
||||
{:else}
|
||||
<ChatMessageAgenticContent
|
||||
{message}
|
||||
|
|
|
|||
|
|
@ -15,6 +15,11 @@ export const DEFAULT_AGENTIC_CONFIG: AgenticConfig = {
|
|||
maxToolPreviewLines: 25
|
||||
} as const;
|
||||
|
||||
export const REASONING_TAGS = {
|
||||
START: '<think>',
|
||||
END: '</think>'
|
||||
} as const;
|
||||
|
||||
/**
|
||||
* @deprecated Legacy marker tags - only used for migration of old stored messages.
|
||||
* New messages use structured fields (reasoningContent, toolCalls, toolCallId).
|
||||
|
|
|
|||
Loading…
Reference in New Issue