From 7bab4a306551072492f97d0c4a179e904332e6bb Mon Sep 17 00:00:00 2001 From: EliteGPT AI Date: Wed, 31 Dec 2025 12:57:45 +1000 Subject: [PATCH 1/4] model : add Qwen3-Omni multimodal architecture support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds support for Qwen3-Omni, Alibaba's multimodal LLM that handles text and vision. This enables the main LLM architecture and vision encoder support. Main LLM changes: - Add LLM_ARCH_QWEN3OMNI enum and architecture registration - Add hparams loading for MoE-based architecture (48 layers, 128 experts) - Reuse llm_build_qwen3moe graph builder - Add IMROPE type for multimodal position encoding Vision encoder changes (via mtmd): - Add PROJECTOR_TYPE_QWEN3O with auto-conversion to QWEN3VL for vision - Support different embedding dimensions (vision=8192, audio=2048) - Add separate Q/K/V tensor support in qwen3vl graph builder Tested with Qwen3-Omni-30B-Q8_0.gguf on distributed 5-GPU setup: - 41-44 tokens/sec inference speed - Text and vision inference working Note: Audio encoder support is WIP and will follow in a separate PR. πŸ€– Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- convert_hf_to_gguf.py | 309 ++++++++++++++++++++++++++++++++++ gguf-py/gguf/constants.py | 20 +++ src/llama-arch.cpp | 2 + src/llama-arch.h | 1 + src/llama-model.cpp | 18 +- tools/mtmd/clip-impl.h | 2 + tools/mtmd/clip.cpp | 12 ++ tools/mtmd/models/qwen3vl.cpp | 48 ++++-- tools/mtmd/mtmd.cpp | 16 +- 9 files changed, 409 insertions(+), 19 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index f893b24c75..f3f27654c8 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -4537,6 +4537,315 @@ class Qwen3VLMoeTextModel(Qwen3MoeModel): return super().modify_tensors(data_torch, name, bid) +@ModelBase.register("Qwen3OmniMoeForConditionalGeneration") +class Qwen3OmniModel(MmprojModel): + """Qwen3-Omni multimodal model converter for audio + vision encoders. 
+ + Key differences from Qwen2.5-Omni: + - Audio uses conv2d1/conv2d2/conv2d3 (not conv1/conv2) + - Audio has conv_out, ln_post, proj1, proj2 + - Vision has merger_list (deepstack) like Qwen3-VL + - Vision patch_embed is Conv3D (needs 5Dβ†’4D tensor splitting) + - Vision has explicit pos_embed.weight + """ + has_vision_encoder = True + has_audio_encoder = True + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # Setup audio config + assert self.hparams_audio is not None + self.hparams_audio["hidden_size"] = self.hparams_audio.get("d_model") + self.hparams_audio["intermediate_size"] = self.hparams_audio.get("encoder_ffn_dim") + self.hparams_audio["num_attention_heads"] = self.hparams_audio.get("encoder_attention_heads") + + # Setup vision config + assert self.hparams_vision is not None + self.hparams_vision["num_attention_heads"] = self.hparams_vision.get("num_heads") + self.hparams_vision["num_hidden_layers"] = self.hparams_vision.get("depth") + + # Handle image_size - may need to compute from other params + if "image_size" not in self.hparams_vision or self.hparams_vision["image_size"] is None: + self.hparams_vision["image_size"] = 768 # Default for Qwen3-Omni + + # Track deepstack layers + self.is_deepstack_layers = [False] * int(self.hparams_vision.get("num_hidden_layers", 27) or 27) + for idx in self.hparams_vision.get("deepstack_visual_indexes", []): + self.is_deepstack_layers[idx] = True + + def get_vision_config(self) -> dict[str, Any] | None: + return self.global_config.get("thinker_config", {}).get("vision_config") + + def get_audio_config(self) -> dict[str, Any] | None: + return self.global_config.get("thinker_config", {}).get("audio_config") + + def set_gguf_parameters(self): + super().set_gguf_parameters() + + # Set projector type for Qwen3-Omni + self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.QWEN3O) + + # Audio parameters + assert self.hparams_audio is not None + self.gguf_writer.add_audio_num_mel_bins(self.hparams_audio.get("num_mel_bins", 128)) + self.gguf_writer.add_audio_attention_layernorm_eps(self.hparams_audio.get("layer_norm_eps", 1e-5)) + + # Vision parameters + self.gguf_writer.add_vision_use_gelu(True) # Qwen3-Omni uses GELU + + # Vision attention layernorm eps from text config + text_config = self.global_config.get("thinker_config", {}).get("text_config", {}) + rms_norm_eps = text_config.get("rms_norm_eps", 1e-6) + self.gguf_writer.add_vision_attention_layernorm_eps(rms_norm_eps) + + # Deepstack layers for vision + if any(self.is_deepstack_layers): + self.gguf_writer.add_vision_is_deepstack_layers(self.is_deepstack_layers) + + def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: + """Generate sinusoidal position embeddings for audio encoder.""" + assert self.hparams_audio is not None + max_timescale = 10000 + length = 1500 # Max audio sequence length + channels = self.hparams_audio.get("hidden_size", 1280) + + log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1) + inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2).float()) + scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :] + pos_embd = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1).to(dtype=torch.float32) + + yield ("audio_tower.embed_positions.weight", pos_embd) + + def tensor_force_quant(self, name, new_name, bid, n_dims): + # Keep conv layers in higher precision + if ".conv" in name and ".weight" in name: + return gguf.GGMLQuantizationType.F16 + 
return super().tensor_force_quant(name, new_name, bid, n_dims) + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # Strip thinker prefix + if name.startswith("thinker."): + name = name.replace("thinker.", "") + + # Skip text model tensors (handled by text model converter) + if name.startswith("model.") or name.startswith("lm_head") or name.startswith("embed_tokens"): + return [] + + # Skip talker and code2wav (not needed for inference) + if name.startswith("talker.") or name.startswith("code2wav."): + return [] + + # Handle audio tensors + if name.startswith("audio_tower"): + # Strip audio_tower. prefix for processing + audio_name = name.replace("audio_tower.", "") + + # Skip embed_positions - we generate sinusoidal positions in generate_extra_tensors + if audio_name.startswith("embed_positions"): + return [] + + # Handle conv2d1/2/3 - map to a.enc_conv1d.{bid} + for i in [1, 2, 3]: + if audio_name.startswith(f"conv2d{i}."): + suffix = audio_name.split(".", 1)[1] # weight or bias + if suffix == "bias": + data_torch = data_torch.unsqueeze(-1) + new_name = self.format_tensor_name(gguf.MODEL_TENSOR.A_ENC_CONV1D, i - 1, suffix=f".{suffix}") + return [(new_name, data_torch)] + + # Handle conv_out - use a separate conv layer index + if audio_name.startswith("conv_out."): + suffix = audio_name.split(".", 1)[1] + if suffix == "bias": + data_torch = data_torch.unsqueeze(-1) + new_name = self.format_tensor_name(gguf.MODEL_TENSOR.A_ENC_CONV1D, 3, suffix=f".{suffix}") + return [(new_name, data_torch)] + + # Handle ln_post - post normalization + if audio_name.startswith("ln_post."): + suffix = audio_name.split(".", 1)[1] + new_name = self.format_tensor_name(gguf.MODEL_TENSOR.A_POST_NORM, suffix=f".{suffix}") + return [(new_name, data_torch)] + + # Handle proj1/proj2 - audio multimodal projector (use A_MMPROJ which supports bid) + if audio_name.startswith("proj1."): + suffix = audio_name.split(".", 1)[1] + new_name = self.format_tensor_name(gguf.MODEL_TENSOR.A_MMPROJ, 0, suffix=f".{suffix}") + return [(new_name, data_torch)] + if audio_name.startswith("proj2."): + suffix = audio_name.split(".", 1)[1] + new_name = self.format_tensor_name(gguf.MODEL_TENSOR.A_MMPROJ, 1, suffix=f".{suffix}") + return [(new_name, data_torch)] + + # Handle encoder layers - transform to Whisper-compatible names and use map_tensor_name + if audio_name.startswith("layers."): + # Qwen3-Omni uses same layer naming as Whisper/Ultravox + # audio_tower.layers.{bid}.self_attn.q_proj -> audio_tower.layers.{bid}.self_attn.q_proj + # Just add back the audio_tower prefix and use map_tensor_name + return [(self.map_tensor_name(name), data_torch)] + + # Fallback for any other audio tensors + logger.warning(f"Unknown audio tensor: {name}") + return [(self.map_tensor_name(name), data_torch)] + + # Handle visual tensors + if name.startswith("visual."): + # Handle merger_list (deepstack) + if name.startswith("visual.merger_list."): + # Format: visual.merger_list.{idx}.{ln_q|mlp}.{layer}.{weight|bias} + parts = name.split(".") + idx = int(parts[2]) # merger_list index + + # Get actual layer index from deepstack_visual_indexes + deepstack_indexes = self.hparams_vision.get("deepstack_visual_indexes", []) + if idx < len(deepstack_indexes): + layer_idx = deepstack_indexes[idx] + else: + layer_idx = idx # Fallback + + suffix_parts = parts[3:] # Everything after the index + suffix = ".".join(suffix_parts) + + if suffix.startswith("ln_q"): + tensor_type = gguf.MODEL_TENSOR.V_DS_NORM + tail = 
suffix.split(".", 1)[1] if "." in suffix else "weight" + elif suffix.startswith("mlp.0"): + tensor_type = gguf.MODEL_TENSOR.V_DS_FC1 + tail = suffix.split(".", 2)[2] if suffix.count(".") >= 2 else "weight" + elif suffix.startswith("mlp.2"): + tensor_type = gguf.MODEL_TENSOR.V_DS_FC2 + tail = suffix.split(".", 2)[2] if suffix.count(".") >= 2 else "weight" + else: + raise ValueError(f"Unexpected deepstack tensor: {name}") + + new_name = self.format_tensor_name(tensor_type, layer_idx, suffix=f".{tail}") + return [(new_name, data_torch)] + + # Handle main merger + if name.startswith("visual.merger."): + suffix = name.split(".", 2)[2] + if suffix.startswith("mlp.0"): + # First FC layer + tail = suffix.split(".", 2)[2] if suffix.count(".") >= 2 else "weight" + new_name = self.format_tensor_name(gguf.MODEL_TENSOR.V_MMPROJ, 0, suffix=f".{tail}") + elif suffix.startswith("mlp.2"): + # Second FC layer + tail = suffix.split(".", 2)[2] if suffix.count(".") >= 2 else "weight" + new_name = self.format_tensor_name(gguf.MODEL_TENSOR.V_MMPROJ, 2, suffix=f".{tail}") + elif suffix.startswith("ln_q"): + tail = suffix.split(".", 1)[1] if "." in suffix else "weight" + new_name = self.format_tensor_name(gguf.MODEL_TENSOR.V_POST_NORM, suffix=f".{tail}") + else: + raise ValueError(f"Unexpected merger tensor: {name}") + return [(new_name, data_torch)] + + # Handle QKV split for attention + if ".qkv." in name: + if data_torch.ndim == 2: # weight + c3, _ = data_torch.shape + else: # bias + c3 = data_torch.shape[0] + assert c3 % 3 == 0 + c = c3 // 3 + wq = data_torch[:c] + wk = data_torch[c:c * 2] + wv = data_torch[c * 2:] + return [ + (self.map_tensor_name(name.replace("qkv", "q")), wq), + (self.map_tensor_name(name.replace("qkv", "k")), wk), + (self.map_tensor_name(name.replace("qkv", "v")), wv), + ] + + # Handle patch_embed - Conv3D needs splitting to 4D tensors (GGUF max is 4D) + if name == "visual.patch_embed.proj.weight": + # Split Conv3D into Conv2Ds along temporal dimension + if data_torch.ndim == 5: + c1, c2, kt, kh, kw = data_torch.shape + del c1, c2, kh, kw + if kt != 2: + raise ValueError("Current implementation only supports temporal_patch_size of 2") + return [ + (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight", data_torch[:, :, 0, ...]), + (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight.1", data_torch[:, :, 1, ...]), + ] + return [(self.map_tensor_name(name), data_torch)] + + if name == "visual.patch_embed.proj.bias": + return [(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".bias", data_torch)] + + # Default handling for other visual tensors + return [(self.map_tensor_name(name), data_torch)] + + # Fall back to parent for any other tensors + return super().modify_tensors(data_torch, name, bid) + + +@ModelBase.register("Qwen3OmniMoeForConditionalGeneration") +class Qwen3OmniMoeTextModel(Qwen3MoeModel): + """Qwen3-Omni MoE text model converter. + + Converts the text model (thinker.model.*) from Qwen3-Omni to GGUF format. + The audio and vision encoders are handled by Qwen3OmniModel (mmproj converter). 
+ + Key differences from Qwen3VLMoeTextModel: + - Tensor prefix is thinker.model.* (not model.*) + - Must skip: thinker.audio_tower, thinker.visual, talker, code2wav + - Config structure: thinker_config.text_config (handled by load_hparams) + """ + model_arch = gguf.MODEL_ARCH.QWEN3OMNI + + def set_gguf_parameters(self): + super().set_gguf_parameters() + + # Handle MRoPE (Multi-axis Rotary Position Embedding) for Qwen3-Omni + # The text_config is already merged into hparams by load_hparams + rope_scaling = self.hparams.get("rope_scaling") or self.hparams.get("rope_parameters") or {} + + if rope_scaling.get("mrope_section"): + # mrope_section contains [time, height, width] dimensions + mrope_section = rope_scaling["mrope_section"] + # Pad to 4 dimensions [time, height, width, extra] + while len(mrope_section) < 4: + mrope_section.append(0) + self.gguf_writer.add_rope_dimension_sections(mrope_section[:4]) + logger.info(f"MRoPE sections: {mrope_section[:4]}") + + # Get vision config for deepstack layers (from thinker_config in hparams) + thinker_config = self.hparams.get("thinker_config", {}) + vision_config = thinker_config.get("vision_config", {}) + deepstack_layer_num = len(vision_config.get("deepstack_visual_indexes", [])) + if deepstack_layer_num > 0: + self.gguf_writer.add_num_deepstack_layers(deepstack_layer_num) + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # Skip multimodal encoders - they go in the mmproj file + if name.startswith("thinker.audio_tower."): + return [] + if name.startswith("thinker.visual."): + return [] + + # Skip talker (speech synthesis) and code2wav (audio generation) - not needed for text inference + if name.startswith("talker."): + return [] + if name.startswith("code2wav."): + return [] + + # Strip thinker prefix to get standard tensor names + # Original names: + # thinker.model.layers.* -> model.layers.* + # thinker.model.embed_tokens.* -> model.embed_tokens.* + # thinker.model.norm.* -> model.norm.* + # thinker.lm_head.* -> lm_head.* (NOT model.lm_head!) + if name.startswith("thinker.model."): + name = name.replace("thinker.model.", "model.", 1) + elif name.startswith("thinker."): + # Handle other thinker tensors (lm_head, etc.) - just strip thinker. 
+ name = name.replace("thinker.", "", 1) + + return super().modify_tensors(data_torch, name, bid) + + @ModelBase.register("GPT2LMHeadModel") class GPT2Model(TextModel): model_arch = gguf.MODEL_ARCH.GPT2 diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 616b8add36..8c6cfb907f 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -372,6 +372,7 @@ class MODEL_ARCH(IntEnum): QWEN3NEXT = auto() QWEN3VL = auto() QWEN3VLMOE = auto() + QWEN3OMNI = auto() PHI2 = auto() PHI3 = auto() PHIMOE = auto() @@ -769,6 +770,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = { MODEL_ARCH.QWEN3NEXT: "qwen3next", MODEL_ARCH.QWEN3VL: "qwen3vl", MODEL_ARCH.QWEN3VLMOE: "qwen3vlmoe", + MODEL_ARCH.QWEN3OMNI: "qwen3omni", MODEL_ARCH.PHI2: "phi2", MODEL_ARCH.PHI3: "phi3", MODEL_ARCH.PHIMOE: "phimoe", @@ -1720,6 +1722,23 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.FFN_DOWN_EXP, MODEL_TENSOR.FFN_UP_EXP, ], + MODEL_ARCH.QWEN3OMNI: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_Q_NORM, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_K_NORM, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE_INP, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_DOWN_EXP, + MODEL_TENSOR.FFN_UP_EXP, + ], MODEL_ARCH.PLAMO: [ MODEL_TENSOR.TOKEN_EMBD, MODEL_TENSOR.OUTPUT_NORM, @@ -3485,6 +3504,7 @@ class VisionProjectorType: QWEN2A = "qwen2a" # audio GLMA = "glma" # audio QWEN25O = "qwen2.5o" # omni + QWEN3O = "qwen3o" # qwen3-omni VOXTRAL = "voxtral" LFM2 = "lfm2" KIMIVL = "kimivl" diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index 94a6807eac..d70615ff68 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -37,6 +37,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_QWEN3NEXT, "qwen3next" }, { LLM_ARCH_QWEN3VL, "qwen3vl" }, { LLM_ARCH_QWEN3VLMOE, "qwen3vlmoe" }, + { LLM_ARCH_QWEN3OMNI, "qwen3omni" }, { LLM_ARCH_PHI2, "phi2" }, { LLM_ARCH_PHI3, "phi3" }, { LLM_ARCH_PHIMOE, "phimoe" }, @@ -915,6 +916,7 @@ static std::set llm_get_tensor_names(llm_arch arch) { }; case LLM_ARCH_QWEN3MOE: case LLM_ARCH_QWEN3VLMOE: + case LLM_ARCH_QWEN3OMNI: case LLM_ARCH_OLMOE: case LLM_ARCH_LLADA_MOE: case LLM_ARCH_RND1: diff --git a/src/llama-arch.h b/src/llama-arch.h index 714ead4025..346f2ac2b8 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -41,6 +41,7 @@ enum llm_arch { LLM_ARCH_QWEN3NEXT, LLM_ARCH_QWEN3VL, LLM_ARCH_QWEN3VLMOE, + LLM_ARCH_QWEN3OMNI, LLM_ARCH_PHI2, LLM_ARCH_PHI3, LLM_ARCH_PHIMOE, diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 5e664c8c57..6008596638 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -1144,6 +1144,16 @@ void llama_model::load_hparams(llama_model_loader & ml) { default: type = LLM_TYPE_UNKNOWN; } } break; + case LLM_ARCH_QWEN3OMNI: + { + ml.get_key(LLM_KV_NUM_DEEPSTACK_LAYERS, hparams.n_deepstack_layers, false); + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 48: type = LLM_TYPE_30B_A3B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; case LLM_ARCH_PHI2: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); @@ -3598,6 +3608,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) { } break; case LLM_ARCH_QWEN3MOE: case LLM_ARCH_QWEN3VLMOE: + case LLM_ARCH_QWEN3OMNI: case LLM_ARCH_RND1: { tok_embd = 
create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); @@ -7115,7 +7126,7 @@ void llama_model::print_info() const { LLAMA_LOG_INFO("%s: n_ff_shexp = %d\n", __func__, hparams.n_ff_shexp); } - if (arch == LLM_ARCH_QWEN3MOE || arch == LLM_ARCH_OPENAI_MOE || arch == LLM_ARCH_QWEN3VLMOE || arch == LLM_ARCH_RND1) { + if (arch == LLM_ARCH_QWEN3MOE || arch == LLM_ARCH_OPENAI_MOE || arch == LLM_ARCH_QWEN3VLMOE || arch == LLM_ARCH_QWEN3OMNI || arch == LLM_ARCH_RND1) { LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp); } @@ -7510,6 +7521,10 @@ ggml_cgraph * llama_model::build_graph(const llm_graph_params & params) const { { llm = std::make_unique(*this, params); } break; + case LLM_ARCH_QWEN3OMNI: + { + llm = std::make_unique(*this, params); + } break; case LLM_ARCH_PHI2: { llm = std::make_unique(*this, params); @@ -8081,6 +8096,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { return LLAMA_ROPE_TYPE_MROPE; case LLM_ARCH_QWEN3VL: case LLM_ARCH_QWEN3VLMOE: + case LLM_ARCH_QWEN3OMNI: return LLAMA_ROPE_TYPE_IMROPE; case LLM_ARCH_GLM4: diff --git a/tools/mtmd/clip-impl.h b/tools/mtmd/clip-impl.h index a0939865e3..65231c748f 100644 --- a/tools/mtmd/clip-impl.h +++ b/tools/mtmd/clip-impl.h @@ -169,6 +169,7 @@ enum projector_type { PROJECTOR_TYPE_GLM_EDGE, PROJECTOR_TYPE_QWEN2VL, PROJECTOR_TYPE_QWEN3VL, + PROJECTOR_TYPE_QWEN3O, // qwen3-omni: converts to QWEN3VL for vision, uses custom encoder for audio PROJECTOR_TYPE_GEMMA3, PROJECTOR_TYPE_IDEFICS3, PROJECTOR_TYPE_PIXTRAL, @@ -199,6 +200,7 @@ static std::map PROJECTOR_TYPE_NAMES = { { PROJECTOR_TYPE_QWEN2VL, "qwen2vl_merger"}, { PROJECTOR_TYPE_QWEN25VL, "qwen2.5vl_merger"}, { PROJECTOR_TYPE_QWEN3VL, "qwen3vl_merger"}, + { PROJECTOR_TYPE_QWEN3O, "qwen3o"}, { PROJECTOR_TYPE_GEMMA3, "gemma3"}, { PROJECTOR_TYPE_IDEFICS3, "idefics3"}, { PROJECTOR_TYPE_PIXTRAL, "pixtral"}, diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp index 3ba0823def..c08e7063d8 100644 --- a/tools/mtmd/clip.cpp +++ b/tools/mtmd/clip.cpp @@ -970,6 +970,18 @@ struct clip_model_loader { ? PROJECTOR_TYPE_QWEN25VL : PROJECTOR_TYPE_QWEN2A; } + + // Qwen3-Omni: vision uses qwen3vl pipeline, audio stays qwen3o + if (model.proj_type == PROJECTOR_TYPE_QWEN3O) { + projector_type new_type = modality == CLIP_MODALITY_VISION + ? PROJECTOR_TYPE_QWEN3VL + : PROJECTOR_TYPE_QWEN3O; + LOG_INF("%s: QWEN3O auto-conversion: %s -> %s (modality=%s)\n", __func__, + PROJECTOR_TYPE_NAMES[model.proj_type].c_str(), + PROJECTOR_TYPE_NAMES[new_type].c_str(), + modality == CLIP_MODALITY_VISION ? 
"vision" : "audio"); + model.proj_type = new_type; + } } const bool is_vision = model.modality == CLIP_MODALITY_VISION; diff --git a/tools/mtmd/models/qwen3vl.cpp b/tools/mtmd/models/qwen3vl.cpp index 35a42cb84d..647738a501 100644 --- a/tools/mtmd/models/qwen3vl.cpp +++ b/tools/mtmd/models/qwen3vl.cpp @@ -85,23 +85,43 @@ ggml_cgraph * clip_graph_qwen3vl::build() { // self-attention { - cur = ggml_mul_mat(ctx0, layer.qkv_w, cur); - cur = ggml_add(ctx0, cur, layer.qkv_b); + // Support both separate Q/K/V (Qwen3-Omni) and combined QKV (Qwen3-VL) + ggml_tensor * Qcur; + ggml_tensor * Kcur; + ggml_tensor * Vcur; - ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos, - /* nb1 */ ggml_row_size(cur->type, d_head), - /* nb2 */ cur->nb[1], - /* offset */ 0); + if (layer.qkv_w) { + // Combined QKV format + cur = ggml_mul_mat(ctx0, layer.qkv_w, cur); + cur = ggml_add(ctx0, cur, layer.qkv_b); - ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos, - /* nb1 */ ggml_row_size(cur->type, d_head), - /* nb2 */ cur->nb[1], - /* offset */ ggml_row_size(cur->type, n_embd)); + Qcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos, + /* nb1 */ ggml_row_size(cur->type, d_head), + /* nb2 */ cur->nb[1], + /* offset */ 0); - ggml_tensor * Vcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos, - /* nb1 */ ggml_row_size(cur->type, d_head), - /* nb2 */ cur->nb[1], - /* offset */ ggml_row_size(cur->type, 2 * n_embd)); + Kcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos, + /* nb1 */ ggml_row_size(cur->type, d_head), + /* nb2 */ cur->nb[1], + /* offset */ ggml_row_size(cur->type, n_embd)); + + Vcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos, + /* nb1 */ ggml_row_size(cur->type, d_head), + /* nb2 */ cur->nb[1], + /* offset */ ggml_row_size(cur->type, 2 * n_embd)); + } else { + // Separate Q/K/V format (like Qwen3-Omni) + Qcur = ggml_add(ctx0, + ggml_mul_mat(ctx0, layer.q_w, cur), layer.q_b); + Kcur = ggml_add(ctx0, + ggml_mul_mat(ctx0, layer.k_w, cur), layer.k_b); + Vcur = ggml_add(ctx0, + ggml_mul_mat(ctx0, layer.v_w, cur), layer.v_b); + + Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_pos); + Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_pos); + Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_pos); + } cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); diff --git a/tools/mtmd/mtmd.cpp b/tools/mtmd/mtmd.cpp index b9c4fa9098..eb559ac19f 100644 --- a/tools/mtmd/mtmd.cpp +++ b/tools/mtmd/mtmd.cpp @@ -188,7 +188,15 @@ struct mtmd_context { } // if both vision and audio mmproj are present, we need to validate their n_embd - if (ctx_v && ctx_a) { + // Note: QWEN3O has different embedding dimensions for vision and audio, which is valid + // - Vision uses deepstack, so n_embd_v = n_embd * (1 + n_deepstack_layers) = 8192 + // - Audio doesn't use deepstack, so n_embd_a = projection_dim = 2048 + projector_type proj_v = ctx_v ? clip_get_projector_type(ctx_v) : PROJECTOR_TYPE_UNKNOWN; + projector_type proj_a = ctx_a ? clip_get_projector_type(ctx_a) : PROJECTOR_TYPE_UNKNOWN; + bool is_qwen3o = (proj_v == PROJECTOR_TYPE_QWEN3VL || proj_v == PROJECTOR_TYPE_QWEN3O) && + (proj_a == PROJECTOR_TYPE_QWEN3O); + + if (ctx_v && ctx_a && !is_qwen3o) { int n_embd_v = clip_n_mmproj_embd(ctx_v); int n_embd_a = clip_n_mmproj_embd(ctx_a); if (n_embd_v != n_embd_a) { @@ -198,9 +206,9 @@ struct mtmd_context { } } - // since we already validate n_embd of vision and audio mmproj, - // we can safely assume that they are the same - int n_embd_clip = clip_n_mmproj_embd(ctx_v ? 
ctx_v : ctx_a); + // For QWEN3O, use vision embedding dimension (includes deepstack) for validation + // For other models, vision and audio should have same embedding dimension + int n_embd_clip = is_qwen3o ? clip_n_mmproj_embd(ctx_v) : clip_n_mmproj_embd(ctx_v ? ctx_v : ctx_a); if (n_embd_text != n_embd_clip) { throw std::runtime_error(string_format( "mismatch between text model (n_embd = %d) and mmproj (n_embd = %d)\n" From d8b79cb4953ed0675768ac14b652ef83963caff1 Mon Sep 17 00:00:00 2001 From: EliteGPT AI Date: Wed, 31 Dec 2025 20:28:45 +1000 Subject: [PATCH 2/4] docs: add Qwen3-Omni fork documentation This fork adds Qwen3-Omni multimodal architecture support. Models available at: https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF --- README.md | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 68 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index ed956bb02e..8f6c5f9fab 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,71 @@ -# llama.cpp +# llama.cpp + Qwen3-Omni + +> **This fork adds Qwen3-Omni multimodal architecture support to llama.cpp** + +[![Qwen3-Omni](https://img.shields.io/badge/Qwen3--Omni-Supported-green)](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF) +[![Models](https://img.shields.io/badge/GGUF%20Models-HuggingFace-yellow)](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF) + +## What's Added + +This fork includes support for **Qwen3-Omni**, Alibaba's multimodal LLM that handles text and vision: + +- `LLM_ARCH_QWEN3OMNI` - Main LLM architecture (MoE: 48 layers, 128 experts) +- `PROJECTOR_TYPE_QWEN3O` - Vision encoder support +- IMROPE position encoding for multimodal inputs + +## Quick Start + +```bash +# Clone this fork +git clone https://github.com/phnxsystms/llama.cpp.git +cd llama.cpp + +# Build with CUDA +mkdir build && cd build +cmake .. -DGGML_CUDA=ON -DCMAKE_BUILD_TYPE=Release +cmake --build . -j + +# Download models +huggingface-cli download phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF --local-dir models/ + +# Run text inference +./bin/llama-cli -m models/qwen3-omni-30B-Q8_0.gguf -p "Hello!" -ngl 99 + +# Run vision inference +./bin/llama-mtmd-cli \ + -m models/qwen3-omni-30B-Q8_0.gguf \ + --mmproj models/mmproj-qwen3-omni-30B-F16-fixed.gguf \ + --image your_image.jpg \ + -p "What's in this image?" +``` + +## Models + +| Model | Size | Link | +|-------|------|------| +| Qwen3-Omni-30B Q8_0 | 31GB | [Download](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF/resolve/main/qwen3-omni-30B-Q8_0.gguf) | +| Vision Projector F16 | 2.3GB | [Download](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF/resolve/main/mmproj-qwen3-omni-30B-F16-fixed.gguf) | + +## Performance + +Tested on distributed 5-GPU setup: +- **41-44 tokens/sec** inference speed +- Text and vision inference working + +## Changes from Upstream + +Key files modified: +- `src/llama-arch.cpp` - Architecture registration +- `src/llama-model.cpp` - Model loading and graph building +- `tools/mtmd/clip.cpp` - Vision projector support +- `tools/mtmd/mtmd.cpp` - Multimodal pipeline + +This fork stays synced with upstream llama.cpp. The Qwen3-Omni additions are minimal and focused. 
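+
+## Converting the Model Yourself (Optional)
+
+The prebuilt GGUFs above are the easiest route. If you prefer to convert the original Hugging Face checkpoint instead, this fork's `convert_hf_to_gguf.py` registers `Qwen3OmniMoeForConditionalGeneration` for both the text model and the vision projector. A minimal sketch; the local paths and output names below are placeholders and exact flags may differ on your llama.cpp revision:
+
+```bash
+# Text model (thinker) -> quantized GGUF
+python convert_hf_to_gguf.py /path/to/Qwen3-Omni-30B-A3B-Instruct \
+  --outfile models/qwen3-omni-30B-Q8_0.gguf \
+  --outtype q8_0
+
+# Vision projector -> separate mmproj GGUF
+python convert_hf_to_gguf.py /path/to/Qwen3-Omni-30B-A3B-Instruct \
+  --mmproj \
+  --outfile models/mmproj-qwen3-omni-30B-F16.gguf
+```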
+ +--- + +# Original llama.cpp README + ![llama](https://user-images.githubusercontent.com/1991296/230134379-7181e485-c521-4d23-a0d6-f7b3b61ba524.png) From 42e8ab4c38d7b9343024aa9c54e4a2896c206a3c Mon Sep 17 00:00:00 2001 From: EliteGPT AI Date: Wed, 31 Dec 2025 20:35:08 +1000 Subject: [PATCH 3/4] docs: clean README - Qwen3-Omni focus, add RPC docs, note audio WIP --- README.md | 661 +++--------------------------------------------------- 1 file changed, 36 insertions(+), 625 deletions(-) diff --git a/README.md b/README.md index 8f6c5f9fab..92dbccf9dd 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,20 @@ # llama.cpp + Qwen3-Omni -> **This fork adds Qwen3-Omni multimodal architecture support to llama.cpp** +> **Fork with Qwen3-Omni multimodal architecture support** [![Qwen3-Omni](https://img.shields.io/badge/Qwen3--Omni-Supported-green)](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF) [![Models](https://img.shields.io/badge/GGUF%20Models-HuggingFace-yellow)](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF) ## What's Added -This fork includes support for **Qwen3-Omni**, Alibaba's multimodal LLM that handles text and vision: +Support for **Qwen3-Omni**, Alibaba's multimodal LLM: - `LLM_ARCH_QWEN3OMNI` - Main LLM architecture (MoE: 48 layers, 128 experts) - `PROJECTOR_TYPE_QWEN3O` - Vision encoder support - IMROPE position encoding for multimodal inputs +**Note:** Audio encoder support is WIP. + ## Quick Start ```bash @@ -25,13 +27,13 @@ mkdir build && cd build cmake .. -DGGML_CUDA=ON -DCMAKE_BUILD_TYPE=Release cmake --build . -j -# Download models +# Download models from HuggingFace huggingface-cli download phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF --local-dir models/ -# Run text inference +# Text inference ./bin/llama-cli -m models/qwen3-omni-30B-Q8_0.gguf -p "Hello!" -ngl 99 -# Run vision inference +# Vision inference ./bin/llama-mtmd-cli \ -m models/qwen3-omni-30B-Q8_0.gguf \ --mmproj models/mmproj-qwen3-omni-30B-F16-fixed.gguf \ @@ -39,635 +41,44 @@ huggingface-cli download phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF --local-dir -p "What's in this image?" ``` +## Distributed Inference (RPC) + +For large models, use llama.cpp's RPC backend to distribute across multiple machines: + +```bash +# On worker nodes - start RPC server +./bin/llama-rpc-server --host 0.0.0.0 --port 50052 + +# On main node - connect to workers +./bin/llama-cli \ + -m models/qwen3-omni-30B-Q8_0.gguf \ + --rpc worker1:50052,worker2:50052 \ + -ngl 99 \ + -p "Hello!" 
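+
+# Note: with --rpc, the workers are registered as extra devices, so layers
+# offloaded via -ngl are split across the local GPU(s) and the RPC workers;
+# add more host:port pairs to --rpc to scale further.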
+``` + ## Models -| Model | Size | Link | -|-------|------|------| -| Qwen3-Omni-30B Q8_0 | 31GB | [Download](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF/resolve/main/qwen3-omni-30B-Q8_0.gguf) | -| Vision Projector F16 | 2.3GB | [Download](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF/resolve/main/mmproj-qwen3-omni-30B-F16-fixed.gguf) | +| Model | Size | Description | +|-------|------|-------------| +| [qwen3-omni-30B-Q8_0.gguf](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF/resolve/main/qwen3-omni-30B-Q8_0.gguf) | 31GB | Main LLM (Q8_0) | +| [mmproj-qwen3-omni-30B-F16-fixed.gguf](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF/resolve/main/mmproj-qwen3-omni-30B-F16-fixed.gguf) | 2.3GB | Vision projector (F16) | ## Performance -Tested on distributed 5-GPU setup: +Tested on multi-GPU distributed setup: - **41-44 tokens/sec** inference speed - Text and vision inference working -## Changes from Upstream +## Files Changed -Key files modified: -- `src/llama-arch.cpp` - Architecture registration -- `src/llama-model.cpp` - Model loading and graph building -- `tools/mtmd/clip.cpp` - Vision projector support -- `tools/mtmd/mtmd.cpp` - Multimodal pipeline - -This fork stays synced with upstream llama.cpp. The Qwen3-Omni additions are minimal and focused. - ---- - -# Original llama.cpp README - - -![llama](https://user-images.githubusercontent.com/1991296/230134379-7181e485-c521-4d23-a0d6-f7b3b61ba524.png) - -[![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT) -[![Release](https://img.shields.io/github/v/release/ggml-org/llama.cpp)](https://github.com/ggml-org/llama.cpp/releases) -[![Server](https://github.com/ggml-org/llama.cpp/actions/workflows/server.yml/badge.svg)](https://github.com/ggml-org/llama.cpp/actions/workflows/server.yml) - -[Manifesto](https://github.com/ggml-org/llama.cpp/discussions/205) / [ggml](https://github.com/ggml-org/ggml) / [ops](https://github.com/ggml-org/llama.cpp/blob/master/docs/ops.md) - -LLM inference in C/C++ - -## Recent API changes - -- [Changelog for `libllama` API](https://github.com/ggml-org/llama.cpp/issues/9289) -- [Changelog for `llama-server` REST API](https://github.com/ggml-org/llama.cpp/issues/9291) - -## Hot topics - -- **[guide : using the new WebUI of llama.cpp](https://github.com/ggml-org/llama.cpp/discussions/16938)** -- [guide : running gpt-oss with llama.cpp](https://github.com/ggml-org/llama.cpp/discussions/15396) -- [[FEEDBACK] Better packaging for llama.cpp to support downstream consumers πŸ€—](https://github.com/ggml-org/llama.cpp/discussions/15313) -- Support for the `gpt-oss` model with native MXFP4 format has been added | [PR](https://github.com/ggml-org/llama.cpp/pull/15091) | [Collaboration with NVIDIA](https://blogs.nvidia.com/blog/rtx-ai-garage-openai-oss) | [Comment](https://github.com/ggml-org/llama.cpp/discussions/15095) -- Multimodal support arrived in `llama-server`: [#12898](https://github.com/ggml-org/llama.cpp/pull/12898) | [documentation](./docs/multimodal.md) -- VS Code extension for FIM completions: https://github.com/ggml-org/llama.vscode -- Vim/Neovim plugin for FIM completions: https://github.com/ggml-org/llama.vim -- Hugging Face Inference Endpoints now support GGUF out of the box! 
https://github.com/ggml-org/llama.cpp/discussions/9669 -- Hugging Face GGUF editor: [discussion](https://github.com/ggml-org/llama.cpp/discussions/9268) | [tool](https://huggingface.co/spaces/CISCai/gguf-editor) - ----- - -## Quick start - -Getting started with llama.cpp is straightforward. Here are several ways to install it on your machine: - -- Install `llama.cpp` using [brew, nix or winget](docs/install.md) -- Run with Docker - see our [Docker documentation](docs/docker.md) -- Download pre-built binaries from the [releases page](https://github.com/ggml-org/llama.cpp/releases) -- Build from source by cloning this repository - check out [our build guide](docs/build.md) - -Once installed, you'll need a model to work with. Head to the [Obtaining and quantizing models](#obtaining-and-quantizing-models) section to learn more. - -Example command: - -```sh -# Use a local model file -llama-cli -m my_model.gguf - -# Or download and run a model directly from Hugging Face -llama-cli -hf ggml-org/gemma-3-1b-it-GGUF - -# Launch OpenAI-compatible API server -llama-server -hf ggml-org/gemma-3-1b-it-GGUF +``` +src/llama-arch.cpp # Architecture registration +src/llama-model.cpp # Model loading & graph building +tools/mtmd/clip.cpp # Vision projector support +tools/mtmd/mtmd.cpp # Multimodal pipeline ``` -## Description +## License -The main goal of `llama.cpp` is to enable LLM inference with minimal setup and state-of-the-art performance on a wide -range of hardware - locally and in the cloud. - -- Plain C/C++ implementation without any dependencies -- Apple silicon is a first-class citizen - optimized via ARM NEON, Accelerate and Metal frameworks -- AVX, AVX2, AVX512 and AMX support for x86 architectures -- RVV, ZVFH, ZFH, ZICBOP and ZIHINTPAUSE support for RISC-V architectures -- 1.5-bit, 2-bit, 3-bit, 4-bit, 5-bit, 6-bit, and 8-bit integer quantization for faster inference and reduced memory use -- Custom CUDA kernels for running LLMs on NVIDIA GPUs (support for AMD GPUs via HIP and Moore Threads GPUs via MUSA) -- Vulkan and SYCL backend support -- CPU+GPU hybrid inference to partially accelerate models larger than the total VRAM capacity - -The `llama.cpp` project is the main playground for developing new features for the [ggml](https://github.com/ggml-org/ggml) library. - -
-Models - -Typically finetunes of the base models below are supported as well. - -Instructions for adding support for new models: [HOWTO-add-model.md](docs/development/HOWTO-add-model.md) - -#### Text-only - -- [X] LLaMA πŸ¦™ -- [x] LLaMA 2 πŸ¦™πŸ¦™ -- [x] LLaMA 3 πŸ¦™πŸ¦™πŸ¦™ -- [X] [Mistral 7B](https://huggingface.co/mistralai/Mistral-7B-v0.1) -- [x] [Mixtral MoE](https://huggingface.co/models?search=mistral-ai/Mixtral) -- [x] [DBRX](https://huggingface.co/databricks/dbrx-instruct) -- [x] [Jamba](https://huggingface.co/ai21labs) -- [X] [Falcon](https://huggingface.co/models?search=tiiuae/falcon) -- [X] [Chinese LLaMA / Alpaca](https://github.com/ymcui/Chinese-LLaMA-Alpaca) and [Chinese LLaMA-2 / Alpaca-2](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2) -- [X] [Vigogne (French)](https://github.com/bofenghuang/vigogne) -- [X] [BERT](https://github.com/ggml-org/llama.cpp/pull/5423) -- [X] [Koala](https://bair.berkeley.edu/blog/2023/04/03/koala/) -- [X] [Baichuan 1 & 2](https://huggingface.co/models?search=baichuan-inc/Baichuan) + [derivations](https://huggingface.co/hiyouga/baichuan-7b-sft) -- [X] [Aquila 1 & 2](https://huggingface.co/models?search=BAAI/Aquila) -- [X] [Starcoder models](https://github.com/ggml-org/llama.cpp/pull/3187) -- [X] [Refact](https://huggingface.co/smallcloudai/Refact-1_6B-fim) -- [X] [MPT](https://github.com/ggml-org/llama.cpp/pull/3417) -- [X] [Bloom](https://github.com/ggml-org/llama.cpp/pull/3553) -- [x] [Yi models](https://huggingface.co/models?search=01-ai/Yi) -- [X] [StableLM models](https://huggingface.co/stabilityai) -- [x] [Deepseek models](https://huggingface.co/models?search=deepseek-ai/deepseek) -- [x] [Qwen models](https://huggingface.co/models?search=Qwen/Qwen) -- [x] [PLaMo-13B](https://github.com/ggml-org/llama.cpp/pull/3557) -- [x] [Phi models](https://huggingface.co/models?search=microsoft/phi) -- [x] [PhiMoE](https://github.com/ggml-org/llama.cpp/pull/11003) -- [x] [GPT-2](https://huggingface.co/gpt2) -- [x] [Orion 14B](https://github.com/ggml-org/llama.cpp/pull/5118) -- [x] [InternLM2](https://huggingface.co/models?search=internlm2) -- [x] [CodeShell](https://github.com/WisdomShell/codeshell) -- [x] [Gemma](https://ai.google.dev/gemma) -- [x] [Mamba](https://github.com/state-spaces/mamba) -- [x] [Grok-1](https://huggingface.co/keyfan/grok-1-hf) -- [x] [Xverse](https://huggingface.co/models?search=xverse) -- [x] [Command-R models](https://huggingface.co/models?search=CohereForAI/c4ai-command-r) -- [x] [SEA-LION](https://huggingface.co/models?search=sea-lion) -- [x] [GritLM-7B](https://huggingface.co/GritLM/GritLM-7B) + [GritLM-8x7B](https://huggingface.co/GritLM/GritLM-8x7B) -- [x] [OLMo](https://allenai.org/olmo) -- [x] [OLMo 2](https://allenai.org/olmo) -- [x] [OLMoE](https://huggingface.co/allenai/OLMoE-1B-7B-0924) -- [x] [Granite models](https://huggingface.co/collections/ibm-granite/granite-code-models-6624c5cec322e4c148c8b330) -- [x] [GPT-NeoX](https://github.com/EleutherAI/gpt-neox) + [Pythia](https://github.com/EleutherAI/pythia) -- [x] [Snowflake-Arctic MoE](https://huggingface.co/collections/Snowflake/arctic-66290090abe542894a5ac520) -- [x] [Smaug](https://huggingface.co/models?search=Smaug) -- [x] [Poro 34B](https://huggingface.co/LumiOpen/Poro-34B) -- [x] [Bitnet b1.58 models](https://huggingface.co/1bitLLM) -- [x] [Flan T5](https://huggingface.co/models?search=flan-t5) -- [x] [Open Elm models](https://huggingface.co/collections/apple/openelm-instruct-models-6619ad295d7ae9f868b759ca) -- [x] 
[ChatGLM3-6b](https://huggingface.co/THUDM/chatglm3-6b) + [ChatGLM4-9b](https://huggingface.co/THUDM/glm-4-9b) + [GLMEdge-1.5b](https://huggingface.co/THUDM/glm-edge-1.5b-chat) + [GLMEdge-4b](https://huggingface.co/THUDM/glm-edge-4b-chat) -- [x] [GLM-4-0414](https://huggingface.co/collections/THUDM/glm-4-0414-67f3cbcb34dd9d252707cb2e) -- [x] [SmolLM](https://huggingface.co/collections/HuggingFaceTB/smollm-6695016cad7167254ce15966) -- [x] [EXAONE-3.0-7.8B-Instruct](https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct) -- [x] [FalconMamba Models](https://huggingface.co/collections/tiiuae/falconmamba-7b-66b9a580324dd1598b0f6d4a) -- [x] [Jais](https://huggingface.co/inceptionai/jais-13b-chat) -- [x] [Bielik-11B-v2.3](https://huggingface.co/collections/speakleash/bielik-11b-v23-66ee813238d9b526a072408a) -- [x] [RWKV-6](https://github.com/BlinkDL/RWKV-LM) -- [x] [QRWKV-6](https://huggingface.co/recursal/QRWKV6-32B-Instruct-Preview-v0.1) -- [x] [GigaChat-20B-A3B](https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct) -- [X] [Trillion-7B-preview](https://huggingface.co/trillionlabs/Trillion-7B-preview) -- [x] [Ling models](https://huggingface.co/collections/inclusionAI/ling-67c51c85b34a7ea0aba94c32) -- [x] [LFM2 models](https://huggingface.co/collections/LiquidAI/lfm2-686d721927015b2ad73eaa38) -- [x] [Hunyuan models](https://huggingface.co/collections/tencent/hunyuan-dense-model-6890632cda26b19119c9c5e7) -- [x] [BailingMoeV2 (Ring/Ling 2.0) models](https://huggingface.co/collections/inclusionAI/ling-v2-68bf1dd2fc34c306c1fa6f86) - -#### Multimodal - -- [x] [LLaVA 1.5 models](https://huggingface.co/collections/liuhaotian/llava-15-653aac15d994e992e2677a7e), [LLaVA 1.6 models](https://huggingface.co/collections/liuhaotian/llava-16-65b9e40155f60fd046a5ccf2) -- [x] [BakLLaVA](https://huggingface.co/models?search=SkunkworksAI/Bakllava) -- [x] [Obsidian](https://huggingface.co/NousResearch/Obsidian-3B-V0.5) -- [x] [ShareGPT4V](https://huggingface.co/models?search=Lin-Chen/ShareGPT4V) -- [x] [MobileVLM 1.7B/3B models](https://huggingface.co/models?search=mobileVLM) -- [x] [Yi-VL](https://huggingface.co/models?search=Yi-VL) -- [x] [Mini CPM](https://huggingface.co/models?search=MiniCPM) -- [x] [Moondream](https://huggingface.co/vikhyatk/moondream2) -- [x] [Bunny](https://github.com/BAAI-DCAI/Bunny) -- [x] [GLM-EDGE](https://huggingface.co/models?search=glm-edge) -- [x] [Qwen2-VL](https://huggingface.co/collections/Qwen/qwen2-vl-66cee7455501d7126940800d) -- [x] [LFM2-VL](https://huggingface.co/collections/LiquidAI/lfm2-vl-68963bbc84a610f7638d5ffa) - -
- -
-Bindings - -- Python: [ddh0/easy-llama](https://github.com/ddh0/easy-llama) -- Python: [abetlen/llama-cpp-python](https://github.com/abetlen/llama-cpp-python) -- Go: [go-skynet/go-llama.cpp](https://github.com/go-skynet/go-llama.cpp) -- Node.js: [withcatai/node-llama-cpp](https://github.com/withcatai/node-llama-cpp) -- JS/TS (llama.cpp server client): [lgrammel/modelfusion](https://modelfusion.dev/integration/model-provider/llamacpp) -- JS/TS (Programmable Prompt Engine CLI): [offline-ai/cli](https://github.com/offline-ai/cli) -- JavaScript/Wasm (works in browser): [tangledgroup/llama-cpp-wasm](https://github.com/tangledgroup/llama-cpp-wasm) -- Typescript/Wasm (nicer API, available on npm): [ngxson/wllama](https://github.com/ngxson/wllama) -- Ruby: [yoshoku/llama_cpp.rb](https://github.com/yoshoku/llama_cpp.rb) -- Rust (more features): [edgenai/llama_cpp-rs](https://github.com/edgenai/llama_cpp-rs) -- Rust (nicer API): [mdrokz/rust-llama.cpp](https://github.com/mdrokz/rust-llama.cpp) -- Rust (more direct bindings): [utilityai/llama-cpp-rs](https://github.com/utilityai/llama-cpp-rs) -- Rust (automated build from crates.io): [ShelbyJenkins/llm_client](https://github.com/ShelbyJenkins/llm_client) -- C#/.NET: [SciSharp/LLamaSharp](https://github.com/SciSharp/LLamaSharp) -- C#/VB.NET (more features - community license): [LM-Kit.NET](https://docs.lm-kit.com/lm-kit-net/index.html) -- Scala 3: [donderom/llm4s](https://github.com/donderom/llm4s) -- Clojure: [phronmophobic/llama.clj](https://github.com/phronmophobic/llama.clj) -- React Native: [mybigday/llama.rn](https://github.com/mybigday/llama.rn) -- Java: [kherud/java-llama.cpp](https://github.com/kherud/java-llama.cpp) -- Java: [QuasarByte/llama-cpp-jna](https://github.com/QuasarByte/llama-cpp-jna) -- Zig: [deins/llama.cpp.zig](https://github.com/Deins/llama.cpp.zig) -- Flutter/Dart: [netdur/llama_cpp_dart](https://github.com/netdur/llama_cpp_dart) -- Flutter: [xuegao-tzx/Fllama](https://github.com/xuegao-tzx/Fllama) -- PHP (API bindings and features built on top of llama.cpp): [distantmagic/resonance](https://github.com/distantmagic/resonance) [(more info)](https://github.com/ggml-org/llama.cpp/pull/6326) -- Guile Scheme: [guile_llama_cpp](https://savannah.nongnu.org/projects/guile-llama-cpp) -- Swift [srgtuszy/llama-cpp-swift](https://github.com/srgtuszy/llama-cpp-swift) -- Swift [ShenghaiWang/SwiftLlama](https://github.com/ShenghaiWang/SwiftLlama) -- Delphi [Embarcadero/llama-cpp-delphi](https://github.com/Embarcadero/llama-cpp-delphi) -- Go (no CGo needed): [hybridgroup/yzma](https://github.com/hybridgroup/yzma) -- Android: [llama.android](/examples/llama.android) - -
- -
-UIs - -*(to have a project listed here, it should clearly state that it depends on `llama.cpp`)* - -- [AI Sublime Text plugin](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) (MIT) -- [cztomsik/ava](https://github.com/cztomsik/ava) (MIT) -- [Dot](https://github.com/alexpinel/Dot) (GPL) -- [eva](https://github.com/ylsdamxssjxxdd/eva) (MIT) -- [iohub/collama](https://github.com/iohub/coLLaMA) (Apache-2.0) -- [janhq/jan](https://github.com/janhq/jan) (AGPL) -- [johnbean393/Sidekick](https://github.com/johnbean393/Sidekick) (MIT) -- [KanTV](https://github.com/zhouwg/kantv?tab=readme-ov-file) (Apache-2.0) -- [KodiBot](https://github.com/firatkiral/kodibot) (GPL) -- [llama.vim](https://github.com/ggml-org/llama.vim) (MIT) -- [LARS](https://github.com/abgulati/LARS) (AGPL) -- [Llama Assistant](https://github.com/vietanhdev/llama-assistant) (GPL) -- [LLMFarm](https://github.com/guinmoon/LLMFarm?tab=readme-ov-file) (MIT) -- [LLMUnity](https://github.com/undreamai/LLMUnity) (MIT) -- [LMStudio](https://lmstudio.ai/) (proprietary) -- [LocalAI](https://github.com/mudler/LocalAI) (MIT) -- [LostRuins/koboldcpp](https://github.com/LostRuins/koboldcpp) (AGPL) -- [MindMac](https://mindmac.app) (proprietary) -- [MindWorkAI/AI-Studio](https://github.com/MindWorkAI/AI-Studio) (FSL-1.1-MIT) -- [Mobile-Artificial-Intelligence/maid](https://github.com/Mobile-Artificial-Intelligence/maid) (MIT) -- [Mozilla-Ocho/llamafile](https://github.com/Mozilla-Ocho/llamafile) (Apache-2.0) -- [nat/openplayground](https://github.com/nat/openplayground) (MIT) -- [nomic-ai/gpt4all](https://github.com/nomic-ai/gpt4all) (MIT) -- [ollama/ollama](https://github.com/ollama/ollama) (MIT) -- [oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui) (AGPL) -- [PocketPal AI](https://github.com/a-ghorbani/pocketpal-ai) (MIT) -- [psugihara/FreeChat](https://github.com/psugihara/FreeChat) (MIT) -- [ptsochantaris/emeltal](https://github.com/ptsochantaris/emeltal) (MIT) -- [pythops/tenere](https://github.com/pythops/tenere) (AGPL) -- [ramalama](https://github.com/containers/ramalama) (MIT) -- [semperai/amica](https://github.com/semperai/amica) (MIT) -- [withcatai/catai](https://github.com/withcatai/catai) (MIT) -- [Autopen](https://github.com/blackhole89/autopen) (GPL) - -
- -
-Tools - -- [akx/ggify](https://github.com/akx/ggify) – download PyTorch models from HuggingFace Hub and convert them to GGML -- [akx/ollama-dl](https://github.com/akx/ollama-dl) – download models from the Ollama library to be used directly with llama.cpp -- [crashr/gppm](https://github.com/crashr/gppm) – launch llama.cpp instances utilizing NVIDIA Tesla P40 or P100 GPUs with reduced idle power consumption -- [gpustack/gguf-parser](https://github.com/gpustack/gguf-parser-go/tree/main/cmd/gguf-parser) - review/check the GGUF file and estimate the memory usage -- [Styled Lines](https://marketplace.unity.com/packages/tools/generative-ai/styled-lines-llama-cpp-model-292902) (proprietary licensed, async wrapper of inference part for game development in Unity3d with pre-built Mobile and Web platform wrappers and a model example) -- [unslothai/unsloth](https://github.com/unslothai/unsloth) – πŸ¦₯ exports/saves fine-tuned and trained models to GGUF (Apache-2.0) - -
- -
-Infrastructure - -- [Paddler](https://github.com/intentee/paddler) - Open-source LLMOps platform for hosting and scaling AI in your own infrastructure -- [GPUStack](https://github.com/gpustack/gpustack) - Manage GPU clusters for running LLMs -- [llama_cpp_canister](https://github.com/onicai/llama_cpp_canister) - llama.cpp as a smart contract on the Internet Computer, using WebAssembly -- [llama-swap](https://github.com/mostlygeek/llama-swap) - transparent proxy that adds automatic model switching with llama-server -- [Kalavai](https://github.com/kalavai-net/kalavai-client) - Crowdsource end to end LLM deployment at any scale -- [llmaz](https://github.com/InftyAI/llmaz) - ☸️ Easy, advanced inference platform for large language models on Kubernetes. -
- -
-Games - -- [Lucy's Labyrinth](https://github.com/MorganRO8/Lucys_Labyrinth) - A simple maze game where agents controlled by an AI model will try to trick you. - -
- - -## Supported backends - -| Backend | Target devices | -| --- | --- | -| [Metal](docs/build.md#metal-build) | Apple Silicon | -| [BLAS](docs/build.md#blas-build) | All | -| [BLIS](docs/backend/BLIS.md) | All | -| [SYCL](docs/backend/SYCL.md) | Intel and Nvidia GPU | -| [MUSA](docs/build.md#musa) | Moore Threads GPU | -| [CUDA](docs/build.md#cuda) | Nvidia GPU | -| [HIP](docs/build.md#hip) | AMD GPU | -| [ZenDNN](docs/build.md#zendnn) | AMD CPU | -| [Vulkan](docs/build.md#vulkan) | GPU | -| [CANN](docs/build.md#cann) | Ascend NPU | -| [OpenCL](docs/backend/OPENCL.md) | Adreno GPU | -| [IBM zDNN](docs/backend/zDNN.md) | IBM Z & LinuxONE | -| [WebGPU [In Progress]](docs/build.md#webgpu) | All | -| [RPC](https://github.com/ggml-org/llama.cpp/tree/master/tools/rpc) | All | -| [Hexagon [In Progress]](docs/backend/hexagon/README.md) | Snapdragon | - -## Obtaining and quantizing models - -The [Hugging Face](https://huggingface.co) platform hosts a [number of LLMs](https://huggingface.co/models?library=gguf&sort=trending) compatible with `llama.cpp`: - -- [Trending](https://huggingface.co/models?library=gguf&sort=trending) -- [LLaMA](https://huggingface.co/models?sort=trending&search=llama+gguf) - -You can either manually download the GGUF file or directly use any `llama.cpp`-compatible models from [Hugging Face](https://huggingface.co/) or other model hosting sites, such as [ModelScope](https://modelscope.cn/), by using this CLI argument: `-hf /[:quant]`. For example: - -```sh -llama-cli -hf ggml-org/gemma-3-1b-it-GGUF -``` - -By default, the CLI would download from Hugging Face, you can switch to other options with the environment variable `MODEL_ENDPOINT`. For example, you may opt to downloading model checkpoints from ModelScope or other model sharing communities by setting the environment variable, e.g. `MODEL_ENDPOINT=https://www.modelscope.cn/`. - -After downloading a model, use the CLI tools to run it locally - see below. - -`llama.cpp` requires the model to be stored in the [GGUF](https://github.com/ggml-org/ggml/blob/master/docs/gguf.md) file format. Models in other data formats can be converted to GGUF using the `convert_*.py` Python scripts in this repo. - -The Hugging Face platform provides a variety of online tools for converting, quantizing and hosting models with `llama.cpp`: - -- Use the [GGUF-my-repo space](https://huggingface.co/spaces/ggml-org/gguf-my-repo) to convert to GGUF format and quantize model weights to smaller sizes -- Use the [GGUF-my-LoRA space](https://huggingface.co/spaces/ggml-org/gguf-my-lora) to convert LoRA adapters to GGUF format (more info: https://github.com/ggml-org/llama.cpp/discussions/10123) -- Use the [GGUF-editor space](https://huggingface.co/spaces/CISCai/gguf-editor) to edit GGUF meta data in the browser (more info: https://github.com/ggml-org/llama.cpp/discussions/9268) -- Use the [Inference Endpoints](https://ui.endpoints.huggingface.co/) to directly host `llama.cpp` in the cloud (more info: https://github.com/ggml-org/llama.cpp/discussions/9669) - -To learn more about model quantization, [read this documentation](tools/quantize/README.md) - -## [`llama-cli`](tools/cli) - -#### A CLI tool for accessing and experimenting with most of `llama.cpp`'s functionality. - --
- Run in conversation mode - - Models with a built-in chat template will automatically activate conversation mode. If this doesn't occur, you can manually enable it by adding `-cnv` and specifying a suitable chat template with `--chat-template NAME` - - ```bash - llama-cli -m model.gguf - - # > hi, who are you? - # Hi there! I'm your helpful assistant! I'm an AI-powered chatbot designed to assist and provide information to users like you. I'm here to help answer your questions, provide guidance, and offer support on a wide range of topics. I'm a friendly and knowledgeable AI, and I'm always happy to help with anything you need. What's on your mind, and how can I assist you today? - # - # > what is 1+1? - # Easy peasy! The answer to 1+1 is... 2! - ``` - -
- --
- Run in conversation mode with custom chat template - - ```bash - # use the "chatml" template (use -h to see the list of supported templates) - llama-cli -m model.gguf -cnv --chat-template chatml - - # use a custom template - llama-cli -m model.gguf -cnv --in-prefix 'User: ' --reverse-prompt 'User:' - ``` - -
- --
- Constrain the output with a custom grammar - - ```bash - llama-cli -m model.gguf -n 256 --grammar-file grammars/json.gbnf -p 'Request: schedule a call at 8pm; Command:' - - # {"appointmentTime": "8pm", "appointmentDetails": "schedule a a call"} - ``` - - The [grammars/](grammars/) folder contains a handful of sample grammars. To write your own, check out the [GBNF Guide](grammars/README.md). - - For authoring more complex JSON grammars, check out https://grammar.intrinsiclabs.ai/ - -
- - -## [`llama-server`](tools/server) - -#### A lightweight, [OpenAI API](https://github.com/openai/openai-openapi) compatible, HTTP server for serving LLMs. - --
- Start a local HTTP server with default configuration on port 8080 - - ```bash - llama-server -m model.gguf --port 8080 - - # Basic web UI can be accessed via browser: http://localhost:8080 - # Chat completion endpoint: http://localhost:8080/v1/chat/completions - ``` - -
- --
- Support multiple-users and parallel decoding - - ```bash - # up to 4 concurrent requests, each with 4096 max context - llama-server -m model.gguf -c 16384 -np 4 - ``` - -
- --
- Enable speculative decoding - - ```bash - # the draft.gguf model should be a small variant of the target model.gguf - llama-server -m model.gguf -md draft.gguf - ``` - -
- --
- Serve an embedding model - - ```bash - # use the /embedding endpoint - llama-server -m model.gguf --embedding --pooling cls -ub 8192 - ``` - -
- --
- Serve a reranking model - - ```bash - # use the /reranking endpoint - llama-server -m model.gguf --reranking - ``` - -
- --
- Constrain all outputs with a grammar - - ```bash - # custom grammar - llama-server -m model.gguf --grammar-file grammar.gbnf - - # JSON - llama-server -m model.gguf --grammar-file grammars/json.gbnf - ``` - -
- - -## [`llama-perplexity`](tools/perplexity) - -#### A tool for measuring the [perplexity](tools/perplexity/README.md) [^1] (and other quality metrics) of a model over a given text. - --
- Measure the perplexity over a text file - - ```bash - llama-perplexity -m model.gguf -f file.txt - - # [1]15.2701,[2]5.4007,[3]5.3073,[4]6.2965,[5]5.8940,[6]5.6096,[7]5.7942,[8]4.9297, ... - # Final estimate: PPL = 5.4007 +/- 0.67339 - ``` - -
- --
- Measure KL divergence - - ```bash - # TODO - ``` - -
- -[^1]: [https://huggingface.co/docs/transformers/perplexity](https://huggingface.co/docs/transformers/perplexity) - -## [`llama-bench`](tools/llama-bench) - -#### Benchmark the performance of the inference for various parameters. - --
- Run default benchmark - - ```bash - llama-bench -m model.gguf - - # Output: - # | model | size | params | backend | threads | test | t/s | - # | ------------------- | ---------: | ---------: | ---------- | ------: | ------------: | -------------------: | - # | qwen2 1.5B Q4_0 | 885.97 MiB | 1.54 B | Metal,BLAS | 16 | pp512 | 5765.41 Β± 20.55 | - # | qwen2 1.5B Q4_0 | 885.97 MiB | 1.54 B | Metal,BLAS | 16 | tg128 | 197.71 Β± 0.81 | - # - # build: 3e0ba0e60 (4229) - ``` - -
- -## [`llama-run`](tools/run) - -#### A comprehensive example for running `llama.cpp` models. Useful for inferencing. Used with RamaLama [^3]. - --
- Run a model with a specific prompt (by default it's pulled from Ollama registry) - - ```bash - llama-run granite-code - ``` - -
- -[^3]: [RamaLama](https://github.com/containers/ramalama) - -## [`llama-simple`](examples/simple) - -#### A minimal example for implementing apps with `llama.cpp`. Useful for developers. - --
- Basic text completion - - ```bash - llama-simple -m model.gguf - - # Hello my name is Kaitlyn and I am a 16 year old girl. I am a junior in high school and I am currently taking a class called "The Art of - ``` - -
- - -## Contributing - -- Contributors can open PRs -- Collaborators will be invited based on contributions -- Maintainers can push to branches in the `llama.cpp` repo and merge PRs into the `master` branch -- Any help with managing issues, PRs and projects is very appreciated! -- See [good first issues](https://github.com/ggml-org/llama.cpp/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) for tasks suitable for first contributions -- Read the [CONTRIBUTING.md](CONTRIBUTING.md) for more information -- Make sure to read this: [Inference at the edge](https://github.com/ggml-org/llama.cpp/discussions/205) -- A bit of backstory for those who are interested: [Changelog podcast](https://changelog.com/podcast/532) - -## Other documentation - -- [cli](tools/cli/README.md) -- [completion](tools/completion/README.md) -- [server](tools/server/README.md) -- [GBNF grammars](grammars/README.md) - -#### Development documentation - -- [How to build](docs/build.md) -- [Running on Docker](docs/docker.md) -- [Build on Android](docs/android.md) -- [Performance troubleshooting](docs/development/token_generation_performance_tips.md) -- [GGML tips & tricks](https://github.com/ggml-org/llama.cpp/wiki/GGML-Tips-&-Tricks) - -#### Seminal papers and background on the models - -If your issue is with model generation quality, then please at least scan the following links and papers to understand the limitations of LLaMA models. This is especially important when choosing an appropriate model size and appreciating both the significant and subtle differences between LLaMA models and ChatGPT: -- LLaMA: - - [Introducing LLaMA: A foundational, 65-billion-parameter large language model](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) - - [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) -- GPT-3 - - [Language Models are Few-Shot Learners](https://arxiv.org/abs/2005.14165) -- GPT-3.5 / InstructGPT / ChatGPT: - - [Aligning language models to follow instructions](https://openai.com/research/instruction-following) - - [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155) - -## XCFramework -The XCFramework is a precompiled version of the library for iOS, visionOS, tvOS, -and macOS. It can be used in Swift projects without the need to compile the -library from source. For example: -```swift -// swift-tools-version: 5.10 -// The swift-tools-version declares the minimum version of Swift required to build this package. - -import PackageDescription - -let package = Package( - name: "MyLlamaPackage", - targets: [ - .executableTarget( - name: "MyLlamaPackage", - dependencies: [ - "LlamaFramework" - ]), - .binaryTarget( - name: "LlamaFramework", - url: "https://github.com/ggml-org/llama.cpp/releases/download/b5046/llama-b5046-xcframework.zip", - checksum: "c19be78b5f00d8d29a25da41042cb7afa094cbf6280a225abe614b03b20029ab" - ) - ] -) -``` -The above example is using an intermediate build `b5046` of the library. This can be modified -to use a different version by changing the URL and checksum. - -## Completions -Command-line completion is available for some environments. - -#### Bash Completion -```bash -$ build/bin/llama-cli --completion-bash > ~/.llama-completion.bash -$ source ~/.llama-completion.bash -``` -Optionally this can be added to your `.bashrc` or `.bash_profile` to load it -automatically. 
-automatically. For example:
-```console
-$ echo "source ~/.llama-completion.bash" >> ~/.bashrc
-```
-
-## Dependencies
-
-- [yhirose/cpp-httplib](https://github.com/yhirose/cpp-httplib) - Single-header HTTP server, used by `llama-server` - MIT license
-- [stb-image](https://github.com/nothings/stb) - Single-header image format decoder, used by multimodal subsystem - Public domain
-- [nlohmann/json](https://github.com/nlohmann/json) - Single-header JSON library, used by various tools/examples - MIT License
-- [minja](https://github.com/google/minja) - Minimal Jinja parser in C++, used by various tools/examples - MIT License
-- [linenoise.cpp](./tools/run/linenoise.cpp/linenoise.cpp) - C++ library that provides readline-like line editing capabilities, used by `llama-run` - BSD 2-Clause License
-- [curl](https://curl.se/) - Client-side URL transfer library, used by various tools/examples - [CURL License](https://curl.se/docs/copyright.html)
-- [miniaudio.h](https://github.com/mackron/miniaudio) - Single-header audio format decoder, used by multimodal subsystem - Public domain
-- [subprocess.h](https://github.com/sheredom/subprocess.h) - Single-header process launching solution for C and C++ - Public domain
+MIT (same as llama.cpp)

From 650c8f31ae75f26d20973ec23f0ccbf7be75542c Mon Sep 17 00:00:00 2001
From: EliteGPT AI
Date: Wed, 31 Dec 2025 20:41:56 +1000
Subject: [PATCH 4/4] docs: simplify - server-first approach

---
 README.md | 110 +++++++++++++++++++++++++++---------------------------
 1 file changed, 54 insertions(+), 56 deletions(-)

diff --git a/README.md b/README.md
index 92dbccf9dd..9fe32c1433 100644
--- a/README.md
+++ b/README.md
@@ -2,83 +2,81 @@
 
 > **Fork with Qwen3-Omni multimodal architecture support**
 
-[![Qwen3-Omni](https://img.shields.io/badge/Qwen3--Omni-Supported-green)](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF)
-[![Models](https://img.shields.io/badge/GGUF%20Models-HuggingFace-yellow)](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF)
-
-## What's Added
-
-Support for **Qwen3-Omni**, Alibaba's multimodal LLM:
-
-- `LLM_ARCH_QWEN3OMNI` - Main LLM architecture (MoE: 48 layers, 128 experts)
-- `PROJECTOR_TYPE_QWEN3O` - Vision encoder support
-- IMROPE position encoding for multimodal inputs
-
-**Note:** Audio encoder support is WIP.
-
 ## Quick Start
 
 ```bash
-# Clone this fork
+# Clone and build
 git clone https://github.com/phnxsystms/llama.cpp.git
 cd llama.cpp
+cmake -B build -DGGML_CUDA=ON
+cmake --build build -j
 
-# Build with CUDA
-mkdir build && cd build
-cmake .. -DGGML_CUDA=ON -DCMAKE_BUILD_TYPE=Release
-cmake --build . -j
-
-# Download models from HuggingFace
+# Download models
 huggingface-cli download phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF --local-dir models/
-
-# Text inference
-./bin/llama-cli -m models/qwen3-omni-30B-Q8_0.gguf -p "Hello!" -ngl 99
-
-# Vision inference
-./bin/llama-mtmd-cli \
-  -m models/qwen3-omni-30B-Q8_0.gguf \
-  --mmproj models/mmproj-qwen3-omni-30B-F16-fixed.gguf \
-  --image your_image.jpg \
-  -p "What's in this image?"
 ```
 
-## Distributed Inference (RPC)
+## Run Server (Recommended)
 
-For large models, use llama.cpp's RPC backend to distribute across multiple machines:
+Spin up an OpenAI-compatible API server:
 
 ```bash
-# On worker nodes - start RPC server
-./bin/llama-rpc-server --host 0.0.0.0 --port 50052
+./build/bin/llama-server \
+  -m models/qwen3-omni-30B-Q8_0.gguf \
+  --mmproj models/mmproj-qwen3-omni-30B-F16-fixed.gguf \
+  --host 0.0.0.0 \
+  --port 8080 \
+  -ngl 99
+```
 
-# On main node - connect to workers
-./bin/llama-cli \
+Then query it:
+```bash
+curl http://localhost:8080/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{"messages":[{"role":"user","content":"Hello!"}]}'
+```
+
+## CLI Usage
+
+```bash
+# Text
+./build/bin/llama-cli -m models/qwen3-omni-30B-Q8_0.gguf -p "Hello!" -ngl 99
+
+# Vision
+./build/bin/llama-mtmd-cli \
+  -m models/qwen3-omni-30B-Q8_0.gguf \
+  --mmproj models/mmproj-qwen3-omni-30B-F16-fixed.gguf \
+  --image photo.jpg \
+  -p "Describe this image"
+```
+
+## Multi-GPU / Distributed
+
+The Q8_0 model is 31GB. For multi-GPU or distributed inference:
+
+```bash
+# Distributed: build with -DGGML_RPC=ON, then start the RPC server on each worker
+./build/bin/rpc-server --host 0.0.0.0 --port 50052
+
+# Main node: connect to the workers
+./build/bin/llama-server \
   -m models/qwen3-omni-30B-Q8_0.gguf \
   --rpc worker1:50052,worker2:50052 \
-  -ngl 99 \
-  -p "Hello!"
+  -ngl 99
 ```
 
 ## Models
 
-| Model | Size | Description |
-|-------|------|-------------|
-| [qwen3-omni-30B-Q8_0.gguf](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF/resolve/main/qwen3-omni-30B-Q8_0.gguf) | 31GB | Main LLM (Q8_0) |
-| [mmproj-qwen3-omni-30B-F16-fixed.gguf](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF/resolve/main/mmproj-qwen3-omni-30B-F16-fixed.gguf) | 2.3GB | Vision projector (F16) |
+| File | Size |
+|------|------|
+| [qwen3-omni-30B-Q8_0.gguf](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF) | 31GB |
+| [mmproj-qwen3-omni-30B-F16-fixed.gguf](https://huggingface.co/phnxsystms/Qwen3-Omni-30B-A3B-Instruct-GGUF) | 2.3GB |
 
-## Performance
+## Status
 
-Tested on multi-GPU distributed setup:
-- **41-44 tokens/sec** inference speed
-- Text and vision inference working
-
-## Files Changed
-
-```
-src/llama-arch.cpp    # Architecture registration
-src/llama-model.cpp   # Model loading & graph building
-tools/mtmd/clip.cpp   # Vision projector support
-tools/mtmd/mtmd.cpp   # Multimodal pipeline
-```
+- βœ… Text inference
+- βœ… Vision inference
+- 🚧 Audio (WIP)
 
 ## License
 
-MIT (same as llama.cpp)
+MIT
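
For readers who want to exercise the vision path through the server rather than `llama-mtmd-cli`, the following is a minimal sketch of a client request, not part of the patch above. It assumes the fork's `llama-server` exposes upstream llama.cpp's OpenAI-compatible `/v1/chat/completions` endpoint with `image_url` content parts (images passed as base64 data URIs), and that the server is listening on `localhost:8080` as configured in the Run Server section; `photo.jpg` is a placeholder path.

```python
import base64
import json
import urllib.request

# Read and base64-encode the image (placeholder path).
with open("photo.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

# OpenAI-style chat request mixing a text part and an image_url part.
payload = {
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image"},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"},
                },
            ],
        }
    ],
}

req = urllib.request.Request(
    "http://localhost:8080/v1/chat/completions",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)

# Print the assistant's reply from the first choice.
with urllib.request.urlopen(req) as resp:
    reply = json.load(resp)
    print(reply["choices"][0]["message"]["content"])
```

If the endpoint or payload shape differs in this fork, treat the `curl` example in the Run Server section as the authoritative reference.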