diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 3fdfc5bf56..8cc4963fb2 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -514,8 +514,7 @@ class ModelBase: raise NotImplementedError("set_gguf_parameters() must be implemented in subclasses") def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - + del bid # unused return [(self.map_tensor_name(name), data_torch)] def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool: @@ -1981,13 +1980,9 @@ class GPTNeoXModel(TextModel): self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"]) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) - tensors: list[tuple[str, Tensor]] = [] - if re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.weight", name): # Map bloom-style qkv_linear to gpt-style qkv_linear # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa @@ -2014,9 +2009,7 @@ class GPTNeoXModel(TextModel): ) logger.info("re-format attention.linear_qkv.bias") - tensors.append((self.map_tensor_name(name), data_torch)) - - return tensors + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("BloomForCausalLM", "BloomModel") @@ -2036,15 +2029,11 @@ class BloomModel(TextModel): self.gguf_writer.add_file_type(self.ftype) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) name = re.sub(r'transformer\.', '', name) - tensors: list[tuple[str, Tensor]] = [] - if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name): # Map bloom-style qkv_linear to gpt-style qkv_linear # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa @@ -2071,9 +2060,7 @@ class BloomModel(TextModel): ) logger.info("re-format attention.linear_qkv.bias") - tensors.append((self.map_tensor_name(name), data_torch)) - - return tensors + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("MPTForCausalLM") @@ -2108,15 +2095,13 @@ class MPTModel(TextModel): self.gguf_writer.add_max_alibi_bias(0.0) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - if "scales" in name: new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales")) new_name = new_name.replace("scales", "act.scales") else: new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias")) - return [(new_name, data_torch)] + yield from super().modify_tensors(data_torch, new_name, bid) @ModelBase.register("OrionForCausalLM") @@ -2170,11 +2155,9 @@ class BaichuanModel(TextModel): head_count = self.hparams["num_attention_heads"] head_count_kv = self.hparams.get("num_key_value_heads", head_count) - tensors: list[tuple[str, Tensor]] = [] - if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight": logger.info(f"Unpacking and permuting layer {bid}") - tensors = [ + yield from [ 
(self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)), (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), @@ -2183,9 +2166,7 @@ class BaichuanModel(TextModel): self._reverse_hf_part(data_torch, 2)), ] else: - tensors = [(self.map_tensor_name(name), data_torch)] - - return tensors + yield from super().modify_tensors(data_torch, name, bid) def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: if n_kv_head is not None and n_head != n_kv_head: @@ -2266,8 +2247,6 @@ class XverseModel(TextModel): self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - head_count = self.hparams["num_attention_heads"] head_count_kv = self.hparams.get("num_key_value_heads", head_count) @@ -2277,7 +2256,7 @@ class XverseModel(TextModel): if name.endswith("k_proj.weight"): data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv) - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: if n_kv_head is not None and n_head != n_kv_head: @@ -2314,8 +2293,6 @@ class FalconModel(TextModel): self.gguf_writer.add_file_type(self.ftype) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - # QKV tensor transform # The original query_key_value tensor contains n_head_kv "kv groups", # each consisting of n_head/n_head_kv query weights followed by one key @@ -2337,7 +2314,7 @@ class FalconModel(TextModel): v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head) data_torch = torch.cat((q, k, v)).reshape_as(data_torch) - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("GPTBigCodeForCausalLM") @@ -2399,22 +2376,20 @@ class RefactModel(TextModel): n_head_kv = 1 head_dim = self.hparams["n_embd"] // n_head - tensors: list[tuple[str, Tensor]] = [] - if bid is not None: if name == f"transformer.h.{bid}.attn.kv.weight": - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim])) - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:])) - elif name == f"transformer.h.{bid}.attn.q.weight": - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch)) - elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight": - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])) - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:])) + yield from super().modify_tensors(data_torch[:n_head_kv * head_dim], self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), bid) + yield from super().modify_tensors(data_torch[n_head_kv * head_dim:], self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), bid) + return + if name == f"transformer.h.{bid}.attn.q.weight": + yield from super().modify_tensors(data_torch, self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), bid) + return + if name == f"transformer.h.{bid}.mlp.gate_up_proj.weight": + yield from super().modify_tensors(data_torch[:ff_dim], 
self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), bid) + yield from super().modify_tensors(data_torch[ff_dim:], self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), bid) + return - if len(tensors) == 0: - tensors.append((self.map_tensor_name(name), data_torch)) - - return tensors + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM") @@ -2461,7 +2436,7 @@ class StableLMModel(TextModel): if len(self._q_norms[bid]) >= n_head: - return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm") - else: - return [] + yield from self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm") + return if name.find("k_layernorm.norms") != -1: assert bid is not None @@ -2474,9 +2449,9 @@ class StableLMModel(TextModel): if len(self._k_norms[bid]) >= n_kv_head: - return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm") - else: - return [] + yield from self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm") + return - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"): datas: list[Tensor] = [] @@ -2488,9 +2463,8 @@ class StableLMModel(TextModel): data_torch = torch.stack(datas, dim=0) merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight" - new_name = self.map_tensor_name(merged_name) - return [(new_name, data_torch)] + yield from super().modify_tensors(data_torch, merged_name, bid) def prepare_tensors(self): super().prepare_tensors() @@ -2616,7 +2590,7 @@ class LlamaModel(TextModel): ) if is_multimodal_tensor: - return [] # skip vision tensors + return # skip vision tensors elif self.hf_arch == "LlamaModel": name = "model." + name elif name.startswith("model.text_model"): @@ -2642,8 +2616,6 @@ class LlamaModel(TextModel): self._experts[bid][name] = data_torch if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - # merge the experts into a single 3d tensor for wid in ["w1", "w2", "w3"]: datas: list[Tensor] = [] @@ -2657,14 +2629,12 @@ class LlamaModel(TextModel): merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight" - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors + yield from super().modify_tensors(data_torch, merged_name, bid) + return else: - return [] + return - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: if rope_params := self.rope_parameters.get("full_attention", self.rope_parameters): @@ -2755,8 +2725,6 @@ class AfmoeModel(LlamaModel): self._experts[bid][name] = data_torch if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - # merge the experts into a single 3d tensor for w_name in ["gate_proj", "up_proj", "down_proj"]: datas: list[Tensor] = [] @@ -2768,17 +2736,16 @@ class AfmoeModel(LlamaModel): data_torch = torch.stack(datas, dim=0) merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - new_name = self.map_tensor_name(merged_name) - tensors.append((new_name, data_torch)) + yield from super().modify_tensors(data_torch, merged_name, bid) - return tensors + return else: - return [] + return if name.endswith(".expert_bias"): name = name.replace(".expert_bias", ".expert_bias.bias") - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register( @@ -2835,7 +2802,6 @@ class 
LlavaVisionModel(MmprojModel): self.gguf_writer.add_vision_spatial_merge_size(self.global_config["spatial_merge_size"]) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused n_head = ( self.hparams["num_attention_heads"] if not self.is_mistral_format else self.find_vparam(["num_attention_heads"]) ) @@ -2856,7 +2822,8 @@ class LlavaVisionModel(MmprojModel): data_torch = LlamaModel.permute(data_torch, n_head, n_head) if name.endswith(("k_proj.weight", "k_proj.bias")) and not self.is_mistral_format: data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) + return embed_key = "embed_tokens.weight" if not self.is_mistral_format else "tok_embeddings.weight" if self.img_break_tok_id > 0 and embed_key in name: @@ -2864,9 +2831,9 @@ class LlavaVisionModel(MmprojModel): # for pixtral model, we need to extract the [IMG_BREAK] token embedding img_break_embd = data_torch[self.img_break_tok_id] name = gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_TOK_EMBD_IMG_BREAK] - return [(self.map_tensor_name(name), img_break_embd)] + yield from super().modify_tensors(img_break_embd, name, bid) - return [] # skip other tensors + return # skip other tensors @ModelBase.register("Idefics3ForConditionalGeneration", "SmolVLMForConditionalGeneration") @@ -2897,13 +2864,12 @@ class SmolVLMModel(MmprojModel): return super().tensor_force_quant(name, new_name, bid, n_dims) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused is_vision_tensor = "vision_tower" in name or "vision_model" in name or "model.connector" in name if is_vision_tensor: - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) - return [] # skip other tensors + return # skip other tensors @ModelBase.register( @@ -2942,18 +2908,17 @@ class Llama4Model(LlamaModel): name_gate = name.replace("gate_up_proj", "gate_proj.weight") dim_half = data_torch.shape[-1] // 2 gate_proj_weight, up_proj_weight = data_torch.transpose(-1, -2).split(dim_half, dim=-2) - return [ - (self.map_tensor_name(name_gate), gate_proj_weight), - (self.map_tensor_name(name_up), up_proj_weight) - ] + yield from super().modify_tensors(gate_proj_weight, name_gate, bid) + yield from super().modify_tensors(up_proj_weight, name_up, bid) + return if name.endswith("down_proj"): name += ".weight" data_torch = data_torch.transpose(-1, -2) if "multi_modal_projector" in name or "vision_model" in name: - return [] - return super().modify_tensors(data_torch, name, bid) + return + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Llama4ForConditionalGeneration") @@ -2967,16 +2932,15 @@ class Llama4VisionModel(MmprojModel): self.gguf_writer.add_vision_use_gelu(True) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused if "multi_modal_projector" in name or "vision_model" in name: # process vision tensors if "positional_embedding_vlm" in name and ".weight" not in name: name += ".weight" if "multi_modal_projector.linear_1" in name: # despite the name with number postfix, this is a single fully connected layer - return [(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_MMPROJ_FC] + '.weight', data_torch)] - return [(self.map_tensor_name(name), data_torch)] - return [] + yield 
(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_MMPROJ_FC] + '.weight', data_torch) + else: + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register( @@ -3008,9 +2972,9 @@ class Mistral3Model(LlamaModel): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None): name = name.replace("language_model.", "") if "multi_modal_projector" in name or "vision_tower" in name: - return [] + return - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("DeciLMForCausalLM") @@ -3149,7 +3113,7 @@ class DeciModel(TextModel): data_torch = DeciModel.permute(data_torch, n_head, n_head) if name.endswith(("k_proj.weight", "k_proj.bias")): data_torch = DeciModel.permute(data_torch, n_head, n_kv_head) - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: if rope_params := self.rope_parameters.get("full_attention", self.rope_parameters): @@ -3223,7 +3187,7 @@ class BitnetModel(TextModel): # transform weight into 1/0/-1 (in fp32) data_torch = self.weight_quant(data_torch) - yield (new_name, data_torch) + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("GrokForCausalLM", "Grok1ForCausalLM") @@ -3279,11 +3243,11 @@ class GrokModel(TextModel): _cur_expert = "" def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - tensors: list[tuple[str, Tensor]] = [] + deferred: list[tuple[Tensor, str, int | None]] = [] is_expert = ".moe." in name or ".block_sparse_moe.experts." in name if not is_expert: - tensors.append((self.map_tensor_name(name), data_torch)) + deferred.append((data_torch, name, bid)) # process the experts separately if is_expert or self._cur_expert: @@ -3298,11 +3262,11 @@ class GrokModel(TextModel): if name in self._experts[bid]: self._cur_expert = name self._experts[bid][name].append(data_torch) - return [] + return elif is_expert: self._cur_expert = name self._experts[bid][name] = [data_torch] - return [] + return else: self._cur_expert = "" @@ -3324,11 +3288,10 @@ class GrokModel(TextModel): merged_name = f"transformer.decoder_layer.{bid}.moe.{wid[0]}.weight" - new_name = self.map_tensor_name(merged_name) + yield from super().modify_tensors(data_torch, merged_name, bid) - yield (new_name, data_torch) - - yield from tensors + for t in deferred: + yield from super().modify_tensors(*t) @ModelBase.register("DbrxForCausalLM") @@ -3360,8 +3323,6 @@ class DbrxModel(TextModel): logger.info(f"gguf: file type = {self.ftype}") def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - n_expert = self.hparams["ffn_config"]["moe_num_experts"] n_ff = self.hparams["ffn_config"]["ffn_hidden_size"] n_embd = self.hparams["d_model"] @@ -3392,7 +3353,7 @@ class DbrxModel(TextModel): # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15 new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",)) - return [(new_name, data_torch)] + yield from super().modify_tensors(data_torch, new_name, bid) def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool: del name, new_name, bid # unused @@ -3437,8 +3398,6 @@ class MiniCPMModel(TextModel): self._set_vocab_sentencepiece() def modify_tensors(self, data_torch: Tensor, 
name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - n_head = self.hparams["num_attention_heads"] n_kv_head = self.hparams.get("num_key_value_heads") @@ -3448,7 +3407,7 @@ class MiniCPMModel(TextModel): if name.endswith(("k_proj.weight")): data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("MiniCPM3ForCausalLM") @@ -3558,7 +3517,7 @@ class Qwen2Model(TextModel): or name.startswith("vision_model") or name.startswith("audio_tower") \ or name.startswith("model.vision_tower") or name.startswith("model.multi_modal_projector"): # skip vision and audio tensors - return [] + return yield from super().modify_tensors(data_torch, name, bid) @@ -3755,23 +3714,20 @@ class Ernie4_5Model(TextModel): total_k_dim = num_kv_heads * head_dim total_v_dim = num_kv_heads * head_dim q_proj_weight, k_proj_weight, v_proj_weight = data_torch.split([total_q_dim, total_k_dim, total_v_dim], dim=0) - return [ - (self.map_tensor_name(name_q), q_proj_weight), - (self.map_tensor_name(name_k), k_proj_weight), - (self.map_tensor_name(name_v), v_proj_weight) - ] + yield from super().modify_tensors(q_proj_weight, name_q, bid) + yield from super().modify_tensors(k_proj_weight, name_k, bid) + yield from super().modify_tensors(v_proj_weight, name_v, bid) # split the up_gate_proj into gate and up # up_gate_proj shape: [2 * intermediate_size, hidden_size] - if "up_gate_proj" in name: + elif "up_gate_proj" in name: name_up = name.replace("up_gate_proj.weight", "up_proj.weight") name_gate = name.replace("up_gate_proj.weight", "gate_proj.weight") dim_half = data_torch.shape[0] // 2 gate_proj_weight, up_proj_weight = data_torch.split(dim_half, dim=0) - return [ - (self.map_tensor_name(name_gate), gate_proj_weight), - (self.map_tensor_name(name_up), up_proj_weight) - ] - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(gate_proj_weight, name_gate, bid) + yield from super().modify_tensors(up_proj_weight, name_up, bid) + else: + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Ernie4_5_MoeForCausalLM") @@ -3804,20 +3760,20 @@ class Ernie4_5MoeModel(Ernie4_5Model): # skip Multi-Token Prediction (MTP) layers (again, same as DeepseekV2) match = re.match(r"model.mtp_block.(\d+)", name) if match: - return [] + return # skip all other MTP tensors for now match = re.match(r"model.mtp_emb_norm.(\d+)", name) if match: - return [] + return match = re.match(r"model.mtp_hidden_norm.(\d+)", name) if match: - return [] + return match = re.match(r"model.mtp_linear_proj.(\d+)", name) if match: - return [] + return # process the experts separately if name.find("mlp.experts") != -1: @@ -3830,8 +3786,6 @@ class Ernie4_5MoeModel(Ernie4_5Model): self._experts[bid][name] = data_torch if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - # merge the experts into a single 3d tensor for w_name in ["gate_proj", "up_proj", "down_proj"]: datas: list[Tensor] = [] @@ -3843,13 +3797,9 @@ class Ernie4_5MoeModel(Ernie4_5Model): data_torch = torch.stack(datas, dim=0) merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - new_name = self.map_tensor_name(merged_name) - tensors.append((new_name, data_torch)) - - return tensors - else: - return [] - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, merged_name, bid) + else: + yield from 
super().modify_tensors(data_torch, name, bid) def prepare_tensors(self): super().prepare_tensors() @@ -3880,14 +3830,13 @@ class Qwen2VLModel(TextModel): self._set_vocab_gpt2() def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused if name.startswith("thinker."): name = name.replace("thinker.", "") if name.startswith("visual") or name.startswith("audio") or \ name.startswith("talker") or name.startswith("token2wav"): # skip multimodal tensors - return [] - return [(self.map_tensor_name(name), data_torch)] + return + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Qwen2VLModel", "Qwen2VLForConditionalGeneration", "Qwen2_5_VLForConditionalGeneration") @@ -3936,7 +3885,6 @@ class Qwen2VLVisionModel(MmprojModel): return super().tensor_force_quant(name, new_name, bid, n_dims) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused if name.startswith("visual."): # process visual tensors # split QKV tensors if needed @@ -3950,23 +3898,18 @@ class Qwen2VLVisionModel(MmprojModel): wq = data_torch[:c] wk = data_torch[c: c * 2] wv = data_torch[c * 2:] - return [ - (self.map_tensor_name(name.replace("qkv", "q")), wq), - (self.map_tensor_name(name.replace("qkv", "k")), wk), - (self.map_tensor_name(name.replace("qkv", "v")), wv), - ] + yield from super().modify_tensors(wq, name.replace("qkv", "q"), bid) + yield from super().modify_tensors(wk, name.replace("qkv", "k"), bid) + yield from super().modify_tensors(wv, name.replace("qkv", "v"), bid) elif 'patch_embed.proj.weight' in name: # split Conv3D into Conv2Ds c1, c2, kt, kh, kw = data_torch.shape del c1, c2, kh, kw # unused assert kt == 2, "Current implmentation only support temporal_patch_size of 2" - return [ - (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight" , data_torch[:, :, 0, ...]), - (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight.1", data_torch[:, :, 1, ...]), - ] + yield (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight" , data_torch[:, :, 0, ...]) + yield (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight.1", data_torch[:, :, 1, ...]) else: - return [(self.map_tensor_name(name), data_torch)] - return [] # skip other tensors + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Qwen2_5OmniModel") @@ -4022,10 +3965,8 @@ class Qwen25OmniModel(Qwen2VLVisionModel): if "audio_bos_eos_token" in name: # this tensor is left unused in transformers code # https://github.com/huggingface/transformers/blob/6e3063422c4b1c014aa60c32b9254fd2902f0f28/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py#L1809 - return [] - return [(self.map_tensor_name(name), data_torch)] - - return super().modify_tensors(data_torch, name, bid) + return + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("InternVisionModel") @@ -4072,7 +4013,6 @@ class InternVisionModel(MmprojModel): return name def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused vision_prefix = ['vision_model', 'mlp', 'model.vision_tower', 'model.multi_modal_projector'] # deal with intern-s1 special case name = self._mapping_interns1_name(name) @@ -4094,13 +4034,11 @@ class InternVisionModel(MmprojModel): wq = data_torch[:c] wk = data_torch[c: c * 2] wv = data_torch[c * 2:] - return [ - (self.map_tensor_name(name.replace("attn.qkv", 
"self_attn.q_proj")), wq), - (self.map_tensor_name(name.replace("attn.qkv", "self_attn.k_proj")), wk), - (self.map_tensor_name(name.replace("attn.qkv", "self_attn.v_proj")), wv), - ] - return [(self.map_tensor_name(name), data_torch)] - return [] # skip other tensors + yield from super().modify_tensors(wq, name.replace("attn.qkv", "self_attn.q_proj"), bid) + yield from super().modify_tensors(wk, name.replace("attn.qkv", "self_attn.k_proj"), bid) + yield from super().modify_tensors(wv, name.replace("attn.qkv", "self_attn.v_proj"), bid) + else: + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("WavTokenizerDec") @@ -4108,18 +4046,16 @@ class WavTokenizerDecModel(TextModel): model_arch = gguf.MODEL_ARCH.WAVTOKENIZER_DEC def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - if \ name.endswith("codebook.cluster_size") or \ name.endswith("codebook.embed_avg") or \ name.endswith("codebook.inited"): logger.debug(f"Skipping {name!r}") - return [] + return logger.info(f"{self.map_tensor_name(name)} -> {data_torch.shape}") - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def set_vocab(self): self._set_vocab_none() @@ -4174,7 +4110,8 @@ class Qwen2MoeModel(TextModel): # Need PyTorch: (128, 2048, 768) [reversed of GGML] # So: permute(0, 2, 1): (128, 768, 2048) -> (128, 2048, 768) permuted = data_torch.permute(0, 2, 1).contiguous() - return [(self.map_tensor_name(mapped), permuted)] + yield from super().modify_tensors(permuted, mapped, bid) + return if name.endswith("mlp.experts.gate_up_proj") or name.endswith("mlp.experts.gate_up_proj.weight"): if data_torch.ndim < 3 or data_torch.shape[-1] % 2 != 0: @@ -4192,14 +4129,13 @@ class Qwen2MoeModel(TextModel): mapped_up = f"{base}.up_proj.weight" perm_gate = gate.permute(0, 2, 1).contiguous() perm_up = up.permute(0, 2, 1).contiguous() - return [ - (self.map_tensor_name(mapped_gate), perm_gate), - (self.map_tensor_name(mapped_up), perm_up), - ] + yield from super().modify_tensors(perm_gate, mapped_gate, bid) + yield from super().modify_tensors(perm_up, mapped_up, bid) + return if name.startswith("mlp") or name.startswith("vision_model") or name.startswith("model.vision_tower") or name.startswith("model.multi_modal_projector") or name.startswith("model.visual"): # skip visual tensors - return [] + return if name.find("experts") != -1: n_experts = self.hparams["num_experts"] assert bid is not None @@ -4210,8 +4146,6 @@ class Qwen2MoeModel(TextModel): self._experts[bid][name] = data_torch if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - # merge the experts into a single 3d tensor for w_name in ["down_proj", "gate_proj", "up_proj"]: datas: list[Tensor] = [] @@ -4225,14 +4159,12 @@ class Qwen2MoeModel(TextModel): merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors + yield from super().modify_tensors(data_torch, merged_name, bid) + return else: - return [] + return - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def prepare_tensors(self): super().prepare_tensors() @@ -4312,7 +4244,7 @@ class Qwen3Model(Qwen2Model): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: if "model.vision_" in name: # skip multimodal tensors - return [] + 
return if self.is_rerank: is_tied_head = self.is_tied_embeddings and "embed_tokens" in name @@ -4322,13 +4254,12 @@ class Qwen3Model(Qwen2Model): gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.CLS_OUT] + ".weight", self._get_cls_out_tensor(data_torch), ) + yield cls_out_head if is_tied_head: - embed = (self.map_tensor_name(name), data_torch) - return [cls_out_head, embed] - if is_real_head: - return [cls_out_head] + yield from super().modify_tensors(data_torch, name, bid) + return - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Qwen3MoeForCausalLM") @@ -4366,7 +4297,7 @@ class Qwen3NextModel(Qwen2MoeModel): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: if name.startswith("mtp"): - return [] # ignore MTP layers for now + return # ignore MTP layers for now if name.endswith(".A_log"): data_torch = -torch.exp(data_torch) elif name.endswith(".dt_bias"): @@ -4468,7 +4399,7 @@ class Qwen3VLVisionModel(MmprojModel): assert self.hparams_vision is not None # Skip text model tensors - they go in the text model file if name.startswith("model.language_model.") or name.startswith("lm_head."): - return [] + return if name.startswith("model.visual."): name = name.replace("model.visual.", "visual.", 1) @@ -4493,7 +4424,8 @@ class Qwen3VLVisionModel(MmprojModel): raise ValueError(f"Unexpected deepstack tensor: {name}") new_name = self.format_tensor_name(tensor_type, idx, suffix=f".{suffix}") - return [(new_name, data_torch)] + yield from super().modify_tensors(data_torch, new_name, bid) + return if name.startswith("visual.merger."): suffix = name.split(".", 2)[2] @@ -4513,7 +4445,8 @@ class Qwen3VLVisionModel(MmprojModel): new_name = self.format_tensor_name(gguf.MODEL_TENSOR.V_POST_NORM, suffix=f".{suffix.split('.', 1)[1]}") else: raise ValueError(f"Unexpected merger tensor: {name}") - return [(new_name, data_torch)] + yield (new_name, data_torch) + return if name == "visual.patch_embed.proj.weight": # split Conv3D into Conv2Ds along temporal dimension @@ -4521,20 +4454,21 @@ class Qwen3VLVisionModel(MmprojModel): del c1, c2 if kt != 2: raise ValueError("Current implementation only supports temporal_patch_size of 2") - return [ - (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight", data_torch[:, :, 0, ...]), - (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight.1", data_torch[:, :, 1, ...]), - ] + yield (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight", data_torch[:, :, 0, ...]) + yield (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".weight.1", data_torch[:, :, 1, ...]) + return if name == "visual.patch_embed.proj.bias": # Include the bias - it's used by the C++ code - return [(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".bias", data_torch)] + yield (gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.V_ENC_EMBD_PATCH] + ".bias", data_torch) + return if name.startswith("visual."): - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) + return # Fall back to parent class for other tensors - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Glm4vForConditionalGeneration", "Glm4vMoeForConditionalGeneration") @@ -4557,8 +4491,9 @@ class Glm4VVisionModel(Qwen3VLVisionModel): if name.startswith("model.visual."): name = name.replace("model.visual.", "visual.") if 
name.startswith("visual.merger."): - return [(self.map_tensor_name(name), data_torch)] - return super().modify_tensors(data_torch, name, bid) + yield from ModelBase.modify_tensors(self, data_torch, name, bid) + return + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Qwen3VLForConditionalGeneration") @@ -4576,9 +4511,9 @@ class Qwen3VLTextModel(Qwen3Model): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: # Skip vision tensors - they go in the mmproj file if name.startswith("model.visual."): - return [] + return - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Qwen3VLMoeForConditionalGeneration") @@ -4594,9 +4529,9 @@ class Qwen3VLMoeTextModel(Qwen3MoeModel): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: # Skip vision tensors - they go in the mmproj file if name.startswith("model.visual."): - return [] + return - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("GPT2LMHeadModel") @@ -4613,22 +4548,17 @@ class GPT2Model(TextModel): self.gguf_writer.add_file_type(self.ftype) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - tensors: list[tuple[str, Tensor]] = [] - # we don't need these if name.endswith((".attn.bias", ".attn.masked_bias")): - return tensors + return if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_proj.weight")): data_torch = data_torch.transpose(1, 0) new_name = self.map_tensor_name(name) - tensors.append((new_name, data_torch)) - - return tensors + yield from super().modify_tensors(data_torch, new_name, bid) @ModelBase.register("PhiForCausalLM") @@ -4852,8 +4782,6 @@ class PhiMoeModel(Phi3MiniModel): self._experts[bid][name] = data_torch if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - # merge the experts into a single 3d tensor for w_name in ["w1", "w2", "w3"]: datas: list[Tensor] = [] @@ -4867,14 +4795,12 @@ class PhiMoeModel(Phi3MiniModel): merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight" - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors + yield from super().modify_tensors(data_torch, merged_name, bid) + return else: - return [] + return - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def prepare_tensors(self): super().prepare_tensors() @@ -4920,8 +4846,6 @@ class PlamoModel(TextModel): return data_torch def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - new_name = self.map_tensor_name(name) # shuffle for broadcasting of gqa in ggml_mul_mat @@ -4930,7 +4854,7 @@ class PlamoModel(TextModel): elif new_name.endswith("attn_output.weight"): data_torch = self.shuffle_attn_output_weight(data_torch) - return [(new_name, data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Plamo2ForCausalLM", "PLaMo2ForCausalLM") @@ -4991,8 +4915,6 @@ class Plamo2Model(TextModel): self.gguf_writer.add_file_type(self.ftype) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - 
del bid # unused - if name.endswith(".A_log"): data_torch = -torch.exp(data_torch) elif name.endswith(".dt_bias"): @@ -5021,9 +4943,7 @@ class Plamo2Model(TextModel): elif name.endswith(".norm.weight"): data_torch += 1.0 - new_name = self.map_tensor_name(name) - - return [(new_name, data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Plamo3ForCausalLM", "PLaMo3ForCausalLM") @@ -5072,7 +4992,7 @@ class Plamo3Model(TextModel): elif name.endswith(".norm.weight"): data_torch = data_torch + 1.0 - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("CodeShellForCausalLM") @@ -5234,7 +5154,7 @@ class InternLM2Model(TextModel): name = name.replace("language_model.", "") # InternVL if name.startswith("mlp") or name.startswith("vision_model"): # skip visual tensors - return [] + return if bid is not None and f"model.layers.{bid}.attention.wqkv" in name: qkv = data_torch @@ -5247,13 +5167,11 @@ class InternLM2Model(TextModel): k = LlamaModel.permute(k.reshape((-1, k.shape[-1])), num_heads, num_kv_heads) v = v.reshape((-1, v.shape[-1])) - return [ - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q), - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k), - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v), - ] + yield from super().modify_tensors(q, self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), bid) + yield from super().modify_tensors(k, self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), bid) + yield from super().modify_tensors(v, self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), bid) else: - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("InternLM3ForCausalLM") @@ -5305,12 +5223,12 @@ class InternLM3Model(TextModel): name = name.replace("language_model.", "") # InternVL if name.startswith("mlp") or name.startswith("vision_model"): # skip visual tensors - return [] + return if name.endswith(("q_proj.weight", "q_proj.bias")): data_torch = LlamaModel.permute(data_torch, n_head, n_head) if name.endswith(("k_proj.weight", "k_proj.bias")): data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("BertModel", "BertForMaskedLM", "CamembertModel", "BertForSequenceClassification") @@ -5365,8 +5283,6 @@ class BertModel(TextModel): special_vocab.add_to_gguf(self.gguf_writer) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - if name.startswith("bert."): name = name[5:] @@ -5378,13 +5294,13 @@ class BertModel(TextModel): # we are only using BERT for embeddings so we don't need the pooling layer if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"): - return [] # we don't need these + return # we don't need these if name.startswith("cls.predictions"): - return [] + return if name.startswith("cls.seq_relationship"): - return [] + return if self.cls_out_labels: # For BertForSequenceClassification (direct projection layer) @@ -5394,7 +5310,7 @@ class BertModel(TextModel): if name == "classifier.bias": name = "classifier.out_proj.bias" - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def _xlmroberta_tokenizer_init(self) -> None: # we need the pad_token_id to know how to chop 
down position_embd matrix @@ -5549,9 +5465,9 @@ class DistilBertModel(BertModel): # These layers act as MLM head, so we don't need them if name.startswith("vocab_"): - return [] + return - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("RobertaModel", "RobertaForSequenceClassification") @@ -5594,7 +5510,7 @@ class RobertaModel(BertModel): if self._position_offset is not None: data_torch = data_torch[self._position_offset:,:] - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("NomicBertModel") @@ -5647,7 +5563,7 @@ class NomicBertModel(BertModel): def modify_tensors(self, data_torch: torch.Tensor, name: str, bid: int | None) -> Iterable[tuple[str, torch.Tensor]]: - # If the tensor is an experts bias tensor, skip it by returning an empty list. + # If the tensor is an experts bias tensor, skip it. if "mlp.experts.bias" in name: - return [] # Explicitly return an empty list. + return # Explicitly return. if "mlp.experts.mlp.w1" in name: data_torch = data_torch.view(self.hparams["num_experts"], self.hparams["n_inner"], self.hparams["n_embd"]) @@ -5658,7 +5574,7 @@ class NomicBertModel(BertModel): data_torch = data_torch.transpose(1, 2) name += ".weight" - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def set_gguf_parameters(self): super().set_gguf_parameters() @@ -5698,12 +5614,12 @@ class NeoBert(BertModel): def modify_tensors(self, data_torch, name, bid): if name.startswith("decoder."): - return [] + return if name.startswith("model."): name = name[6:] - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("XLMRobertaModel", "XLMRobertaForSequenceClassification") @@ -5760,7 +5676,7 @@ class XLMRobertaModel(BertModel): if name.endswith(".0.lora_A") or name.endswith(".0.lora_B"): if name.startswith("pooler.dense"): - return [] + return num_loras = data_torch.size(0) assert num_loras == len(self._lora_names) @@ -5776,9 +5692,9 @@ class XLMRobertaModel(BertModel): new_name = new_name[:-1] + ("a" if new_name[-1:] == "b" else "b") lora_writer.add_tensor(new_name, data.float().numpy(), raw_dtype=gguf.GGMLQuantizationType.F32) - return [] + return - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) def set_gguf_parameters(self): super().set_gguf_parameters() @@ -5837,19 +5753,17 @@ class GemmaModel(TextModel): self.gguf_writer.add_file_type(self.ftype) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - # lm_head is not used in llama.cpp, while autoawq will include this tensor in model # To prevent errors, skip loading lm_head.weight. 
if name == "lm_head.weight": logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.") - return [] + return # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89 if name.endswith("norm.weight"): data_torch = data_torch + 1 - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Gemma2ForCausalLM") @@ -5883,19 +5797,17 @@ class Gemma2Model(TextModel): self.gguf_writer.add_sliding_window(self.hparams["sliding_window"]) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - # lm_head is not used in llama.cpp, while autoawq will include this tensor in model # To prevent errors, skip loading lm_head.weight. if name == "lm_head.weight": logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.") - return [] + return # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89 if name.endswith("norm.weight"): data_torch = data_torch + 1 - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Gemma3ForCausalLM", "Gemma3ForConditionalGeneration") @@ -5930,14 +5842,12 @@ class Gemma3Model(TextModel): self.gguf_writer.add_head_count_kv(hparams.get("num_key_value_heads", 4)) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - if "language_model." in name: name = name.replace("language_model.", "") elif name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \ or name.startswith("multimodal_projector.") or name.startswith("vision_model."): - return [] # skip vision tensors + return # skip vision tensors # remove OOV (out-of-vocabulary) rows in token_embd if "embed_tokens.weight" in name: @@ -5953,7 +5863,7 @@ class Gemma3Model(TextModel): if name.endswith("norm.weight"): data_torch = data_torch + self.norm_shift - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Gemma3TextModel") @@ -6059,10 +5969,8 @@ class Gemma3VisionModel(MmprojModel): return super().tensor_force_quant(name, new_name, bid, n_dims) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - if "vision_model.head." 
in name: - return [] # skip redundant tensors for tinygemma3 + return # skip redundant tensors for tinygemma3 if name.startswith("multi_modal_projector.") or name.startswith("vision_tower.") \ or name.startswith("multimodal_projector.") or name.startswith("vision_model."): @@ -6076,9 +5984,9 @@ class Gemma3VisionModel(MmprojModel): logger.info(f"Correcting norm value for '{name}'") data_torch = data_torch + 1 - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) - return [] # skip other tensors + return # skip other tensors class ConformerAudioModel(MmprojModel): @@ -6103,7 +6011,7 @@ class ConformerAudioModel(MmprojModel): self._batch_norm_tensors[bid][name] = data_torch if len(self._batch_norm_tensors[bid]) < 5: - return [] + return weight = self._batch_norm_tensors[bid][f"conformer.layers.{bid}.conv.batch_norm.weight"] bias = self._batch_norm_tensors[bid][f"conformer.layers.{bid}.conv.batch_norm.bias"] @@ -6113,10 +6021,9 @@ class ConformerAudioModel(MmprojModel): a = weight / torch.sqrt(running_var + eps) b = bias - running_mean * a - return [ - (self.map_tensor_name(f"conformer.layers.{bid}.conv.batch_norm.weight"), a), - (self.map_tensor_name(f"conformer.layers.{bid}.conv.batch_norm.bias"), b), - ] + yield from super().modify_tensors(a, f"conformer.layers.{bid}.conv.batch_norm.weight", bid) + yield from super().modify_tensors(b, f"conformer.layers.{bid}.conv.batch_norm.bias", bid) + return # reshape conv weights if name.startswith("conformer.pre_encode.conv.") and name.endswith(".bias"): @@ -6128,7 +6035,7 @@ class ConformerAudioModel(MmprojModel): assert data_torch.shape[2] == 1 data_torch = data_torch.reshape(data_torch.shape[0], data_torch.shape[1]) - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Gemma3nForConditionalGeneration") @@ -6227,14 +6134,14 @@ class Gemma3nVisionAudioModel(ConformerAudioModel): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: if (ConformerAudioModel.is_audio_tensor(name)): name = name.replace("model.audio_tower.conformer.", "conformer.layers.") - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) # Gemma3n uses # - model.embed_vision.* for projection layers # - model.vision_tower.* for vision encoder # Skip non-vision tensors if not (name.startswith("model.embed_vision.") or name.startswith("model.vision_tower.")): - return [] + return if name.startswith("model.vision_tower.timm_model.blocks."): # Double-indexed block tensors through custom logic @@ -6246,7 +6153,7 @@ class Gemma3nVisionAudioModel(ConformerAudioModel): if new_name.endswith("conv_stem.conv.bias") or new_name.endswith("layer_scale.gamma"): data_torch = data_torch.unsqueeze(0).unsqueeze(-1).unsqueeze(-1) # [1, C, 1, 1] - return [(new_name, data_torch)] + yield from super().modify_tensors(data_torch, new_name, bid) @ModelBase.register("Gemma3nForCausalLM", "Gemma3nForConditionalGeneration") @@ -6324,7 +6231,7 @@ class Gemma3NModel(Gemma3Model): # TODO: implement self.prediction_coefs.weight.clamp_(...) if "language_model." 
not in name: - return [] # skip non-language model tensors + return # skip non-language model tensors # Pad token embeddings for vision/audio special tokens (262144-262399) if "embed_tokens.weight" in name or "embed_tokens_per_layer" in name: @@ -6346,7 +6253,8 @@ class Gemma3NModel(Gemma3Model): # Continue with normal processing name = name.replace("language_model.", "") - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) + return if "altup_unembed_projections" in name: data_torch = data_torch.to(device="cpu") @@ -6362,9 +6270,10 @@ class Gemma3NModel(Gemma3Model): raise ValueError(f"Unknown name: {name}") out = self._stack_matrices(self._altup_unembd) if out is not None: - return [(self.map_tensor_name("model.altup_unembed_projections.weight"), out)] + yield from super().modify_tensors(out, "model.altup_unembed_projections.weight", bid) + return else: - return [] + return if "altup_projections" in name: data_torch = data_torch.to(device="cpu") @@ -6378,11 +6287,12 @@ class Gemma3NModel(Gemma3Model): raise ValueError(f"Unknown name: {name}") out = self._stack_matrices(self._altup_proj) if out is not None: - return [(self.map_tensor_name("model.altup_projections.weight"), out)] + yield from super().modify_tensors(out, "model.altup_projections.weight", bid) + return else: - return [] + return - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Starcoder2ForCausalLM") @@ -6765,11 +6675,11 @@ class MambaModel(TextModel): if self._tok_embd is not None and new_name == output_name: if torch.equal(self._tok_embd, data_torch): logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting") - return [] + return elif new_name == tok_embd_name: self._tok_embd = data_torch - return [(new_name, data_torch)] + yield from super().modify_tensors(data_torch, new_name, bid) @ModelBase.register("Mamba2ForCausalLM") @@ -7025,8 +6935,6 @@ class OlmoModel(TextModel): # Same as super class, but permuting q_proj, k_proj # Copied from: LlamaModel def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - n_head = self.hparams["num_attention_heads"] n_kv_head = self.hparams.get("num_key_value_heads") @@ -7035,7 +6943,7 @@ class OlmoModel(TextModel): if name.endswith("k_proj.weight"): data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("SeedOssForCausalLM") @@ -7091,8 +6999,6 @@ class OlmoeModel(TextModel): self._experts[bid][name] = data_torch if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - # merge the experts into a single 3d tensor for w_name in ["down_proj", "gate_proj", "up_proj"]: datas: list[Tensor] = [] @@ -7106,14 +7012,12 @@ class OlmoeModel(TextModel): merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors + yield from super().modify_tensors(data_torch, merged_name, bid) + return else: - return [] + return - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) # Copied from: Qwen2MoeModel def prepare_tensors(self): @@ -7336,8 +7240,6 @@ class ArcticModel(TextModel): self._experts[bid][name] = data_torch if len(self._experts[bid]) >= n_experts 
* 3: - tensors: list[tuple[str, Tensor]] = [] - # merge the experts into a single 3d tensor for wid in ["w1", "w2", "w3"]: datas: list[Tensor] = [] @@ -7351,14 +7253,12 @@ class ArcticModel(TextModel): merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight" - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors + yield from super().modify_tensors(data_torch, merged_name, bid) + return else: - return [] + return - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def prepare_tensors(self): super().prepare_tensors() @@ -7425,8 +7325,6 @@ class DeepseekModel(TextModel): self._experts[bid][name] = data_torch if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - # merge the experts into a single 3d tensor for w_name in ["down_proj", "gate_proj", "up_proj"]: datas: list[Tensor] = [] @@ -7440,14 +7338,12 @@ class DeepseekModel(TextModel): merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors + yield from super().modify_tensors(data_torch, merged_name, bid) + return else: - return [] + return - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def prepare_tensors(self): super().prepare_tensors() @@ -7583,9 +7479,9 @@ class DeepseekV2Model(TextModel): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: # skip vision tensors and remove "language_model." for Kimi-VL if "vision_tower" in name or "multi_modal_projector" in name: - return [] + return if name.startswith("siglip2.") or name.startswith("merger."): - return [] + return if name.startswith("language_model."): name = name.replace("language_model.", "") @@ -7593,7 +7489,7 @@ class DeepseekV2Model(TextModel): if self.hparams.get("tie_word_embeddings", False): if name == "lm_head.weight" or name == "model.lm_head.weight": logger.info("Skipping tied output layer 'lm_head.weight' (will use token_embd.weight)") - return [] + return # rename e_score_correction_bias tensors if name.endswith("e_score_correction_bias"): @@ -7603,7 +7499,7 @@ class DeepseekV2Model(TextModel): block_count = self.hparams["num_hidden_layers"] match = re.match(r"model.layers.(\d+)", name) if match and int(match.group(1)) >= block_count: - return [] + return # process the experts separately if name.find("mlp.experts") != -1: @@ -7616,8 +7512,6 @@ class DeepseekV2Model(TextModel): self._experts[bid][name] = data_torch if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - # merge the experts into a single 3d tensor for w_name in ["down_proj", "gate_proj", "up_proj"]: datas: list[Tensor] = [] @@ -7631,12 +7525,10 @@ class DeepseekV2Model(TextModel): merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors + yield from super().modify_tensors(data_torch, merged_name, bid) + return else: - return [] + return # note: MLA with the absorption optimization, needs these two split and k_b_proj transposed if name.endswith("kv_b_proj.weight"): @@ -7653,12 +7545,11 @@ class DeepseekV2Model(TextModel): k_b, v_b = torch.split(kv_b, [qk_nope_head_dim, v_head_dim], dim=1) k_b = k_b.transpose(1, 2) - return [ - (self.map_tensor_name(name_kb), k_b), - 
(self.map_tensor_name(name_vb), v_b) - ] + yield from super().modify_tensors(k_b, name_kb, bid) + yield from super().modify_tensors(v_b, name_vb, bid) + return - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def prepare_tensors(self): super().prepare_tensors() @@ -7700,9 +7591,8 @@ class MiniMaxM2Model(TextModel): # not enough expert weights to merge if len(expert_cache) < n_experts * len(expert_weights): - return [] + return - tensors: list[tuple[str, Tensor]] = [] for w_name in expert_weights: datas: list[Tensor] = [] @@ -7714,12 +7604,12 @@ class MiniMaxM2Model(TextModel): data_torch = torch.stack(datas, dim=0) merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight" new_name = self.map_tensor_name(merged_name) - tensors.append((new_name, data_torch)) + yield from super().modify_tensors(data_torch, new_name, bid) del self._experts_cache[bid] - return tensors + return - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("MiMoV2FlashForCausalLM") @@ -7761,7 +7651,7 @@ class MimoV2Model(TextModel): # TODO: mimo v2 does not indicate the number of next-token-prediction layers, therefore we cannot do the same way as GLM4_MOE if "model.mtp." in name: - return [] + return # process the experts separately if name.find("mlp.experts") != -1: @@ -7774,8 +7664,6 @@ class MimoV2Model(TextModel): self._experts[bid][name] = data_torch if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - # merge the experts into a single 3d tensor for w_name in ["gate_proj", "up_proj", "down_proj"]: datas: list[Tensor] = [] @@ -7787,13 +7675,12 @@ class MimoV2Model(TextModel): data_torch = torch.stack(datas, dim=0) merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - new_name = self.map_tensor_name(merged_name) - tensors.append((new_name, data_torch)) - return tensors + yield from super().modify_tensors(data_torch, merged_name, bid) + return else: - return [] - return [(self.map_tensor_name(name), data_torch)] + return + yield from super().modify_tensors(data_torch, name, bid) def prepare_tensors(self): super().prepare_tensors() @@ -7837,8 +7724,8 @@ class PanguEmbeddedModel(TextModel): if name == "lm_head.weight": if self.hparams.get("tie_word_embeddings", False): logger.info("Skipping tied output layer 'lm_head.weight'") - return [] - return [(self.map_tensor_name(name), data_torch)] + return + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Dots1ForCausalLM") @@ -7860,8 +7747,9 @@ class Dots1Model(Qwen2MoeModel): if name.endswith("e_score_correction_bias"): name = name.replace("e_score_correction_bias", "e_score_correction.bias") if "shared_experts" in name: - return [(self.map_tensor_name(name), data_torch)] - return super().modify_tensors(data_torch, name, bid) + yield from ModelBase.modify_tensors(self, data_torch, name, bid) + else: + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("PLMForCausalLM") @@ -7880,9 +7768,6 @@ class PLMModel(TextModel): self.gguf_writer.add_value_length(hparams["v_head_dim"]) self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"]) - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - return [(self.map_tensor_name(name), data_torch)] - def prepare_tensors(self): super().prepare_tensors() @@ -8013,8 +7898,6 @@ class T5Model(TextModel): 
@@ -8013,8 +7898,6 @@ class T5Model(TextModel):
         self.gguf_writer.add_file_type(self.ftype)

     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        del bid  # unused
-
         # T5 based models contain shared token embeddings tensors saved randomly as either "encoder.embed_tokens.weight",
         # "decoder.embed_tokens.weight" or "shared.weight" tensor. In some models there are even multiple of them stored
         # in the safetensors files. We use the first tensor from these three as the token embeddings for both encoder
@@ -8025,9 +7908,9 @@ class T5Model(TextModel):
                 self.shared_token_embeddings_found = True
             else:
                 logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
-                return []
+                return

-        return [(self.map_tensor_name(name), data_torch)]
+        yield from super().modify_tensors(data_torch, name, bid)


 @ModelBase.register("T5EncoderModel")
@@ -8149,8 +8032,6 @@ class T5EncoderModel(TextModel):
         self.gguf_writer.add_file_type(self.ftype)

     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        del bid  # unused
-
         # T5 based models contain shared token embeddings tensors saved randomly as either "encoder.embed_tokens.weight",
         # "decoder.embed_tokens.weight" or "shared.weight" tensor. In some models there are even multiple of them stored
         # in the safetensors files. We use the first tensor from these three as the token embeddings for both encoder
@@ -8161,9 +8042,9 @@ class T5EncoderModel(TextModel):
                 self.shared_token_embeddings_found = True
             else:
                 logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
-                return []
+                return

-        return [(self.map_tensor_name(name), data_torch)]
+        yield from super().modify_tensors(data_torch, name, bid)


 @ModelBase.register("JAISLMHeadModel")
@@ -8211,13 +8092,9 @@ class JaisModel(TextModel):
         self.gguf_writer.add_file_type(self.ftype)

     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        del bid  # unused
-
-        tensors: list[tuple[str, Tensor]] = []
-
         # we don't need these
         if name.endswith((".attn.bias")):
-            return tensors
+            return

         if name.endswith(("relative_pe.slopes")):
             # Calculate max ALiBi bias (this is the inverse of the ALiBi calculation)
@@ -8228,7 +8105,7 @@ class JaisModel(TextModel):
             first_val = float(data_torch[0].item())
             self.max_alibi_bias = -round(math.log2(first_val) * n_head_closest_log2)

-            return tensors
+            return

         if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_fc2.weight")):
             data_torch = data_torch.transpose(1, 0)
@@ -8236,13 +8113,11 @@ class JaisModel(TextModel):
         new_name = self.map_tensor_name(name)

         if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
-            tensors.append((new_name, data_torch * self.embeddings_scale))
+            yield from super().modify_tensors(data_torch * self.embeddings_scale, new_name, bid)
         elif new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
-            tensors.append((new_name, data_torch * self.width_scale))
+            yield from super().modify_tensors(data_torch * self.width_scale, new_name, bid)
         else:
-            tensors.append((new_name, data_torch))
-
-        return tensors
+            yield from super().modify_tensors(data_torch, new_name, bid)

     def prepare_tensors(self):
         super().prepare_tensors()
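# Illustrative sketch, not part of the patch: recovering the max ALiBi bias
# from the first slope, as the JaisModel hunk above does. It inverts the usual
# slope formula slope_0 = 2 ** (-max_bias / n_head_closest_log2); numbers here
# are synthetic.
import math

n_head = 20
n_head_closest_log2 = 2 ** math.floor(math.log2(n_head))  # 16
max_alibi_bias = 8.0
first_slope = 2 ** (-max_alibi_bias / n_head_closest_log2)

recovered = -round(math.log2(first_slope) * n_head_closest_log2)
assert recovered == max_alibi_bias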
name.startswith("model.language_model."): name = name.replace("language_model.", "") # for Glm4v if self.use_mrope: @@ -8321,7 +8196,7 @@ class Glm4Model(TextModel): data_torch = Glm4Model.normal_to_neox(data_torch, n_head, n_head, head_dim, self.partial_rotary_factor) if name.endswith(("k_proj.weight", "k_proj.bias")): data_torch = Glm4Model.normal_to_neox(data_torch, n_head, n_kv_head, head_dim, self.partial_rotary_factor) - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("Glm4MoeForCausalLM", "Glm4vMoeForConditionalGeneration") @@ -8396,13 +8271,14 @@ class Glm4MoeModel(TextModel): self, data_torch: Tensor, name: str, bid: int | None ) -> Iterable[tuple[str, Tensor]]: if name.startswith("model.visual."): # ignore visual part - return [] + return elif name.startswith("model.language_model."): name = name.replace("language_model.", "") # for multimodal variants # Handle main token embedding (but not layer-specific NextN embeddings) if name == "model.embed_tokens.weight" and ".layers." not in name: - return [(self.map_tensor_name("token_embd.weight"), data_torch)] + yield from super().modify_tensors(data_torch, "token_embd.weight", bid) + return # Handle routed experts if name.find("mlp.experts") != -1: @@ -8415,8 +8291,6 @@ class Glm4MoeModel(TextModel): self._experts[bid][name] = data_torch if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - # merge the experts into a single 3d tensor for w_name in ["down_proj", "gate_proj", "up_proj"]: datas: list[Tensor] = [] @@ -8430,18 +8304,15 @@ class Glm4MoeModel(TextModel): merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - new_name = self.map_tensor_name(merged_name) - tensors.append((new_name, data_torch)) - return tensors + yield from super().modify_tensors(data_torch, merged_name, bid) + return else: - return [] + return if name.endswith("e_score_correction_bias"): name = name.replace("e_score_correction_bias", "e_score_correction.bias") - new_name = self.map_tensor_name(name) - - return [(new_name, data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def prepare_tensors(self): super().prepare_tensors() @@ -8624,13 +8495,11 @@ class ChatGLMModel(TextModel): self.gguf_writer.add_rope_freq_base(rope_freq) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - if name.endswith(".rotary_pos_emb.inv_freq") or name.startswith("model.vision."): - return [] + return name = name.removeprefix("transformer.") - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("NemotronForCausalLM") @@ -8671,7 +8540,7 @@ class NemotronModel(TextModel): if name.endswith("norm.weight"): data_torch = data_torch + 1 - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("ExaoneForCausalLM") @@ -8827,11 +8696,9 @@ class ExaoneMoEModel(Exaone4Model): new_name = remapper[_n.stem] + _n.suffix # set shared weights for all NextN/MTP layers - tensors = [] for bid in range(self.hparams['num_hidden_layers'], self.block_count): - new_name = new_name.format(bid=bid) - tensors.append((self.map_tensor_name(new_name), data_torch)) - return tensors + yield from super().modify_tensors(data_torch, new_name.format(bid=bid), bid) + return if name.endswith("e_score_correction_bias"): name = 
name.replace("e_score_correction_bias", "e_score_correction.bias") @@ -8846,8 +8713,6 @@ class ExaoneMoEModel(Exaone4Model): self._experts[bid][name] = data_torch if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - # merge the experts into a single 3d tensor for w_name in ["down_proj", "gate_proj", "up_proj"]: datas: list[Tensor] = [] @@ -8863,12 +8728,12 @@ class ExaoneMoEModel(Exaone4Model): new_name = self.map_tensor_name(merged_name) - tensors.append((new_name, data_torch)) - return tensors + yield from super().modify_tensors(data_torch, new_name, bid) + return else: - return [] + return - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def prepare_tensors(self): super().prepare_tensors() @@ -8938,10 +8803,8 @@ class GraniteMoeModel(GraniteModel): ffn_dim = self.hparams["intermediate_size"] assert data_torch.shape[-2] == 2 * ffn_dim, "Merged FFN tensor size must be 2 * intermediate_size" gate, up = data_torch.split(ffn_dim, dim=-2) - return [ - (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_EXP, bid), gate), - (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_EXP, bid), up), - ] + yield from super().modify_tensors(gate, self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_EXP, bid), bid) + yield from super().modify_tensors(up, self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_EXP, bid), bid) has_experts = bool(self.hparams.get('num_local_experts')) @@ -8950,21 +8813,18 @@ class GraniteMoeModel(GraniteModel): assert data_torch.shape[-2] == 2 * ffn_dim, "Merged FFN tensor size must be 2 * shared_intermediate_size" gate, up = data_torch.split(ffn_dim, dim=-2) if has_experts: - return [ - (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_SHEXP, bid), gate), - (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_SHEXP, bid), up), - ] - return [ - (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), gate), - (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), up), - ] + yield from super().modify_tensors(gate,self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE_SHEXP, bid), bid) + yield from super().modify_tensors(up, self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP_SHEXP, bid), bid) + return + yield from super().modify_tensors(gate, self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), bid) + yield from super().modify_tensors(up, self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), bid) + return if not has_experts and name.endswith("shared_mlp.output_linear.weight"): - return [ - (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_DOWN, bid), data_torch) - ] + yield from super().modify_tensors(data_torch, self.format_tensor_name(gguf.MODEL_TENSOR.FFN_DOWN, bid), bid) + return - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("GraniteMoeHybridForCausalLM", "BambaForCausalLM") @@ -9058,7 +8918,7 @@ class GraniteHybridModel(Mamba2Model, GraniteMoeModel): return Mamba2Model.modify_tensors(self, data_torch, name, bid) elif bid in self._attn_layers: return GraniteMoeModel.modify_tensors(self, data_torch, name, bid) - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def set_gguf_parameters(self): """This method merges params from both parents and some that are @@ -9190,34 +9050,34 @@ class NemotronHModel(GraniteHybridModel): if self.is_moe and bid is not None: if name.endswith("mixer.gate.e_score_correction_bias"): new_name = 
name.replace("e_score_correction_bias", "e_score_correction.bias") - mapped_name = self.map_tensor_name(new_name) - return [(mapped_name, data_torch)] + yield from super().modify_tensors(data_torch, new_name, bid) + return if name.endswith("mixer.dt_bias"): new_name = name.replace("dt_bias", "dt.bias") - mapped_name = self.map_tensor_name(new_name) - return [(mapped_name, data_torch)] + yield from super().modify_tensors(data_torch, new_name, bid) + return if name.endswith("mixer.conv1d.weight"): squeezed_data = data_torch.squeeze() - mapped_name = self.map_tensor_name(name) - return [(mapped_name, squeezed_data)] + yield from super().modify_tensors(squeezed_data, name, bid) + return if name.endswith("mixer.A_log"): transformed_data = -torch.exp(data_torch) reshaped_data = transformed_data.squeeze().reshape(-1, 1) - mapped_name = self.map_tensor_name(name) - return [(mapped_name, reshaped_data)] + yield from super().modify_tensors(reshaped_data, name, bid) + return if name.endswith("mixer.D"): reshaped_data = data_torch.squeeze().reshape(-1, 1) - mapped_name = self.map_tensor_name(name) - return [(mapped_name, reshaped_data)] + yield from super().modify_tensors(reshaped_data, name, bid) + return if name.endswith("mixer.norm.weight"): reshaped_data = data_torch.reshape(self.n_group, -1) - mapped_name = self.map_tensor_name(name) - return [(mapped_name, reshaped_data)] + yield from super().modify_tensors(reshaped_data, name, bid) + return if name.find("mixer.experts") != -1: n_experts = self.hparams["n_routed_experts"] @@ -9230,7 +9090,6 @@ class NemotronHModel(GraniteHybridModel): if len(self._experts[bid]) >= n_experts * 2: # merge the experts into a single tensor - tensors: list[tuple[str, Tensor]] = [] for w_name in ["down_proj", "up_proj"]: datas: list[Tensor] = [] @@ -9241,14 +9100,13 @@ class NemotronHModel(GraniteHybridModel): data_torch = torch.stack(datas, dim=0) merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - new_name = self.map_tensor_name(merged_name) - tensors.append((new_name, data_torch)) - return tensors + yield from super().modify_tensors(data_torch, merged_name, bid) + return else: - return [] + return - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) def prepare_tensors(self): super().prepare_tensors() @@ -9307,21 +9165,19 @@ class BailingMoeModel(TextModel): output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT) if name.endswith("attention.dense.weight"): - return [(self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_OUT, bid), data_torch)] + yield from super().modify_tensors(data_torch, self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_OUT, bid), bid) + return elif name.endswith("query_key_value.weight"): q, k, v = data_torch.split([n_head * head_dim, n_kv_head * head_dim, n_kv_head * head_dim], dim=-2) - return [ - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), BailingMoeModel.permute(q, n_head, n_head)), - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), BailingMoeModel.permute(k, n_head, n_kv_head)), - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v) - ] + yield from super().modify_tensors(BailingMoeModel.permute(q, n_head, n_head), self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), bid) + yield from super().modify_tensors(BailingMoeModel.permute(k, n_head, n_kv_head), self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), bid) + yield from super().modify_tensors(v,self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), bid) + return elif 
name.find("mlp.experts") != -1: n_experts = self.hparams["num_experts"] assert bid is not None - tensors: list[tuple[str, Tensor]] = [] - if self._experts is None: self._experts = [{} for _ in range(self.block_count)] @@ -9343,9 +9199,9 @@ class BailingMoeModel(TextModel): new_name = self.map_tensor_name(merged_name) - tensors.append((new_name, data_torch)) + yield from super().modify_tensors(data_torch, new_name, bid) - return tensors + return new_name = self.map_tensor_name(name) @@ -9353,7 +9209,7 @@ class BailingMoeModel(TextModel): data_torch = data_torch.float() data_torch /= torch.norm(data_torch, p=2, dim=0, keepdim=True) + 1e-7 - return [(new_name, data_torch)] + yield from super().modify_tensors(data_torch, new_name, bid) def prepare_tensors(self): super().prepare_tensors() @@ -9404,8 +9260,6 @@ class BailingMoeV2Model(TextModel): n_experts = self.hparams["num_experts"] assert bid is not None - tensors: list[tuple[str, Tensor]] = [] - if self._experts is None: self._experts = [{} for _ in range(self.block_count)] @@ -9425,16 +9279,13 @@ class BailingMoeV2Model(TextModel): merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - - return tensors + yield from super().modify_tensors(data_torch, merged_name, bid) + return if name.endswith(".expert_bias"): name = name.replace(".expert_bias", ".expert_bias.bias") - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def prepare_tensors(self): super().prepare_tensors() @@ -9470,7 +9321,7 @@ class GroveMoeModel(TextModel): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: if name.endswith(".expert_bias"): # FIXME?: Unused https://huggingface.co/inclusionAI/GroveMoE-Inst/blob/c4c69e5970d18907b5e6ddccdfd55176fe292df1/modeling_grove_moe.py#L303 - return [] + return # process the experts separately if name.find("chunk_experts") != -1: @@ -9483,8 +9334,6 @@ class GroveMoeModel(TextModel): self._chunk_experts[bid][name] = data_torch if len(self._chunk_experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - # merge the experts into a single 3d tensor for w_name in ["down_proj", "gate_proj", "up_proj"]: datas: list[Tensor] = [] @@ -9498,12 +9347,10 @@ class GroveMoeModel(TextModel): merged_name = f"model.layers.{bid}.mlp.chunk_experts.{w_name}.weight" - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors + yield from super().modify_tensors(data_torch, merged_name, bid) + return else: - return [] + return elif name.find("experts") != -1: n_experts = self.hparams["num_experts"] assert bid is not None @@ -9514,8 +9361,6 @@ class GroveMoeModel(TextModel): self._experts[bid][name] = data_torch if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - # merge the experts into a single 3d tensor for w_name in ["down_proj", "gate_proj", "up_proj"]: datas: list[Tensor] = [] @@ -9529,14 +9374,12 @@ class GroveMoeModel(TextModel): merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors + yield from super().modify_tensors(data_torch, merged_name, bid) + return else: - return [] + return - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) def prepare_tensors(self): 
@@ -9570,7 +9413,7 @@ class ChameleonModel(TextModel):
         # ignore image tokenizer for now
         # TODO: remove this once image support is implemented for Chameleon
         if name.startswith("model.vqmodel"):
-            return []
+            return

         n_head = self.hparams["num_attention_heads"]
         n_kv_head = self.hparams.get("num_key_value_heads")
@@ -9585,7 +9428,7 @@ class ChameleonModel(TextModel):
         if name.endswith(("k_norm.weight", "k_norm.bias")):
             data_torch = ChameleonModel._reverse_hf_permute(data_torch, n_kv_head, hidden_dim)

-        return [(self.map_tensor_name(name), data_torch)]
+        yield from super().modify_tensors(data_torch, name, bid)

     # see: https://github.com/huggingface/transformers/blob/72fb02c47dbbe1999ae105319f24631cad6e2e00/src/transformers/models/chameleon/convert_chameleon_weights_to_hf.py#L176-L203
     @staticmethod
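# Illustrative sketch, not part of the patch, assuming the usual HF rotary
# permutation idea behind the q_norm/k_norm reverse permute above: a per-head
# weight stored as [x0..x_{d/2-1}, y0..y_{d/2-1}] is re-interleaved back to
# [x0, y0, x1, y1, ...]. head_dim here is synthetic.
import torch

head_dim = 8
w = torch.arange(head_dim, dtype=torch.float32)
half = w.view(2, head_dim // 2)      # rows: first half, second half
interleaved = half.t().reshape(-1)   # x0, y0, x1, y1, ...
assert torch.equal(interleaved[::2], half[0]) and torch.equal(interleaved[1::2], half[1])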
@@ -9630,11 +9473,9 @@ class GlmASRWhisperEncoderModel(MmprojModel):
         return super().tensor_force_quant(name, new_name, bid, n_dims)

     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        del bid  # unused
-
         if name.startswith("model.") or name.startswith("lm_head."):
             # skip language model tensors
-            return []
+            return

         if name.startswith("audio_encoder.whisper."):
             name = name.replace("audio_encoder.whisper.","audio_tower.")
@@ -9642,7 +9483,9 @@ class GlmASRWhisperEncoderModel(MmprojModel):
             name = name.replace("audio_encoder.", "audio_encoder.adapting.")

         if name.startswith("audio_encoder.audio_bos_eos_token."):
-            return [(self.map_tensor_name("model.vision.boi"), data_torch[0]), (self.map_tensor_name("model.vision.eoi"), data_torch[1])]
+            yield from super().modify_tensors(data_torch[0], "model.vision.boi", bid)
+            yield from super().modify_tensors(data_torch[1], "model.vision.eoi", bid)
+            return

         if name.startswith("audio_encoder.adapting."):
             name = name.replace("audio_encoder.adapting.","audio.multi_modal_projector.")
@@ -9653,13 +9496,13 @@ class GlmASRWhisperEncoderModel(MmprojModel):
             if ".2." in name:
                 name = name.replace(".2.", ".linear_2.")
         if ".proj." in name:
-            return []
+            return

         if "conv1.bias" in name or "conv2.bias" in name:
             # transpose conv1 and conv2 bias
             data_torch = data_torch.unsqueeze(-1)

-        return [(self.map_tensor_name(name), data_torch)]
+        yield from super().modify_tensors(data_torch, name, bid)


 @ModelBase.register("Qwen2AudioForConditionalGeneration")
@@ -9686,11 +9529,9 @@ class WhisperEncoderModel(MmprojModel):
         return super().tensor_force_quant(name, new_name, bid, n_dims)

     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        del bid  # unused
-
         if name.startswith("language_model."):
             # skip language model tensors
-            return []
+            return

         # prevent clash naming with vision tensors
         if name.startswith("multi_modal_projector"):
@@ -9700,7 +9541,7 @@ class WhisperEncoderModel(MmprojModel):
             # transpose conv1 and conv2 bias
             data_torch = data_torch.unsqueeze(-1)

-        return [(self.map_tensor_name(name), data_torch)]
+        yield from super().modify_tensors(data_torch, name, bid)


 @ModelBase.register("UltravoxModel")
@@ -9944,7 +9785,7 @@ class HunYuanMoEModel(TextModel):
         if name == "lm_head.weight":
             if self.hparams.get("tie_word_embeddings", False):
                 logger.info("Skipping tied output layer 'lm_head.weight'")
-                return []
+                return

         if name.find("mlp.experts") != -1:
             n_experts = self.hparams["num_experts"]
@@ -9957,7 +9798,6 @@ class HunYuanMoEModel(TextModel):

             if len(self._experts[bid]) >= n_experts * 3:
                 # merge the experts into a single 3d tensor
-                tensors: list[tuple[str, Tensor]] = []
                 for w_name in ["down_proj", "gate_proj", "up_proj"]:
                     datas: list[Tensor] = []
@@ -9968,14 +9808,13 @@ class HunYuanMoEModel(TextModel):
                     data_torch = torch.stack(datas, dim=0)

                     merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
-                    new_name = self.map_tensor_name(merged_name)
-                    tensors.append((new_name, data_torch))
-                return tensors
+                    yield from super().modify_tensors(data_torch, merged_name, bid)
+                return
             else:
-                return []
+                return

-        return [(self.map_tensor_name(name), data_torch)]
+        yield from super().modify_tensors(data_torch, name, bid)

     def prepare_tensors(self):
         super().prepare_tensors()
@@ -10020,8 +9859,6 @@ class LLaDAMoEModel(TextModel):
             self._experts[bid][name] = data_torch

             if len(self._experts[bid]) >= n_experts * 3:
-                tensors: list[tuple[str, Tensor]] = []
-
                 # merge the experts into a single 3d tensor
                 for w_name in ["down_proj", "gate_proj", "up_proj"]:
                     datas: list[Tensor] = []
@@ -10035,14 +9872,12 @@ class LLaDAMoEModel(TextModel):

                     merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"

-                    new_name = self.map_tensor_name(merged_name)
-
-                    tensors.append((new_name, data_torch))
-                return tensors
+                    yield from super().modify_tensors(data_torch, merged_name, bid)
+                return
             else:
-                return []
+                return

-        return [(self.map_tensor_name(name), data_torch)]
+        yield from super().modify_tensors(data_torch, name, bid)

     # Copied from: Qwen2MoeModel
     def prepare_tensors(self):
@@ -10141,9 +9976,9 @@ class HunYuanModel(TextModel):
         if name == "lm_head.weight":
             if self.hparams.get("tie_word_embeddings", False):
                 logger.info("Skipping tied output layer 'lm_head.weight'")
-                return []
+                return

-        return [(self.map_tensor_name(name), data_torch)]
+        yield from super().modify_tensors(data_torch, name, bid)


 @ModelBase.register("SmolLM3ForCausalLM")
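# Illustrative sketch, not part of the patch: the conv bias transpose in the
# Whisper encoder hunks above. ggml expects the Conv1d bias as a column, so a
# trailing axis is added; the size is synthetic.
import torch

bias = torch.randn(1280)
bias = bias.unsqueeze(-1)
assert bias.shape == (1280, 1)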
@@ -10223,8 +10058,6 @@ class GptOssModel(TextModel):
         return []

     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        del bid  # unused
-
         if "sinks" in name:
             name += ".weight"

@@ -10238,7 +10071,7 @@ class GptOssModel(TextModel):
                 data_torch = data_torch.transpose(-1, -2)
             else:
                 # otherwise, it should already be repacked to ggml MXFP4 format
-                return []
+                return

         # split the gate_up into gate and up
         if "gate_up_proj" in name:
@@ -10246,25 +10079,18 @@ class GptOssModel(TextModel):
                 name_up = name.replace("gate_up_proj_bias", "up_proj.bias")
                 name_gate = name.replace("gate_up_proj_bias", "gate_proj.bias")
                 gate_proj_bias, up_proj_bias = data_torch[..., ::2], data_torch[..., 1::2]
-                return [
-                    (self.map_tensor_name(name_gate), gate_proj_bias),
-                    (self.map_tensor_name(name_up), up_proj_bias)
-                ]
+                yield from super().modify_tensors(gate_proj_bias, name_gate, bid)
+                yield from super().modify_tensors(up_proj_bias, name_up, bid)
             elif "_blocks" not in name and "_scales" not in name:
                 logger.warning(f"{name} is not in MXFP4, performance may be degraded")
                 name_up = name.replace("gate_up_proj", "up_proj.weight")
                 name_gate = name.replace("gate_up_proj", "gate_proj.weight")
                 data_torch = data_torch.transpose(-1, -2)
                 gate_proj_weight, up_proj_weight = data_torch[:, ::2, :], data_torch[:, 1::2, :]
-                return [
-                    (self.map_tensor_name(name_gate), gate_proj_weight),
-                    (self.map_tensor_name(name_up), up_proj_weight)
-                ]
-            else:
-                # otherwise, it should already be repacked to ggml MXFP4 format
-                return []
-
-        return [(self.map_tensor_name(name), data_torch)]
+                yield from super().modify_tensors(gate_proj_weight, name_gate, bid)
+                yield from super().modify_tensors(up_proj_weight, name_up, bid)
+        else:
+            yield from super().modify_tensors(data_torch, name, bid)

     def set_vocab(self):
         self._set_vocab_gpt2()
@@ -10312,7 +10138,7 @@ class LFM2Model(TextModel):
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         if self._is_vision_tensor(name) or ConformerAudioModel.is_audio_tensor(name):
             # skip multimodal tensors
-            return []
+            return

         name = name.replace("language_model.", "")  # vision
         name = name.replace("lfm.", "model.")  # audio
@@ -10321,7 +10147,7 @@ class LFM2Model(TextModel):
         if 'conv.conv' in name:
             data_torch = data_torch.squeeze(1)

-        return [(self.map_tensor_name(name), data_torch)]
+        yield from super().modify_tensors(data_torch, name, bid)

     def _is_vision_tensor(self, name: str) -> bool:
         return "vision_tower" in name or "multi_modal_projector" in name
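# Illustrative sketch, not part of the patch: gpt-oss interleaves gate and up
# in gate_up_proj, so even positions are gate and odd positions are up (bias
# case shown, matching the strided slicing in the hunk above). Sizes synthetic.
import torch

n_ff = 4
gate = torch.arange(0, n_ff, dtype=torch.float32)
up = torch.arange(10, 10 + n_ff, dtype=torch.float32)
gate_up = torch.stack([gate, up], dim=-1).reshape(-1)  # interleave: g0, u0, g1, u1, ...

assert torch.equal(gate_up[..., ::2], gate)
assert torch.equal(gate_up[..., 1::2], up)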
@@ -10336,7 +10162,7 @@ class LFM2ColBertModel(LFM2Model):
         if not name.startswith(self.dense_tensor_name):
             name = "model." + name
-        return super().modify_tensors(data_torch, name, bid)
+        yield from super().modify_tensors(data_torch, name, bid)

     def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]:
         # dense tensor is stored in a separate safetensors file
@@ -10391,9 +10217,8 @@ class LFM2MoeModel(TextModel):

             # not enough expert weights to merge
             if len(expert_cache) < n_experts * len(expert_weights):
-                return []
+                return

-            tensors: list[tuple[str, Tensor]] = []
             for w_name in expert_weights:
                 datas: list[Tensor] = []
@@ -10404,13 +10229,13 @@ class LFM2MoeModel(TextModel):
                 data_torch = torch.stack(datas, dim=0)
                 merged_name = f"layers.{bid}.feed_forward.experts.{w_name}.weight"
-                new_name = self.map_tensor_name(merged_name)
-                tensors.append((new_name, data_torch))
+
+                yield from super().modify_tensors(data_torch, merged_name, bid)

             del self._experts_cache[bid]
-            return tensors
+            return

-        return [(self.map_tensor_name(name), data_torch)]
+        yield from super().modify_tensors(data_torch, name, bid)

     def prepare_tensors(self):
         super().prepare_tensors()
@@ -10436,7 +10261,6 @@ class LFM2VLModel(MmprojModel):
         self.gguf_writer.add_vision_block_count(self.find_vparam(self.n_block_keys) - vision_feature_layers_to_drop)

     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        del bid  # unused
         is_vision_tensor = "vision_tower" in name or "multi_modal_projector" in name

         if is_vision_tensor:
@@ -10447,9 +10271,10 @@ class LFM2VLModel(MmprojModel):
             if "patch_embedding.weight" in name:
                 data_torch = data_torch.view(data_torch.shape[0], 16, 16, 3).permute(0, 3, 1, 2)

-            return [(self.map_tensor_name(name), data_torch)]
+            yield from super().modify_tensors(data_torch, name, bid)
+            return

-        return []  # skip other tensors
+        return  # skip other tensors


 @ModelBase.register("Lfm2AudioForConditionalGeneration")
@@ -10474,17 +10299,17 @@ class LFM2AudioModel(ConformerAudioModel):
     def modify_tensors(self, data_torch, name, bid):
         # skip language model tensors
         if name.startswith("lfm."):
-            return []
+            return

         # for training only
         if any(p in name for p in ["audio_loss_weight"]):
-            return []
+            return

         # for audio output
         if any(p in name for p in ["codebook_offsets", "depth_embeddings", "depth_linear", "depthformer"]):
-            return []
+            return

-        return super().modify_tensors(data_torch, name, bid)
+        yield from super().modify_tensors(data_torch, name, bid)


 @ModelBase.register("SmallThinkerForCausalLM")
@@ -10529,8 +10354,6 @@ class SmallThinkerModel(TextModel):
             self._experts[bid][name] = data_torch

             if len(self._experts[bid]) >= n_experts * 3:
-                tensors: list[tuple[str, Tensor]] = []
-
                 # merge the experts into a single 3d tensor
                 for w_name in ["down", "gate", "up"]:
                     datas: list[Tensor] = []
@@ -10544,14 +10367,12 @@ class SmallThinkerModel(TextModel):

                     merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight"

-                    new_name = self.map_tensor_name(merged_name)
-
-                    tensors.append((new_name, data_torch))
-                return tensors
+                    yield from super().modify_tensors(data_torch, merged_name, bid)
+                return
             else:
-                return []
+                return

-        return [(self.map_tensor_name(name), data_torch)]
+        yield from super().modify_tensors(data_torch, name, bid)

     def prepare_tensors(self):
         super().prepare_tensors()
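# Illustrative sketch, not part of the patch: the LFM2-VL patch embedding
# reshape above. Flat (out, 16*16*3) rows become NCHW conv kernels of shape
# (out, 3, 16, 16); the output-channel count here is synthetic.
import torch

out_ch = 5
w = torch.randn(out_ch, 16 * 16 * 3)
w = w.view(out_ch, 16, 16, 3).permute(0, 3, 1, 2)
assert w.shape == (out_ch, 3, 16, 16)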
@@ -10584,12 +10405,12 @@ class ModernBertModel(BertModel):
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         # these layers act as MLM head, so we don't need them
         if name.startswith("decoder."):
-            return []
+            return

         if name.startswith("model."):
             name = name[6:]

-        return super().modify_tensors(data_torch, name, bid)
+        yield from super().modify_tensors(data_torch, name, bid)


 @ModelBase.register("ApertusForCausalLM")
@@ -10609,24 +10430,24 @@ class ApertusModel(LlamaModel):
                 self._alpha_n[bid] = data_torch.to("cpu").float().item()
                 if (len(self._alpha_n) == n_layers):
                     self.gguf_writer.add_xielu_alpha_n([self._alpha_n[k] for k in sorted(self._alpha_n)])
-                return []
+                return

             if name.endswith(".act_fn.alpha_p"):
                 self._alpha_p[bid] = data_torch.to("cpu").float().item()
                 if (len(self._alpha_p) == n_layers):
                     self.gguf_writer.add_xielu_alpha_p([self._alpha_p[k] for k in sorted(self._alpha_p)])
-                return []
+                return

             if name.endswith(".act_fn.beta"):
                 self._beta[bid] = data_torch.to("cpu").float().item()
                 if (len(self._beta) == n_layers):
                     self.gguf_writer.add_xielu_beta([self._beta[k] for k in sorted(self._beta)])
-                return []
+                return

             if name.endswith(".act_fn.eps"):
                 self._eps[bid] = data_torch.to("cpu").float().item()
                 if (len(self._eps) == n_layers):
                     self.gguf_writer.add_xielu_eps([self._eps[k] for k in sorted(self._eps)])
-                return []
+                return

-        return super().modify_tensors(data_torch, name, bid)
+        yield from super().modify_tensors(data_torch, name, bid)


 class MistralModel(LlamaModel):
@@ -10789,7 +10610,7 @@ class MistralMoeModel(DeepseekV2Model):

     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
         if name.startswith("vision_") or name.startswith("patch_merger.") or "mm_projector" in name:
-            return []
+            return

         # rename certain tensors so that we can reuse DeepseekV2Model modify_tensors logic
         if name.endswith(".qscale_act"):
@@ -10805,7 +10626,7 @@ class MistralMoeModel(DeepseekV2Model):
             name = name.replace(".w3.", ".up_proj.")
             name = "model." + name

-        return super().modify_tensors(data_torch, name, bid)
+        yield from super().modify_tensors(data_torch, name, bid)


 class PixtralModel(LlavaVisionModel):
@@ -10850,7 +10671,7 @@ class LightOnOCRVisionModel(LlavaVisionModel):
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
         name = name.replace("model.vision_encoder.", "vision_tower.")
         name = name.replace("model.vision_projection.", "multi_modal_projector.")
-        return super().modify_tensors(data_torch, name, bid)
+        yield from super().modify_tensors(data_torch, name, bid)


 @ModelBase.register("KimiVLForConditionalGeneration")
@@ -10870,24 +10691,20 @@ class KimiVLModel(MmprojModel):
         self.gguf_writer.add_vision_attention_layernorm_eps(self.hparams_vision.get("layer_norm_eps", 1e-5))

     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        del bid  # unused
         is_vision_tensor = "vision_tower" in name or "multi_modal_projector" in name

         if is_vision_tensor:
             if "pos_emb.weight" in name:
                 data_torch = data_torch.view(data_torch.shape[0] * data_torch.shape[1], data_torch.shape[2])
-            elif "wqkv" in name:
+
+            if "wqkv" in name:
                 split_dim = 0 if "weight" in name else -1
                 wq, wk, wv = data_torch.chunk(3, dim=split_dim)
-                return [
-                    (self.map_tensor_name(name.replace("wqkv", "wq")), wq),
-                    (self.map_tensor_name(name.replace("wqkv", "wk")), wk),
-                    (self.map_tensor_name(name.replace("wqkv", "wv")), wv)
-                ]
-
-            return [(self.map_tensor_name(name), data_torch)]
-
-        return []  # skip other tensors
+                yield from super().modify_tensors(wq, name.replace("wqkv", "wq"), bid)
+                yield from super().modify_tensors(wk, name.replace("wqkv", "wk"), bid)
+                yield from super().modify_tensors(wv, name.replace("wqkv", "wv"), bid)
+            else:
+                yield from super().modify_tensors(data_torch, name, bid)
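# Illustrative sketch, not part of the patch: the fused wqkv split in the
# KimiVL hunk above, with equal thirds along the output dim for weights
# (dim 0); biases would use the last dim. hidden is synthetic.
import torch

hidden = 12
wqkv = torch.randn(3 * hidden, hidden)
wq, wk, wv = wqkv.chunk(3, dim=0)
assert wq.shape == wk.shape == wv.shape == (hidden, hidden)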
@ModelBase.register("CogVLMForCausalLM") @@ -10899,12 +10716,10 @@ class CogVLMVisionModel(MmprojModel): self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.COGVLM) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - if not name.startswith("model.vision."): - return [] + return - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("CogVLMForCausalLM") @@ -10912,13 +10727,11 @@ class CogVLMModel(LlamaModel): model_arch = gguf.MODEL_ARCH.COGVLM def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - # block vision tensors if name.startswith("model.vision."): - return [] + return - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("JanusForConditionalGeneration") @@ -10936,14 +10749,14 @@ class JanusProModel(LlamaModel): 'model.generation_head.', ) if name.startswith(skip_prefixes): - return [] + return if name.startswith('model.language_model.'): name = name.replace('model.language_model.', 'model.') elif name.startswith('language_model.'): name = name.replace('language_model.', '') - return super().modify_tensors(data_torch, name, bid) + yield from super().modify_tensors(data_torch, name, bid) @ModelBase.register("JanusForConditionalGeneration") @@ -10996,11 +10809,9 @@ class JanusProVisionModel(MmprojModel): return [(tensor_name, data_torch)] def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - # Skip language model tensors as they will be handled by `JanusProModel` if name.startswith(('model.language_model.', 'language_model.')): - return [] + return # Skip generation-related components skip_generation_prefixes = ( @@ -11014,17 +10825,19 @@ class JanusProVisionModel(MmprojModel): 'generation_head.', ) if name.startswith(skip_generation_prefixes): - return [] + return # Handle aligner tensors if name.startswith(('model.aligner.', 'aligner.')): - return list(self._map_aligner_tensor(data_torch, name)) + yield from self._map_aligner_tensor(data_torch, name) + return # Handle vision tensors if name.startswith(('model.vision_model.', 'vision_model.')): - return [(self.map_tensor_name(name), data_torch)] + yield from super().modify_tensors(data_torch, name, bid) + return - return [] + return @ModelBase.register("YoutuVLForConditionalGeneration") @@ -11063,21 +10876,18 @@ class YoutuVLVisionModel(MmprojModel): self.gguf_writer.add_vision_wa_layer_indexes(layers=fullatt_block_indexes) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - # Skip language model tensors skip_prefixes = ('lm_head.', 'model.layers.', 'model.embed_tokens.', 'model.norm.') if name.startswith(skip_prefixes): - return [] + return # Try to map the tensor using TensorNameMap (handles vision encoder and projector) try: - new_name = self.map_tensor_name(name) - return [(new_name, data_torch)] + yield from super().modify_tensors(data_torch, name, bid) except ValueError: # If mapping fails, log warning and skip logger.warning(f"Cannot map tensor: {name}") - return [] + return @ModelBase.register("SolarOpenForCausalLM")