convert: add error message for mistral3 quantized weight (#17686)

Xuan-Son Nguyen 2025-12-02 11:48:31 +01:00 committed by GitHub
parent 5d6bd842ea
commit 2c453c6c77
1 changed file with 4 additions and 0 deletions


@@ -2842,6 +2842,10 @@ class Mistral3Model(LlamaModel):
             self.gguf_writer.add_attn_temperature_scale(rope_params["llama_4_scaling_beta"])

     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
+        # TODO: probably not worth supporting quantized weight, as official BF16 is also available
+        if name.endswith("weight_scale_inv"):
+            raise ValueError("This is a quantized weight, please use BF16 weight instead")
+
         name = name.replace("language_model.", "")
         if "multi_modal_projector" in name or "vision_tower" in name:
             return []
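
For context: FP8 block-quantized safetensors checkpoints store a per-block inverse scale tensor alongside each quantized weight, and that companion tensor's name ends in "weight_scale_inv". The added guard uses that suffix to fail fast with an actionable message instead of silently producing a broken GGUF. Below is a minimal sketch of the check in isolation; the helper name and tensor names are illustrative, not part of the converter:

    def reject_quantized_weight(name: str) -> None:
        # Companion scale tensors only appear in quantized checkpoints, so
        # seeing one means the user should convert the BF16 weights instead.
        # (Hypothetical helper; the real check lives inline in modify_tensors.)
        if name.endswith("weight_scale_inv"):
            raise ValueError("This is a quantized weight, please use BF16 weight instead")

    reject_quantized_weight("model.layers.0.mlp.down_proj.weight")            # passes silently
    reject_quantized_weight("model.layers.0.mlp.down_proj.weight_scale_inv")  # raises ValueError

Checking the tensor name rather than the dtype keeps the guard cheap: it rejects the checkpoint on the first scale tensor encountered, before any weights are loaded or written.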