From 673b45b5451431b27495e431fc45602cdc98a3aa Mon Sep 17 00:00:00 2001 From: Saurabh Dash Date: Fri, 13 Feb 2026 23:11:18 +0000 Subject: [PATCH] update comment --- convert_hf_to_gguf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index ff6377e034..54e4925525 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -7267,7 +7267,8 @@ class Cohere2Model(TextModel): self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - # Cohere2 runtime in llama.cpp expects no bias tensors; skip them at conversion time. + # Cohere2 runtime in llama.cpp expects no bias tensors; + # the actual model weights contain only zero-valued bias tensors, so we can skip them if name.endswith(".bias"): if torch.any(data_torch != 0): raise ValueError(f"Bias tensor {name!r} is not zero.")