From 373da0e27680a8a21ed2dafeb515099981e24838 Mon Sep 17 00:00:00 2001
From: Xuan-Son Nguyen
Date: Mon, 16 Feb 2026 12:03:45 +0100
Subject: [PATCH] Apply suggestion from @ngxson

---
 convert_hf_to_gguf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index f6a5b7a60e..9bbc9bad87 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -7284,7 +7284,7 @@ class Cohere2Model(TextModel):
         self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        # Cohere2 runtime in llama.cpp expects no bias tensors; 
+        # Cohere2 runtime in llama.cpp expects no bias tensors;
         # the actual weight only contains 0-value tensors as bias, we can skip them
         if name.endswith(".bias"):
             if torch.any(data_torch != 0):
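
For context, below is a minimal, self-contained sketch of how the bias-skipping logic in Cohere2Model.modify_tensors could look in full. The hunk above shows only the start of the check; the ValueError raised on a non-zero bias, the stand-in map_tensor_name helper, and the final return follow common convert_hf_to_gguf.py conventions but are assumptions for illustration, not the actual patch contents.

# Sketch only, not part of the patch: illustrates the bias-skipping check,
# assuming non-zero biases should abort the conversion and all other tensors
# are passed through with a mapped name.
from typing import Iterable

import torch
from torch import Tensor


class Cohere2ModelSketch:
    def map_tensor_name(self, name: str) -> str:
        # Stand-in for the real tensor-name mapping used in convert_hf_to_gguf.py.
        return name

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # Cohere2 runtime in llama.cpp expects no bias tensors; the checkpoints
        # only ship all-zero biases, so they can be dropped during conversion.
        if name.endswith(".bias"):
            if torch.any(data_torch != 0):
                # A non-zero bias would otherwise be silently lost, so fail loudly.
                raise ValueError(f"unexpected non-zero bias tensor: {name}")
            return []  # skip the all-zero bias
        return [(self.map_tensor_name(name), data_torch)]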