From 33dcb44aa20bbbebb687cbe058ca50768d170fc6 Mon Sep 17 00:00:00 2001
From: Francis Couture-Harpin
Date: Thu, 6 Nov 2025 21:34:21 -0500
Subject: [PATCH] convert : handle naive-quantized models

---
 convert_hf_to_gguf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 14ccb24942..2849163123 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -412,7 +412,7 @@ class ModelBase:
                 raise NotImplementedError("Can't handle multiple config groups for compressed-tensors yet")
             weight_config = tuple(groups.values())[0]["weights"]
 
-            if quant_format == "float-quantized" or quant_format == "int-quantized":
+            if quant_format == "float-quantized" or quant_format == "int-quantized" or quant_format == "naive-quantized":
                 block_size = weight_config.get("block_structure", None)
                 strategy = weight_config.get("strategy")
                 assert strategy == "channel" or strategy == "block"
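Note (illustrative, not part of the patch): the changed condition dispatches on the
"format" field of a model's compressed-tensors quantization_config, so that
"naive-quantized" models take the same path as "float-quantized" and
"int-quantized" ones. Below is a minimal standalone sketch of that dispatch,
assuming a Hugging Face style config.json and a "config_groups" key matching
the `groups` variable visible in the diff context; the path and key layout are
assumptions for illustration, not a verified schema.

import json

# Hypothetical path; HF models ship a config.json next to their weights.
with open("config.json") as f:
    hparams = json.load(f)

quant_config = hparams.get("quantization_config", {})
quant_format = quant_config.get("format")

# Equivalent to the patched condition: all three compressed-tensors
# formats now take the same code path.
if quant_format in ("float-quantized", "int-quantized", "naive-quantized"):
    groups = quant_config["config_groups"]  # assumed key, mirrors `groups` in the diff
    if len(groups) > 1:
        raise NotImplementedError("Can't handle multiple config groups for compressed-tensors yet")
    weight_config = tuple(groups.values())[0]["weights"]
    block_size = weight_config.get("block_structure", None)  # present when strategy == "block"
    strategy = weight_config.get("strategy")                 # "channel" or "block"
    assert strategy == "channel" or strategy == "block"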