convert : handle naive-quantized models

Francis Couture-Harpin 2025-11-06 21:34:21 -05:00
parent d23bdd57b0
commit 33dcb44aa2
1 changed file with 1 addition and 1 deletion

@@ -412,7 +412,7 @@ class ModelBase:
             raise NotImplementedError("Can't handle multiple config groups for compressed-tensors yet")
         weight_config = tuple(groups.values())[0]["weights"]
-        if quant_format == "float-quantized" or quant_format == "int-quantized":
+        if quant_format == "float-quantized" or quant_format == "int-quantized" or quant_format == "naive-quantized":
             block_size = weight_config.get("block_structure", None)
             strategy = weight_config.get("strategy")
             assert strategy == "channel" or strategy == "block"
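
For context, here is a minimal, self-contained sketch of the logic this one-line change relaxes, assuming a compressed-tensors style quantization_config dict. The helper name parse_weight_config and the quant_config argument are hypothetical illustrations, not the converter's actual names:

    # Sketch only: quant_config is assumed to mirror the compressed-tensors
    # "quantization_config" shape (a "format" string plus "config_groups").
    def parse_weight_config(quant_config: dict) -> dict:
        groups = quant_config["config_groups"]
        if len(groups) > 1:
            raise NotImplementedError("Can't handle multiple config groups for compressed-tensors yet")
        # Take the single group's weight quantization settings.
        weight_config = tuple(groups.values())[0]["weights"]

        quant_format = quant_config["format"]
        # After this commit, "naive-quantized" is accepted alongside the
        # two previously handled formats.
        if quant_format in ("float-quantized", "int-quantized", "naive-quantized"):
            block_size = weight_config.get("block_structure", None)
            strategy = weight_config.get("strategy")
            assert strategy == "channel" or strategy == "block"
        return weight_config

The committed code keeps the explicit chain of == comparisons; the `in (...)` membership test above is an equivalent, slightly more compact alternative, not what the commit itself uses.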