gguf-py : skip endian-conversion of MXFP4 data (#17523)
* gguf_convert_endian.py: skip MXFP4 data
* Use gguf.constants.GGML_QUANT_SIZES to determine block sizes
parent b78db3bd50
commit 4fcd87cf7c
@@ -19,6 +19,11 @@ import gguf
 
 logger = logging.getLogger("gguf-convert-endian")
 
 
+def byteswap_noop(tensor, block_offs):
+    # this function is used when byteswapping is not needed
+    pass
+
+
 def byteswap_q4_0(tensor, block_offs):
     # Each block_q4_0 consists of an f16 delta (scaling factor) followed by 16 int8 quantizations.
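Note: a no-op is correct for MXFP4 because (assuming ggml's block_mxfp4 layout) each 17-byte block is one uint8 E8M0 scale followed by 16 bytes of packed 4-bit values, so there are no multi-byte fields whose byte order could differ between endiannesses. A minimal sketch checking this against the quant-size table shipped in gguf-py:

    import gguf

    # MXFP4: 32 elements per block, 17 bytes per block
    # (1-byte E8M0 scale + 32/2 bytes of packed 4-bit values).
    n_elements, n_bytes = gguf.constants.GGML_QUANT_SIZES[gguf.GGMLQuantizationType.MXFP4]
    assert (n_elements, n_bytes) == (32, 17)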
@@ -55,22 +60,11 @@ def byteswap_q6_k(tensor, block_offs):
 
 
 byteswap_tensors = {
-    gguf.GGMLQuantizationType.Q4_0: {
-        "block_size": 18, # 18 bytes = <f16 delta scaling factor> + 16 * <int8 quant>
-        "byteswap_func": byteswap_q4_0,
-    },
-    gguf.GGMLQuantizationType.Q8_0: {
-        "block_size": 34, # 34 bytes = <f16 delta scaling factor> + 32 * <int8 quant>
-        "byteswap_func": byteswap_q8_0,
-    },
-    gguf.GGMLQuantizationType.Q4_K: {
-        "block_size": 144, # 144 bytes = 2 * <f16 delta scaling factor> + 140 * <int8 quant>
-        "byteswap_func": byteswap_q4_k,
-    },
-    gguf.GGMLQuantizationType.Q6_K: {
-        "block_size": 210, # 210 bytes = <f16 delta scaling factor> + 208 * <int8 quant>
-        "byteswap_func": byteswap_q6_k,
-    },
+    gguf.GGMLQuantizationType.Q4_0: byteswap_q4_0,
+    gguf.GGMLQuantizationType.Q8_0: byteswap_q8_0,
+    gguf.GGMLQuantizationType.Q4_K: byteswap_q4_k,
+    gguf.GGMLQuantizationType.Q6_K: byteswap_q6_k,
+    gguf.GGMLQuantizationType.MXFP4: byteswap_noop,
 }
 
 
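With the table flattened from type -> {block_size, func} dicts to a plain type -> function map, per-tensor dispatch collapses to a single lookup. A sketch of the consuming loop (mirroring convert_byteorder below; the helper name is hypothetical):

    def byteswap_tensor_blocks(tensor, n_blocks, block_size):  # hypothetical helper
        # Every supported quant type maps straight to its byteswap function;
        # MXFP4 resolves to byteswap_noop, so the loop needs no special case.
        byteswap_func = byteswap_tensors[tensor.tensor_type]
        for block_num in range(n_blocks):
            byteswap_func(tensor, block_num * block_size)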
@@ -135,8 +129,8 @@ def convert_byteorder(reader: gguf.GGUFReader, args: argparse.Namespace) -> None
 
         tensor.data.resize(newshape)
 
-        block_size = byteswap_tensors[tensor.tensor_type]["block_size"]
-        byteswap_func = byteswap_tensors[tensor.tensor_type]["byteswap_func"]
+        block_size = gguf.constants.GGML_QUANT_SIZES[tensor.tensor_type][1]
+        byteswap_func = byteswap_tensors[tensor.tensor_type]
 
         n_blocks = len(tensor.data) // block_size
         for block_num in (inner_pbar := tqdm(range(n_blocks), desc="Byte-swapping Blocks", leave=False)):
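The [1] index selects the bytes-per-block entry of GGML_QUANT_SIZES, whose (elements_per_block, bytes_per_block) pairs match the sizes previously hardcoded in this script. A quick sanity-check sketch, using the old values from the hunk above:

    import gguf

    # The table's bytes-per-block values agree with the removed hardcoded block sizes.
    for qtype, old_block_size in [
        (gguf.GGMLQuantizationType.Q4_0, 18),
        (gguf.GGMLQuantizationType.Q8_0, 34),
        (gguf.GGMLQuantizationType.Q4_K, 144),
        (gguf.GGMLQuantizationType.Q6_K, 210),
    ]:
        assert gguf.constants.GGML_QUANT_SIZES[qtype][1] == old_block_size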