From 86103e7e065c4286b68ef710f8eb5cf938a60b43 Mon Sep 17 00:00:00 2001
From: Colin Kealty <3266127+bartowski1182@users.noreply.github.com>
Date: Mon, 9 Mar 2026 15:46:31 -0400
Subject: [PATCH] Update name

---
 src/llama-quant.h                   | 2 +-
 tests/test-quant-type-selection.cpp | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/llama-quant.h b/src/llama-quant.h
index 059f68fd09..fc7da927cd 100644
--- a/src/llama-quant.h
+++ b/src/llama-quant.h
@@ -39,7 +39,7 @@ struct quantize_state_impl {
 };
 
 ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype);
-ggml_type llama_ftype_default_type(llama_ftype ftype);
+ggml_type llama_ftype_get_default_type(llama_ftype ftype);
 
 // Ftype name <-> enum conversions.
 // Returns (llama_ftype)-1 on failure.
diff --git a/tests/test-quant-type-selection.cpp b/tests/test-quant-type-selection.cpp
index a50e982eca..8dbfe34567 100644
--- a/tests/test-quant-type-selection.cpp
+++ b/tests/test-quant-type-selection.cpp
@@ -275,7 +275,7 @@ static std::vector<std::pair<std::string, ggml_type>> compute_quant_types(llama_
     }
     init_quantize_state_counters(qs, names);
 
-    ggml_type default_type = llama_ftype_default_type(ftype);
+    ggml_type default_type = llama_ftype_get_default_type(ftype);
 
     std::vector<std::pair<std::string, ggml_type>> result;
     result.reserve(tensors.size());