From d648629f560d7759524e6a76368b825ebf9aa652 Mon Sep 17 00:00:00 2001
From: ddh0
Date: Thu, 12 Feb 2026 18:24:16 -0600
Subject: [PATCH] remove unused `std::vector tensors;`

---
 src/llama-quant.cpp | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index c9b7e5c7ce..763c6e1baf 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -790,13 +790,6 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         weights.push_back(&it.second);
     }
 
-    // make a list of tensors (same pointers as from weights)
-    std::vector tensors;
-    tensors.reserve(weights.size());
-    for (size_t i = 0; i < weights.size(); ++i) {
-        tensors.push_back(weights[i]->tensor);
-    }
-
     if (!prune_list.empty()) {
         gguf_set_val_u32(ctx_out.get(), ml.llm_kv(LLM_KV_BLOCK_COUNT).c_str(), blk_id);
     }
@@ -885,7 +878,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         for (size_t i = 0; i < ctx_outs.size(); ++i) {
             gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str(), i);
             gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str(), n_split);
-            gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), (int32_t)tensors.size());
+            gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), (int32_t)weights.size());
         }
     }
 