From 7bc60eb6eb4c2c1f2d109e41ef6db6401607a58b Mon Sep 17 00:00:00 2001 From: otegami Date: Tue, 21 Oct 2025 07:59:23 +0800 Subject: [PATCH] llama-context: fix build failure with `-Werror=missing-braces` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ref: GH-16614 ## Why it failed When compiling with strict compiler flags (-Wmissing-braces -Werror=missing-braces), the build fails with the following error: ``` cmake \ -S . \ -B ../llama.cpp.build \ --preset=x64-linux-gcc-debug \ -DCMAKE_INSTALL_PREFIX=/tmp/local \ -DCMAKE_CXX_FLAGS="-Wmissing-braces -Werror=missing-braces" && \ cmake --build ../llama.cpp.build/ ... /home/otegami/work/cpp/llama.cpp/src/llama-context.cpp: In function ‘void llama_memory_breakdown_print(const llama_context*)’: /home/otegami/work/cpp/llama.cpp/src/llama-context.cpp:2801:25: error: missing braces around initializer for ‘std::__array_traits<std::__cxx11::basic_string<char>, 9>::_Type’ {aka ‘std::__cxx11::basic_string<char> [9]’} [-Werror=missing-braces] ... ``` The issue is that fully-braced std::array initialization requires double braces, because std::array is a struct that wraps a C-style array. ## How to fix This PR changes the single-brace initializer lists `{ ... }` passed to `table_data.push_back` into double-brace lists `{{ ... }}`, so the std::array elements are initialized with explicit nested braces. This is part of a series of commits to fix missing braces warnings across the codebase. - Done: ~~src/llama-batch.h~~ - src/llama-context.cpp <- This PR is here. 
- tests/test-backend-ops.cpp - tests/test-gguf.cpp - tools/mtmd/clip.cpp Benefits: - std::array is a struct containing a C-style array, requiring nested braces - Enables stricter compiler warnings to catch potential issues --- src/llama-context.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/llama-context.cpp b/src/llama-context.cpp index bd348bcad3..b9626669b4 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -2798,7 +2798,7 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) { const std::string template_gpu = "%s: | %s | %s = %s + (%s = %s + %s + %s) + %s |\n"; const std::string template_other = "%s: | %s | %s %s %s = %s + %s + %s %s |\n"; - table_data.push_back({template_header, "memory breakdown [MiB]", "total", "free", "self", "model", "context", "compute", "unaccounted"}); + table_data.push_back({{template_header, "memory breakdown [MiB]", "total", "free", "self", "model", "context", "compute", "unaccounted"}}); constexpr size_t MiB = 1024 * 1024; const std::vector desc_prefixes_strip = {"NVIDIA ", "GeForce ", "Tesla ", "AMD ", "Radeon ", "Instinct "}; @@ -2858,7 +2858,7 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) { const size_t self = mb.model + mb.context + mb.compute; const size_t unaccounted = total - self - free; - table_data.push_back({ + table_data.push_back({{ template_gpu, " - " + name + " (" + desc + ")", std::to_string(total / MiB), @@ -2867,13 +2867,13 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) { std::to_string(mb.model / MiB), std::to_string(mb.context / MiB), std::to_string(mb.compute / MiB), - std::to_string(unaccounted / MiB)}); + std::to_string(unaccounted / MiB)}}); } // print memory breakdown for host: { const size_t self = mb_host.model + mb_host.context + mb_host.compute; - table_data.push_back({ + table_data.push_back({{ template_other, " - Host", "", // total @@ -2882,7 +2882,7 @@ void 
llama_memory_breakdown_print(const struct llama_context * ctx) { std::to_string(mb_host.model / MiB), std::to_string(mb_host.context / MiB), std::to_string(mb_host.compute / MiB), - ""}); // unaccounted + ""}}); // unaccounted } // print memory breakdown for all remaining buffer types: @@ -2894,7 +2894,7 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) { } const std::string name = ggml_backend_buft_name(buft); const size_t self = mb.model + mb.context + mb.compute; - table_data.push_back({ + table_data.push_back({{ template_other, " - " + name, "", // total @@ -2903,7 +2903,7 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) { std::to_string(mb.model / MiB), std::to_string(mb.context / MiB), std::to_string(mb.compute / MiB), - ""}); // unaccounted + ""}}); // unaccounted seen_buffer_types.insert(buft); }