llama-context: fix build failure with `-Werror=missing-braces`

ref: GH-16614

## Why it failed

When compiling with the strict warning flags `-Wmissing-braces -Werror=missing-braces`, the build fails with the following error:

```
cmake \
  -S . \
  -B ../llama.cpp.build \
  --preset=x64-linux-gcc-debug \
  -DCMAKE_INSTALL_PREFIX=/tmp/local \
  -DCMAKE_CXX_FLAGS="-Wmissing-braces -Werror=missing-braces" && \
cmake --build ../llama.cpp.build/
...
/home/otegami/work/cpp/llama.cpp/src/llama-context.cpp: In function ‘void llama_memory_breakdown_print(const llama_context*)’:
/home/otegami/work/cpp/llama.cpp/src/llama-context.cpp:2801:25: error: missing braces around initializer for ‘std::__array_traits<std::__cxx11::basic_string<char>, 9>::_Type’ {aka ‘std::__cxx11::basic_string<char> [9]’} [-Werror=missing-braces]
...
```

The cause is that `std::array` is an aggregate wrapping a C-style array, so a single-brace initializer relies on brace elision. That is valid C++, but GCC's `-Wmissing-braces` flags the elided inner braces, and `-Werror` promotes the warning to an error.
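
A minimal standalone example of the warning, assuming GCC with `-Wmissing-braces` (the variables here are illustrative, not from llama.cpp):

```cpp
#include <array>
#include <string>

// std::array<T, N> is an aggregate: a struct whose single member is a
// C-style array T[N]. A single set of braces relies on brace elision,
// which is valid C++ but triggers -Wmissing-braces on GCC.
std::array<std::string, 3> bad  = { "a", "b", "c" };   // warning: missing braces
// Outer braces initialize the struct, inner braces the wrapped array:
std::array<std::string, 3> good = {{ "a", "b", "c" }}; // no warning
```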

## How to fix

This PR makes the inner braces explicit, changing single-brace initializers (`{ ... }`) to double-brace ones (`{{ ... }}`) wherever a `std::array` is brace-initialized in `src/llama-context.cpp`.
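
The error message names `std::__array_traits<std::__cxx11::basic_string<char>, 9>`, which implies the rows of `table_data` are `std::array<std::string, 9>`. Here is a sketch of the pattern being fixed; the `table_data` declaration is inferred from that message, not copied from the source:

```cpp
#include <array>
#include <string>
#include <vector>

std::vector<std::array<std::string, 9>> table_data; // element type inferred from the error

void sketch() {
    // Single braces construct the temporary std::array via brace elision
    // and trip -Wmissing-braces:
    table_data.push_back({"a", "b", "c", "d", "e", "f", "g", "h", "i"});
    // Double braces make the nested aggregate explicit:
    table_data.push_back({{"a", "b", "c", "d", "e", "f", "g", "h", "i"}});
}
```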

This is part of a series of commits to fix `-Wmissing-braces` warnings across the codebase:
- Done: ~~src/llama-batch.h~~
- src/llama-context.cpp <- This PR is here.
- tests/test-backend-ops.cpp
- tests/test-gguf.cpp
- tools/mtmd/clip.cpp

Benefits:
- The initializer now matches the actual structure: `std::array` is a struct containing a C-style array, so the nested braces are explicit.
- The codebase can be compiled with stricter warning flags, catching potential initialization issues earlier.

otegami 2025-10-21 07:59:23 +08:00
parent 6de8ed7519
commit 7bc60eb6eb
1 changed file with 7 additions and 7 deletions

```diff
@@ -2798,7 +2798,7 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) {
     const std::string template_gpu = "%s: | %s | %s = %s + (%s = %s + %s + %s) + %s |\n";
     const std::string template_other = "%s: | %s | %s %s %s = %s + %s + %s %s |\n";
 
-    table_data.push_back({template_header, "memory breakdown [MiB]", "total", "free", "self", "model", "context", "compute", "unaccounted"});
+    table_data.push_back({{template_header, "memory breakdown [MiB]", "total", "free", "self", "model", "context", "compute", "unaccounted"}});
 
     constexpr size_t MiB = 1024 * 1024;
     const std::vector<std::string> desc_prefixes_strip = {"NVIDIA ", "GeForce ", "Tesla ", "AMD ", "Radeon ", "Instinct "};
@@ -2858,7 +2858,7 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) {
         const size_t self = mb.model + mb.context + mb.compute;
         const size_t unaccounted = total - self - free;
 
-        table_data.push_back({
+        table_data.push_back({{
             template_gpu,
             " - " + name + " (" + desc + ")",
             std::to_string(total / MiB),
@@ -2867,13 +2867,13 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) {
             std::to_string(mb.model / MiB),
             std::to_string(mb.context / MiB),
             std::to_string(mb.compute / MiB),
-            std::to_string(unaccounted / MiB)});
+            std::to_string(unaccounted / MiB)}});
     }
 
     // print memory breakdown for host:
     {
         const size_t self = mb_host.model + mb_host.context + mb_host.compute;
-        table_data.push_back({
+        table_data.push_back({{
             template_other,
             " - Host",
             "", // total
@@ -2882,7 +2882,7 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) {
             std::to_string(mb_host.model / MiB),
             std::to_string(mb_host.context / MiB),
             std::to_string(mb_host.compute / MiB),
-            ""}); // unaccounted
+            ""}}); // unaccounted
     }
 
     // print memory breakdown for all remaining buffer types:
@@ -2894,7 +2894,7 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) {
         }
         const std::string name = ggml_backend_buft_name(buft);
         const size_t self = mb.model + mb.context + mb.compute;
-        table_data.push_back({
+        table_data.push_back({{
             template_other,
             " - " + name,
             "", // total
@@ -2903,7 +2903,7 @@ void llama_memory_breakdown_print(const struct llama_context * ctx) {
             std::to_string(mb.model / MiB),
             std::to_string(mb.context / MiB),
             std::to_string(mb.compute / MiB),
-            ""}); // unaccounted
+            ""}}); // unaccounted
 
         seen_buffer_types.insert(buft);
     }
```