From f76e53108c14d81a42f089b52d7a7625bf7d95f0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= <johannesg@5d6.de>
Date: Sun, 22 Mar 2026 20:51:41 +0100
Subject: [PATCH] fixup

---
 src/llama-mmap.h             |  2 +-
 src/llama-model-loader.cpp   |  2 +-
 src/llama.cpp                | 18 ++++++++++--
 tests/CMakeLists.txt         |  1 -
 tests/test-model-load-fd.cpp | 54 ------------------------------------
 5 files changed, 17 insertions(+), 60 deletions(-)
 delete mode 100644 tests/test-model-load-fd.cpp

diff --git a/src/llama-mmap.h b/src/llama-mmap.h
index 32fab23119..b7d5c61e95 100644
--- a/src/llama-mmap.h
+++ b/src/llama-mmap.h
@@ -21,7 +21,7 @@ struct llama_file {
     size_t tell() const;
     size_t size() const;
 
-    int file_id() const;
+    int file_id() const; // fileno overload
 
     void seek(size_t offset, int whence) const;
 
diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp
index 3ad8a51d9b..2457a7ed4b 100644
--- a/src/llama-model-loader.cpp
+++ b/src/llama-model-loader.cpp
@@ -659,7 +659,7 @@ llama_model_loader::llama_model_loader(
             LLAMA_LOG_INFO("%s: additional %d GGUFs metadata loaded.\n", __func__, n_split - 1);
         }
 
-    } else if (file) {
+    } else if (file != nullptr) {
        struct ggml_context * ctx = NULL;
        struct gguf_init_params params = {
            /*.no_alloc = */ true,
diff --git a/src/llama.cpp b/src/llama.cpp
index d35fb2cbe6..1810a59d8e 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -891,9 +891,21 @@ static struct llama_model * llama_model_load_from_file_impl(
         std::vector<std::string> & splits,
         FILE * file,
         struct llama_model_params params) {
-    if (metadata == nullptr && path_model.empty() && !file) {
-        LLAMA_LOG_ERROR("%s: no model source provided\n", __func__);
-        return nullptr;
+    {
+        int n_sources_defined = 0;
+        if (metadata != nullptr) {
+            n_sources_defined++;
+        }
+        if (!path_model.empty()) {
+            n_sources_defined++;
+        }
+        if (file != nullptr) {
+            n_sources_defined++;
+        }
+        if (n_sources_defined != 1) {
+            LLAMA_LOG_ERROR("%s: exactly one of metadata, path_model, and file must be defined\n", __func__);
+            return nullptr;
+        }
     }
 
     ggml_time_init();
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index c333fe7e40..9582164b58 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -240,7 +240,6 @@ llama_build_and_test(test-gguf.cpp)
 llama_build_and_test(test-backend-ops.cpp)
 
 llama_build_and_test(test-model-load-cancel.cpp LABEL "model")
-llama_build_and_test(test-model-load-fd.cpp     LABEL "model")
 llama_build_and_test(test-autorelease.cpp       LABEL "model")
 llama_build_and_test(test-backend-sampler.cpp LABEL "model")
 
diff --git a/tests/test-model-load-fd.cpp b/tests/test-model-load-fd.cpp
deleted file mode 100644
index b7ff237ee5..0000000000
--- a/tests/test-model-load-fd.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-#include "llama.h"
-#include "get-model.h"
-
-#include <cstdio>
-#include <cstdlib>
-
-#ifdef _WIN32
-int main(int /*argc*/, char ** /*argv*/) {
-    fprintf(stderr, "skipping on Windows\n");
-    return EXIT_SUCCESS;
-}
-#else
-# include <fcntl.h>
-# include <unistd.h>
-
-int main(int argc, char ** argv) {
-    auto * model_path = get_model_or_exit(argc, argv);
-
-    llama_backend_init();
-
-    const int fd = open(model_path, O_RDONLY);
-    if (fd < 0) {
-        fprintf(stderr, "failed to open %s\n", model_path);
-        return EXIT_FAILURE;
-    }
-
-    FILE * f = fdopen(dup(fd), "rb");
-    close(fd);
-    if (!f) {
-        fprintf(stderr, "failed to fdopen\n");
-        return EXIT_FAILURE;
-    }
-
-    auto params = llama_model_default_params();
-    params.use_mmap   = true;
-    params.vocab_only = true;
-
-    struct llama_model * model = llama_model_load_from_file_ptr(f, params);
-    fclose(f);
-
-    if (model == nullptr) {
-        fprintf(stderr, "load from file pointer failed\n");
-        return EXIT_FAILURE;
-    }
-
-    const int n_vocab = llama_vocab_n_tokens(llama_model_get_vocab(model));
-    fprintf(stderr, "loaded %d tokens via file pointer\n", n_vocab);
-
-    llama_model_free(model);
-    llama_backend_free();
-
-    return n_vocab > 0 ? EXIT_SUCCESS : EXIT_FAILURE;
-}
-#endif // _WIN32