This commit is contained in:
Johannes Gäßler 2026-03-22 20:51:41 +01:00
parent 6de1857936
commit f76e53108c
5 changed files with 17 additions and 60 deletions

View File

@@ -21,7 +21,7 @@ struct llama_file {
size_t tell() const;
size_t size() const;
int file_id() const;
int file_id() const; // fileno overload
void seek(size_t offset, int whence) const;

View File

@@ -659,7 +659,7 @@ llama_model_loader::llama_model_loader(
LLAMA_LOG_INFO("%s: additional %d GGUFs metadata loaded.\n", __func__, n_split - 1);
}
} else if (file) {
} else if (file != nullptr) {
struct ggml_context * ctx = NULL;
struct gguf_init_params params = {
/*.no_alloc = */ true,

View File

@@ -891,9 +891,21 @@ static struct llama_model * llama_model_load_from_file_impl(
std::vector<std::string> & splits,
FILE * file,
struct llama_model_params params) {
if (metadata == nullptr && path_model.empty() && !file) {
LLAMA_LOG_ERROR("%s: no model source provided\n", __func__);
return nullptr;
{
int n_sources_defined = 0;
if (metadata != nullptr) {
n_sources_defined++;
}
if (!path_model.empty()) {
n_sources_defined++;
}
if (file != nullptr) {
n_sources_defined++;
}
if (n_sources_defined != 1) {
LLAMA_LOG_ERROR("%s: exactly one of metadata, path_model, and file must be defined\n", __func__);
return nullptr;
}
}
ggml_time_init();

View File

@@ -240,7 +240,6 @@ llama_build_and_test(test-gguf.cpp)
llama_build_and_test(test-backend-ops.cpp)
llama_build_and_test(test-model-load-cancel.cpp LABEL "model")
llama_build_and_test(test-model-load-fd.cpp LABEL "model")
llama_build_and_test(test-autorelease.cpp LABEL "model")
llama_build_and_test(test-backend-sampler.cpp LABEL "model")

View File

@@ -1,54 +0,0 @@
// Test: load a llama model through a FILE pointer built from a raw POSIX file
// descriptor, exercising llama_model_load_from_file_ptr (vocab-only + mmap).
#include "llama.h"
#include "get-model.h"
#include <cstdio>
#include <cstdlib>
#ifdef _WIN32
// The POSIX descriptor APIs used below (open/dup/fdopen/close) are not
// portable to Windows, so the test is a no-op success there.
int main(int /*argc*/, char ** /*argv*/) {
fprintf(stderr, "skipping on Windows\n");
return EXIT_SUCCESS;
}
#else
# include <fcntl.h>
# include <unistd.h>
int main(int argc, char ** argv) {
// Resolve the model path; the helper's name suggests it exits the process
// when no model is available — TODO confirm against get-model.h.
auto * model_path = get_model_or_exit(argc, argv);
llama_backend_init();
const int fd = open(model_path, O_RDONLY);
if (fd < 0) {
fprintf(stderr, "failed to open %s\n", model_path);
return EXIT_FAILURE;
}
// dup() first so the FILE stream owns an independent descriptor; the
// original fd can then be closed without invalidating the stream.
FILE * f = fdopen(dup(fd), "rb");
close(fd);
if (!f) {
fprintf(stderr, "failed to fdopen\n");
return EXIT_FAILURE;
}
auto params = llama_model_default_params();
params.use_mmap = true;    // exercise the mmap code path of the FILE*-based loader
params.vocab_only = true;  // keep the test light: vocabulary only, no tensor data
struct llama_model * model = llama_model_load_from_file_ptr(f, params);
// Closed immediately after loading — the test asserts the model remains
// usable once the stream is gone.
fclose(f);
if (model == nullptr) {
fprintf(stderr, "load from file pointer failed\n");
return EXIT_FAILURE;
}
const int n_vocab = llama_vocab_n_tokens(llama_model_get_vocab(model));
fprintf(stderr, "loaded %d tokens via file pointer\n", n_vocab);
llama_model_free(model);
llama_backend_free();
// Success requires a non-empty vocabulary in the loaded model.
return n_vocab > 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}
#endif // _WIN32