llama : add --no-host to disable host buffers (#16310)

* implement --no-host to disable host buffer

* fix equal_mparams

* move no-host enumeration order together with other model params

---------

Co-authored-by: slaren <slarengh@gmail.com>
Gadflyii 2025-10-06 12:55:53 -05:00 committed by GitHub
parent c08002a198
commit 3df2244df4
6 changed files with 56 additions and 10 deletions
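
The flag is wired through common_params (with env-var support), the llama_model_params C API, and llama-bench. A minimal usage sketch, assuming the stock llama.cpp binaries and an illustrative model path:

    # via the command line
    llama-cli -m model.gguf --no-host

    # or via the environment variable added in this commit
    LLAMA_ARG_NO_HOST=1 llama-server -m model.gguf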

common/arg.cpp

@@ -2584,6 +2584,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.no_extra_bufts = true;
         }
     ).set_env("LLAMA_ARG_NO_REPACK"));
+    add_opt(common_arg(
+        {"--no-host"},
+        "bypass host buffer allowing extra buffers to be used",
+        [](common_params & params) {
+            params.no_host = true;
+        }
+    ).set_env("LLAMA_ARG_NO_HOST"));
     add_opt(common_arg(
         {"-ctk", "--cache-type-k"}, "TYPE",
         string_format(

common/common.cpp

@@ -1133,6 +1133,7 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {
     mparams.use_mlock       = params.use_mlock;
     mparams.check_tensors   = params.check_tensors;
     mparams.use_extra_bufts = !params.no_extra_bufts;
+    mparams.no_host         = params.no_host;
 
     if (params.kv_overrides.empty()) {
         mparams.kv_overrides = NULL;

common/common.h

@@ -392,6 +392,7 @@ struct common_params {
     bool check_tensors  = false; // validate tensor data
     bool no_op_offload  = false; // globally disable offload host tensor operations to device
     bool no_extra_bufts = false; // disable extra buffer types (used for weight repacking)
+    bool no_host        = false; // bypass host buffer allowing extra buffers to be used
 
     bool single_turn    = false; // single turn chat conversation

include/llama.h

@@ -296,6 +296,7 @@ extern "C" {
         bool use_mlock;       // force system to keep model in RAM
         bool check_tensors;   // validate model tensor data
         bool use_extra_bufts; // use extra buffer types (used for weight repacking)
+        bool no_host;         // bypass host buffer allowing extra buffers to be used
     };
 
     // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
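
For programs that use the C API directly rather than common_params, the new field is set on llama_model_params before loading. A minimal sketch, assuming the llama_model_load_from_file entry point and an illustrative model path:

    #include "llama.h"

    struct llama_model_params mparams = llama_model_default_params();
    mparams.no_host = true; // bypass host buffers for CPU-resident weights

    struct llama_model * model = llama_model_load_from_file("model.gguf", mparams);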

src/llama-model.cpp

@@ -310,7 +310,7 @@ static ggml_backend_buffer_type_t select_weight_buft(const llama_hparams & hpara
 }
 
 // CPU: ACCEL -> GPU host -> CPU extra -> CPU
-static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & devices, bool use_extra_bufts) {
+static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & devices, bool use_extra_bufts, bool no_host) {
     buft_list_t buft_list;
 
     // add ACCEL buffer types
@@ -331,6 +331,7 @@ static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & de
     // generally, this will be done using the first device in the list
     // a better approach would be to handle this on a weight-by-weight basis using the offload_op
     // function of the device to determine if it would benefit from being stored in a host buffer
+    if (!no_host) {
     for (auto * dev : devices) {
         ggml_backend_buffer_type_t buft = ggml_backend_dev_host_buffer_type(dev);
         if (buft) {
@@ -338,6 +339,7 @@ static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & de
             break;
         }
     }
+    }
 
     // add extra buffer types
     if (use_extra_bufts) {
@@ -2083,7 +2085,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
     LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, ml.use_mmap ? "true" : "false");
 
     // build a list of buffer types for the CPU and GPU devices
-    pimpl->cpu_buft_list = make_cpu_buft_list(devices, params.use_extra_bufts);
+    pimpl->cpu_buft_list = make_cpu_buft_list(devices, params.use_extra_bufts, params.no_host);
     for (auto * dev : devices) {
         buft_list_t buft_list = make_gpu_buft_list(dev, split_mode, tensor_split);
         // add CPU buffer types as a fallback
@@ -19865,6 +19867,7 @@ llama_model_params llama_model_default_params() {
         /*.use_mlock       =*/ false,
        /*.check_tensors   =*/ false,
        /*.use_extra_bufts =*/ true,
+        /*.no_host         =*/ false,
     };
 
     return result;
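
In effect, setting no_host trims the CPU buffer-type priority list from ACCEL -> GPU host -> CPU extra -> CPU down to ACCEL -> CPU extra -> CPU: the pinned host buffer that would otherwise win for CPU-resident weights is skipped, which is what lets the extra buffer types (e.g. repacked weights) be used instead, matching the flag's help text.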

tools/llama-bench/llama-bench.cpp

@@ -336,6 +336,7 @@ struct cmd_params {
     std::vector<bool> use_mmap;
     std::vector<bool> embeddings;
     std::vector<bool> no_op_offload;
+    std::vector<bool> no_host;
     ggml_numa_strategy numa;
     int reps;
     ggml_sched_priority prio;
@@ -373,6 +374,7 @@ static const cmd_params cmd_params_defaults = {
     /* use_mmap      */ { true },
     /* embeddings    */ { false },
     /* no_op_offload */ { false },
+    /* no_host       */ { false },
     /* numa          */ GGML_NUMA_STRATEGY_DISABLED,
     /* reps          */ 5,
     /* prio          */ GGML_SCHED_PRIO_NORMAL,
@@ -453,6 +455,8 @@ static void print_usage(int /* argc */, char ** argv) {
     printf("  -ot --override-tensor <tensor name pattern>=<buffer type>;...\n");
     printf("                                            (default: disabled)\n");
     printf("  -nopo, --no-op-offload <0|1>              (default: 0)\n");
+    printf("  --no-host <0|1>                           (default: %s)\n",
+           join(cmd_params_defaults.no_host, ",").c_str());
     printf("\n");
     printf(
         "Multiple values can be given for each parameter by separating them with ','\n"
@@ -782,6 +786,13 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
             }
             auto p = string_split<bool>(argv[i], split_delim);
             params.no_op_offload.insert(params.no_op_offload.end(), p.begin(), p.end());
+        } else if (arg == "--no-host") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            auto p = string_split<bool>(argv[i], split_delim);
+            params.no_host.insert(params.no_host.end(), p.begin(), p.end());
         } else if (arg == "-ts" || arg == "--tensor-split") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -1003,6 +1014,9 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
     if (params.no_op_offload.empty()) {
         params.no_op_offload = cmd_params_defaults.no_op_offload;
     }
+    if (params.no_host.empty()) {
+        params.no_host = cmd_params_defaults.no_host;
+    }
     if (params.n_threads.empty()) {
         params.n_threads = cmd_params_defaults.n_threads;
     }
@@ -1044,6 +1058,7 @@ struct cmd_params_instance {
     bool use_mmap;
     bool embeddings;
     bool no_op_offload;
+    bool no_host;
 
     llama_model_params to_llama_mparams() const {
         llama_model_params mparams = llama_model_default_params();
@@ -1056,6 +1071,7 @@ struct cmd_params_instance {
         mparams.main_gpu     = main_gpu;
         mparams.tensor_split = tensor_split.data();
         mparams.use_mmap     = use_mmap;
+        mparams.no_host      = no_host;
 
         if (n_cpu_moe <= 0) {
             if (tensor_buft_overrides.empty()) {
@@ -1101,6 +1117,7 @@ struct cmd_params_instance {
                split_mode == other.split_mode &&
                main_gpu == other.main_gpu && use_mmap == other.use_mmap && tensor_split == other.tensor_split &&
                devices == other.devices &&
+               no_host == other.no_host &&
                vec_tensor_buft_override_equal(tensor_buft_overrides, other.tensor_buft_overrides);
     }
@@ -1136,6 +1153,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
     for (const auto & ts : params.tensor_split)
     for (const auto & ot : params.tensor_buft_overrides)
     for (const auto & mmp : params.use_mmap)
+    for (const auto & noh : params.no_host)
     for (const auto & embd : params.embeddings)
     for (const auto & nopo : params.no_op_offload)
     for (const auto & nb : params.n_batch)
@@ -1178,6 +1196,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
                 /* .use_mmap      = */ mmp,
                 /* .embeddings    = */ embd,
                 /* .no_op_offload = */ nopo,
+                /* .no_host       = */ noh,
             };
             instances.push_back(instance);
         }
@@ -1211,6 +1230,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
                 /* .use_mmap      = */ mmp,
                 /* .embeddings    = */ embd,
                 /* .no_op_offload = */ nopo,
+                /* .no_host       = */ noh,
             };
             instances.push_back(instance);
         }
@@ -1244,6 +1264,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
                 /* .use_mmap      = */ mmp,
                 /* .embeddings    = */ embd,
                 /* .no_op_offload = */ nopo,
+                /* .no_host       = */ noh,
             };
             instances.push_back(instance);
         }
@@ -1282,6 +1303,7 @@ struct test {
     bool use_mmap;
     bool embeddings;
     bool no_op_offload;
+    bool no_host;
     int n_prompt;
     int n_gen;
     int n_depth;
@@ -1318,6 +1340,7 @@ struct test {
        use_mmap = inst.use_mmap;
        embeddings = inst.embeddings;
        no_op_offload = inst.no_op_offload;
+        no_host = inst.no_host;
        n_prompt = inst.n_prompt;
        n_gen = inst.n_gen;
        n_depth = inst.n_depth;
@@ -1375,8 +1398,8 @@ struct test {
             "type_k", "type_v", "n_gpu_layers", "n_cpu_moe", "split_mode",
             "main_gpu", "no_kv_offload", "flash_attn", "devices", "tensor_split",
             "tensor_buft_overrides", "use_mmap", "embeddings", "no_op_offload",
-            "n_prompt", "n_gen", "n_depth", "test_time", "avg_ns",
-            "stddev_ns", "avg_ts", "stddev_ts"
+            "no_host", "n_prompt", "n_gen", "n_depth", "test_time",
+            "avg_ns", "stddev_ns", "avg_ts", "stddev_ts"
         };
         return fields;
     }
@@ -1391,7 +1414,7 @@ struct test {
             return INT;
         }
         if (field == "f16_kv" || field == "no_kv_offload" || field == "cpu_strict" || field == "flash_attn" ||
-            field == "use_mmap" || field == "embeddings") {
+            field == "use_mmap" || field == "embeddings" || field == "no_host") {
             return BOOL;
         }
         if (field == "avg_ts" || field == "stddev_ts") {
@@ -1466,6 +1489,7 @@ struct test {
             std::to_string(use_mmap),
             std::to_string(embeddings),
             std::to_string(no_op_offload),
+            std::to_string(no_host),
             std::to_string(n_prompt),
             std::to_string(n_gen),
             std::to_string(n_depth),
@@ -1654,6 +1678,9 @@ struct markdown_printer : public printer {
         if (field == "no_op_offload") {
             return 4;
         }
+        if (field == "no_host") {
+            return 4;
+        }
 
         int width = std::max((int) field.length(), 10);
@@ -1688,6 +1715,9 @@ struct markdown_printer : public printer {
         if (field == "no_op_offload") {
             return "nopo";
         }
+        if (field == "no_host") {
+            return "noh";
+        }
         if (field == "devices") {
             return "dev";
         }
@@ -1768,6 +1798,9 @@ struct markdown_printer : public printer {
         if (params.no_op_offload.size() > 1 || params.no_op_offload != cmd_params_defaults.no_op_offload) {
             fields.emplace_back("no_op_offload");
         }
+        if (params.no_host.size() > 1 || params.no_host != cmd_params_defaults.no_host) {
+            fields.emplace_back("no_host");
+        }
         fields.emplace_back("test");
         fields.emplace_back("t/s");
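
As with llama-bench's other boolean parameters, comma-separated values expand into separate test instances, so both settings can be compared in a single run. A minimal sketch, with an illustrative model path:

    llama-bench -m model.gguf --no-host 0,1

When the values differ from the default, the markdown output gains a noh column showing which setting each row used.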