diff --git a/include/llama-cpp.h b/include/llama-cpp.h
index 8f6368177d..807e77f628 100644
--- a/include/llama-cpp.h
+++ b/include/llama-cpp.h
@@ -21,7 +21,9 @@ struct llama_sampler_deleter {
 };
 
 struct llama_adapter_lora_deleter {
-    void operator()(llama_adapter_lora * adapter) { llama_adapter_lora_free(adapter); }
+    void operator()(llama_adapter_lora *) {
+        // llama_adapter_lora_free is deprecated
+    }
 };
 
 typedef std::unique_ptr<llama_model, llama_model_deleter> llama_model_ptr;
diff --git a/include/llama.h b/include/llama.h
index 8b3c8a7b10..fcefb5ab4e 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -633,7 +633,8 @@ extern "C" {
 
     // Manually free a LoRA adapter
    // NOTE: loaded adapters will be free when the associated model is deleted
-    LLAMA_API void llama_adapter_lora_free(struct llama_adapter_lora * adapter);
+    LLAMA_API DEPRECATED(void llama_adapter_lora_free(struct llama_adapter_lora * adapter),
+            "adapters are now freed together with the associated model");
 
     // Get the invocation tokens if the current lora is an alora
     LLAMA_API uint64_t llama_adapter_get_alora_n_invocation_tokens(const struct llama_adapter_lora * adapter);
diff --git a/src/llama-adapter.cpp b/src/llama-adapter.cpp
index 5ff22b18ab..d6a5800e63 100644
--- a/src/llama-adapter.cpp
+++ b/src/llama-adapter.cpp
@@ -146,11 +146,9 @@ llama_adapter_lora_weight * llama_adapter_lora::get_weight(ggml_tensor * w) {
     return nullptr;
 }
 
-static void llama_adapter_lora_init_impl(const char * path_lora, llama_adapter_lora & adapter) {
+static void llama_adapter_lora_init_impl(llama_model & model, const char * path_lora, llama_adapter_lora & adapter) {
     LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
 
-    llama_model & model = adapter.model;
-
     ggml_context * ctx_init;
     gguf_init_params meta_gguf_params = {
         /* .no_alloc = */ true,
@@ -420,10 +418,10 @@ static void llama_adapter_lora_init_impl(const char * path_lora, llama_adapter_l
 }
 
 llama_adapter_lora * llama_adapter_lora_init(llama_model * model, const char * path_lora) {
-    llama_adapter_lora * adapter = new llama_adapter_lora(*model);
+    llama_adapter_lora * adapter = new llama_adapter_lora();
 
     try {
-        llama_adapter_lora_init_impl(path_lora, *adapter);
+        llama_adapter_lora_init_impl(*model, path_lora, *adapter);
         return adapter;
     } catch (const std::exception & err) {
         LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
@@ -473,13 +471,8 @@ int32_t llama_adapter_meta_val_str_by_index(const llama_adapter_lora * adapter,
     return snprintf(buf, buf_size, "%s", it->second.c_str());
 }
 
-void llama_adapter_lora_free(llama_adapter_lora * adapter) {
-    // remove adapter from associated model
-    auto & model = adapter->model;
-    GGML_ASSERT(model.loras.find(adapter) != model.loras.end());
-    model.loras.erase(adapter);
-
-    delete adapter;
+void llama_adapter_lora_free(llama_adapter_lora *) {
+    // deprecated: adapters are freed by llama_model's destructor
 }
 
 uint64_t llama_adapter_get_alora_n_invocation_tokens(const struct llama_adapter_lora * adapter) {
diff --git a/src/llama-adapter.h b/src/llama-adapter.h
index 42d64a6e0b..d275d25425 100644
--- a/src/llama-adapter.h
+++ b/src/llama-adapter.h
@@ -59,8 +59,6 @@ struct llama_adapter_lora_weight {
 };
 
 struct llama_adapter_lora {
-    llama_model & model;
-
     // map tensor name to lora_a_b
     std::unordered_map<std::string, llama_adapter_lora_weight> ab_map;
 
@@ -75,7 +73,7 @@ struct llama_adapter_lora {
     // activated lora (aLoRA)
     std::vector<llama_token> alora_invocation_tokens;
 
-    llama_adapter_lora(llama_model & model) : model(model) {}
+    llama_adapter_lora() = default;
     ~llama_adapter_lora() = default;
 
     llama_adapter_lora_weight * get_weight(ggml_tensor * w);
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 1b220af83e..0f2af5256a 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -469,7 +469,7 @@ llama_model::llama_model(const llama_model_params & params) : params(params), pi
 
 llama_model::~llama_model() {
     for (auto * lora : loras) {
-        llama_adapter_lora_free(lora);
+        delete lora;
     }
 }
 
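What this means for callers, as a minimal sketch. It assumes the usual llama.h entry points (llama_backend_init, llama_model_default_params, llama_model_load_from_file, llama_model_free); the file paths are placeholders. The key point from the diff: llama_adapter_lora_free is now a deprecated no-op, and the adapter is released by llama_model's destructor.

    #include "llama.h"

    int main() {
        llama_backend_init();

        llama_model_params mparams = llama_model_default_params();
        llama_model * model = llama_model_load_from_file("model.gguf", mparams);
        if (model == NULL) {
            return 1;
        }

        // the adapter is registered with the model internally (model->loras),
        // so its lifetime is now tied to the model's lifetime
        llama_adapter_lora * adapter = llama_adapter_lora_init(model, "adapter.gguf");
        (void) adapter;

        // before this change, llama_adapter_lora_free(adapter) could be called
        // here; now that call is a deprecated no-op, and the adapter is freed
        // together with the model:
        llama_model_free(model); // also deletes `adapter` via ~llama_model()

        llama_backend_free();
        return 0;
    }

Note that the llama_adapter_lora_deleter in llama-cpp.h becomes a no-op as well, so a llama_adapter_lora_ptr no longer owns anything; holding one past the associated model's lifetime leaves a dangling pointer.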