ggml, llama : use defaulted constructors/destructors (#17649)

This commit is contained in:
Herman Semenoff 2025-12-03 09:12:18 +03:00 committed by GitHub
parent 16cc3c606e
commit 37adc9c6ba
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 6 additions and 7 deletions

View File

@@ -1169,7 +1169,7 @@ void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const vo
 struct gguf_writer_base {
     size_t written_bytes {0u};
-    ~gguf_writer_base(void) {}
+    ~gguf_writer_base(void) = default;
     // we bet on devirtualization
     virtual void write(int8_t val) = 0;

View File

@@ -37,7 +37,7 @@ void llama_log_callback_default(ggml_log_level level, const char * text, void *
 template <typename T>
 struct no_init {
     T value;
-    no_init() { /* do nothing */ }
+    no_init() = default;
 };

 struct time_meas {

View File

@@ -423,8 +423,8 @@ static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode s
 }

 struct llama_model::impl {
-    impl() {}
-    ~impl() {}
+    impl() = default;
+    ~impl() = default;

     uint64_t n_elements = 0;
@@ -461,7 +461,7 @@ llama_model::llama_model(const llama_model_params & params) : params(params), pi
     pimpl->has_tensor_overrides = params.tensor_buft_overrides && params.tensor_buft_overrides[0].pattern;
 }

-llama_model::~llama_model() {}
+llama_model::~llama_model() = default;

 void llama_model::load_stats(llama_model_loader & ml) {
     pimpl->n_elements = ml.n_elements;

View File

@@ -3253,8 +3253,7 @@ void llama_vocab::impl::print_info() const {
 llama_vocab::llama_vocab() : pimpl(new impl(*this)) {
 }

-llama_vocab::~llama_vocab() {
-}
+llama_vocab::~llama_vocab() = default;

 void llama_vocab::load(llama_model_loader & ml, const LLM_KV & kv) {
     pimpl->load(ml, kv);