From c34d053d0a4a6e73b82125ae9f73fa81d42918d7 Mon Sep 17 00:00:00 2001 From: Djip007 <3705339+Djip007@users.noreply.github.com> Date: Sun, 21 Dec 2025 10:16:01 +0100 Subject: [PATCH] Add c++ wrapper for backend. - a backend can be create with full C++ - update cpu traits - update cpu_repack - update cpu_amx to use it. - extract GGML_LOG - correct extra_buffer register order for GPU --- ggml/include/ggml.h | 2 +- ggml/src/CMakeLists.txt | 2 + ggml/src/ggml-backend-impl.h | 2 +- ggml/src/ggml-cpu/amx/amx.cpp | 132 ++------- ggml/src/ggml-cpu/repack.cpp | 84 ++---- ggml/src/ggml-cpu/traits.cpp | 74 +++++ ggml/src/ggml-cpu/traits.h | 27 ++ ggml/src/ggml-impl.h | 35 +-- ggml/src/ggml-log.h | 48 +++ ggml/src/ggml.c | 2 +- ggml/src/ggml_cpp_wrapper.cpp | 532 ++++++++++++++++++++++++++++++++++ ggml/src/ggml_cpp_wrapper.h | 154 ++++++++++ src/llama-model.cpp | 6 +- 13 files changed, 901 insertions(+), 199 deletions(-) create mode 100644 ggml/src/ggml-log.h create mode 100644 ggml/src/ggml_cpp_wrapper.cpp create mode 100644 ggml/src/ggml_cpp_wrapper.h diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index 25f9601e9b..72301af351 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -705,7 +705,7 @@ extern "C" { typedef uint8_t ggml_guid[16]; typedef ggml_guid * ggml_guid_t; - GGML_API bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b); + GGML_API bool ggml_guid_matches(const ggml_guid * guid_a, const ggml_guid * guid_b); // misc diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index 78853304d9..2046c9d8d3 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -200,6 +200,8 @@ add_library(ggml-base ggml.cpp ggml-alloc.c ggml-backend.cpp + ggml_cpp_wrapper.cpp + ggml_cpp_wrapper.h ggml-opt.cpp ggml-threading.cpp ggml-threading.h diff --git a/ggml/src/ggml-backend-impl.h b/ggml/src/ggml-backend-impl.h index 59190b7c46..2a98d66ffb 100644 --- a/ggml/src/ggml-backend-impl.h +++ b/ggml/src/ggml-backend-impl.h @@ -120,7 +120,7 @@ 
extern "C" { }; struct ggml_backend { - ggml_guid_t guid; + const ggml_guid_t guid; struct ggml_backend_i iface; ggml_backend_dev_t device; void * context; diff --git a/ggml/src/ggml-cpu/amx/amx.cpp b/ggml/src/ggml-cpu/amx/amx.cpp index 9baf3e025e..1caf25f685 100644 --- a/ggml/src/ggml-cpu/amx/amx.cpp +++ b/ggml/src/ggml-cpu/amx/amx.cpp @@ -35,111 +35,52 @@ class tensor_traits : public ggml::cpu::tensor_traits { } }; -static ggml::cpu::tensor_traits * get_tensor_traits(ggml_backend_buffer_t, struct ggml_tensor *) { +static ggml::cpu::tensor_traits * get_tensor_traits(struct ggml_tensor *) { static tensor_traits traits; return &traits; } } // namespace ggml::cpu::amx -// AMX buffer interface -static void ggml_backend_amx_buffer_free_buffer(ggml_backend_buffer_t buffer) { - free(buffer->context); -} +namespace ggml::cpu::amx { -static void * ggml_backend_amx_buffer_get_base(ggml_backend_buffer_t buffer) { - return (void *) (buffer->context); -} +// AMX buffer +class buffer : public ggml::cpu::buffer { +public: + buffer(std::size_t size) : ggml::cpu::buffer(size) { } -static enum ggml_status ggml_backend_amx_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { - tensor->extra = (void *) ggml::cpu::amx::get_tensor_traits(buffer, tensor); + virtual ~buffer() { } - GGML_UNUSED(buffer); - return GGML_STATUS_SUCCESS; -} - -static void ggml_backend_amx_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, - uint8_t value, size_t offset, size_t size) { - memset((char *) tensor->data + offset, value, size); - - GGML_UNUSED(buffer); -} - -static void ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, - const void * data, size_t offset, size_t size) { - if (qtype_has_amx_kernels(tensor->type)) { - GGML_LOG_DEBUG("%s: amx repack tensor %s of type %s\n", __func__, tensor->name, ggml_type_name(tensor->type)); - ggml_backend_amx_convert_weight(tensor, data, offset, size); - } else { - 
memcpy((char *) tensor->data + offset, data, size); + ggml_status init_tensor(ggml_tensor& tensor) override { + tensor->extra = (void *) ggml::cpu::amx::get_tensor_traits(&tensor); + return GGML_STATUS_SUCCESS; } - GGML_UNUSED(buffer); -} - -/* -// need to figure what we need to do with buffer->extra. -static void ggml_backend_amx_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { - GGML_ASSERT(!qtype_has_amx_kernels(tensor->type)); - memcpy(data, (const char *)tensor->data + offset, size); - - GGML_UNUSED(buffer); -} - -static bool ggml_backend_amx_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { - if (ggml_backend_buffer_is_host(src->buffer)) { - if (qtype_has_amx_kernels(src->type)) { - ggml_backend_amx_convert_weight(dst, src->data, 0, ggml_nbytes(dst)); + void set_tensor(ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) override { + if (qtype_has_amx_kernels(tensor.type)) { + GGML_LOG_DEBUG("%s: amx repack tensor %s of type %s\n", __func__, tensor.name, ggml_type_name(tensor.type)); + ggml_backend_amx_convert_weight(&tensor, data, offset, size); } else { - memcpy(dst->data, src->data, ggml_nbytes(src)); + memcpy((char *) tensor.data + offset, data, size); } - return true; } - return false; - GGML_UNUSED(buffer); -} -*/ - -static void ggml_backend_amx_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { - memset(buffer->context, value, buffer->size); -} - -static ggml_backend_buffer_i ggml_backend_amx_buffer_interface = { - /* .free_buffer = */ ggml_backend_amx_buffer_free_buffer, - /* .get_base = */ ggml_backend_amx_buffer_get_base, - /* .init_tensor = */ ggml_backend_amx_buffer_init_tensor, - /* .memset_tensor = */ ggml_backend_amx_buffer_memset_tensor, - /* .set_tensor = */ ggml_backend_amx_buffer_set_tensor, - /* .get_tensor = */ nullptr, - /* .cpy_tensor = */ nullptr, - /* .clear = */ 
ggml_backend_amx_buffer_clear, - /* .reset = */ nullptr, }; -static const char * ggml_backend_amx_buffer_type_get_name(ggml_backend_buffer_type_t buft) { - return "AMX"; +class extra_buffer_type : ggml::cpu::extra_buffer_type { - GGML_UNUSED(buft); -} - -static ggml_backend_buffer_t ggml_backend_amx_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { - void * data = ggml_aligned_malloc(size); - if (data == NULL) { - fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size); - return NULL; + const std::string& get_name() override { + static const std::string name {"AMX"}; + return name; } - return ggml_backend_buffer_init(buft, ggml_backend_amx_buffer_interface, data, size); -} + ggml::cpp::backend::buffer* alloc_buffer(std::size_t size) override { + return new buffer(size); + } -static size_t ggml_backend_amx_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { - return TENSOR_ALIGNMENT; + std::size_t get_alloc_size(const ggml_tensor& tensor) override { + return ggml_backend_amx_get_alloc_size(&tensor); + } - GGML_UNUSED(buft); -} - -namespace ggml::cpu::amx { -class extra_buffer_type : ggml::cpu::extra_buffer_type { bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { if (op->op != GGML_OP_MUL_MAT) { return false; @@ -198,12 +139,6 @@ class extra_buffer_type : ggml::cpu::extra_buffer_type { }; } // namespace ggml::cpu::amx -static size_t ggml_backend_amx_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { - return ggml_backend_amx_get_alloc_size(tensor); - - GGML_UNUSED(buft); -} - #define ARCH_GET_XCOMP_PERM 0x1022 #define ARCH_REQ_XCOMP_PERM 0x1023 #define XFEATURE_XTILECFG 17 @@ -224,24 +159,11 @@ static bool ggml_amx_init() { } ggml_backend_buffer_type_t ggml_backend_amx_buffer_type() { - static struct ggml_backend_buffer_type ggml_backend_buffer_type_amx = { - /* .iface = */ { - /* .get_name = */ ggml_backend_amx_buffer_type_get_name, - /* 
.alloc_buffer = */ ggml_backend_amx_buffer_type_alloc_buffer, - /* .get_alignment = */ ggml_backend_amx_buffer_type_get_alignment, - /* .get_max_size = */ nullptr, // defaults to SIZE_MAX - /* .get_alloc_size = */ ggml_backend_amx_buffer_type_get_alloc_size, - /* .is_host = */ nullptr, - }, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), - /* .context = */ new ggml::cpu::amx::extra_buffer_type(), - }; - + static auto* buffer_type = ggml::cpu::c_wrapper(new ggml::cpu::amx::extra_buffer_type()); if (!ggml_amx_init()) { return nullptr; } - - return &ggml_backend_buffer_type_amx; + return buffer_type; } #endif // defined(__AMX_INT8__) && defined(__AVX512VNNI__) diff --git a/ggml/src/ggml-cpu/repack.cpp b/ggml/src/ggml-cpu/repack.cpp index 6b76ab3bfb..d5f6948974 100644 --- a/ggml/src/ggml-cpu/repack.cpp +++ b/ggml/src/ggml-cpu/repack.cpp @@ -4720,54 +4720,42 @@ static const ggml::cpu::tensor_traits * ggml_repack_get_optimal_repack_type(cons return nullptr; } -static enum ggml_status ggml_backend_cpu_repack_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { - tensor->extra = (void *) const_cast(ggml_repack_get_optimal_repack_type(tensor)); +namespace ggml::cpu::repack { - GGML_UNUSED(buffer); - return GGML_STATUS_SUCCESS; -} +class buffer : public ggml::cpu::buffer { +public: + buffer(std::size_t size) : ggml::cpu::buffer(size) { } -static void ggml_backend_cpu_repack_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, - const void * data, size_t offset, size_t size) { - GGML_ASSERT(offset == 0); - GGML_ASSERT(size == ggml_nbytes(tensor)); + virtual ~buffer() { } - auto tensor_traits = (ggml::cpu::repack::tensor_traits_base *) tensor->extra; - auto OK = tensor_traits->repack(tensor, data, size); - - GGML_ASSERT(OK == 0); - GGML_UNUSED(buffer); -} - -static const char * ggml_backend_cpu_repack_buffer_type_get_name(ggml_backend_buffer_type_t buft) { - return "CPU_REPACK"; - - GGML_UNUSED(buft); -} 
- -static ggml_backend_buffer_t ggml_backend_cpu_repack_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { - ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); - - if (buffer == nullptr) { - return nullptr; + ggml_status init_tensor(ggml_tensor & tensor) override { + tensor.extra = (void *) const_cast(ggml_repack_get_optimal_repack_type(&tensor)); + return GGML_STATUS_SUCCESS; } - buffer->buft = buft; - buffer->iface.init_tensor = ggml_backend_cpu_repack_buffer_init_tensor; - buffer->iface.set_tensor = ggml_backend_cpu_repack_buffer_set_tensor; - buffer->iface.get_tensor = nullptr; - buffer->iface.cpy_tensor = nullptr; - return buffer; -} + void set_tensor(ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) override { + GGML_ASSERT(offset == 0); + GGML_ASSERT(size == ggml_nbytes(&tensor)); -static size_t ggml_backend_cpu_repack_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { - return TENSOR_ALIGNMENT; + auto tensor_traits = (ggml::cpu::repack::tensor_traits_base *) tensor.extra; + auto OK = tensor_traits->repack(&tensor, data, size); - GGML_UNUSED(buft); -} + GGML_ASSERT(OK == 0); + } +}; + +class extra_buffer_type : public ggml::cpu::extra_buffer_type { +public: + + const std::string& get_name() override { + static const std::string name {"CPU_REPACK"}; + return name; + } + + ggml::cpp::backend::buffer* alloc_buffer(std::size_t size) override { + return new buffer(size); + } -namespace ggml::cpu::repack { -class extra_buffer_type : ggml::cpu::extra_buffer_type { bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { if ( op->op == GGML_OP_MUL_MAT && op->src[0]->buffer && @@ -4816,18 +4804,6 @@ class extra_buffer_type : ggml::cpu::extra_buffer_type { } // namespace ggml::cpu::repack ggml_backend_buffer_type_t ggml_backend_cpu_repack_buffer_type(void) { - static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_repack = { 
- /* .iface = */ { - /* .get_name = */ ggml_backend_cpu_repack_buffer_type_get_name, - /* .alloc_buffer = */ ggml_backend_cpu_repack_buffer_type_alloc_buffer, - /* .get_alignment = */ ggml_backend_cpu_repack_buffer_type_get_alignment, - /* .get_max_size = */ nullptr, // defaults to SIZE_MAX - /* .get_alloc_size = */ nullptr, // defaults to ggml_nbytes - /* .is_host = */ nullptr, - }, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), - /* .context = */ new ggml::cpu::repack::extra_buffer_type(), - }; - - return &ggml_backend_cpu_buffer_type_repack; + static auto* buffer_type = ggml::cpu::c_wrapper(new ggml::cpu::repack::extra_buffer_type()); + return buffer_type; } diff --git a/ggml/src/ggml-cpu/traits.cpp b/ggml/src/ggml-cpu/traits.cpp index 4f32f10255..65349ab381 100644 --- a/ggml/src/ggml-cpu/traits.cpp +++ b/ggml/src/ggml-cpu/traits.cpp @@ -2,11 +2,85 @@ #include "ggml-backend-impl.h" #include "ggml-backend.h" +#include "ggml-cpu.h" + +#include namespace ggml::cpu { + +buffer::buffer(std::size_t size) : m_size(size) { + m_data = new (std::align_val_t(32)) uint8_t[m_size]; + GGML_ASSERT(m_data); +} + +buffer::~buffer() { + delete [] m_data; +} + +void* buffer::get_base() { + return m_data; +} + +void buffer::memset_tensor(ggml_tensor & tensor, uint8_t value, std::size_t offset, std::size_t size) { + GGML_ASSERT(value == 0); + memset((uint8_t *) tensor.data + offset, value, size); +} + +void buffer::get_tensor(const ggml_tensor &, void *, std::size_t, std::size_t size) { + GGML_ASSERT(size == 0); +} + +void buffer::clear(uint8_t value) { + memset(m_data, value, m_size); +} + tensor_traits::~tensor_traits() {} extra_buffer_type::~extra_buffer_type() {} + +namespace { + const char *buffer_type_get_name (ggml_backend_buffer_type_t buft) { + auto& ctx = *((extra_buffer_type*) (buft->context)); + return ctx.get_name().c_str(); + } + std::size_t buffer_type_get_alignment (ggml_backend_buffer_type_t buft) { + auto& ctx = *((extra_buffer_type*) 
(buft->context)); + return ctx.get_alignment(); + } + std::size_t buffer_type_get_max_size (ggml_backend_buffer_type_t buft) { + auto& ctx = *((extra_buffer_type*) (buft->context)); + return ctx.get_max_size(); + } + std::size_t buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { + auto& ctx = *((extra_buffer_type*) (buft->context)); + return ctx.get_alloc_size(*tensor); + } + bool buffer_type_is_host(ggml_backend_buffer_type_t /*buft*/) { + return false; + } + ggml_backend_buffer_t buffer_type_alloc_buffer (ggml_backend_buffer_type_t buft, std::size_t size) { + auto& ctx = *((extra_buffer_type*) (buft->context)); + return c_wrapper(buft, ctx.alloc_buffer(size), size); + } +} + +ggml_backend_buffer_type_t c_wrapper(extra_buffer_type* ctx) { + if (!ctx) { return nullptr; } + return new ggml_backend_buffer_type { + /* .iface = */ { + /* .get_name = */ buffer_type_get_name, + /* .alloc_buffer = */ buffer_type_alloc_buffer, + /* .get_alignment = */ buffer_type_get_alignment, + /* .get_max_size = */ buffer_type_get_max_size, + /* .get_alloc_size = */ buffer_type_get_alloc_size, + /* .is_host = */ buffer_type_is_host, + }, + /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), + /* .context = */ ctx, + }; +} + + } // namespace ggml::cpu bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) { diff --git a/ggml/src/ggml-cpu/traits.h b/ggml/src/ggml-cpu/traits.h index f4e0990ddf..27cedfdbd1 100644 --- a/ggml/src/ggml-cpu/traits.h +++ b/ggml/src/ggml-cpu/traits.h @@ -4,7 +4,9 @@ #include "ggml.h" #ifdef __cplusplus +# include "ggml_cpp_wrapper.h" # include +# include extern "C" { #endif @@ -24,12 +26,37 @@ class tensor_traits { virtual bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) = 0; }; +// a simple buffer for cpu +class buffer : public ggml::cpp::backend::buffer { +public: + buffer(std::size_t size); + virtual ~buffer(); + void* get_base() 
override; + void memset_tensor(ggml_tensor & tensor, uint8_t value, std::size_t offset, std::size_t size) override; + void get_tensor(const ggml_tensor &, void *, std::size_t, std::size_t size) override; + void clear(uint8_t value) override; +protected: + const std::size_t m_size; + uint8_t* m_data; +}; + class extra_buffer_type { public: virtual ~extra_buffer_type(); + // the base buffer_type fct + virtual const std::string& get_name() = 0; + virtual ggml::cpp::backend::buffer* alloc_buffer(std::size_t size) = 0; + virtual std::size_t get_alignment() { return TENSOR_ALIGNMENT; } + virtual std::size_t get_max_size() { return SIZE_MAX; } + virtual std::size_t get_alloc_size(const ggml_tensor& tensor) { return ggml_nbytes(&tensor); } + + // the extra fct virtual bool supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) = 0; virtual tensor_traits * get_tensor_traits(const struct ggml_tensor * op) = 0; }; + +ggml_backend_buffer_type_t c_wrapper(extra_buffer_type* ctx); + } // namespace ggml::cpu // implemented in ggml-cpu.cpp. diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h index 9256865595..2c50dc32cd 100644 --- a/ggml/src/ggml-impl.h +++ b/ggml/src/ggml-impl.h @@ -4,6 +4,7 @@ #include "ggml.h" #include "gguf.h" +#include "ggml-log.h" #include #include @@ -105,40 +106,6 @@ static inline bool ggml_impl_is_view(const struct ggml_tensor * t) { static inline float ggml_compute_softplus_f32(float input) { return (input > 20.0f) ? input : logf(1 + expf(input)); } -// -// logging -// - -GGML_ATTRIBUTE_FORMAT(2, 3) -GGML_API void ggml_log_internal (enum ggml_log_level level, const char * format, ...); -GGML_API void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data); - -#define GGML_LOG(...) ggml_log_internal(GGML_LOG_LEVEL_NONE , __VA_ARGS__) -#define GGML_LOG_INFO(...) ggml_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__) -#define GGML_LOG_WARN(...) 
ggml_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__) -#define GGML_LOG_ERROR(...) ggml_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__) -#define GGML_LOG_DEBUG(...) ggml_log_internal(GGML_LOG_LEVEL_DEBUG, __VA_ARGS__) -#define GGML_LOG_CONT(...) ggml_log_internal(GGML_LOG_LEVEL_CONT , __VA_ARGS__) - -#define GGML_DEBUG 0 - -#if (GGML_DEBUG >= 1) -#define GGML_PRINT_DEBUG(...) GGML_LOG_DEBUG(__VA_ARGS__) -#else -#define GGML_PRINT_DEBUG(...) -#endif - -#if (GGML_DEBUG >= 5) -#define GGML_PRINT_DEBUG_5(...) GGML_LOG_DEBUG(__VA_ARGS__) -#else -#define GGML_PRINT_DEBUG_5(...) -#endif - -#if (GGML_DEBUG >= 10) -#define GGML_PRINT_DEBUG_10(...) GGML_LOG_DEBUG(__VA_ARGS__) -#else -#define GGML_PRINT_DEBUG_10(...) -#endif // tensor params diff --git a/ggml/src/ggml-log.h b/ggml/src/ggml-log.h new file mode 100644 index 0000000000..0edcc81c7e --- /dev/null +++ b/ggml/src/ggml-log.h @@ -0,0 +1,48 @@ +#pragma once + +#include "ggml.h" + +// GGML internal header + +#ifdef __cplusplus +extern "C" { +#endif + +// +// logging: implemented in ggml.c +// + +GGML_ATTRIBUTE_FORMAT(2, 3) +GGML_API void ggml_log_internal (enum ggml_log_level level, const char * format, ...); +GGML_API void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data); + +#define GGML_LOG(...) ggml_log_internal(GGML_LOG_LEVEL_NONE , __VA_ARGS__) +#define GGML_LOG_INFO(...) ggml_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__) +#define GGML_LOG_WARN(...) ggml_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__) +#define GGML_LOG_ERROR(...) ggml_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__) +#define GGML_LOG_DEBUG(...) ggml_log_internal(GGML_LOG_LEVEL_DEBUG, __VA_ARGS__) +#define GGML_LOG_CONT(...) ggml_log_internal(GGML_LOG_LEVEL_CONT , __VA_ARGS__) + +#define GGML_DEBUG 0 + +#if (GGML_DEBUG >= 1) +#define GGML_PRINT_DEBUG(...) GGML_LOG_DEBUG(__VA_ARGS__) +#else +#define GGML_PRINT_DEBUG(...) +#endif + +#if (GGML_DEBUG >= 5) +#define GGML_PRINT_DEBUG_5(...) 
GGML_LOG_DEBUG(__VA_ARGS__) +#else +#define GGML_PRINT_DEBUG_5(...) +#endif + +#if (GGML_DEBUG >= 10) +#define GGML_PRINT_DEBUG_10(...) GGML_LOG_DEBUG(__VA_ARGS__) +#else +#define GGML_PRINT_DEBUG_10(...) +#endif + +#ifdef __cplusplus +} +#endif diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index e5b83e1447..ce6854dc3a 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -492,7 +492,7 @@ void ggml_fp32_to_bf16_row(const float * x, ggml_bf16_t * y, int64_t n) { } } -bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b) { +bool ggml_guid_matches(const ggml_guid * guid_a, const ggml_guid * guid_b) { return memcmp(guid_a, guid_b, sizeof(ggml_guid)) == 0; } diff --git a/ggml/src/ggml_cpp_wrapper.cpp b/ggml/src/ggml_cpp_wrapper.cpp new file mode 100644 index 0000000000..f798eefb78 --- /dev/null +++ b/ggml/src/ggml_cpp_wrapper.cpp @@ -0,0 +1,532 @@ +#include "ggml_cpp_wrapper.h" + +#include "ggml-backend-impl.h" +#include "ggml.h" + +#include +#include +#include + +namespace ggml::cpp::backend { + +// TODO: voir si on ne cree pas une fontion static plutot que friend. +ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t device) { + auto& ctx = *((ggml::cpp::backend::device*) (device->context)); + if (ctx.m_ggml_extra_buffers_type.size() == 0) { // need init of extra buffer wrappers + for (auto* buft : ctx.m_extra_buffers_type) { + auto* c_buft = c_wrapper(device, buft); + ctx.m_ggml_extra_buffers_type.push_back(c_buft); + } + ctx.m_ggml_extra_buffers_type.push_back(nullptr); + } + return ctx.m_ggml_extra_buffers_type.data(); +} + + namespace { // unnamed namespace + + //========================================================= + // les wrappper pour ggml_backend_buffer + void buffer_free_buffer(ggml_backend_buffer_t buf) { + auto* ctx = (ggml::cpp::backend::buffer*) (buf->context); + delete ctx; + // delete buf; NO => deleted by the core. 
+ } + void * buffer_get_base(ggml_backend_buffer_t buf) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + return ctx.get_base(); + } + ggml_status buffer_init_tensor(ggml_backend_buffer_t buf, ggml_tensor * tensor) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + return ctx.init_tensor(*tensor); + } + void buffer_memset_tensor(ggml_backend_buffer_t buf, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + ctx.memset_tensor(*tensor, value, offset, size); + } + void buffer_set_tensor(ggml_backend_buffer_t buf, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + ctx.set_tensor(*tensor, data, offset, size); + } + void buffer_get_tensor(ggml_backend_buffer_t buf, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + ctx.get_tensor(*tensor, data, offset, size); + } + bool buffer_cpy_tensor(ggml_backend_buffer_t buf, const ggml_tensor * src, ggml_tensor * dst) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + return ctx.cpy_tensor(*src, *dst); + } + void buffer_clear(ggml_backend_buffer_t buf, uint8_t value) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + ctx.clear(value); + } + void buffer_reset(ggml_backend_buffer_t buf) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + ctx.reset(); + } + + //========================================================= + // wrapppers for ggml_backend_buffer_type + const char *buffer_type_get_name (ggml_backend_buffer_type_t buft) { + auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context)); + return ctx.get_name().c_str(); + } + std::size_t buffer_type_get_alignment (ggml_backend_buffer_type_t buft) { + auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context)); + return ctx.get_alignment(); + } + 
std::size_t buffer_type_get_max_size (ggml_backend_buffer_type_t buft) { + auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context)); + return ctx.get_max_size(); + } + std::size_t buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { + auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context)); + return ctx.get_alloc_size(*tensor); + } + bool buffer_type_is_host (ggml_backend_buffer_type_t buft) { + auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context)); + return ctx.is_host(); + } + ggml_backend_buffer_t buffer_type_alloc_buffer (ggml_backend_buffer_type_t buft, std::size_t size) { + auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context)); + return c_wrapper(buft, ctx.alloc_buffer(size), size); + } + + //========================================================= + // wrapppers for ggml_backend + const char * backend_get_name(ggml_backend_t bkd) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + return ctx.get_name().c_str(); + } + void backend_free(ggml_backend_t backend) { + auto* ctx = (ggml::cpp::backend::backend*) (backend->context); + delete ctx; + delete backend; + } + void backend_set_tensor_async(ggml_backend_t bkd, ggml_tensor * tensor, const void * data, std::size_t offset, std::size_t size) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + ctx.set_tensor_async(*tensor, data, offset, size); + } + void backend_get_tensor_async(ggml_backend_t bkd, const ggml_tensor * tensor, void * data, std::size_t offset, std::size_t size) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + ctx.get_tensor_async(*tensor, data, offset, size); + } + bool backend_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const ggml_tensor * src, ggml_tensor * dst) { + auto& ctx = *((ggml::cpp::backend::backend*) (backend_dst->context)); + return ctx.cpy_tensor_async(backend_src, *src, *dst); + } + void backend_synchronize(ggml_backend_t 
bkd) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + ctx.synchronize(); + } + enum ggml_status backend_graph_compute(ggml_backend_t bkd, ggml_cgraph * cgraph) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + return ctx.graph_compute(*cgraph); + } + void backend_event_record(ggml_backend_t bkd, ggml_backend_event_t evt) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + ctx.event_record(*((event*) evt)); + } + void backend_event_wait (ggml_backend_t bkd, ggml_backend_event_t evt) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + ctx.event_wait(*((event*) evt)); + } + void backend_set_n_threads(ggml_backend_t bkd, int n_threads) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + ctx.set_n_threads(n_threads); + } + + //========================================================= + // wrapppers for ggml_backend_device + const char * device_get_name(ggml_backend_dev_t dev) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return ctx.get_name().c_str(); + } + const char * device_get_description(ggml_backend_dev_t dev) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return ctx.get_description().c_str(); + } + void device_get_memory(ggml_backend_dev_t dev, std::size_t * free, std::size_t * total) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + ctx.get_memory(*free, *total); + } + enum ggml_backend_dev_type device_get_type(ggml_backend_dev_t dev) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return ctx.get_type(); + } + void device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + props->name = ctx.get_name().c_str(); + props->description = ctx.get_description().c_str(); + ctx.get_memory(props->memory_free, props->memory_total); + props->type = ctx.get_type(); + props->caps.async = ctx.caps_async(); + props->caps.host_buffer 
= ctx.caps_host_buffer(); + props->caps.buffer_from_host_ptr = ctx.caps_buffer_from_host_ptr(); + props->caps.events = ctx.caps_events(); + } + ggml_backend_t device_init_backend(ggml_backend_dev_t dev, const char * params) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return c_wrapper(dev, &ctx.init_backend(params?params:"")); + } + ggml_backend_buffer_type_t device_get_buffer_type(ggml_backend_dev_t dev) { + // Note: nothing to delete it. + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return c_wrapper(dev, &ctx.get_buffer_type()); + } + ggml_backend_buffer_type_t device_get_host_buffer_type(ggml_backend_dev_t dev) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + auto* bft = ctx.get_host_buffer_type(); + if (bft) { + return c_wrapper(dev, bft); + } + return nullptr; + } + ggml_backend_buffer_t device_buffer_from_host_ptr(ggml_backend_dev_t dev, void * ptr, std::size_t size, std::size_t max_tensor_size) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + auto* bft = ctx.get_from_host_ptr_buffer_type(); + if (!bft) { return nullptr; } + auto* buf = bft->register_buffer(ptr, size, max_tensor_size); + if (!buf) { return nullptr; } + // comment / ou memoriser ce wrapper, il n'y a pas de "delete" + auto * ggml_buf_type = c_wrapper(dev, bft); + return c_wrapper(ggml_buf_type, buf, size); + } + bool device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return ctx.supports_op(*op); + } + bool device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return ctx.supports_buft(buft /*->context*/); + } + bool device_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return ctx.offload_op(*op); + } + ggml_backend_event_t device_event_new (ggml_backend_dev_t dev) { + 
auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + auto* evt = ctx.event_new(); + if (!evt) { return nullptr; } + return new ggml_backend_event { + dev, + evt, + }; + } + + void device_event_free(ggml_backend_dev_t /*dev*/, ggml_backend_event_t evt_c) { + auto* evt_cpp = (event*)(evt_c->context); + delete evt_cpp; + delete evt_c; + } + + void device_event_synchronize(ggml_backend_dev_t dev, ggml_backend_event_t evt_c) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + auto* evt_cpp = (event*)(evt_c->context); + ctx.event_synchronize(*evt_cpp); + } + + //========================================================= + // wrapppers for ggml_backend_reg + const char * reg_get_name(ggml_backend_reg_t reg) { + auto& ctx = *((ggml::cpp::backend::reg*) (reg->context)); + return ctx.get_name().c_str(); + } + std::size_t reg_get_device_count(ggml_backend_reg_t reg) { + auto& ctx = *((ggml::cpp::backend::reg*) (reg->context)); + return ctx.get_device_count(); + } + ggml_backend_dev_t reg_get_device(ggml_backend_reg_t reg, std::size_t index) { + auto& ctx = *((ggml::cpp::backend::reg*) (reg->context)); + return c_wrapper(reg, &ctx.get_device(index)); + } + void * reg_get_proc_address(ggml_backend_reg_t /*reg*/, const char * cname) { + const auto name = std::string(cname); + if (name == "ggml_backend_set_n_threads") { + return (void *)backend_set_n_threads; + } + if (name == "ggml_backend_dev_get_extra_bufts") { + return (void*) backend_dev_get_extra_bufts; + } + return nullptr; + } + + } + + // les destructeurs... + buffer::~buffer() {} + buffer_type::~buffer_type() {} + event::~event() {} + backend::backend(device& dev): m_device(dev) {} + backend::~backend() { } + device::~device() { + // TODO: il faut detruire des wrapper des buffer_type??? + } + reg::~reg() {} + + // non virtual fct: + void device::register_extra_buffer_type(buffer_type* buft) { + GGML_ASSERT(m_ggml_extra_buffers_type.size() == 0); // pas encore initialisé! 
+ m_extra_buffers_type.push_back(buft); + } + + //========================================================= + // the wrappers + ggml_backend_buffer_t c_wrapper(ggml_backend_buffer_type_t buft, buffer* ctx, std::size_t size) { + if (!ctx) { return nullptr; } + return new ggml_backend_buffer { + /* .interface = */ { + /* .free_buffer = */ buffer_free_buffer, + /* .get_base = */ buffer_get_base, + /* .init_tensor = */ buffer_init_tensor, + /* .memset_tensor = */ buffer_memset_tensor, + /* .set_tensor = */ buffer_set_tensor, + /* .get_tensor = */ buffer_get_tensor, + /* .cpy_tensor = */ buffer_cpy_tensor, + /* .clear = */ buffer_clear, + /* .reset = */ buffer_reset, + }, + /* .buft = */ buft, + /* .context = */ ctx, + /* .size = */ size, + /* .usage = */ GGML_BACKEND_BUFFER_USAGE_ANY + }; + } + + struct buffer_type_deleter { + void operator()(ggml_backend_buffer_type* c_buffer_type) { + delete (c_buffer_type); + } + }; + typedef std::unique_ptr c_buffer_type_ptr; + + ggml_backend_buffer_type_t c_wrapper(ggml_backend_dev_t device, buffer_type* ctx) { + // the ctx have to be "static". + static std::map map; + if (!ctx) { return nullptr; } + + auto it = map.find(ctx); + // add new wrapper if not find. 
+        if (it == map.end()) {
+            auto* wrapper = new ggml_backend_buffer_type {
+                /* .iface = */ {
+                    /* .get_name       = */ buffer_type_get_name,
+                    /* .alloc_buffer   = */ buffer_type_alloc_buffer,
+                    /* .get_alignment  = */ buffer_type_get_alignment,
+                    /* .get_max_size   = */ buffer_type_get_max_size,
+                    /* .get_alloc_size = */ buffer_type_get_alloc_size,
+                    /* .is_host        = */ buffer_type_is_host,
+                },
+                /* .device  = */ device,
+                /* .context = */ ctx,
+            };
+            map[ctx] = c_buffer_type_ptr(wrapper);
+            return wrapper;
+        }
+        return it->second.get();
+    }
+
+    // wrap a C++ backend; the async/event interface entries are only exposed
+    // when the owning device reports the matching capability
+    ggml_backend_t c_wrapper(ggml_backend_dev_t device, backend* ctx) {
+        if (!ctx) { return nullptr; }
+        auto& dev = *((ggml::cpp::backend::device*) (device->context));
+        return new ggml_backend {
+            // cast restored (stripped during extraction): get_guid() returns a
+            // const pointer, the C struct stores a mutable ggml_guid_t
+            /* .guid = */ const_cast<ggml_guid_t>(ctx->get_guid()),
+            /* .iface = */ {
+                /* .get_name           = */ backend_get_name,
+                /* .free               = */ backend_free,
+                /* .set_tensor_async   = */ dev.caps_async() ? backend_set_tensor_async : nullptr,
+                /* .get_tensor_async   = */ dev.caps_async() ? backend_get_tensor_async : nullptr,
+                /* .cpy_tensor_async   = */ dev.caps_async() ? backend_cpy_tensor_async : nullptr,
+                /* .synchronize        = */ dev.caps_async() ? backend_synchronize : nullptr,
+                /* .graph_plan_create  = */ nullptr,
+                /* .graph_plan_free    = */ nullptr,
+                /* .graph_plan_update  = */ nullptr,
+                /* .graph_plan_compute = */ nullptr,
+                /* .graph_compute      = */ backend_graph_compute,
+                /* .event_record       = */ dev.caps_events() ? backend_event_record : nullptr,
+                /* .event_wait         = */ dev.caps_events() ?
backend_event_wait : nullptr,
+                /* .graph_optimize = */ nullptr,
+            },
+            /* .device  = */ device,
+            /* .context = */ ctx
+        };
+    }
+
+    struct device_deleter {
+        void operator()(ggml_backend_device* c_device) {
+            delete (c_device);
+        }
+    };
+    // template arguments restored (stripped during extraction)
+    using c_device_ptr = std::unique_ptr<ggml_backend_device, device_deleter>;
+
+    // wrap a C++ device; cached so each device gets exactly one C wrapper
+    ggml_backend_dev_t c_wrapper(ggml_backend_reg_t reg, device* ctx) {
+        // the ctx has to be "static" / "per backend_register"
+        static std::map<device*, c_device_ptr> map;
+        if (!ctx) { return nullptr; }
+
+        auto it = map.find(ctx);
+        if (it == map.end()) {
+            auto* wrapper = new ggml_backend_device {
+                /* .iface = */ {
+                    /* .get_name             = */ device_get_name,
+                    /* .get_description      = */ device_get_description,
+                    /* .get_memory           = */ device_get_memory,
+                    /* .get_type             = */ device_get_type,
+                    /* .get_props            = */ device_get_props,
+                    /* .init_backend         = */ device_init_backend,
+                    /* .get_buffer_type      = */ device_get_buffer_type,
+                    /* .get_host_buffer_type = */ ctx->caps_host_buffer() ? device_get_host_buffer_type : nullptr,
+                    /* .buffer_from_host_ptr = */ ctx->caps_buffer_from_host_ptr() ? device_buffer_from_host_ptr : nullptr,
+                    /* .supports_op          = */ device_supports_op,
+                    /* .supports_buft        = */ device_supports_buft,
+                    /* .offload_op           = */ device_offload_op,
+                    /* .event_new            = */ ctx->caps_events() ? device_event_new : nullptr,
+                    /* .event_free           = */ ctx->caps_events() ? device_event_free : nullptr,
+                    /* .event_synchronize    = */ ctx->caps_events() ? device_event_synchronize : nullptr,
+                },
+                /* .reg     = */ reg,
+                /* .context = */ ctx,
+            };
+            map[ctx] = c_device_ptr(wrapper);
+            return wrapper;
+        }
+        return it->second.get();
+    }
+
+    struct register_deleter {
+        void operator()(ggml_backend_reg_t c_register) {
+            delete (c_register);
+        }
+    };
+    // template arguments restored (stripped during extraction)
+    using c_register_ptr = std::unique_ptr<ggml_backend_reg, register_deleter>;
+
+    // wrap a C++ registry; cached like the device wrappers above
+    ggml_backend_reg_t c_wrapper(reg* ctx) {
+        // the ctx has to be static.
+ static std::map map; + if (!ctx) { return nullptr; } + + auto it = map.find(ctx); + if (it == map.end()) { + auto* wrapper = new ggml_backend_reg { + /* .api_version = */ GGML_BACKEND_API_VERSION, + /* .iface = */ { + /* .get_name = */ reg_get_name, + /* .get_device_count = */ reg_get_device_count, + /* .get_device = */ reg_get_device, + /* .get_proc_address = */ reg_get_proc_address, + }, + /* .context = */ ctx, + }; + map[ctx] = c_register_ptr(wrapper); + //map[ctx] = wrapper; + return wrapper; + } + return it->second.get(); + //return it->second; + } + +} + + +// for simple CPU buffer: +namespace ggml::cpp::backend::cpu { + + // buffer + class buffer : public ggml::cpp::backend::buffer { + uint8_t* m_data = nullptr; + const std::size_t m_size; + + public: + buffer(std::size_t size, std::size_t alignment): m_size(size) { + m_data = new (std::align_val_t(alignment)) uint8_t[m_size]; + } + + buffer(void* ptr, std::size_t /*size*/): m_size(0) { + m_data = (uint8_t*) ptr; + } + + virtual ~buffer() { + if (m_size>0 && m_data) { + delete[] m_data; + } + m_data = nullptr; + } + + void* get_base() override { + return m_data; + } + + void memset_tensor(ggml_tensor & tensor, uint8_t value, std::size_t offset, std::size_t size) override { + memset((uint8_t *) tensor.data + offset, value, size); + } + + void set_tensor(ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) override { + memcpy((uint8_t *)tensor.data + offset, data, size); + } + + void get_tensor(const ggml_tensor & tensor, void * data, std::size_t offset, std::size_t size) override { + memcpy(data, (uint8_t *)tensor.data + offset, size); + } + + bool cpy_tensor (const ggml_tensor & src, ggml_tensor & dst) override { + if (ggml_backend_buffer_is_host(src.buffer)) { + memcpy(dst.data, src.data, ggml_nbytes(&src)); + return true; + } + return false; + } + + void clear (uint8_t value) override { + memset(m_data, value, m_size); + } + }; + + // buffer_type + class buffer_type : public 
ggml::cpp::backend::buffer_type { + const std::string m_name; + const std::size_t m_alignment; + const bool m_from_ptr; + + public: + buffer_type(const std::string& name, bool from_ptr, std::size_t alignment) : + m_name(name), m_alignment(alignment), m_from_ptr(from_ptr) + {} + + virtual ~buffer_type() {} + + const std::string& get_name() override { + return m_name; + } + + buffer* alloc_buffer(std::size_t size) override { + GGML_ASSERT(!m_from_ptr && "buffer type not for allocatable buffer"); + return new buffer(size, m_alignment); + } + + std::size_t get_alignment() override { + return m_alignment; + } + + bool is_host() override { + return true; + } + + buffer* register_buffer(void * ptr, std::size_t size, std::size_t /*max_tensor_size*/) override { + GGML_ASSERT(m_from_ptr && "buffer type not for ptr memory"); + GGML_ASSERT((uintptr_t)ptr % m_alignment == 0 && "buffer pointer must be aligned"); + return new buffer(ptr, size); + } + }; + +} + +namespace ggml::cpp::backend { + + buffer_type* new_cpu_buffer_type( + const std::string& name, + bool from_ptr, + std::size_t alignment + ) { + return new ggml::cpp::backend::cpu::buffer_type(name, from_ptr, alignment); + } + +} diff --git a/ggml/src/ggml_cpp_wrapper.h b/ggml/src/ggml_cpp_wrapper.h new file mode 100644 index 0000000000..aba8971f5c --- /dev/null +++ b/ggml/src/ggml_cpp_wrapper.h @@ -0,0 +1,154 @@ +#pragma once +#ifndef __cplusplus +#error "This header is for C++ only" +#endif + +#include "ggml.h" + +#include "ggml-impl.h" +#include "ggml-backend.h" + +#include +#include + +namespace ggml::cpp::backend { + + class buffer { // ggml_backend_buffer_t + public: + virtual ~buffer(); + + virtual void* get_base() = 0; + virtual ggml_status init_tensor(ggml_tensor& /*tensor*/) { return GGML_STATUS_SUCCESS; } + + virtual void memset_tensor( ggml_tensor & tensor, uint8_t value, std::size_t offset, std::size_t size) = 0; + virtual void set_tensor ( ggml_tensor & tensor, const void * data, std::size_t offset, 
std::size_t size) = 0;
+        virtual void get_tensor (const ggml_tensor & tensor, void * data, std::size_t offset, std::size_t size) = 0;
+
+        // return true if the copy was handled, false to let the caller fall back
+        virtual bool cpy_tensor (const ggml_tensor & /*src*/, ggml_tensor & /*dst*/) { return false; }
+
+        virtual void clear (uint8_t value) = 0;
+        virtual void reset () {}
+    };
+
+    class buffer_type { // ggml_backend_buffer_type_t
+    public:
+        virtual ~buffer_type();
+
+        virtual const std::string& get_name() = 0;
+        virtual buffer* alloc_buffer(std::size_t size) = 0;
+        virtual std::size_t get_alignment() { return TENSOR_ALIGNMENT; }
+        virtual std::size_t get_max_size() { return SIZE_MAX; }
+        virtual std::size_t get_alloc_size(const ggml_tensor& tensor) { return ggml_nbytes(&tensor); }
+        virtual bool is_host() { return false; }
+        // for pointer from memory pointer:
+        virtual buffer* register_buffer(void * /*ptr*/, std::size_t /*size*/, std::size_t /*max_tensor_size*/) { return nullptr; }
+    };
+
+    // TODO: manage event
+    class event {
+    public:
+        virtual ~event();
+    };
+
+    // TODO: manage graph
+    //class graph_plan {
+    //public:
+    //    virtual ~graph_plan();
+    //};
+
+    class device;
+
+    class backend { // ggml_backend_t
+        backend() = delete; // a backend always belongs to a device
+    public:
+        backend(device& dev);
+        virtual ~backend();
+
+        virtual const std::string& get_name() = 0;
+        virtual const ggml_guid* get_guid() = 0;
+
+        // need => device::caps_async() {return true;}
+        // defaults delegate to the synchronous ggml entry points
+        virtual void set_tensor_async( ggml_tensor & tensor, const void * data, size_t offset, size_t size) { ggml_backend_tensor_set(&tensor, data, offset, size); }
+        virtual void get_tensor_async(const ggml_tensor & tensor, void * data, size_t offset, size_t size) { ggml_backend_tensor_get(&tensor, data, offset, size); }
+        virtual bool cpy_tensor_async(ggml_backend_t /*backend_src*/,/* ggml_backend_t backend_dst==this,*/ const ggml_tensor & /*src*/, ggml_tensor & /*dst*/) { return false; }
+        virtual void synchronize() {}
+
+        // TODO: manage graph
+        //virtual graph_plan& graph_plan_create(const ggml_cgraph &
cgraph); + //virtual void graph_plan_free(graph_plan& plan); + //virtual void graph_plan_update(graph_plan& plan, const ggml_cgraph & cgraph); + //virtual enum ggml_status graph_plan_compute(graph_plan& plan); + + virtual enum ggml_status graph_compute(ggml_cgraph & cgraph) = 0; + + // need => device::caps_events() { return true; } + virtual void event_record (event & /*event*/) { GGML_ASSERT(false); } + virtual void event_wait (event & /*event*/) { GGML_ASSERT(false); } + + // the extra functions: + virtual void set_n_threads(int /*n_threads*/) { } + + protected: + device& m_device; + }; + + class device { // ggml_backend_dev_t + protected: + friend ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t device); + std::vector m_extra_buffers_type; + std::vector m_ggml_extra_buffers_type; + + public: + virtual ~device(); + + virtual const std::string& get_name() = 0; + virtual const std::string& get_description() = 0; + virtual void get_memory(std::size_t & free, std::size_t & total) = 0; + virtual enum ggml_backend_dev_type get_type() = 0; + virtual backend& init_backend(const std::string& params) = 0; + virtual buffer_type& get_buffer_type() = 0; + virtual buffer_type* get_host_buffer_type() { return nullptr; } + virtual buffer_type* get_from_host_ptr_buffer_type() { return nullptr; } + + virtual bool supports_op(const ggml_tensor & op) = 0; + virtual bool supports_buft(ggml_backend_buffer_type_t buft) = 0; + virtual bool offload_op(const ggml_tensor & /*op*/) { return false; } + + // event => caps_events() { return true; } + virtual event* event_new() { return nullptr; } + virtual void event_synchronize(event& /*event*/) { GGML_ASSERT(false); } + + //void get_props(struct ggml_backend_dev_props * props); ggml_backend_dev_caps + virtual bool caps_async() { return false; } + virtual bool caps_host_buffer() { return get_host_buffer_type() != nullptr; } + virtual bool caps_buffer_from_host_ptr() { return get_from_host_ptr_buffer_type() != 
nullptr; }
+        virtual bool caps_events() { return false; }
+
+    protected:
+        // register an extra buffer type; the .cpp asserts this happens before the
+        // C-side wrapper list is built
+        void register_extra_buffer_type(buffer_type* buft);
+
+    };
+
+    class reg { // ggml_backend_reg_t
+    public:
+        virtual ~reg();
+
+        virtual const std::string& get_name() = 0;
+        virtual std::size_t get_device_count() = 0;
+        virtual device& get_device(std::size_t index) = 0;
+    };
+
+    // build the ggml C structs around the C++ implementations (see the .cpp for
+    // ownership and caching details)
+    ggml_backend_buffer_t c_wrapper(ggml_backend_buffer_type_t buft, buffer* ctx, std::size_t size);
+    ggml_backend_buffer_type_t c_wrapper(ggml_backend_dev_t device, buffer_type* ctx);
+    ggml_backend_t c_wrapper(ggml_backend_dev_t device, backend* ctx);
+    ggml_backend_dev_t c_wrapper(ggml_backend_reg_t reg, device* ctx);
+    ggml_backend_reg_t c_wrapper(reg* ctx);
+
+    // for simple cpu buffer:
+    buffer_type* new_cpu_buffer_type(
+        const std::string& name,
+        bool from_ptr=false,
+        std::size_t alignment = TENSOR_ALIGNMENT
+    );
+
+}
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index e8e1bbf1cd..598fc409da 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -268,9 +268,6 @@ static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode s
         }
     }
 
-    // add the device default buffer type
-    buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev));
-
     // add the device extra buffer type (if any)
     ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
     auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
@@ -284,6 +281,9 @@ static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode s
         }
     }
 
+    // add the device default buffer type
+    buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev));
+
     return buft_list;
 }