From c34d053d0a4a6e73b82125ae9f73fa81d42918d7 Mon Sep 17 00:00:00 2001
From: Djip007 <3705339+Djip007@users.noreply.github.com>
Date: Sun, 21 Dec 2025 10:16:01 +0100
Subject: [PATCH 1/3] Add C++ wrapper for backend.

- a backend can now be created entirely in C++
- update cpu traits
- update cpu_repack
- update cpu_amx to use it
- extract GGML_LOG into its own header (ggml-log.h)
- correct extra_buffer registration order for GPU
---
 ggml/include/ggml.h           |   2 +-
 ggml/src/CMakeLists.txt       |   2 +
 ggml/src/ggml-backend-impl.h  |   2 +-
 ggml/src/ggml-cpu/amx/amx.cpp | 132 ++-------
 ggml/src/ggml-cpu/repack.cpp  |  84 ++----
 ggml/src/ggml-cpu/traits.cpp  |  74 +++++
 ggml/src/ggml-cpu/traits.h    |  27 ++
 ggml/src/ggml-impl.h          |  35 +--
 ggml/src/ggml-log.h           |  48 +++
 ggml/src/ggml.c               |   2 +-
 ggml/src/ggml_cpp_wrapper.cpp | 532 ++++++++++++++++++++++++++++++++++
 ggml/src/ggml_cpp_wrapper.h   | 154 ++++++++++
 src/llama-model.cpp           |   6 +-
 13 files changed, 901 insertions(+), 199 deletions(-)
 create mode 100644 ggml/src/ggml-log.h
 create mode 100644 ggml/src/ggml_cpp_wrapper.cpp
 create mode 100644 ggml/src/ggml_cpp_wrapper.h

diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index 25f9601e9b..72301af351 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -705,7 +705,7 @@ extern "C" {
     typedef uint8_t ggml_guid[16];
     typedef ggml_guid * ggml_guid_t;

-    GGML_API bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b);
+    GGML_API bool ggml_guid_matches(const ggml_guid * guid_a, const ggml_guid * guid_b);

     // misc

diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt
index 78853304d9..2046c9d8d3 100644
--- a/ggml/src/CMakeLists.txt
+++ b/ggml/src/CMakeLists.txt
@@ -200,6 +200,8 @@ add_library(ggml-base
             ggml.cpp
             ggml-alloc.c
             ggml-backend.cpp
+            ggml_cpp_wrapper.cpp
+            ggml_cpp_wrapper.h
             ggml-opt.cpp
             ggml-threading.cpp
             ggml-threading.h
diff --git a/ggml/src/ggml-backend-impl.h b/ggml/src/ggml-backend-impl.h
index 59190b7c46..2a98d66ffb 100644
--- a/ggml/src/ggml-backend-impl.h
+++ b/ggml/src/ggml-backend-impl.h
@@ -120,7 +120,7 @@ extern "C" {
     };

     struct ggml_backend {
-        ggml_guid_t guid;
+        const ggml_guid_t guid;
         struct ggml_backend_i iface;
         ggml_backend_dev_t device;
         void * context;
diff --git a/ggml/src/ggml-cpu/amx/amx.cpp b/ggml/src/ggml-cpu/amx/amx.cpp
index 9baf3e025e..1caf25f685 100644
--- a/ggml/src/ggml-cpu/amx/amx.cpp
+++ b/ggml/src/ggml-cpu/amx/amx.cpp
@@ -35,111 +35,52 @@ class tensor_traits : public ggml::cpu::tensor_traits {
     }
 };

-static ggml::cpu::tensor_traits * get_tensor_traits(ggml_backend_buffer_t, struct ggml_tensor *) {
+static ggml::cpu::tensor_traits * get_tensor_traits(struct ggml_tensor *) {
     static tensor_traits traits;
     return &traits;
 }

 }  // namespace ggml::cpu::amx

-// AMX buffer interface
-static void ggml_backend_amx_buffer_free_buffer(ggml_backend_buffer_t buffer) {
-    free(buffer->context);
-}
+namespace ggml::cpu::amx {

-static void * ggml_backend_amx_buffer_get_base(ggml_backend_buffer_t buffer) {
-    return (void *) (buffer->context);
-}
+// AMX buffer
+class buffer : public ggml::cpu::buffer {
+public:
+    buffer(std::size_t size) : ggml::cpu::buffer(size) { }

-static enum ggml_status ggml_backend_amx_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
-    tensor->extra = (void *) ggml::cpu::amx::get_tensor_traits(buffer, tensor);
+    virtual ~buffer() { }

-    GGML_UNUSED(buffer);
-    return GGML_STATUS_SUCCESS;
-}
-
-static void ggml_backend_amx_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
-                                                  uint8_t value, size_t offset, size_t size) {
-    memset((char *) tensor->data
+ offset, value, size); - - GGML_UNUSED(buffer); -} - -static void ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, - const void * data, size_t offset, size_t size) { - if (qtype_has_amx_kernels(tensor->type)) { - GGML_LOG_DEBUG("%s: amx repack tensor %s of type %s\n", __func__, tensor->name, ggml_type_name(tensor->type)); - ggml_backend_amx_convert_weight(tensor, data, offset, size); - } else { - memcpy((char *) tensor->data + offset, data, size); + ggml_status init_tensor(ggml_tensor& tensor) override { + tensor->extra = (void *) ggml::cpu::amx::get_tensor_traits(&tensor); + return GGML_STATUS_SUCCESS; } - GGML_UNUSED(buffer); -} - -/* -// need to figure what we need to do with buffer->extra. -static void ggml_backend_amx_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { - GGML_ASSERT(!qtype_has_amx_kernels(tensor->type)); - memcpy(data, (const char *)tensor->data + offset, size); - - GGML_UNUSED(buffer); -} - -static bool ggml_backend_amx_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { - if (ggml_backend_buffer_is_host(src->buffer)) { - if (qtype_has_amx_kernels(src->type)) { - ggml_backend_amx_convert_weight(dst, src->data, 0, ggml_nbytes(dst)); + void set_tensor(ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) override { + if (qtype_has_amx_kernels(tensor.type)) { + GGML_LOG_DEBUG("%s: amx repack tensor %s of type %s\n", __func__, tensor.name, ggml_type_name(tensor.type)); + ggml_backend_amx_convert_weight(&tensor, data, offset, size); } else { - memcpy(dst->data, src->data, ggml_nbytes(src)); + memcpy((char *) tensor.data + offset, data, size); } - return true; } - return false; - GGML_UNUSED(buffer); -} -*/ - -static void ggml_backend_amx_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { - memset(buffer->context, value, buffer->size); -} - -static ggml_backend_buffer_i ggml_backend_amx_buffer_interface = { - /* .free_buffer = */ ggml_backend_amx_buffer_free_buffer, - /* .get_base = */ ggml_backend_amx_buffer_get_base, - /* .init_tensor = */ ggml_backend_amx_buffer_init_tensor, - /* .memset_tensor = */ ggml_backend_amx_buffer_memset_tensor, - /* .set_tensor = */ ggml_backend_amx_buffer_set_tensor, - /* .get_tensor = */ nullptr, - /* .cpy_tensor = */ nullptr, - /* .clear = */ ggml_backend_amx_buffer_clear, - /* .reset = */ nullptr, }; -static const char * ggml_backend_amx_buffer_type_get_name(ggml_backend_buffer_type_t buft) { - return "AMX"; +class extra_buffer_type : ggml::cpu::extra_buffer_type { - GGML_UNUSED(buft); -} - -static ggml_backend_buffer_t ggml_backend_amx_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { - void * data = ggml_aligned_malloc(size); - if (data == NULL) { - fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size); - return NULL; + const std::string& get_name() override { + static const std::string name {"AMX"}; + return name; } - return ggml_backend_buffer_init(buft, ggml_backend_amx_buffer_interface, data, size); -} + ggml::cpp::backend::buffer* alloc_buffer(std::size_t size) override { + return new buffer(size); + } -static size_t ggml_backend_amx_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { - return TENSOR_ALIGNMENT; + std::size_t get_alloc_size(const ggml_tensor& tensor) override { + return ggml_backend_amx_get_alloc_size(&tensor); + } - GGML_UNUSED(buft); -} - -namespace 
ggml::cpu::amx { -class extra_buffer_type : ggml::cpu::extra_buffer_type { bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { if (op->op != GGML_OP_MUL_MAT) { return false; @@ -198,12 +139,6 @@ class extra_buffer_type : ggml::cpu::extra_buffer_type { }; } // namespace ggml::cpu::amx -static size_t ggml_backend_amx_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { - return ggml_backend_amx_get_alloc_size(tensor); - - GGML_UNUSED(buft); -} - #define ARCH_GET_XCOMP_PERM 0x1022 #define ARCH_REQ_XCOMP_PERM 0x1023 #define XFEATURE_XTILECFG 17 @@ -224,24 +159,11 @@ static bool ggml_amx_init() { } ggml_backend_buffer_type_t ggml_backend_amx_buffer_type() { - static struct ggml_backend_buffer_type ggml_backend_buffer_type_amx = { - /* .iface = */ { - /* .get_name = */ ggml_backend_amx_buffer_type_get_name, - /* .alloc_buffer = */ ggml_backend_amx_buffer_type_alloc_buffer, - /* .get_alignment = */ ggml_backend_amx_buffer_type_get_alignment, - /* .get_max_size = */ nullptr, // defaults to SIZE_MAX - /* .get_alloc_size = */ ggml_backend_amx_buffer_type_get_alloc_size, - /* .is_host = */ nullptr, - }, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), - /* .context = */ new ggml::cpu::amx::extra_buffer_type(), - }; - + static auto* buffer_type = ggml::cpu::c_wrapper(new ggml::cpu::amx::extra_buffer_type()); if (!ggml_amx_init()) { return nullptr; } - - return &ggml_backend_buffer_type_amx; + return buffer_type; } #endif // defined(__AMX_INT8__) && defined(__AVX512VNNI__) diff --git a/ggml/src/ggml-cpu/repack.cpp b/ggml/src/ggml-cpu/repack.cpp index 6b76ab3bfb..d5f6948974 100644 --- a/ggml/src/ggml-cpu/repack.cpp +++ b/ggml/src/ggml-cpu/repack.cpp @@ -4720,54 +4720,42 @@ static const ggml::cpu::tensor_traits * ggml_repack_get_optimal_repack_type(cons return nullptr; } -static enum ggml_status ggml_backend_cpu_repack_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { - tensor->extra = (void *) const_cast(ggml_repack_get_optimal_repack_type(tensor)); +namespace ggml::cpu::repack { - GGML_UNUSED(buffer); - return GGML_STATUS_SUCCESS; -} +class buffer : public ggml::cpu::buffer { +public: + buffer(std::size_t size) : ggml::cpu::buffer(size) { } -static void ggml_backend_cpu_repack_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, - const void * data, size_t offset, size_t size) { - GGML_ASSERT(offset == 0); - GGML_ASSERT(size == ggml_nbytes(tensor)); + virtual ~buffer() { } - auto tensor_traits = (ggml::cpu::repack::tensor_traits_base *) tensor->extra; - auto OK = tensor_traits->repack(tensor, data, size); - - GGML_ASSERT(OK == 0); - GGML_UNUSED(buffer); -} - -static const char * ggml_backend_cpu_repack_buffer_type_get_name(ggml_backend_buffer_type_t buft) { - return "CPU_REPACK"; - - GGML_UNUSED(buft); -} - -static ggml_backend_buffer_t ggml_backend_cpu_repack_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { - ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); - - if (buffer == nullptr) { - return nullptr; + ggml_status init_tensor(ggml_tensor & tensor) override { + tensor.extra = (void *) const_cast(ggml_repack_get_optimal_repack_type(&tensor)); + return GGML_STATUS_SUCCESS; } - buffer->buft = buft; - buffer->iface.init_tensor = ggml_backend_cpu_repack_buffer_init_tensor; - buffer->iface.set_tensor = ggml_backend_cpu_repack_buffer_set_tensor; - buffer->iface.get_tensor = nullptr; - 
buffer->iface.cpy_tensor = nullptr; - return buffer; -} + void set_tensor(ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) override { + GGML_ASSERT(offset == 0); + GGML_ASSERT(size == ggml_nbytes(&tensor)); -static size_t ggml_backend_cpu_repack_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { - return TENSOR_ALIGNMENT; + auto tensor_traits = (ggml::cpu::repack::tensor_traits_base *) tensor.extra; + auto OK = tensor_traits->repack(&tensor, data, size); - GGML_UNUSED(buft); -} + GGML_ASSERT(OK == 0); + } +}; + +class extra_buffer_type : public ggml::cpu::extra_buffer_type { +public: + + const std::string& get_name() override { + static const std::string name {"CPU_REPACK"}; + return name; + } + + ggml::cpp::backend::buffer* alloc_buffer(std::size_t size) override { + return new buffer(size); + } -namespace ggml::cpu::repack { -class extra_buffer_type : ggml::cpu::extra_buffer_type { bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { if ( op->op == GGML_OP_MUL_MAT && op->src[0]->buffer && @@ -4816,18 +4804,6 @@ class extra_buffer_type : ggml::cpu::extra_buffer_type { } // namespace ggml::cpu::repack ggml_backend_buffer_type_t ggml_backend_cpu_repack_buffer_type(void) { - static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_repack = { - /* .iface = */ { - /* .get_name = */ ggml_backend_cpu_repack_buffer_type_get_name, - /* .alloc_buffer = */ ggml_backend_cpu_repack_buffer_type_alloc_buffer, - /* .get_alignment = */ ggml_backend_cpu_repack_buffer_type_get_alignment, - /* .get_max_size = */ nullptr, // defaults to SIZE_MAX - /* .get_alloc_size = */ nullptr, // defaults to ggml_nbytes - /* .is_host = */ nullptr, - }, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), - /* .context = */ new ggml::cpu::repack::extra_buffer_type(), - }; - - return &ggml_backend_cpu_buffer_type_repack; + static auto* buffer_type = ggml::cpu::c_wrapper(new ggml::cpu::repack::extra_buffer_type()); + return buffer_type; } diff --git a/ggml/src/ggml-cpu/traits.cpp b/ggml/src/ggml-cpu/traits.cpp index 4f32f10255..65349ab381 100644 --- a/ggml/src/ggml-cpu/traits.cpp +++ b/ggml/src/ggml-cpu/traits.cpp @@ -2,11 +2,85 @@ #include "ggml-backend-impl.h" #include "ggml-backend.h" +#include "ggml-cpu.h" + +#include namespace ggml::cpu { + +buffer::buffer(std::size_t size) : m_size(size) { + m_data = new (std::align_val_t(32)) uint8_t[m_size]; + GGML_ASSERT(m_data); +} + +buffer::~buffer() { + delete [] m_data; +} + +void* buffer::get_base() { + return m_data; +} + +void buffer::memset_tensor(ggml_tensor & tensor, uint8_t value, std::size_t offset, std::size_t size) { + GGML_ASSERT(value == 0); + memset((uint8_t *) tensor.data + offset, value, size); +} + +void buffer::get_tensor(const ggml_tensor &, void *, std::size_t, std::size_t size) { + GGML_ASSERT(size == 0); +} + +void buffer::clear(uint8_t value) { + memset(m_data, value, m_size); +} + tensor_traits::~tensor_traits() {} extra_buffer_type::~extra_buffer_type() {} + +namespace { + const char *buffer_type_get_name (ggml_backend_buffer_type_t buft) { + auto& ctx = *((extra_buffer_type*) (buft->context)); + return ctx.get_name().c_str(); + } + std::size_t buffer_type_get_alignment (ggml_backend_buffer_type_t buft) { + auto& ctx = *((extra_buffer_type*) (buft->context)); + return ctx.get_alignment(); + } + std::size_t buffer_type_get_max_size (ggml_backend_buffer_type_t buft) { + auto& ctx = *((extra_buffer_type*) (buft->context)); + return ctx.get_max_size(); + } + 
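+    // Each of these free functions is a thin C trampoline: it recovers the C++
+    // extra_buffer_type object stored in buft->context and forwards the call,
+    // so the plain-C function-pointer table in ggml_backend_buffer_type can
+    // stay unchanged.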
std::size_t buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { + auto& ctx = *((extra_buffer_type*) (buft->context)); + return ctx.get_alloc_size(*tensor); + } + bool buffer_type_is_host(ggml_backend_buffer_type_t /*buft*/) { + return false; + } + ggml_backend_buffer_t buffer_type_alloc_buffer (ggml_backend_buffer_type_t buft, std::size_t size) { + auto& ctx = *((extra_buffer_type*) (buft->context)); + return c_wrapper(buft, ctx.alloc_buffer(size), size); + } +} + +ggml_backend_buffer_type_t c_wrapper(extra_buffer_type* ctx) { + if (!ctx) { return nullptr; } + return new ggml_backend_buffer_type { + /* .iface = */ { + /* .get_name = */ buffer_type_get_name, + /* .alloc_buffer = */ buffer_type_alloc_buffer, + /* .get_alignment = */ buffer_type_get_alignment, + /* .get_max_size = */ buffer_type_get_max_size, + /* .get_alloc_size = */ buffer_type_get_alloc_size, + /* .is_host = */ buffer_type_is_host, + }, + /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), + /* .context = */ ctx, + }; +} + + } // namespace ggml::cpu bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) { diff --git a/ggml/src/ggml-cpu/traits.h b/ggml/src/ggml-cpu/traits.h index f4e0990ddf..27cedfdbd1 100644 --- a/ggml/src/ggml-cpu/traits.h +++ b/ggml/src/ggml-cpu/traits.h @@ -4,7 +4,9 @@ #include "ggml.h" #ifdef __cplusplus +# include "ggml_cpp_wrapper.h" # include +# include extern "C" { #endif @@ -24,12 +26,37 @@ class tensor_traits { virtual bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) = 0; }; +// a simple buffer for cpu +class buffer : public ggml::cpp::backend::buffer { +public: + buffer(std::size_t size); + virtual ~buffer(); + void* get_base() override; + void memset_tensor(ggml_tensor & tensor, uint8_t value, std::size_t offset, std::size_t size) override; + void get_tensor(const ggml_tensor &, void *, std::size_t, std::size_t size) override; + void clear(uint8_t value) override; +protected: + const std::size_t m_size; + uint8_t* m_data; +}; + class extra_buffer_type { public: virtual ~extra_buffer_type(); + // the base buffer_type fct + virtual const std::string& get_name() = 0; + virtual ggml::cpp::backend::buffer* alloc_buffer(std::size_t size) = 0; + virtual std::size_t get_alignment() { return TENSOR_ALIGNMENT; } + virtual std::size_t get_max_size() { return SIZE_MAX; } + virtual std::size_t get_alloc_size(const ggml_tensor& tensor) { return ggml_nbytes(&tensor); } + + // the extra fct virtual bool supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) = 0; virtual tensor_traits * get_tensor_traits(const struct ggml_tensor * op) = 0; }; + +ggml_backend_buffer_type_t c_wrapper(extra_buffer_type* ctx); + } // namespace ggml::cpu // implemented in ggml-cpu.cpp. diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h index 9256865595..2c50dc32cd 100644 --- a/ggml/src/ggml-impl.h +++ b/ggml/src/ggml-impl.h @@ -4,6 +4,7 @@ #include "ggml.h" #include "gguf.h" +#include "ggml-log.h" #include #include @@ -105,40 +106,6 @@ static inline bool ggml_impl_is_view(const struct ggml_tensor * t) { static inline float ggml_compute_softplus_f32(float input) { return (input > 20.0f) ? input : logf(1 + expf(input)); } -// -// logging -// - -GGML_ATTRIBUTE_FORMAT(2, 3) -GGML_API void ggml_log_internal (enum ggml_log_level level, const char * format, ...); -GGML_API void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data); - -#define GGML_LOG(...) 
ggml_log_internal(GGML_LOG_LEVEL_NONE , __VA_ARGS__) -#define GGML_LOG_INFO(...) ggml_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__) -#define GGML_LOG_WARN(...) ggml_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__) -#define GGML_LOG_ERROR(...) ggml_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__) -#define GGML_LOG_DEBUG(...) ggml_log_internal(GGML_LOG_LEVEL_DEBUG, __VA_ARGS__) -#define GGML_LOG_CONT(...) ggml_log_internal(GGML_LOG_LEVEL_CONT , __VA_ARGS__) - -#define GGML_DEBUG 0 - -#if (GGML_DEBUG >= 1) -#define GGML_PRINT_DEBUG(...) GGML_LOG_DEBUG(__VA_ARGS__) -#else -#define GGML_PRINT_DEBUG(...) -#endif - -#if (GGML_DEBUG >= 5) -#define GGML_PRINT_DEBUG_5(...) GGML_LOG_DEBUG(__VA_ARGS__) -#else -#define GGML_PRINT_DEBUG_5(...) -#endif - -#if (GGML_DEBUG >= 10) -#define GGML_PRINT_DEBUG_10(...) GGML_LOG_DEBUG(__VA_ARGS__) -#else -#define GGML_PRINT_DEBUG_10(...) -#endif // tensor params diff --git a/ggml/src/ggml-log.h b/ggml/src/ggml-log.h new file mode 100644 index 0000000000..0edcc81c7e --- /dev/null +++ b/ggml/src/ggml-log.h @@ -0,0 +1,48 @@ +#pragma once + +#include "ggml.h" + +// GGML internal header + +#ifdef __cplusplus +extern "C" { +#endif + +// +// logging: implemented in ggml.c +// + +GGML_ATTRIBUTE_FORMAT(2, 3) +GGML_API void ggml_log_internal (enum ggml_log_level level, const char * format, ...); +GGML_API void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data); + +#define GGML_LOG(...) ggml_log_internal(GGML_LOG_LEVEL_NONE , __VA_ARGS__) +#define GGML_LOG_INFO(...) ggml_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__) +#define GGML_LOG_WARN(...) ggml_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__) +#define GGML_LOG_ERROR(...) ggml_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__) +#define GGML_LOG_DEBUG(...) ggml_log_internal(GGML_LOG_LEVEL_DEBUG, __VA_ARGS__) +#define GGML_LOG_CONT(...) ggml_log_internal(GGML_LOG_LEVEL_CONT , __VA_ARGS__) + +#define GGML_DEBUG 0 + +#if (GGML_DEBUG >= 1) +#define GGML_PRINT_DEBUG(...) GGML_LOG_DEBUG(__VA_ARGS__) +#else +#define GGML_PRINT_DEBUG(...) +#endif + +#if (GGML_DEBUG >= 5) +#define GGML_PRINT_DEBUG_5(...) GGML_LOG_DEBUG(__VA_ARGS__) +#else +#define GGML_PRINT_DEBUG_5(...) +#endif + +#if (GGML_DEBUG >= 10) +#define GGML_PRINT_DEBUG_10(...) GGML_LOG_DEBUG(__VA_ARGS__) +#else +#define GGML_PRINT_DEBUG_10(...) +#endif + +#ifdef __cplusplus +} +#endif diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index e5b83e1447..ce6854dc3a 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -492,7 +492,7 @@ void ggml_fp32_to_bf16_row(const float * x, ggml_bf16_t * y, int64_t n) { } } -bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b) { +bool ggml_guid_matches(const ggml_guid * guid_a, const ggml_guid * guid_b) { return memcmp(guid_a, guid_b, sizeof(ggml_guid)) == 0; } diff --git a/ggml/src/ggml_cpp_wrapper.cpp b/ggml/src/ggml_cpp_wrapper.cpp new file mode 100644 index 0000000000..f798eefb78 --- /dev/null +++ b/ggml/src/ggml_cpp_wrapper.cpp @@ -0,0 +1,532 @@ +#include "ggml_cpp_wrapper.h" + +#include "ggml-backend-impl.h" +#include "ggml.h" + +#include +#include +#include + +namespace ggml::cpp::backend { + +// TODO: voir si on ne cree pas une fontion static plutot que friend. 
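+// (i.e. consider making this a static member function rather than a friend)
+// Lazily builds the NULL-terminated extra-buffer-type array on first request;
+// each C++ buffer_type registered on the device gets a cached C wrapper.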
+ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t device) { + auto& ctx = *((ggml::cpp::backend::device*) (device->context)); + if (ctx.m_ggml_extra_buffers_type.size() == 0) { // need init of extra buffer wrappers + for (auto* buft : ctx.m_extra_buffers_type) { + auto* c_buft = c_wrapper(device, buft); + ctx.m_ggml_extra_buffers_type.push_back(c_buft); + } + ctx.m_ggml_extra_buffers_type.push_back(nullptr); + } + return ctx.m_ggml_extra_buffers_type.data(); +} + + namespace { // unnamed namespace + + //========================================================= + // les wrappper pour ggml_backend_buffer + void buffer_free_buffer(ggml_backend_buffer_t buf) { + auto* ctx = (ggml::cpp::backend::buffer*) (buf->context); + delete ctx; + // delete buf; NO => deleted by the core. + } + void * buffer_get_base(ggml_backend_buffer_t buf) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + return ctx.get_base(); + } + ggml_status buffer_init_tensor(ggml_backend_buffer_t buf, ggml_tensor * tensor) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + return ctx.init_tensor(*tensor); + } + void buffer_memset_tensor(ggml_backend_buffer_t buf, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + ctx.memset_tensor(*tensor, value, offset, size); + } + void buffer_set_tensor(ggml_backend_buffer_t buf, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + ctx.set_tensor(*tensor, data, offset, size); + } + void buffer_get_tensor(ggml_backend_buffer_t buf, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + ctx.get_tensor(*tensor, data, offset, size); + } + bool buffer_cpy_tensor(ggml_backend_buffer_t buf, const ggml_tensor * src, ggml_tensor * dst) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + return ctx.cpy_tensor(*src, *dst); + } + void buffer_clear(ggml_backend_buffer_t buf, uint8_t value) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + ctx.clear(value); + } + void buffer_reset(ggml_backend_buffer_t buf) { + auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context)); + ctx.reset(); + } + + //========================================================= + // wrapppers for ggml_backend_buffer_type + const char *buffer_type_get_name (ggml_backend_buffer_type_t buft) { + auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context)); + return ctx.get_name().c_str(); + } + std::size_t buffer_type_get_alignment (ggml_backend_buffer_type_t buft) { + auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context)); + return ctx.get_alignment(); + } + std::size_t buffer_type_get_max_size (ggml_backend_buffer_type_t buft) { + auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context)); + return ctx.get_max_size(); + } + std::size_t buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { + auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context)); + return ctx.get_alloc_size(*tensor); + } + bool buffer_type_is_host (ggml_backend_buffer_type_t buft) { + auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context)); + return ctx.is_host(); + } + ggml_backend_buffer_t buffer_type_alloc_buffer (ggml_backend_buffer_type_t buft, std::size_t size) { + auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context)); + return 
c_wrapper(buft, ctx.alloc_buffer(size), size); + } + + //========================================================= + // wrapppers for ggml_backend + const char * backend_get_name(ggml_backend_t bkd) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + return ctx.get_name().c_str(); + } + void backend_free(ggml_backend_t backend) { + auto* ctx = (ggml::cpp::backend::backend*) (backend->context); + delete ctx; + delete backend; + } + void backend_set_tensor_async(ggml_backend_t bkd, ggml_tensor * tensor, const void * data, std::size_t offset, std::size_t size) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + ctx.set_tensor_async(*tensor, data, offset, size); + } + void backend_get_tensor_async(ggml_backend_t bkd, const ggml_tensor * tensor, void * data, std::size_t offset, std::size_t size) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + ctx.get_tensor_async(*tensor, data, offset, size); + } + bool backend_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const ggml_tensor * src, ggml_tensor * dst) { + auto& ctx = *((ggml::cpp::backend::backend*) (backend_dst->context)); + return ctx.cpy_tensor_async(backend_src, *src, *dst); + } + void backend_synchronize(ggml_backend_t bkd) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + ctx.synchronize(); + } + enum ggml_status backend_graph_compute(ggml_backend_t bkd, ggml_cgraph * cgraph) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + return ctx.graph_compute(*cgraph); + } + void backend_event_record(ggml_backend_t bkd, ggml_backend_event_t evt) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + ctx.event_record(*((event*) evt)); + } + void backend_event_wait (ggml_backend_t bkd, ggml_backend_event_t evt) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + ctx.event_wait(*((event*) evt)); + } + void backend_set_n_threads(ggml_backend_t bkd, int n_threads) { + auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context)); + ctx.set_n_threads(n_threads); + } + + //========================================================= + // wrapppers for ggml_backend_device + const char * device_get_name(ggml_backend_dev_t dev) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return ctx.get_name().c_str(); + } + const char * device_get_description(ggml_backend_dev_t dev) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return ctx.get_description().c_str(); + } + void device_get_memory(ggml_backend_dev_t dev, std::size_t * free, std::size_t * total) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + ctx.get_memory(*free, *total); + } + enum ggml_backend_dev_type device_get_type(ggml_backend_dev_t dev) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return ctx.get_type(); + } + void device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + props->name = ctx.get_name().c_str(); + props->description = ctx.get_description().c_str(); + ctx.get_memory(props->memory_free, props->memory_total); + props->type = ctx.get_type(); + props->caps.async = ctx.caps_async(); + props->caps.host_buffer = ctx.caps_host_buffer(); + props->caps.buffer_from_host_ptr = ctx.caps_buffer_from_host_ptr(); + props->caps.events = ctx.caps_events(); + } + ggml_backend_t device_init_backend(ggml_backend_dev_t dev, const char * params) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); 
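+        // init_backend() is expected to return a reference to a heap-allocated
+        // C++ backend; backend_free() later deletes both the C++ object and
+        // its C wrapper.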
+ return c_wrapper(dev, &ctx.init_backend(params?params:"")); + } + ggml_backend_buffer_type_t device_get_buffer_type(ggml_backend_dev_t dev) { + // Note: nothing to delete it. + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return c_wrapper(dev, &ctx.get_buffer_type()); + } + ggml_backend_buffer_type_t device_get_host_buffer_type(ggml_backend_dev_t dev) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + auto* bft = ctx.get_host_buffer_type(); + if (bft) { + return c_wrapper(dev, bft); + } + return nullptr; + } + ggml_backend_buffer_t device_buffer_from_host_ptr(ggml_backend_dev_t dev, void * ptr, std::size_t size, std::size_t max_tensor_size) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + auto* bft = ctx.get_from_host_ptr_buffer_type(); + if (!bft) { return nullptr; } + auto* buf = bft->register_buffer(ptr, size, max_tensor_size); + if (!buf) { return nullptr; } + // comment / ou memoriser ce wrapper, il n'y a pas de "delete" + auto * ggml_buf_type = c_wrapper(dev, bft); + return c_wrapper(ggml_buf_type, buf, size); + } + bool device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return ctx.supports_op(*op); + } + bool device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return ctx.supports_buft(buft /*->context*/); + } + bool device_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + return ctx.offload_op(*op); + } + ggml_backend_event_t device_event_new (ggml_backend_dev_t dev) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + auto* evt = ctx.event_new(); + if (!evt) { return nullptr; } + return new ggml_backend_event { + dev, + evt, + }; + } + + void device_event_free(ggml_backend_dev_t /*dev*/, ggml_backend_event_t evt_c) { + auto* evt_cpp = (event*)(evt_c->context); + delete evt_cpp; + delete evt_c; + } + + void device_event_synchronize(ggml_backend_dev_t dev, ggml_backend_event_t evt_c) { + auto& ctx = *((ggml::cpp::backend::device*) (dev->context)); + auto* evt_cpp = (event*)(evt_c->context); + ctx.event_synchronize(*evt_cpp); + } + + //========================================================= + // wrapppers for ggml_backend_reg + const char * reg_get_name(ggml_backend_reg_t reg) { + auto& ctx = *((ggml::cpp::backend::reg*) (reg->context)); + return ctx.get_name().c_str(); + } + std::size_t reg_get_device_count(ggml_backend_reg_t reg) { + auto& ctx = *((ggml::cpp::backend::reg*) (reg->context)); + return ctx.get_device_count(); + } + ggml_backend_dev_t reg_get_device(ggml_backend_reg_t reg, std::size_t index) { + auto& ctx = *((ggml::cpp::backend::reg*) (reg->context)); + return c_wrapper(reg, &ctx.get_device(index)); + } + void * reg_get_proc_address(ggml_backend_reg_t /*reg*/, const char * cname) { + const auto name = std::string(cname); + if (name == "ggml_backend_set_n_threads") { + return (void *)backend_set_n_threads; + } + if (name == "ggml_backend_dev_get_extra_bufts") { + return (void*) backend_dev_get_extra_bufts; + } + return nullptr; + } + + } + + // les destructeurs... + buffer::~buffer() {} + buffer_type::~buffer_type() {} + event::~event() {} + backend::backend(device& dev): m_device(dev) {} + backend::~backend() { } + device::~device() { + // TODO: il faut detruire des wrapper des buffer_type??? 
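+        // TODO (see above): do the cached buffer_type wrappers need to be
+        // destroyed here? As written they are owned by the static map inside
+        // c_wrapper() and live until process exit.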
+ } + reg::~reg() {} + + // non virtual fct: + void device::register_extra_buffer_type(buffer_type* buft) { + GGML_ASSERT(m_ggml_extra_buffers_type.size() == 0); // pas encore initialisé! + m_extra_buffers_type.push_back(buft); + } + + //========================================================= + // the wrappers + ggml_backend_buffer_t c_wrapper(ggml_backend_buffer_type_t buft, buffer* ctx, std::size_t size) { + if (!ctx) { return nullptr; } + return new ggml_backend_buffer { + /* .interface = */ { + /* .free_buffer = */ buffer_free_buffer, + /* .get_base = */ buffer_get_base, + /* .init_tensor = */ buffer_init_tensor, + /* .memset_tensor = */ buffer_memset_tensor, + /* .set_tensor = */ buffer_set_tensor, + /* .get_tensor = */ buffer_get_tensor, + /* .cpy_tensor = */ buffer_cpy_tensor, + /* .clear = */ buffer_clear, + /* .reset = */ buffer_reset, + }, + /* .buft = */ buft, + /* .context = */ ctx, + /* .size = */ size, + /* .usage = */ GGML_BACKEND_BUFFER_USAGE_ANY + }; + } + + struct buffer_type_deleter { + void operator()(ggml_backend_buffer_type* c_buffer_type) { + delete (c_buffer_type); + } + }; + typedef std::unique_ptr c_buffer_type_ptr; + + ggml_backend_buffer_type_t c_wrapper(ggml_backend_dev_t device, buffer_type* ctx) { + // the ctx have to be "static". + static std::map map; + if (!ctx) { return nullptr; } + + auto it = map.find(ctx); + // add new wrapper if not find. + if (it == map.end()) { + auto* wrapper = new ggml_backend_buffer_type { + /* .iface = */ { + /* .get_name = */ buffer_type_get_name, + /* .alloc_buffer = */ buffer_type_alloc_buffer, + /* .get_alignment = */ buffer_type_get_alignment, + /* .get_max_size = */ buffer_type_get_max_size, + /* .get_alloc_size = */ buffer_type_get_alloc_size, + /* .is_host = */ buffer_type_is_host, + }, + /* .device = */ device, + /* .context = */ ctx, + }; + map[ctx] = c_buffer_type_ptr(wrapper); + return wrapper; + } + return it->second.get(); + } + + ggml_backend_t c_wrapper(ggml_backend_dev_t device, backend* ctx) { + if (!ctx) { return nullptr; } + auto& dev = *((ggml::cpp::backend::device*) (device->context)); + return new ggml_backend { + /* .guid = */ const_cast(ctx->get_guid()), + /* .iface = */ { + /* .get_name = */ backend_get_name, + /* .free = */ backend_free, + /* .set_tensor_async = */ dev.caps_async() ? backend_set_tensor_async : nullptr, + /* .get_tensor_async = */ dev.caps_async() ? backend_get_tensor_async : nullptr, + /* .cpy_tensor_async = */ dev.caps_async() ? backend_cpy_tensor_async : nullptr, + /* .synchronize = */ dev.caps_async() ? backend_synchronize : nullptr, + /* .graph_plan_create = */ nullptr, + /* .graph_plan_free = */ nullptr, + /* .graph_plan_update = */ nullptr, + /* .graph_plan_compute = */ nullptr, + /* .graph_compute = */ backend_graph_compute, + /* .event_record = */ dev.caps_events() ? backend_event_record : nullptr, + /* .event_wait = */ dev.caps_events() ? 
backend_event_wait : nullptr, + /* .graph_optimize = */ nullptr, + }, + /* .device = */ device, + /* .context = */ ctx + }; + } + + struct device_deleter { + void operator()(ggml_backend_device* c_device) { + delete (c_device); + } + }; + typedef std::unique_ptr c_device_ptr; + + ggml_backend_dev_t c_wrapper(ggml_backend_reg_t reg, device* ctx) { + // the ctx have to be "static" / "per backend_register" + static std::map map; + if (!ctx) { return nullptr; } + + auto it = map.find(ctx); + if (it == map.end()) { + auto* wrapper = new ggml_backend_device { + /* .iface = */{ + /* .get_name = */ device_get_name, + /* .get_description = */ device_get_description, + /* .get_memory = */ device_get_memory, + /* .get_type = */ device_get_type, + /* .get_props = */ device_get_props, + /* .init_backend = */ device_init_backend, + /* .get_buffer_type = */ device_get_buffer_type, + /* .get_host_buffer_type = */ ctx->caps_host_buffer() ? device_get_host_buffer_type : nullptr, + /* .buffer_from_host_ptr = */ ctx->caps_buffer_from_host_ptr() ? device_buffer_from_host_ptr : nullptr, + /* .supports_op = */ device_supports_op, + /* .supports_buft = */ device_supports_buft, + /* .offload_op = */ device_offload_op, + /* .event_new = */ ctx->caps_events() ? device_event_new : nullptr, + /* .event_free = */ ctx->caps_events() ? device_event_free : nullptr, + /* .event_synchronize = */ ctx->caps_events() ? device_event_synchronize : nullptr, + }, + /* .reg = */ reg, + /* .context = */ ctx, + }; + map[ctx] = c_device_ptr(wrapper); + return wrapper; + } + return it->second.get(); + } + + struct register_deleter { + void operator()(ggml_backend_reg_t c_register) { + delete (c_register); + } + }; + typedef std::unique_ptr c_register_ptr; + + ggml_backend_reg_t c_wrapper(reg* ctx) { + // the ctx have to be static. 
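+        // i.e. the reg object must outlive its wrapper: one C wrapper is
+        // created per registry, cached in the map below and owned via unique_ptr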
+ static std::map map; + if (!ctx) { return nullptr; } + + auto it = map.find(ctx); + if (it == map.end()) { + auto* wrapper = new ggml_backend_reg { + /* .api_version = */ GGML_BACKEND_API_VERSION, + /* .iface = */ { + /* .get_name = */ reg_get_name, + /* .get_device_count = */ reg_get_device_count, + /* .get_device = */ reg_get_device, + /* .get_proc_address = */ reg_get_proc_address, + }, + /* .context = */ ctx, + }; + map[ctx] = c_register_ptr(wrapper); + //map[ctx] = wrapper; + return wrapper; + } + return it->second.get(); + //return it->second; + } + +} + + +// for simple CPU buffer: +namespace ggml::cpp::backend::cpu { + + // buffer + class buffer : public ggml::cpp::backend::buffer { + uint8_t* m_data = nullptr; + const std::size_t m_size; + + public: + buffer(std::size_t size, std::size_t alignment): m_size(size) { + m_data = new (std::align_val_t(alignment)) uint8_t[m_size]; + } + + buffer(void* ptr, std::size_t /*size*/): m_size(0) { + m_data = (uint8_t*) ptr; + } + + virtual ~buffer() { + if (m_size>0 && m_data) { + delete[] m_data; + } + m_data = nullptr; + } + + void* get_base() override { + return m_data; + } + + void memset_tensor(ggml_tensor & tensor, uint8_t value, std::size_t offset, std::size_t size) override { + memset((uint8_t *) tensor.data + offset, value, size); + } + + void set_tensor(ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) override { + memcpy((uint8_t *)tensor.data + offset, data, size); + } + + void get_tensor(const ggml_tensor & tensor, void * data, std::size_t offset, std::size_t size) override { + memcpy(data, (uint8_t *)tensor.data + offset, size); + } + + bool cpy_tensor (const ggml_tensor & src, ggml_tensor & dst) override { + if (ggml_backend_buffer_is_host(src.buffer)) { + memcpy(dst.data, src.data, ggml_nbytes(&src)); + return true; + } + return false; + } + + void clear (uint8_t value) override { + memset(m_data, value, m_size); + } + }; + + // buffer_type + class buffer_type : public ggml::cpp::backend::buffer_type { + const std::string m_name; + const std::size_t m_alignment; + const bool m_from_ptr; + + public: + buffer_type(const std::string& name, bool from_ptr, std::size_t alignment) : + m_name(name), m_alignment(alignment), m_from_ptr(from_ptr) + {} + + virtual ~buffer_type() {} + + const std::string& get_name() override { + return m_name; + } + + buffer* alloc_buffer(std::size_t size) override { + GGML_ASSERT(!m_from_ptr && "buffer type not for allocatable buffer"); + return new buffer(size, m_alignment); + } + + std::size_t get_alignment() override { + return m_alignment; + } + + bool is_host() override { + return true; + } + + buffer* register_buffer(void * ptr, std::size_t size, std::size_t /*max_tensor_size*/) override { + GGML_ASSERT(m_from_ptr && "buffer type not for ptr memory"); + GGML_ASSERT((uintptr_t)ptr % m_alignment == 0 && "buffer pointer must be aligned"); + return new buffer(ptr, size); + } + }; + +} + +namespace ggml::cpp::backend { + + buffer_type* new_cpu_buffer_type( + const std::string& name, + bool from_ptr, + std::size_t alignment + ) { + return new ggml::cpp::backend::cpu::buffer_type(name, from_ptr, alignment); + } + +} diff --git a/ggml/src/ggml_cpp_wrapper.h b/ggml/src/ggml_cpp_wrapper.h new file mode 100644 index 0000000000..aba8971f5c --- /dev/null +++ b/ggml/src/ggml_cpp_wrapper.h @@ -0,0 +1,154 @@ +#pragma once +#ifndef __cplusplus +#error "This header is for C++ only" +#endif + +#include "ggml.h" + +#include "ggml-impl.h" +#include "ggml-backend.h" + +#include +#include + 
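+// Usage sketch (hypothetical backend named "my", not part of this patch):
+// derive from the classes below, then hand the root object to c_wrapper()
+// to obtain the usual C-ABI objects, e.g.
+//
+//   class my_reg : public ggml::cpp::backend::reg {
+//       // get_name() / get_device_count() / get_device() overrides...
+//   };
+//
+//   ggml_backend_reg_t ggml_backend_my_reg(void) {
+//       static my_reg ctx;  // the reg object must outlive its C wrapper
+//       return ggml::cpp::backend::c_wrapper(&ctx);
+//   }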
+namespace ggml::cpp::backend { + + class buffer { // ggml_backend_buffer_t + public: + virtual ~buffer(); + + virtual void* get_base() = 0; + virtual ggml_status init_tensor(ggml_tensor& /*tensor*/) { return GGML_STATUS_SUCCESS; } + + virtual void memset_tensor( ggml_tensor & tensor, uint8_t value, std::size_t offset, std::size_t size) = 0; + virtual void set_tensor ( ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) = 0; + virtual void get_tensor (const ggml_tensor & tensor, void * data, std::size_t offset, std::size_t size) = 0; + + virtual bool cpy_tensor (const ggml_tensor & /*src*/, ggml_tensor & /*dst*/) { return false; } + + virtual void clear (uint8_t value) = 0; + virtual void reset () {} + }; + + class buffer_type { // ggml_backend_buffer_type_t + public: + virtual ~buffer_type(); + + virtual const std::string& get_name() = 0; + virtual buffer* alloc_buffer(std::size_t size) = 0; + virtual std::size_t get_alignment() { return TENSOR_ALIGNMENT; } + virtual std::size_t get_max_size() { return SIZE_MAX; } + virtual std::size_t get_alloc_size(const ggml_tensor& tensor) { return ggml_nbytes(&tensor); } + virtual bool is_host() { return false; } + // for pointer from memory pointer: + virtual buffer* register_buffer(void * /*ptr*/, std::size_t /*size*/, std::size_t /*max_tensor_size*/) { return nullptr; } + }; + + // TODO: manage event + class event { + public: + virtual ~event(); + }; + + // TODO: manage graph + //class graph_plan { + //public: + // virtual ~graph_plan(); + //}; + + class device; + + class backend { // ggml_backend_t + backend() = delete; + public: + backend(device& dev); + virtual ~backend(); + + virtual const std::string& get_name() = 0; + virtual const ggml_guid* get_guid() = 0; + + // need => device::caps_async() {return true;} + virtual void set_tensor_async( ggml_tensor & tensor, const void * data, size_t offset, size_t size) { ggml_backend_tensor_set(&tensor, data, offset, size); } + virtual void get_tensor_async(const ggml_tensor & tensor, void * data, size_t offset, size_t size) { ggml_backend_tensor_get(&tensor, data, offset, size); } + virtual bool cpy_tensor_async(ggml_backend_t /*backend_src*/,/* ggml_backend_t backend_dst==this,*/ const ggml_tensor & /*src*/, ggml_tensor & /*dst*/) { return false; } + virtual void synchronize() {} + + // TODO: manage graph + //virtual graph_plan& graph_plan_create(const ggml_cgraph & cgraph); + //virtual void graph_plan_free(graph_plan& plan); + //virtual void graph_plan_update(graph_plan& plan, const ggml_cgraph & cgraph); + //virtual enum ggml_status graph_plan_compute(graph_plan& plan); + + virtual enum ggml_status graph_compute(ggml_cgraph & cgraph) = 0; + + // need => device::caps_events() { return true; } + virtual void event_record (event & /*event*/) { GGML_ASSERT(false); } + virtual void event_wait (event & /*event*/) { GGML_ASSERT(false); } + + // the extra functions: + virtual void set_n_threads(int /*n_threads*/) { } + + protected: + device& m_device; + }; + + class device { // ggml_backend_dev_t + protected: + friend ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t device); + std::vector m_extra_buffers_type; + std::vector m_ggml_extra_buffers_type; + + public: + virtual ~device(); + + virtual const std::string& get_name() = 0; + virtual const std::string& get_description() = 0; + virtual void get_memory(std::size_t & free, std::size_t & total) = 0; + virtual enum ggml_backend_dev_type get_type() = 0; + virtual backend& init_backend(const std::string& 
params) = 0; + virtual buffer_type& get_buffer_type() = 0; + virtual buffer_type* get_host_buffer_type() { return nullptr; } + virtual buffer_type* get_from_host_ptr_buffer_type() { return nullptr; } + + virtual bool supports_op(const ggml_tensor & op) = 0; + virtual bool supports_buft(ggml_backend_buffer_type_t buft) = 0; + virtual bool offload_op(const ggml_tensor & /*op*/) { return false; } + + // event => caps_events() { return true; } + virtual event* event_new() { return nullptr; } + virtual void event_synchronize(event& /*event*/) { GGML_ASSERT(false); } + + //void get_props(struct ggml_backend_dev_props * props); ggml_backend_dev_caps + virtual bool caps_async() { return false; } + virtual bool caps_host_buffer() { return get_host_buffer_type() != nullptr; } + virtual bool caps_buffer_from_host_ptr() { return get_from_host_ptr_buffer_type() != nullptr; } + virtual bool caps_events() { return false; } + + protected: + void register_extra_buffer_type(buffer_type* buft); + + }; + + class reg { // ggml_backend_reg_t + public: + virtual ~reg(); + + virtual const std::string& get_name() = 0; + virtual std::size_t get_device_count() = 0; + virtual device& get_device(std::size_t index) = 0; + }; + + ggml_backend_buffer_t c_wrapper(ggml_backend_buffer_type_t buft, buffer* ctx, std::size_t size); + ggml_backend_buffer_type_t c_wrapper(ggml_backend_dev_t device, buffer_type* ctx); + ggml_backend_t c_wrapper(ggml_backend_dev_t device, backend* ctx); + ggml_backend_dev_t c_wrapper(ggml_backend_reg_t reg, device* ctx); + ggml_backend_reg_t c_wrapper(reg* ctx); + + // for simple cpu buffer: + buffer_type* new_cpu_buffer_type( + const std::string& name, + bool from_ptr=false, + std::size_t alignment = TENSOR_ALIGNMENT + ); + +} diff --git a/src/llama-model.cpp b/src/llama-model.cpp index e8e1bbf1cd..598fc409da 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -268,9 +268,6 @@ static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode s } } - // add the device default buffer type - buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev)); - // add the device extra buffer type (if any) ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev); auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t) @@ -284,6 +281,9 @@ static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode s } } + // add the device default buffer type + buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev)); + return buft_list; } From d53d4e687b933c4a421d018f547465e7b6887de0 Mon Sep 17 00:00:00 2001 From: Djip007 <3705339+Djip007@users.noreply.github.com> Date: Mon, 2 Mar 2026 20:12:33 +0100 Subject: [PATCH 2/3] Update BLAS backend: - convert to C++ backend - add OpenBLAS as external build (OpenBLAS_BUILD fake vendor) - correct OpenMP build --- ggml/src/ggml-blas/CMakeLists.txt | 75 ++- ggml/src/ggml-blas/ggml-blas.cpp | 1027 ++++++++++++++++------------- 2 files changed, 637 insertions(+), 465 deletions(-) diff --git a/ggml/src/ggml-blas/CMakeLists.txt b/ggml/src/ggml-blas/CMakeLists.txt index c27dc174c0..db873b8b06 100644 --- a/ggml/src/ggml-blas/CMakeLists.txt +++ b/ggml/src/ggml-blas/CMakeLists.txt @@ -5,16 +5,23 @@ endif() # set(BLA_SIZEOF_INTEGER 8) #endif() +ggml_add_backend_library(ggml-blas + ggml-blas.cpp + ) + +if (GGML_OPENMP) + find_package(OpenMP REQUIRED) + add_compile_definitions(GGML_USE_OPENMP) + target_link_libraries(ggml-blas PRIVATE OpenMP::OpenMP_C OpenMP::OpenMP_CXX) + set(BLA_THREAD OMP) +endif() + set(BLA_VENDOR 
${GGML_BLAS_VENDOR}) find_package(BLAS) if (BLAS_FOUND) message(STATUS "BLAS found, Libraries: ${BLAS_LIBRARIES}") - ggml_add_backend_library(ggml-blas - ggml-blas.cpp - ) - if (${GGML_BLAS_VENDOR} MATCHES "Apple") add_compile_definitions(ACCELERATE_NEW_LAPACK) add_compile_definitions(ACCELERATE_LAPACK_ILP64) @@ -68,7 +75,8 @@ if (BLAS_FOUND) endif() endif() - message(STATUS "BLAS found, Includes: ${BLAS_INCLUDE_DIRS}") + message(STATUS "BLAS found, Includes dirs : ${BLAS_INCLUDE_DIRS}") + message(STATUS "BLAS found, Includes flags: ${BLAS_LINKER_FLAGS}") target_compile_options(ggml-blas PRIVATE ${BLAS_LINKER_FLAGS}) @@ -84,6 +92,10 @@ if (BLAS_FOUND) add_compile_definitions(GGML_BLAS_USE_OPENBLAS) endif() + if ("${GGML_BLAS_VENDOR}" MATCHES "FlexiBLAS") + add_compile_definitions(GGML_BLAS_USE_FLEXIBLAS) + endif() + if ("${GGML_BLAS_VENDOR}" MATCHES "FLAME" OR "${GGML_BLAS_VENDOR}" MATCHES "AOCL" OR "${GGML_BLAS_VENDOR}" MATCHES "AOCL_mt") add_compile_definitions(GGML_BLAS_USE_BLIS) endif() @@ -92,8 +104,59 @@ if (BLAS_FOUND) add_compile_definitions(GGML_BLAS_USE_NVPL) endif() - target_link_libraries (ggml-blas PRIVATE ${BLAS_LIBRARIES}) + target_compile_features (ggml-blas PRIVATE c_std_11 cxx_std_20) + #target_link_libraries (ggml-blas PRIVATE ${BLAS_LIBRARIES}) + target_link_libraries (ggml-blas PRIVATE BLAS::BLAS) target_include_directories(ggml-blas SYSTEM PRIVATE ${BLAS_INCLUDE_DIRS}) + +elseif (${GGML_BLAS_VENDOR} MATCHES "OpenBLAS_BUILD") + # let build from source + message(STATUS "OpenBLAS build") + + add_compile_definitions(GGML_BLAS_USE_OPENBLAS) + + include(FetchContent) + FetchContent_Declare( + openblas + GIT_REPOSITORY https://github.com/OpenMathLib/OpenBLAS.git + GIT_TAG v0.3.31 + ) + + # https://www.openmathlib.org/OpenBLAS/docs/build_system/ + # https://github.com/OpenMathLib/OpenBLAS/blob/develop/CMakeLists.txt + set(BUILD_WITHOUT_LAPACK ON) + set(BUILD_TESTING OFF) + set(BUILD_STATIC_LIBS ON) + set(BUILD_SHARED_LIBS OFF) +if (GGML_OPENMP) + set(USE_OPENMP 1) + set(USE_THREAD 1) +else() + set(USE_OPENMP 0) + set(USE_THREAD 1) +endif() + set(BUILD_BFLOAT16 1) + set(BUILD_HFLOAT16 0) + set(BUILD_SINGLE 1) + set(ONLY_CBLAS 1) + #set(BUILD_DOUBLE 0) + #set(BUILD_COMPLEX 0) + #set(BUILD_COMPLEX16 0) + FetchContent_MakeAvailable(openblas) + FetchContent_GetProperties(openblas) + + add_compile_definitions(GGML_BLAS_USE_SBGEMM) + #add_compile_definitions(GGML_BLAS_USE_SHGEMM) + #add_compile_definitions(GGML_BLAS_USE_SGEMM_BATCHED) + #add_compile_definitions(GGML_BLAS_USE_SBGEMM_BATCHED) + #[...] 
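+    # Example invocation (for illustration): select this source build with
+    #   cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS_BUILD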
+ + target_compile_features (ggml-blas PRIVATE c_std_11 cxx_std_17) + target_link_directories (ggml-blas PRIVATE ${openblas_BINARY_DIR}/lib) + target_link_libraries (ggml-blas PRIVATE openblas) + + target_include_directories(ggml-blas SYSTEM PRIVATE ${openblas_SOURCE_DIR} ${openblas_BINARY_DIR}) + else() message(FATAL_ERROR "BLAS not found, please refer to " "https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors" diff --git a/ggml/src/ggml-blas/ggml-blas.cpp b/ggml/src/ggml-blas/ggml-blas.cpp index 5de64b816f..00af1bc5cc 100644 --- a/ggml/src/ggml-blas/ggml-blas.cpp +++ b/ggml/src/ggml-blas/ggml-blas.cpp @@ -1,10 +1,15 @@ -#include "ggml-impl.h" +#include "ggml.h" +#include "ggml-log.h" + #include "ggml-blas.h" + +#include "ggml_cpp_wrapper.h" #include "ggml-backend-impl.h" #include #include -#include +#include +#include #if defined(GGML_BLAS_USE_ACCELERATE) # include @@ -18,501 +23,605 @@ # include #endif -struct ggml_backend_blas_context { - int n_threads = GGML_DEFAULT_N_THREADS; - std::unique_ptr work_data; - size_t work_size = 0; -#ifndef GGML_USE_OPENMP - std::vector> tasks; +#if defined(GGML_BLAS_USE_FLEXIBLAS) +# include #endif -}; - -static void ggml_backend_blas_mul_mat(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) { - const struct ggml_tensor * src0 = dst->src[0]; - const struct ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const enum ggml_type type = src0->type; - - GGML_ASSERT(ne0 == ne01); - GGML_ASSERT(ne1 == ne11); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == ggml_type_size(type)); - GGML_ASSERT(nb10 == ggml_type_size(src1->type)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - // broadcast factors - const int64_t r2 = ne12/ne02; - const int64_t r3 = ne13/ne03; - - const int64_t ne_plane = ne01*ne00; - const size_t desired_wsize = type == GGML_TYPE_F32 ? 
0 : ne03*ne02*ne_plane*sizeof(float); - - if (ctx->work_size < desired_wsize) { - ctx->work_data.reset(new char[desired_wsize]); - ctx->work_size = desired_wsize; - } - void * wdata = ctx->work_data.get(); - - // convert src0 to float - if (type != GGML_TYPE_F32) { - const auto * type_traits = ggml_get_type_traits(type); - ggml_to_float_t const to_float = type_traits->to_float; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - const void * x = (char *) src0->data + i02*nb02 + i03*nb03; - float * const wplane = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane; - - const int min_cols_per_thread = 4096; - const int min_rows_per_thread = std::max((int)(min_cols_per_thread/ne00), 1); - const int n_threads = std::max(std::min(ctx->n_threads, (int)(ne01/min_rows_per_thread)), 1); #ifdef GGML_USE_OPENMP - #pragma omp parallel for num_threads(n_threads) - for (int64_t i01 = 0; i01 < ne01; i01++) { - to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); +# include +#endif + +namespace ggml::backend::blas { + + static constexpr std::size_t MEMORY_ALIGNMENT = 64; // 512 bits + + // backend class + class backend : public ggml::cpp::backend::backend { + + int n_threads = GGML_DEFAULT_N_THREADS; + + std::unique_ptr work_data; + size_t work_size = 0; + + // for tensor convert (TODO: remove work_data) + // TODO: have a stack off buffer if we need 2+ work_data + void* m_work_data = nullptr; + std::size_t m_work_size = 0; + template + T* get_work(std::size_t size) { + std::size_t nb_byte = size * sizeof(T); + if (nb_byte > m_work_size) { + nb_byte = std::max(nb_byte , 2*m_work_size); + // force "aligned size" + nb_byte = ((nb_byte-1)/MEMORY_ALIGNMENT)+1; + nb_byte *= MEMORY_ALIGNMENT; + if (m_work_data) std::free(m_work_data); + m_work_size = nb_byte; + m_work_data = aligned_alloc(MEMORY_ALIGNMENT, m_work_size); + } + return (T*) m_work_data; + } + +#ifndef GGML_USE_OPENMP + std::vector> tasks; +#endif + + private: + + //void cblas_sgemm(OPENBLAS_CONST enum CBLAS_ORDER Order, + // OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, + // OPENBLAS_CONST enum CBLAS_TRANSPOSE TransB, + // OPENBLAS_CONST blasint M, + // OPENBLAS_CONST blasint N, + // OPENBLAS_CONST blasint K, + // OPENBLAS_CONST float alpha, + // OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, + // OPENBLAS_CONST float *B, OPENBLAS_CONST blasint ldb, + // OPENBLAS_CONST float beta, + // float *C, OPENBLAS_CONST blasint ldc); + + //void cblas_sgemm_batch(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE * TransA_array, OPENBLAS_CONST enum CBLAS_TRANSPOSE * TransB_array, OPENBLAS_CONST blasint * M_array, OPENBLAS_CONST blasint * N_array, OPENBLAS_CONST blasint * K_array, + // OPENBLAS_CONST float * alpha_array, OPENBLAS_CONST float ** A_array, OPENBLAS_CONST blasint * lda_array, OPENBLAS_CONST float ** B_array, OPENBLAS_CONST blasint * ldb_array, OPENBLAS_CONST float * beta_array, float ** C_array, OPENBLAS_CONST blasint * ldc_array, OPENBLAS_CONST blasint group_count, OPENBLAS_CONST blasint * group_size); + //void cblas_sgemm_batch_strided(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransB, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST float alpha, OPENBLAS_CONST float * A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST blasint stridea, OPENBLAS_CONST float * B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST blasint strideb, OPENBLAS_CONST float beta, float * 
C, OPENBLAS_CONST blasint ldc, OPENBLAS_CONST blasint stridec, OPENBLAS_CONST blasint group_size); + + //void cblas_sbgemm_batch_strided(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransB, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST float alpha, OPENBLAS_CONST bfloat16 * A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST blasint stridea, OPENBLAS_CONST bfloat16 * B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST blasint strideb, OPENBLAS_CONST float beta, float * C, OPENBLAS_CONST blasint ldc, OPENBLAS_CONST blasint stridec, OPENBLAS_CONST blasint group_size); +#ifdef GGML_BLAS_USE_SBGEMM + void sbgemm(const ggml_tensor & A, const ggml_tensor & B, ggml_tensor & C) { + GGML_ASSERT(A.ne[0] == B.ne[0]); // K + GGML_ASSERT(B.ne[1] == C.ne[1]); // N + GGML_ASSERT(A.ne[1] == C.ne[0]); // M + // for now! + GGML_ASSERT(A.type == GGML_TYPE_BF16); + GGML_ASSERT(B.type == GGML_TYPE_F32); + GGML_ASSERT(C.type == GGML_TYPE_F32); + + // convert B to BF16: + // - B contigue: (TODO: other case?) + GGML_ASSERT(((size_t)4*B.ne[0]*B.ne[1]*B.ne[2]) == B.nb[3]); + std::size_t sizeB = B.ne[0]*B.ne[1]*B.ne[2]*B.ne[3]; + auto* B_work = get_work(std::max(sizeB, B.ne[0]*(std::size_t)256)); + cblas_sbstobf16(sizeB, (const float*)B.data, 1, B_work, 1); + + // compute: + if (B.ne[2]*B.ne[3] == 1) { + if (B.ne[1] == 1) { + cblas_sbgemv(CblasRowMajor, CblasNoTrans, + A.ne[1], A.ne[0], + 1.0f, (const bfloat16*)A.data, A.nb[1]/2, + B_work, 1, + 0.0f, ( float* )C.data, 1); + } else { + cblas_sbgemm(CblasRowMajor, CblasNoTrans, CblasTrans, + C.ne[1], C.ne[0], B.ne[0], + 1.0f, B_work, B.nb[1]/4, + (const bfloat16*)A.data, A.nb[1]/2, + 0.0f, ( float* )C.data, C.nb[1]/4); + } + } else { + // use batched gemm + const blasint nb_group = C.ne[2]*C.ne[3]; + auto transB = CblasNoTrans; + auto transA = CblasTrans; + const blasint M = C.ne[1]; + const blasint N = C.ne[0]; + const blasint K = B.ne[0]; + const blasint ldA = A.nb[1]/2; + const blasint ldB = B.nb[1]/4; + const blasint ldC = C.nb[1]/4; + static constexpr float alpha = 1; + static constexpr float beta = 0; + // TODO: reduce A.nb[2] => 1 + std::vector< float *> C_fp32_v (nb_group, nullptr); + std::vector A_bf16_v (nb_group, nullptr); + std::vector B_bf16_v (nb_group, nullptr); + + // Tensors config to batched params + const bfloat16 * A_bf16 = (const bfloat16 *) A.data; + const bfloat16 * B_bf16 = (const bfloat16 *) B_work; + float * C_fp32 = ( float *) C.data; + // A[3/2] is broadcasted... 
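+            // broadcast rule: each plane of A is reused r2 = B.ne[2]/A.ne[2]
+            // and r3 = B.ne[3]/A.ne[3] times, hence the (j2/r2), (j3/r3)
+            // indexing when the per-group pointer arrays are filled below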
+ const int64_t r2 = B.ne[2]/A.ne[2]; + const int64_t r3 = B.ne[3]/A.ne[3]; + const std::size_t lda2 = A.nb[2]/A.nb[0]; + const std::size_t lda3 = A.nb[3]/A.nb[0]; + const std::size_t ldb2 = B.nb[2]/4; + const std::size_t ldb3 = B.nb[3]/4; + const std::size_t ldc2 = C.nb[2]/C.nb[0]; + const std::size_t ldc3 = C.nb[3]/C.nb[0]; + + for (int64_t j3 = 0; j3 < C.ne[3]; ++j3) { + for (int64_t j2 = 0; j2 < C.ne[2]; ++j2) { + auto lda = (j2/r2)*lda2+(j3/r3)*lda3; + auto ldb = j2*ldb2+j3*ldb3; + auto ldc = j2*ldc2+j3*ldc3; + A_bf16_v[j2+j3*C.ne[2]] = A_bf16+lda; + B_bf16_v[j2+j3*C.ne[2]] = B_bf16+ldb; + C_fp32_v[j2+j3*C.ne[2]] = C_fp32+ldc; + } + } + + cblas_sbgemm_batch(CblasRowMajor, &transB, &transA, + &M, &N, &K, + &alpha, B_bf16_v.data(), &ldB, + A_bf16_v.data(), &ldA, + &beta , C_fp32_v.data(), &ldC, + 1, &nb_group); + } + } +#endif + + void sgemm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_TENSOR_BINARY_OP_LOCALS + + const enum ggml_type type = src0->type; + + GGML_ASSERT(ne0 == ne01); + GGML_ASSERT(ne1 == ne11); + GGML_ASSERT(ne2 == ne12); + GGML_ASSERT(ne3 == ne13); + + // we don't support permuted src0 or src1 + GGML_ASSERT(nb00 == ggml_type_size(type)); + GGML_ASSERT(nb10 == ggml_type_size(src1->type)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + // broadcast factors + const int64_t r2 = ne12/ne02; + const int64_t r3 = ne13/ne03; + + const int64_t ne_plane = ne01*ne00; + const size_t desired_wsize = type == GGML_TYPE_F32 ? 0 : ne03*ne02*ne_plane*sizeof(float); + + if (work_size < desired_wsize) { + work_data.reset(new char[desired_wsize]); + work_size = desired_wsize; + } + void * wdata = work_data.get(); + + // convert src0 to float + if (type != GGML_TYPE_F32) { + const auto * type_traits = ggml_get_type_traits(type); + ggml_to_float_t const to_float = type_traits->to_float; + +#ifdef GGML_USE_OPENMP + const char * x = (char *) src0->data; + float * const wplane = (float *) wdata; + const int64_t nf_plane01 = ne00; + const int64_t nf_plane02 = ne01*ne00; + const int64_t nf_plane03 = ne02*ne01*ne00; + + #pragma omp parallel for collapse(3) num_threads(this->n_threads) schedule(static) + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + to_float(x + i03*nb03 + i02*nb02 + i01*nb01, + wplane + i03*nf_plane03 + i02*nf_plane02 + i01*nf_plane01, + ne00); + } + } } #else - for (int i = 1; i < n_threads; i++) { - const int64_t start = i*ne01/n_threads; - const int64_t end = (i + 1)*ne01/n_threads; - if (start < end) { - ctx->tasks.push_back(std::async(std::launch::async, [=]() { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + const void * x = (char *) src0->data + i02*nb02 + i03*nb03; + float * const wplane = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane; + + const int min_cols_per_thread = 4096; + const int min_rows_per_thread = std::max((int)(min_cols_per_thread/ne00), 1); + const int n_threads = std::max(std::min(this->n_threads, (int)(ne01/min_rows_per_thread)), 1); + + for (int i = 1; i < n_threads; i++) { + const int64_t start = i*ne01/n_threads; + const int64_t end = (i + 1)*ne01/n_threads; + if (start < end) { + tasks.push_back(std::async(std::launch::async, [=]() { + for (int64_t i01 = start; i01 < end; i01++) { + to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); + } + })); + } + } + { 
+ // reuse the current thread for the first task + const int64_t start = 0; + const int64_t end = ne01/n_threads; for (int64_t i01 = start; i01 < end; i01++) { to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); } - })); + } } } - { - // reuse the current thread for the first task - const int64_t start = 0; - const int64_t end = ne01/n_threads; - for (int64_t i01 = start; i01 < end; i01++) { - to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); - } + // wait for all tasks to finish + for (auto & task : tasks) { + task.get(); } + tasks.clear(); #endif } + + for (int64_t i13 = 0; i13 < ne13; i13++) { + for (int64_t i12 = 0; i12 < ne12; i12++) { + const int64_t i03 = i13/r3; + const int64_t i02 = i12/r2; + + const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03); + const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13); + float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); + + if (type != GGML_TYPE_F32) { + x = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane; + } + if (ne1 == 1) { + cblas_sgemv(CblasRowMajor, CblasNoTrans, + ne01, ne00, + 1.0f, x, ne00, + y, 1, + 0.0f, d, 1); + } else { + cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, + ne1, ne01, ne10, + 1.0f, y, ne10, + x, ne00, + 0.0f, d, ne01); + } + } + } } -#ifndef GGML_USE_OPENMP - // wait for all tasks to finish - for (auto & task : ctx->tasks) { - task.get(); - } - ctx->tasks.clear(); -#endif - } + void out_prod(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_TENSOR_BINARY_OP_LOCALS + GGML_ASSERT(ne0 == ne00); + GGML_ASSERT(ne1 == ne10); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne02 == ne12); + GGML_ASSERT(ne3 == ne13); + GGML_ASSERT(ne03 == ne13); + + // we don't support permuted src0 or src1 + GGML_ASSERT(nb00 == sizeof(float)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + // GGML_ASSERT(nb0 <= nb1); + // GGML_ASSERT(nb1 <= nb2); + // GGML_ASSERT(nb2 <= nb3); + + // Arguments to ggml_compute_forward_out_prod (expressed as major,minor) + // src0: (k,n) + // src1: (k,m) + // dst: (m,n) + // + // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f) + // Also expressed as (major,minor) + // a: (m,k): so src1 transposed + // b: (k,n): so src0 + // c: (m,n) + // + // However, if ggml_is_transposed(src1) is true, then + // src1->data already contains a transposed version, so sgemm mustn't + // transpose it further. 
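+        //
+        // note (editor, illustrative): e.g. src0: (k=64, n=256) and src1: (k=64, m=8)
+        // give dst: (m=8, n=256); sgemm is then called with m=8, n=256, k=64 and, in the
+        // non-transposed case below, lda = m = 8 (src1 read as an (m,k) matrix via CblasTrans).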
+ + int n = src0->ne[0]; + int k = src0->ne[1]; + int m = src1->ne[0]; + + CBLAS_TRANSPOSE transposeA; + int lda; + + if (!ggml_is_transposed(src1)) { + transposeA = CblasTrans; + lda = m; + } else { + transposeA = CblasNoTrans; + lda = k; + } + + float * a = (float *) ((char *) src1->data); + float * b = (float *) ((char *) src0->data); + float * c = (float *) ((char *) dst->data); + + cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n); + } + + private: + const std::string m_name{"BLAS"}; + + public: + static constexpr ggml_guid s_guid = { 0x12, 0xa8, 0xae, 0xf4, 0xc0, 0x1e, 0x61, 0x97, 0x8f, 0xeb, 0x33, 0x04, 0xa1, 0x33, 0x51, 0x2d }; + + backend(const std::string& /*params*/, ggml::cpp::backend::device& dev) : + ggml::cpp::backend::backend(dev) + { } + + virtual ~backend() { + if (m_work_data) std::free(m_work_data); + } + + const std::string& get_name() override { + return m_name; + } + + const ggml_guid* get_guid() override { + return &s_guid; + } + + ggml_status graph_compute(ggml_cgraph & cgraph) override { + for (int i = 0; i < cgraph.n_nodes; i++) { + ggml_tensor * node = cgraph.nodes[i]; + if ((node->flags & GGML_TENSOR_FLAG_COMPUTE) == 0) { + continue; + } + switch (node->op) { + case GGML_OP_MUL_MAT: +#ifdef GGML_BLAS_USE_SBGEMM + if (node->src[0]->type == GGML_TYPE_BF16) { + sbgemm(*node->src[0], *node->src[1], *node); + } else +#endif +#ifdef GGML_BLAS_USE_SHGEMM + //if (node->src[0].type == GGML_TYPE_BF16) { + // shgemm(*node->src[0], *node->src[1], *node); + //} else +#endif + { + sgemm(node->src[0], node->src[1], node); + } + break; + + case GGML_OP_OUT_PROD: + out_prod(node->src[0], node->src[1], node); + break; + + case GGML_OP_NONE: + case GGML_OP_RESHAPE: + case GGML_OP_VIEW: + case GGML_OP_PERMUTE: + case GGML_OP_TRANSPOSE: + break; + + default: + GGML_ABORT("%s: unsupported op %s\n", __func__, ggml_op_desc(node)); + } + } + return GGML_STATUS_SUCCESS; + } + + void set_n_threads(int n_threads) override { + this->n_threads = n_threads; #if defined(GGML_BLAS_USE_OPENBLAS) - openblas_set_num_threads(ctx->n_threads); + openblas_set_num_threads(n_threads); +# if defined(GGML_USE_OPENMP) + omp_set_num_threads(n_threads); +# endif #elif defined(GGML_BLAS_USE_BLIS) - bli_thread_set_num_threads(ctx->n_threads); + bli_thread_set_num_threads(n_threads); +#elif defined(GGML_BLAS_USE_FLEXIBLAS) + flexiblas_set_num_threads(n_threads); #elif defined(GGML_BLAS_USE_NVPL) - nvpl_blas_set_num_threads(ctx->n_threads); + nvpl_blas_set_num_threads(n_threads); #endif - - for (int64_t i13 = 0; i13 < ne13; i13++) { - for (int64_t i12 = 0; i12 < ne12; i12++) { - const int64_t i03 = i13/r3; - const int64_t i02 = i12/r2; - - const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03); - const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13); - float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); - - if (type != GGML_TYPE_F32) { - x = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane; - } - - cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, - ne1, ne01, ne10, - 1.0f, y, ne10, - x, ne00, - 0.0f, d, ne01); - } - } -} - -static void ggml_backend_blas_out_prod(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) { - const struct ggml_tensor * src0 = dst->src[0]; - const struct ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - GGML_ASSERT(ne0 == ne00); - GGML_ASSERT(ne1 == ne10); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne02 == ne12); - GGML_ASSERT(ne3 == ne13); - GGML_ASSERT(ne03 == 
ne13); - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == sizeof(float)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - // GGML_ASSERT(nb0 <= nb1); - // GGML_ASSERT(nb1 <= nb2); - // GGML_ASSERT(nb2 <= nb3); - - // Arguments to ggml_compute_forward_out_prod (expressed as major,minor) - // src0: (k,n) - // src1: (k,m) - // dst: (m,n) - // - // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f) - // Also expressed as (major,minor) - // a: (m,k): so src1 transposed - // b: (k,n): so src0 - // c: (m,n) - // - // However, if ggml_is_transposed(src1) is true, then - // src1->data already contains a transposed version, so sgemm mustn't - // transpose it further. - - int n = src0->ne[0]; - int k = src0->ne[1]; - int m = src1->ne[0]; - - CBLAS_TRANSPOSE transposeA; - int lda; - - if (!ggml_is_transposed(src1)) { - transposeA = CblasTrans; - lda = m; - } else { - transposeA = CblasNoTrans; - lda = k; - } - - float * a = (float *) ((char *) src1->data); - float * b = (float *) ((char *) src0->data); - float * c = (float *) ((char *) dst->data); - - cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n); - - GGML_UNUSED(ctx); -} - -// backend interface - -static const char * ggml_backend_blas_get_name(ggml_backend_t backend) { - return "BLAS"; - - GGML_UNUSED(backend); -} - -static void ggml_backend_blas_free(ggml_backend_t backend) { - ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context; - delete ctx; - delete backend; -} - -static enum ggml_status ggml_backend_blas_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { - ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context; - - for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * node = cgraph->nodes[i]; - - if ((node->flags & GGML_TENSOR_FLAG_COMPUTE) == 0) { - continue; } - switch (node->op) { - case GGML_OP_MUL_MAT: - ggml_backend_blas_mul_mat(ctx, node); - break; - - case GGML_OP_OUT_PROD: - ggml_backend_blas_out_prod(ctx, node); - break; - - case GGML_OP_NONE: - case GGML_OP_RESHAPE: - case GGML_OP_VIEW: - case GGML_OP_PERMUTE: - case GGML_OP_TRANSPOSE: - break; - - default: - GGML_ABORT("%s: unsupported op %s\n", __func__, ggml_op_desc(node)); - } - } - - return GGML_STATUS_SUCCESS; - - GGML_UNUSED(backend); -} - -static struct ggml_backend_i blas_backend_i = { - /* .get_name = */ ggml_backend_blas_get_name, - /* .free = */ ggml_backend_blas_free, - /* .set_tensor_async = */ NULL, - /* .get_tensor_async = */ NULL, - /* .cpy_tensor_async = */ NULL, - /* .synchronize = */ NULL, - /* .graph_plan_create = */ NULL, - /* .graph_plan_free = */ NULL, - /* .graph_plan_update = */ NULL, - /* .graph_plan_compute = */ NULL, - /* .graph_compute = */ ggml_backend_blas_graph_compute, - /* .event_record = */ NULL, - /* .event_wait = */ NULL, - /* .graph_optimize = */ NULL, -}; - -static ggml_guid_t ggml_backend_blas_guid(void) { - static ggml_guid guid = { 0x12, 0xa8, 0xae, 0xf4, 0xc0, 0x1e, 0x61, 0x97, 0x8f, 0xeb, 0x33, 0x04, 0xa1, 0x33, 0x51, 0x2d }; - return &guid; -} - -ggml_backend_t ggml_backend_blas_init(void) { - ggml_backend_blas_context * ctx = new ggml_backend_blas_context; - - ggml_backend_t backend = new ggml_backend { - /* .guid = */ ggml_backend_blas_guid(), - /* .iface = */ blas_backend_i, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_blas_reg(), 0), - /* .context = */ ctx, }; -#if defined(GGML_BLAS_USE_OPENBLAS) && 
defined(GGML_USE_OPENMP) - if (openblas_get_parallel() != OPENBLAS_OPENMP) { - GGML_LOG_DEBUG("%s: warning: ggml is using OpenMP, but OpenBLAS was compiled without OpenMP support\n", __func__); - } -#endif + // device class + class device : public ggml::cpp::backend::device { + const std::string m_name; + const std::string m_desc; + ggml::cpp::backend::buffer_type* m_cpu_buffer_type; // ggml_backend_cpu_buffer_type + ggml::cpp::backend::buffer_type* m_cpu_buffer_from_ptr_type; // ggml_backend_cpu_buffer_from_ptr_type + public: + device() : m_name("BLAS"), m_desc( + #if defined(GGML_BLAS_USE_ACCELERATE) + "Accelerate" + #elif defined(GGML_BLAS_USE_MKL) + "MKL" + #elif defined(GGML_BLAS_USE_BLIS) + "BLIS" + #elif defined(GGML_BLAS_USE_NVPL) + "NVPL" + #elif defined(GGML_BLAS_USE_OPENBLAS) + "OpenBLAS" + #else + "BLAS" + #endif + ) { + m_cpu_buffer_type = ggml::cpp::backend::new_cpu_buffer_type("BLAS", false, MEMORY_ALIGNMENT); + m_cpu_buffer_from_ptr_type = ggml::cpp::backend::new_cpu_buffer_type("BLAS_Mapped", true , MEMORY_ALIGNMENT); + } + + virtual ~device() { + delete m_cpu_buffer_type; + delete m_cpu_buffer_from_ptr_type; + } + + const std::string& get_name() override { + return m_name; + } + + const std::string& get_description() override { + return m_desc; + } + + void get_memory(std::size_t & free, std::size_t & total) override { + // no memory to report + total = 0; + free = 0; + } + + enum ggml_backend_dev_type get_type() override { + return GGML_BACKEND_DEVICE_TYPE_ACCEL; + } + + ggml::cpp::backend::backend& init_backend(const std::string& params) override { + auto back = new backend(params, *this); + return *back; + } + + ggml::cpp::backend::buffer_type& get_buffer_type() override { + return *m_cpu_buffer_type; + } + + bool caps_buffer_from_host_ptr() override { return true; } + ggml::cpp::backend::buffer_type* get_from_host_ptr_buffer_type() override { + return m_cpu_buffer_from_ptr_type; + } + + bool supports_op(const ggml_tensor & op) override { + const struct ggml_tensor * src0 = op.src[0]; + const struct ggml_tensor * src1 = op.src[1]; + + switch (op.op) { + case GGML_OP_NONE: + case GGML_OP_RESHAPE: + case GGML_OP_VIEW: + case GGML_OP_PERMUTE: + case GGML_OP_TRANSPOSE: + return true; + + case GGML_OP_MUL_MAT: + { + // BLAS usually is only faster for large matrices + const int64_t ne10 = src1->ne[0]; + + const int64_t ne0 = op.ne[0]; + const int64_t ne1 = op.ne[1]; + + // TODO: find the optimal value + const int64_t min_batch = 448; + + return ggml_is_contiguous(src0) && + ggml_is_contiguous(src1) && + src1->type == GGML_TYPE_F32 && + ( src0->type == GGML_TYPE_F32 || +#ifdef GGML_BLAS_USE_SBGEMM + ( src0->type == GGML_TYPE_BF16 && + ne1 >= min_batch + ) || +#endif + ( (ne0 >= min_batch && ne1 >= min_batch && ne10 >= min_batch) && + ggml_get_type_traits(src0->type)->to_float != NULL + ) + ); + } + + case GGML_OP_OUT_PROD: + return src0->type == GGML_TYPE_F32 && + src1->type == GGML_TYPE_F32 && + ggml_is_matrix(src0) && + ggml_is_matrix(src1) && + ggml_is_contiguous(src0) && + (ggml_is_contiguous(src1) || ggml_is_transposed(src1)) && + (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL); + + default: + return false; + + } + } + + bool supports_buft(ggml_backend_buffer_type_t buffer_type) override { + return ggml_backend_buft_is_host(buffer_type); + } + + }; + + // backend reg class + class reg: public ggml::cpp::backend::reg { + const std::string m_name{"BLAS"}; + device* m_device; + + public: + reg() { + m_device = new device(); +#if 
defined(GGML_BLAS_USE_OPENBLAS) + if (openblas_get_parallel() == OPENBLAS_SEQUENTIAL) { + GGML_LOG_WARN("%s: warning: OpenBLAS was compiled without parallel support\n", __func__); + } +# if defined(GGML_USE_OPENMP) + else if (openblas_get_parallel() != OPENBLAS_OPENMP) { + GGML_LOG_WARN("%s: warning: ggml is using OpenMP, but OpenBLAS was compiled without OpenMP support\n", __func__); + } +# else + if (openblas_get_parallel() == OPENBLAS_OPENMP) { + GGML_LOG_WARN("%s: warning: ggml is not using OpenMP, but OpenBLAS was compiled with OpenMP support\n", __func__); + } +# endif +#endif #if defined(BLIS_ENABLE_CBLAS) && defined(GGML_USE_OPENMP) && !defined(BLIS_ENABLE_OPENMP) - GGML_LOG_DEBUG("%s: warning: ggml is using OpenMP, but BLIS was compiled without OpenMP support\n", __func__); + GGML_LOG_WARN("%s: warning: ggml is using OpenMP, but BLIS was compiled without OpenMP support\n", __func__); #endif + } - return backend; + virtual ~reg() { } + + const std::string& get_name() override { + return m_name; + } + + std::size_t get_device_count() override { + return 1; + } + + device& get_device(std::size_t index) override { + GGML_ASSERT(index == 0); + return *m_device; + } + }; + + static ggml::backend::blas::reg& get_reg(void) { + static ggml::backend::blas::reg ctx; + return ctx; + } + +} + +// extern API: +ggml_backend_t ggml_backend_blas_init(void) { + auto* _reg = ggml_backend_blas_reg(); + auto* _device = _reg->iface.get_device(_reg, 0); + return _device->iface.init_backend(_device, ""); } bool ggml_backend_is_blas(ggml_backend_t backend) { - return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_blas_guid()); + return backend != NULL && ggml_guid_matches(backend->guid, &ggml::backend::blas::backend::s_guid); } void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads) { GGML_ASSERT(ggml_backend_is_blas(backend_blas)); - - ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend_blas->context; - ctx->n_threads = n_threads; + auto& ctx = *((ggml::cpp::backend::backend*) (backend_blas->context)); + ctx.set_n_threads(n_threads); } -// device interface - -static const char * ggml_backend_blas_device_get_name(ggml_backend_dev_t dev) { - return "BLAS"; - - GGML_UNUSED(dev); -} - -static const char * ggml_backend_blas_device_get_description(ggml_backend_dev_t dev) { - #if defined(GGML_BLAS_USE_ACCELERATE) - return "Accelerate"; - #elif defined(GGML_BLAS_USE_MKL) - return "MKL"; - #elif defined(GGML_BLAS_USE_BLIS) - return "BLIS"; - #elif defined(GGML_BLAS_USE_NVPL) - return "NVPL"; - #elif defined(GGML_BLAS_USE_OPENBLAS) - return "OpenBLAS"; - #else - return "BLAS"; - #endif - - GGML_UNUSED(dev); -} - -static void ggml_backend_blas_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { - // no memory to report - *free = 0; - *total = 0; - - GGML_UNUSED(dev); -} - -static enum ggml_backend_dev_type ggml_backend_blas_device_get_type(ggml_backend_dev_t dev) { - return GGML_BACKEND_DEVICE_TYPE_ACCEL; - - GGML_UNUSED(dev); -} - -static void ggml_backend_blas_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { - props->name = ggml_backend_blas_device_get_name(dev); - props->description = ggml_backend_blas_device_get_description(dev); - props->type = ggml_backend_blas_device_get_type(dev); - ggml_backend_blas_device_get_memory(dev, &props->memory_free, &props->memory_total); - props->caps = { - /* .async = */ false, - /* .host_buffer = */ false, - /* .buffer_from_host_ptr = */ true, - /* .events = */ 
false, - }; -} - -static ggml_backend_t ggml_backend_blas_device_init_backend(ggml_backend_dev_t dev, const char * params) { - return ggml_backend_blas_init(); - - GGML_UNUSED(dev); - GGML_UNUSED(params); -} - -static ggml_backend_buffer_type_t ggml_backend_blas_device_get_buffer_type(ggml_backend_dev_t dev) { - return ggml_backend_cpu_buffer_type(); - - GGML_UNUSED(dev); -} - -static ggml_backend_buffer_t ggml_backend_blas_device_buffer_from_host_ptr(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) { - return ggml_backend_cpu_buffer_from_ptr(ptr, size); - - GGML_UNUSED(dev); - GGML_UNUSED(max_tensor_size); -} - -static bool ggml_backend_blas_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) { - const struct ggml_tensor * src0 = op->src[0]; - const struct ggml_tensor * src1 = op->src[1]; - - switch (op->op) { - case GGML_OP_NONE: - case GGML_OP_RESHAPE: - case GGML_OP_VIEW: - case GGML_OP_PERMUTE: - case GGML_OP_TRANSPOSE: - return true; - - case GGML_OP_MUL_MAT: - { - // BLAS usually is only faster for large matrices - const struct ggml_tensor * src0 = op->src[0]; - const struct ggml_tensor * src1 = op->src[1]; - - const int64_t ne10 = src1->ne[0]; - - const int64_t ne0 = op->ne[0]; - const int64_t ne1 = op->ne[1]; - - // TODO: find the optimal value - const int64_t min_batch = 32; - - return ggml_is_contiguous(src0) && - ggml_is_contiguous(src1) && - src1->type == GGML_TYPE_F32 && - (ne0 >= min_batch && ne1 >= min_batch && ne10 >= min_batch) && - (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL); - } - - case GGML_OP_OUT_PROD: - return op->src[0]->type == GGML_TYPE_F32 && - op->src[1]->type == GGML_TYPE_F32 && - ggml_is_matrix(src0) && - ggml_is_matrix(src1) && - ggml_is_contiguous(src0) && - (ggml_is_contiguous(src1) || ggml_is_transposed(src1)) && - (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL); - - default: - return false; - - } - - GGML_UNUSED(dev); -} - -static bool ggml_backend_blas_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { - return ggml_backend_buft_is_host(buft); - - GGML_UNUSED(dev); -} - -static const struct ggml_backend_device_i ggml_backend_blas_device_i = { - /* .get_name = */ ggml_backend_blas_device_get_name, - /* .get_description = */ ggml_backend_blas_device_get_description, - /* .get_memory = */ ggml_backend_blas_device_get_memory, - /* .get_type = */ ggml_backend_blas_device_get_type, - /* .get_props = */ ggml_backend_blas_device_get_props, - /* .init_backend = */ ggml_backend_blas_device_init_backend, - /* .get_buffer_type = */ ggml_backend_blas_device_get_buffer_type, - /* .get_host_buffer_type = */ NULL, - /* .buffer_from_host_ptr = */ ggml_backend_blas_device_buffer_from_host_ptr, - /* .supports_op = */ ggml_backend_blas_device_supports_op, - /* .supports_buft = */ ggml_backend_blas_device_supports_buft, - /* .offload_op = */ NULL, - /* .event_new = */ NULL, - /* .event_free = */ NULL, - /* .event_synchronize = */ NULL, -}; - -// backend reg interface - -static const char * ggml_backend_blas_reg_get_name(ggml_backend_reg_t reg) { - return "BLAS"; - - GGML_UNUSED(reg); -} - -static size_t ggml_backend_blas_reg_get_device_count(ggml_backend_reg_t reg) { - return 1; - - GGML_UNUSED(reg); -} - -static ggml_backend_dev_t ggml_backend_blas_reg_get_device(ggml_backend_reg_t reg, size_t index) { - GGML_ASSERT(index == 0); - - static ggml_backend_device ggml_backend_blas_device = { - /* .iface = */ 
ggml_backend_blas_device_i,
-    /* .reg     = */ reg,
-    /* .context = */ nullptr,
-    };
-
-    return &ggml_backend_blas_device;
-
-    GGML_UNUSED(reg);
-    GGML_UNUSED(index);
-}
-
-static void * ggml_backend_blas_get_proc_address(ggml_backend_reg_t reg, const char * name) {
-    if (std::strcmp(name, "ggml_backend_set_n_threads") == 0) {
-        return (void *)ggml_backend_blas_set_n_threads;
-    }
-    return NULL;
-
-    GGML_UNUSED(reg);
-    GGML_UNUSED(name);
-}
-
-static const struct ggml_backend_reg_i ggml_backend_blas_reg_i = {
-    /* .get_name         = */ ggml_backend_blas_reg_get_name,
-    /* .get_device_count = */ ggml_backend_blas_reg_get_device_count,
-    /* .get_device       = */ ggml_backend_blas_reg_get_device,
-    /* .get_proc_address = */ ggml_backend_blas_get_proc_address,
-};
-
 ggml_backend_reg_t ggml_backend_blas_reg(void) {
-    static struct ggml_backend_reg ggml_backend_blas_reg = {
-        /* .api_version = */ GGML_BACKEND_API_VERSION,
-        /* .iface       = */ ggml_backend_blas_reg_i,
-        /* .context     = */ NULL,
-    };
-
-    return &ggml_backend_blas_reg;
+    return ggml::cpp::backend::c_wrapper(&ggml::backend::blas::get_reg());
 }
 
 GGML_BACKEND_DL_IMPL(ggml_backend_blas_reg)

From 44a86ee644135706df91ae6b1167109a5f6580b7 Mon Sep 17 00:00:00 2001
From: Djip007 <3705339+Djip007@users.noreply.github.com>
Date: Thu, 5 Mar 2026 23:33:56 +0100
Subject: [PATCH 3/3] corrections:
 > tensor_traits "backend"
   - AMX
   - migrate kleidiai
   - migrate spacemit
 > ggml-blas backend
 > update GGML_API for c++ support

Not related to this PR:
 > ggml-webgpu backend
 > ggml-metal backend
---
 CMakeLists.txt                          |   1 +
 cmake/common.cmake                      |   1 +
 ggml/include/ggml.h                     |  33 +++--
 ggml/src/CMakeLists.txt                 |   5 +
 ggml/src/ggml-blas/CMakeLists.txt       |  13 +-
 ggml/src/ggml-blas/ggml-blas.cpp        |  55 ++++---
 ggml/src/ggml-cpu/CMakeLists.txt        |   4 +
 ggml/src/ggml-cpu/amx/amx.cpp           |   4 +-
 ggml/src/ggml-cpu/kleidiai/kleidiai.cpp | 175 ++++++++++------------
 ggml/src/ggml-cpu/spacemit/ime.cpp      | 181 ++++++++++--------------
 ggml/src/ggml-cpu/traits.cpp            |   5 +-
 ggml/src/ggml-cpu/traits.h              |   3 +-
 ggml/src/ggml-metal/ggml-metal.cpp      |   8 +-
 ggml/src/ggml-webgpu/ggml-webgpu.cpp    |   3 +-
 ggml/src/ggml_cpp_wrapper.cpp           |  79 +++++++----
 ggml/src/ggml_cpp_wrapper.h             |  27 ++--
 16 files changed, 294 insertions(+), 303 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 69da97dc1e..0b816d9ae3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -70,6 +70,7 @@ if (MSVC)
     add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:/utf-8>")
     add_compile_options("$<$<COMPILE_LANGUAGE:C>:/bigobj>")
     add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:/bigobj>")
+    add_compile_options(/Zc:__cplusplus)
 endif()
 
 if (LLAMA_STANDALONE)
diff --git a/cmake/common.cmake b/cmake/common.cmake
index bcf403e0ee..f8934e3590 100644
--- a/cmake/common.cmake
+++ b/cmake/common.cmake
@@ -7,6 +7,7 @@ function(llama_add_compile_flags)
             list(APPEND CXX_FLAGS -Werror)
         elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
             add_compile_options(/WX)
+            add_compile_options(/Zc:__cplusplus)
         endif()
     endif()
 
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index 72301af351..40eb1b2763 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -174,17 +174,34 @@
 //
 
 #ifdef GGML_SHARED
-#  if defined(_WIN32) && !defined(__MINGW32__)
-#    ifdef GGML_BUILD
-#      define GGML_API __declspec(dllexport) extern
-#    else
-#      define GGML_API __declspec(dllimport) extern
-#    endif
+#  ifdef __cplusplus
+#    if defined(_WIN32) && !defined(__MINGW32__)
+#      ifdef GGML_BUILD
+#        define GGML_API __declspec(dllexport)
+#      else
+#        define GGML_API __declspec(dllimport)
+#      endif
+#    else
+//#      define GGML_API [[gnu::visibility ("default")]]
+#      define GGML_API 
__attribute__ ((visibility ("default")))
+#    endif
 #  else
-#    if defined(_WIN32) && !defined(__MINGW32__)
-#      ifdef GGML_BUILD
-#        define GGML_API __declspec(dllexport) extern
-#      else
-#        define GGML_API __declspec(dllimport) extern
-#      endif
+#    if defined(_WIN32) && !defined(__MINGW32__)
+#      ifdef GGML_BUILD
+#        define GGML_API __declspec(dllexport) extern
+#      else
+#        define GGML_API __declspec(dllimport) extern
+#      endif
 #    else
-#      define GGML_API __attribute__ ((visibility ("default"))) extern
+#      define GGML_API __attribute__ ((visibility ("default"))) extern
+#    endif
 #  endif
 #else
-#  define GGML_API extern
+#  ifdef __cplusplus
+#    define GGML_API
+#  else
+#    define GGML_API extern
+#  endif
 #endif
 
 // TODO: support for clang
diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt
index 2046c9d8d3..2a8aba3935 100644
--- a/ggml/src/CMakeLists.txt
+++ b/ggml/src/CMakeLists.txt
@@ -8,6 +8,10 @@ if (CMAKE_SYSTEM_NAME MATCHES "Linux")
     add_compile_definitions($<$<CONFIG:Debug>:_GLIBCXX_ASSERTIONS>)
 endif()
 
+if (MSVC)
+    add_compile_options(/Zc:__cplusplus)
+endif()
+
 if (NOT MSVC)
     if (GGML_SANITIZE_THREAD)
         add_compile_options(-fsanitize=thread)
@@ -25,6 +29,7 @@ if (NOT MSVC)
     endif()
 endif()
 
+
 if (GGML_FATAL_WARNINGS)
     if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
         list(APPEND C_FLAGS -Werror)
diff --git a/ggml/src/ggml-blas/CMakeLists.txt b/ggml/src/ggml-blas/CMakeLists.txt
index db873b8b06..cdcea3adb3 100644
--- a/ggml/src/ggml-blas/CMakeLists.txt
+++ b/ggml/src/ggml-blas/CMakeLists.txt
@@ -9,7 +9,7 @@ ggml_add_backend_library(ggml-blas
                          ggml-blas.cpp
                          )
 
-if (GGML_OPENMP)
+if (GGML_OPENMP_ENABLED STREQUAL "ON")
     find_package(OpenMP REQUIRED)
     add_compile_definitions(GGML_USE_OPENMP)
     target_link_libraries(ggml-blas PRIVATE OpenMP::OpenMP_C OpenMP::OpenMP_CXX)
@@ -104,8 +104,10 @@ if (BLAS_FOUND)
         add_compile_definitions(GGML_BLAS_USE_NVPL)
     endif()
 
-    target_compile_features (ggml-blas PRIVATE c_std_11 cxx_std_20)
-    #target_link_libraries  (ggml-blas PRIVATE ${BLAS_LIBRARIES})
+    if (MSVC)
+        add_compile_options(/Zc:__cplusplus)
+    endif()
+    target_compile_features (ggml-blas PRIVATE c_std_11 cxx_std_17)
     target_link_libraries      (ggml-blas PRIVATE BLAS::BLAS)
     target_include_directories (ggml-blas SYSTEM PRIVATE ${BLAS_INCLUDE_DIRS})
 
@@ -128,7 +130,7 @@ elseif (${GGML_BLAS_VENDOR} MATCHES "OpenBLAS_BUILD")
     set(BUILD_TESTING OFF)
     set(BUILD_STATIC_LIBS ON)
     set(BUILD_SHARED_LIBS OFF)
-if (GGML_OPENMP)
+if (GGML_OPENMP_ENABLED STREQUAL "ON")
     set(USE_OPENMP 1)
     set(USE_THREAD 1)
 else()
@@ -151,6 +153,9 @@
 endif()
 
 #add_compile_definitions(GGML_BLAS_USE_SBGEMM_BATCHED)
 #[...]
+    if (MSVC)
+        add_compile_options(/Zc:__cplusplus)
+    endif()
     target_compile_features (ggml-blas PRIVATE c_std_11 cxx_std_17)
     target_link_directories (ggml-blas PRIVATE ${openblas_BINARY_DIR}/lib)
     target_link_libraries   (ggml-blas PRIVATE openblas)
diff --git a/ggml/src/ggml-blas/ggml-blas.cpp b/ggml/src/ggml-blas/ggml-blas.cpp
index 00af1bc5cc..a7e93de8ce 100644
--- a/ggml/src/ggml-blas/ggml-blas.cpp
+++ b/ggml/src/ggml-blas/ggml-blas.cpp
@@ -11,6 +11,8 @@
 #include <future>
 #include <vector>
 
+static_assert(__cplusplus >= 201703L, "This file expects a C++17 compatible compiler.");
+
 #if defined(GGML_BLAS_USE_ACCELERATE)
 #   include <Accelerate/Accelerate.h>
 #elif defined(GGML_BLAS_USE_MKL)
@@ -32,7 +34,7 @@
 #endif
 
 namespace ggml::backend::blas {
-    
+
     static constexpr std::size_t MEMORY_ALIGNMENT = 64; // 512 bits
 
     // backend class
@@ -40,26 +42,28 @@ namespace ggml::backend::blas {
 
         int n_threads = GGML_DEFAULT_N_THREADS;
 
-        std::unique_ptr<char[]> work_data;
-        size_t work_size = 0;
-
-        // for tensor convert (TODO: remove work_data)
+        // for tensor convert
         // TODO: have a stack of buffers if we need 2+ work_data
-        void* m_work_data = nullptr;
+        // need C++17 for a correctly aligned buffer
+        struct alignas(MEMORY_ALIGNMENT) aligned_uint8_t {
+            uint8_t val;
+        };
+        aligned_uint8_t* m_work_data = nullptr;
         std::size_t m_work_size = 0;
         template<typename T>
         T* get_work(std::size_t size) {
             std::size_t nb_byte = size * sizeof(T);
             if (nb_byte > m_work_size) {
                 nb_byte = std::max(nb_byte , 2*m_work_size);
-                // force "aligned size"
+                // force "aligned" size
                 nb_byte = ((nb_byte-1)/MEMORY_ALIGNMENT)+1;
                 nb_byte *= MEMORY_ALIGNMENT;
-                if (m_work_data) std::free(m_work_data);
+                if (m_work_data) delete[] m_work_data;
                 m_work_size = nb_byte;
-                m_work_data = aligned_alloc(MEMORY_ALIGNMENT, m_work_size);
+                // sizeof(aligned_uint8_t) == MEMORY_ALIGNMENT and m_work_size is a
+                // multiple of it, so allocate m_work_size bytes, not m_work_size elements
+                m_work_data = new aligned_uint8_t[m_work_size / MEMORY_ALIGNMENT];
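+                // note (editor, illustrative): with MEMORY_ALIGNMENT == 64,
+                // sizeof(aligned_uint8_t) == 64; e.g. a request for 100 bytes rounds
+                // m_work_size up to 128 and allocates 2 elements = 128 aligned bytes.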
+                GGML_ASSERT(reinterpret_cast<std::uintptr_t>(m_work_data) % MEMORY_ALIGNMENT == 0);
             }
-            return (T*) m_work_data;
+            return reinterpret_cast<T*>(m_work_data);
         }
 
 #ifndef GGML_USE_OPENMP
@@ -67,8 +71,8 @@ namespace ggml::backend::blas {
 #endif
 
     private:
-        
-        //void cblas_sgemm(OPENBLAS_CONST enum CBLAS_ORDER Order, 
+
+        //void cblas_sgemm(OPENBLAS_CONST enum CBLAS_ORDER Order,
         //                 OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA,
         //                 OPENBLAS_CONST enum CBLAS_TRANSPOSE TransB,
         //                 OPENBLAS_CONST blasint M,
@@ -79,7 +83,7 @@ namespace ggml::backend::blas {
         //                 OPENBLAS_CONST float *B, OPENBLAS_CONST blasint ldb,
         //                 OPENBLAS_CONST float beta,
         //                 float *C, OPENBLAS_CONST blasint ldc);
-        
+
         //void cblas_sgemm_batch(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE * TransA_array, OPENBLAS_CONST enum CBLAS_TRANSPOSE * TransB_array, OPENBLAS_CONST blasint * M_array, OPENBLAS_CONST blasint * N_array, OPENBLAS_CONST blasint * K_array,
         //                       OPENBLAS_CONST float * alpha_array, OPENBLAS_CONST float ** A_array, OPENBLAS_CONST blasint * lda_array, OPENBLAS_CONST float ** B_array, OPENBLAS_CONST blasint * ldb_array, OPENBLAS_CONST float * beta_array, float ** C_array, OPENBLAS_CONST blasint * ldc_array, OPENBLAS_CONST blasint group_count, OPENBLAS_CONST blasint * group_size);
         //void cblas_sgemm_batch_strided(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransB, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST float alpha, OPENBLAS_CONST float * A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST blasint stridea, OPENBLAS_CONST float * B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST blasint strideb, OPENBLAS_CONST float beta, float * C, OPENBLAS_CONST blasint ldc, OPENBLAS_CONST blasint stridec, OPENBLAS_CONST blasint group_size);
@@ -90,18 +94,18 @@ namespace ggml::backend::blas {
             GGML_ASSERT(A.ne[0] == B.ne[0]);  // K
             GGML_ASSERT(B.ne[1] == C.ne[1]);  // N
             GGML_ASSERT(A.ne[1] == C.ne[0]);  // M
-            // for now!
+            // for now only this case:
             GGML_ASSERT(A.type == GGML_TYPE_BF16);
             GGML_ASSERT(B.type == GGML_TYPE_F32);
             GGML_ASSERT(C.type == GGML_TYPE_F32);
-            
+
             // convert B to BF16:
             // - B contiguous: (TODO: other case?)
             GGML_ASSERT(((size_t)4*B.ne[0]*B.ne[1]*B.ne[2]) == B.nb[3]);
             std::size_t sizeB = B.ne[0]*B.ne[1]*B.ne[2]*B.ne[3];
             auto* B_work = get_work<bfloat16>(std::max(sizeB, B.ne[0]*(std::size_t)256));
             cblas_sbstobf16(sizeB, (const float*)B.data, 1, B_work, 1);
-            
+
             // compute:
             if (B.ne[2]*B.ne[3] == 1) {
                 if (B.ne[1] == 1) {
@@ -195,13 +199,8 @@ namespace ggml::backend::blas {
         const int64_t r3 = ne13/ne03;
 
         const int64_t ne_plane      = ne01*ne00;
-        const size_t  desired_wsize = type == GGML_TYPE_F32 ? 0 : ne03*ne02*ne_plane*sizeof(float);
 
-        if (work_size < desired_wsize) {
-            work_data.reset(new char[desired_wsize]);
-            work_size = desired_wsize;
-        }
-        void * wdata = work_data.get();
+        auto * wdata = get_work<float>(type == GGML_TYPE_F32 ? 0 : ne03*ne02*ne_plane);
 
         // convert src0 to float
         if (type != GGML_TYPE_F32) {
@@ -219,7 +218,7 @@ namespace ggml::backend::blas {
             for (int64_t i03 = 0; i03 < ne03; i03++) {
                 for (int64_t i02 = 0; i02 < ne02; i02++) {
                     for (int64_t i01 = 0; i01 < ne01; i01++) {
-                        to_float(x + i03*nb03 + i02*nb02 + i01*nb01, 
+                        to_float(x + i03*nb03 + i02*nb02 + i01*nb01,
                                  wplane + i03*nf_plane03 + i02*nf_plane02 + i01*nf_plane01,
                                  ne00);
                     }
@@ -354,13 +353,13 @@ namespace ggml::backend::blas {
 
     public:
         static constexpr ggml_guid s_guid = { 0x12, 0xa8, 0xae, 0xf4, 0xc0, 0x1e, 0x61, 0x97, 0x8f, 0xeb, 0x33, 0x04, 0xa1, 0x33, 0x51, 0x2d };
-        
+
         backend(const std::string& /*params*/, ggml::cpp::backend::device& dev) :
             ggml::cpp::backend::backend(dev)
         { }
 
         virtual ~backend() {
-            if (m_work_data) std::free(m_work_data);
+            if (m_work_data) delete[] m_work_data;
         }
 
         const std::string& get_name() override {
@@ -490,7 +489,7 @@ namespace ggml::backend::blas {
         }
 
         bool caps_buffer_from_host_ptr() override { return true; }
-        ggml::cpp::backend::buffer_type* get_from_host_ptr_buffer_type() override { 
+        ggml::cpp::backend::buffer_type* get_from_host_ptr_buffer_type() override {
             return m_cpu_buffer_from_ptr_type;
         }
 
@@ -523,7 +522,7 @@ namespace ggml::backend::blas {
                        ( src0->type == GGML_TYPE_F32 ||
 #ifdef GGML_BLAS_USE_SBGEMM
                          ( src0->type == GGML_TYPE_BF16 &&
-                           ne1 >= min_batch 
+                           ne1 >= min_batch
                          ) ||
 #endif
                          ( (ne0 >= min_batch && ne1 >= min_batch && ne10 >= min_batch) &&
@@ -561,7 +560,7 @@ namespace ggml::backend::blas {
     public:
         reg() {
             m_device = new device();
-#if defined(GGML_BLAS_USE_OPENBLAS) 
+#if defined(GGML_BLAS_USE_OPENBLAS)
             if (openblas_get_parallel() == OPENBLAS_SEQUENTIAL) {
                 GGML_LOG_WARN("%s: warning: OpenBLAS was compiled without parallel support\n", __func__);
             }
diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt
index 6ca3176a2f..3388a95a8c 100644
--- a/ggml/src/ggml-cpu/CMakeLists.txt
+++ b/ggml/src/ggml-cpu/CMakeLists.txt
@@ -54,6 +54,10 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
         ggml-cpu/ops.cpp
         )
 
+    if (MSVC)
+        add_compile_options(/Zc:__cplusplus)
+    endif()
+
     target_compile_features(${GGML_CPU_NAME} PRIVATE c_std_11 cxx_std_17)
 
     target_include_directories(${GGML_CPU_NAME} PRIVATE . 
ggml-cpu) diff --git a/ggml/src/ggml-cpu/amx/amx.cpp b/ggml/src/ggml-cpu/amx/amx.cpp index 1caf25f685..4966486c72 100644 --- a/ggml/src/ggml-cpu/amx/amx.cpp +++ b/ggml/src/ggml-cpu/amx/amx.cpp @@ -51,7 +51,7 @@ public: virtual ~buffer() { } ggml_status init_tensor(ggml_tensor& tensor) override { - tensor->extra = (void *) ggml::cpu::amx::get_tensor_traits(&tensor); + tensor.extra = (void *) ggml::cpu::amx::get_tensor_traits(&tensor); return GGML_STATUS_SUCCESS; } @@ -66,7 +66,7 @@ public: }; -class extra_buffer_type : ggml::cpu::extra_buffer_type { +class extra_buffer_type : public ggml::cpu::extra_buffer_type { const std::string& get_name() override { static const std::string name {"AMX"}; diff --git a/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp b/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp index 9bcc18d442..2ec5268a88 100644 --- a/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp +++ b/ggml/src/ggml-cpu/kleidiai/kleidiai.cpp @@ -1337,111 +1337,98 @@ public: } }; -static ggml::cpu::tensor_traits * get_tensor_traits(ggml_backend_buffer_t, struct ggml_tensor *) { +static ggml::cpu::tensor_traits * get_tensor_traits(struct ggml_tensor *) { static tensor_traits traits; return &traits; } -} // namespace ggml::cpu::kleidiai -static enum ggml_status ggml_backend_cpu_kleidiai_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { - tensor->extra = (void *) ggml::cpu::kleidiai::get_tensor_traits(buffer, tensor); +// kleidiai buffer +class buffer : public ggml::cpu::buffer { +public: + buffer(std::size_t size) : ggml::cpu::buffer(size) { } - return GGML_STATUS_SUCCESS; - GGML_UNUSED(buffer); -} + virtual ~buffer() { } -static void ggml_backend_cpu_kleidiai_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, - const void * data, size_t offset, size_t size) { - GGML_ASSERT(offset == 0); - GGML_ASSERT(size == ggml_nbytes(tensor)); - - auto tensor_traits = (ggml::cpu::kleidiai::tensor_traits *) tensor->extra; - auto OK = tensor_traits->repack(tensor, data, size); - - GGML_ASSERT(OK == 0); - GGML_UNUSED(buffer); -} - -static const char * ggml_backend_cpu_kleidiai_buffer_type_get_name(ggml_backend_buffer_type_t buft) { - GGML_UNUSED(buft); - return "CPU_KLEIDIAI"; -} - -static ggml_backend_buffer_t ggml_backend_cpu_kleidiai_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { - ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); - - if (buffer == nullptr) { - return nullptr; + ggml_status init_tensor(ggml_tensor& tensor) override { + tensor.extra = (void *) ggml::cpu::kleidiai::get_tensor_traits(&tensor); + return GGML_STATUS_SUCCESS; } - buffer->buft = buft; - buffer->iface.init_tensor = ggml_backend_cpu_kleidiai_buffer_init_tensor; - buffer->iface.set_tensor = ggml_backend_cpu_kleidiai_buffer_set_tensor; - buffer->iface.get_tensor = nullptr; - buffer->iface.cpy_tensor = nullptr; - return buffer; -} + void set_tensor(ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) override { + GGML_ASSERT(offset == 0); + GGML_ASSERT(size == ggml_nbytes(&tensor)); -static size_t ggml_backend_cpu_kleidiai_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { - GGML_UNUSED(buft); - return TENSOR_ALIGNMENT; -} + auto tensor_traits = (ggml::cpu::kleidiai::tensor_traits *) tensor.extra; + auto OK = tensor_traits->repack(&tensor, data, size); -static size_t ggml_backend_cpu_kleidiai_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) { - 
GGML_UNUSED(buft); - - if (tensor->type != GGML_TYPE_Q4_0 && tensor->type != GGML_TYPE_Q8_0) { - return ggml_nbytes(tensor); + GGML_ASSERT(OK == 0); } - const size_t n = tensor->ne[1]; - const size_t k = tensor->ne[0]; +}; - size_t cursor = sizeof(kleidiai_weight_header); - cursor = align_up(cursor, GGML_KLEIDIAI_PACK_ALIGN); +class extra_buffer_type : public ggml::cpu::extra_buffer_type { - std::array kernel_chain; - const bool want_q8 = tensor->type == GGML_TYPE_Q8_0; - const int slot_total = want_q8 ? kleidiai_collect_q8_chain(kernel_chain) - : kleidiai_collect_q4_chain(kernel_chain); - const bool allow_fallback = kleidiai_pack_fallback_allowed(); + const std::string& get_name() override { + static const std::string name {"CPU_KLEIDIAI"}; + return name; + } - size_t slot_count = 0; - for (int slot = 0; slot < slot_total; ++slot) { - if (!allow_fallback && slot > 0) { - break; - } - ggml_kleidiai_kernels * kernels = kernel_chain[slot]; - if (!kernels) { - continue; - } - kernel_info * kernel = &kernels->gemm; - rhs_packing_info * rhs_info = &kernels->rhs_info; - if (!kernel || !rhs_info || !rhs_info->packed_size_ex) { - continue; + ggml::cpp::backend::buffer* alloc_buffer(std::size_t size) override { + return new buffer(size); + } + + size_t get_alloc_size(const ggml_tensor& tensor) override { + + if (tensor.type != GGML_TYPE_Q4_0 && tensor.type != GGML_TYPE_Q8_0) { + return ggml_nbytes(&tensor); } - const ggml_type rhs_type = kernels->rhs_type; - const size_t block_len = rhs_type == GGML_TYPE_Q4_0 ? QK4_0 : - rhs_type == GGML_TYPE_Q8_0 ? QK8_0 : 0; - if (block_len == 0) { - continue; - } + const size_t n = tensor.ne[1]; + const size_t k = tensor.ne[0]; + size_t cursor = sizeof(kleidiai_weight_header); cursor = align_up(cursor, GGML_KLEIDIAI_PACK_ALIGN); - cursor += rhs_info->packed_size_ex(n, k, kernel->get_nr(), kernel->get_kr(), block_len); - ++slot_count; + + std::array kernel_chain; + const bool want_q8 = tensor.type == GGML_TYPE_Q8_0; + const int slot_total = want_q8 ? kleidiai_collect_q8_chain(kernel_chain) + : kleidiai_collect_q4_chain(kernel_chain); + const bool allow_fallback = kleidiai_pack_fallback_allowed(); + + size_t slot_count = 0; + for (int slot = 0; slot < slot_total; ++slot) { + if (!allow_fallback && slot > 0) { + break; + } + ggml_kleidiai_kernels * kernels = kernel_chain[slot]; + if (!kernels) { + continue; + } + kernel_info * kernel = &kernels->gemm; + rhs_packing_info * rhs_info = &kernels->rhs_info; + if (!kernel || !rhs_info || !rhs_info->packed_size_ex) { + continue; + } + + const ggml_type rhs_type = kernels->rhs_type; + const size_t block_len = rhs_type == GGML_TYPE_Q4_0 ? QK4_0 : + rhs_type == GGML_TYPE_Q8_0 ? 
QK8_0 : 0; + if (block_len == 0) { + continue; + } + + cursor = align_up(cursor, GGML_KLEIDIAI_PACK_ALIGN); + cursor += rhs_info->packed_size_ex(n, k, kernel->get_nr(), kernel->get_kr(), block_len); + ++slot_count; + } + + if (slot_count == 0) { + return ggml_nbytes(&tensor); + } + + return std::max(cursor, ggml_nbytes(&tensor)); } - if (slot_count == 0) { - return ggml_nbytes(tensor); - } - - return std::max(cursor, ggml_nbytes(tensor)); -} - -namespace ggml::cpu::kleidiai { -class extra_buffer_type : ggml::cpu::extra_buffer_type { bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { std::array kernel_chain; const int slot_total = kleidiai_collect_kernel_chain(op, kernel_chain); @@ -1481,7 +1468,7 @@ class extra_buffer_type : ggml::cpu::extra_buffer_type { (op->src[1]->nb[1] * op->src[1]->ne[1] != op->src[1]->nb[2])) { return nullptr; } - return ggml::cpu::kleidiai::get_tensor_traits(NULL, NULL); + return ggml::cpu::kleidiai::get_tensor_traits(nullptr); } } } @@ -1491,21 +1478,7 @@ class extra_buffer_type : ggml::cpu::extra_buffer_type { } // namespace ggml::cpu::kleidiai ggml_backend_buffer_type_t ggml_backend_cpu_kleidiai_buffer_type(void) { - static ggml::cpu::kleidiai::extra_buffer_type ctx; - static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_kleidiai = { - /* .iface = */ { - /* .get_name = */ ggml_backend_cpu_kleidiai_buffer_type_get_name, - /* .alloc_buffer = */ ggml_backend_cpu_kleidiai_buffer_type_alloc_buffer, - /* .get_alignment = */ ggml_backend_cpu_kleidiai_buffer_type_get_alignment, - /* .get_max_size = */ nullptr, // defaults to SIZE_MAX - /* .get_alloc_size = */ ggml_backend_cpu_kleidiai_buffer_type_get_alloc_size, - /* .is_host = */ nullptr, - }, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), - /* .context = */ &ctx, - }; - + static auto* buffer_type = ggml::cpu::c_wrapper(new ggml::cpu::kleidiai::extra_buffer_type()); init_kleidiai_context(); - - return &ggml_backend_cpu_buffer_type_kleidiai; + return buffer_type; } diff --git a/ggml/src/ggml-cpu/spacemit/ime.cpp b/ggml/src/ggml-cpu/spacemit/ime.cpp index 91fe1925ea..bbcbf67441 100644 --- a/ggml/src/ggml-cpu/spacemit/ime.cpp +++ b/ggml/src/ggml-cpu/spacemit/ime.cpp @@ -864,98 +864,80 @@ static const ggml::cpu::tensor_traits * ggml_riscv64_spacemit_get_optimal_repack return nullptr; } -static enum ggml_status ggml_backend_riscv64_spacemit_buffer_init_tensor(ggml_backend_buffer_t buffer, - struct ggml_tensor * tensor) { - tensor->extra = - (void *) const_cast(ggml_riscv64_spacemit_get_optimal_repack_type(tensor)); - - GGML_UNUSED(buffer); - - return GGML_STATUS_SUCCESS; -} - -static void ggml_backend_riscv64_spacemit_buffer_set_tensor(ggml_backend_buffer_t buffer, - struct ggml_tensor * tensor, - const void * data, - size_t offset, - size_t size) { - GGML_ASSERT(offset == 0); - GGML_ASSERT(size == ggml_nbytes(tensor)); - - auto tensor_traits = (ggml::cpu::riscv64_spacemit::tensor_traits_base *) tensor->extra; - if (tensor_traits) { - auto OK = tensor_traits->repack(tensor, data, size); - GGML_ASSERT(OK == 0); - } - - GGML_UNUSED(buffer); -} - -static const char * ggml_backend_cpu_riscv64_spacemit_buffer_type_get_name(ggml_backend_buffer_type_t buft) { - return "CPU_RISCV64_SPACEMIT"; - - GGML_UNUSED(buft); -} - -static ggml_backend_buffer_t ggml_backend_cpu_riscv64_spacemit_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, - size_t size) { - ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); - - if 
(buffer == nullptr) { - return nullptr; - } - - buffer->buft = buft; - buffer->iface.init_tensor = ggml_backend_riscv64_spacemit_buffer_init_tensor; - buffer->iface.set_tensor = ggml_backend_riscv64_spacemit_buffer_set_tensor; - buffer->iface.get_tensor = nullptr; - buffer->iface.cpy_tensor = nullptr; - return buffer; -} - -static size_t ggml_backend_cpu_riscv64_spacemit_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { - return 64; - - GGML_UNUSED(buft); -} - -static size_t ggml_backend_cpu_riscv64_spacemit_nbytes(ggml_backend_buffer_type_t buft, - const struct ggml_tensor * tensor) { - for (int i = 0; i < GGML_MAX_DIMS; ++i) { - if (tensor->ne[i] <= 0) { - return 0; - } - } - - size_t nbytes; - const size_t blck_size = ggml_blck_size(tensor->type); - if (blck_size == 1) { - nbytes = ggml_type_size(tensor->type); - for (int i = 0; i < GGML_MAX_DIMS; ++i) { - nbytes += (tensor->ne[i] - 1) * tensor->nb[i]; - } - } else { - nbytes = tensor->ne[0] * tensor->nb[0] / blck_size; - if (tensor->type == GGML_TYPE_Q4_K) { - GGML_ASSERT(nbytes % sizeof(block_q4_K) == 0); - nbytes = (nbytes / sizeof(block_q4_K)) * sizeof(block_q4_1) * 8; - for (int i = 1; i < GGML_MAX_DIMS; ++i) { - nbytes += (tensor->ne[i] - 1) * (tensor->nb[i] / sizeof(block_q4_K)) * sizeof(block_q4_1) * 8; - } - } else { - for (int i = 1; i < GGML_MAX_DIMS; ++i) { - nbytes += (tensor->ne[i] - 1) * tensor->nb[i]; - } - } - } - - GGML_UNUSED(buft); - return nbytes; -} - namespace ggml::cpu::riscv64_spacemit { -class extra_buffer_type : ggml::cpu::extra_buffer_type { + class buffer : public ggml::cpu::buffer { + public: + buffer(std::size_t size) : ggml::cpu::buffer(size) { } + + virtual ~buffer() { } + + ggml_status init_tensor(ggml_tensor& tensor) override { + tensor.extra = + (void *) const_cast(ggml_riscv64_spacemit_get_optimal_repack_type(&tensor)); + return GGML_STATUS_SUCCESS; + } + + void set_tensor(ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) override { + GGML_ASSERT(offset == 0); + GGML_ASSERT(size == ggml_nbytes(&tensor)); + + auto tensor_traits = (ggml::cpu::riscv64_spacemit::tensor_traits_base *) tensor.extra; + if (tensor_traits) { + auto OK = tensor_traits->repack(&tensor, data, size); + GGML_ASSERT(OK == 0); + } + } + + }; + +class extra_buffer_type : public ggml::cpu::extra_buffer_type { + + const std::string& get_name() override { + static const std::string name {"CPU_RISCV64_SPACEMIT"}; + return name; + } + + ggml::cpp::backend::buffer* alloc_buffer(std::size_t size) override { + return new buffer(size); + } + + std::size_t get_alignment() override { + return 64; + } + + std::size_t get_alloc_size(const ggml_tensor& tensor) override { + for (int i = 0; i < GGML_MAX_DIMS; ++i) { + if (tensor.ne[i] <= 0) { + return 0; + } + } + + size_t nbytes; + const size_t blck_size = ggml_blck_size(tensor.type); + if (blck_size == 1) { + nbytes = ggml_type_size(tensor.type); + for (int i = 0; i < GGML_MAX_DIMS; ++i) { + nbytes += (tensor.ne[i] - 1) * tensor.nb[i]; + } + } else { + nbytes = tensor.ne[0] * tensor.nb[0] / blck_size; + if (tensor.type == GGML_TYPE_Q4_K) { + GGML_ASSERT(nbytes % sizeof(block_q4_K) == 0); + nbytes = (nbytes / sizeof(block_q4_K)) * sizeof(block_q4_1) * 8; + for (int i = 1; i < GGML_MAX_DIMS; ++i) { + nbytes += (tensor.ne[i] - 1) * (tensor.nb[i] / sizeof(block_q4_K)) * sizeof(block_q4_1) * 8; + } + } else { + for (int i = 1; i < GGML_MAX_DIMS; ++i) { + nbytes += (tensor.ne[i] - 1) * tensor.nb[i]; + } + } + } + + return nbytes; + } + bool 
supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override {
         switch (op->op) {
             case GGML_OP_MUL_MAT:
@@ -1005,21 +987,6 @@ class extra_buffer_type : ggml::cpu::extra_buffer_type {
 }  // namespace ggml::cpu::riscv64_spacemit
 
 ggml_backend_buffer_type_t ggml_backend_cpu_riscv64_spacemit_buffer_type(void) {
-    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_riscv64_spacemit = {
-        /* .iface = */
-        {
-            /* .get_name       = */ ggml_backend_cpu_riscv64_spacemit_buffer_type_get_name,
-            /* .alloc_buffer   = */ ggml_backend_cpu_riscv64_spacemit_buffer_type_alloc_buffer,
-            /* .get_alignment  = */ ggml_backend_cpu_riscv64_spacemit_buffer_type_get_alignment,
-            /* .get_max_size   = */ nullptr,
-            /* .get_alloc_size = */ ggml_backend_cpu_riscv64_spacemit_nbytes,
-            /* .is_host        = */ nullptr,
-        },
-        /* .device = */
-        ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
-        /* .context = */
-        new ggml::cpu::riscv64_spacemit::extra_buffer_type(),
-    };
-
-    return &ggml_backend_cpu_buffer_type_riscv64_spacemit;
+    static auto* buffer_type = ggml::cpu::c_wrapper(new ggml::cpu::riscv64_spacemit::extra_buffer_type());
+    return buffer_type;
 }
diff --git a/ggml/src/ggml-cpu/traits.cpp b/ggml/src/ggml-cpu/traits.cpp
index 65349ab381..a97c1dce53 100644
--- a/ggml/src/ggml-cpu/traits.cpp
+++ b/ggml/src/ggml-cpu/traits.cpp
@@ -6,11 +6,14 @@
 
 #include
 
+static_assert(__cplusplus >= 201703L, "This file expects a C++17 compatible compiler.");
+
 namespace ggml::cpu {
 
 buffer::buffer(std::size_t size) : m_size(size) {
-    m_data = new (std::align_val_t(32)) uint8_t[m_size];
+    // sizeof(aligned_uint8_t) == TENSOR_ALIGNMENT: allocate m_size bytes, rounded up
+    m_data = new aligned_uint8_t[(m_size + TENSOR_ALIGNMENT - 1) / TENSOR_ALIGNMENT];
     GGML_ASSERT(m_data);
+    GGML_ASSERT(reinterpret_cast<std::uintptr_t>(m_data) % TENSOR_ALIGNMENT == 0);
 }
 
 buffer::~buffer() {
diff --git a/ggml/src/ggml-cpu/traits.h b/ggml/src/ggml-cpu/traits.h
index 27cedfdbd1..c8c6d561e0 100644
--- a/ggml/src/ggml-cpu/traits.h
+++ b/ggml/src/ggml-cpu/traits.h
@@ -36,8 +36,9 @@ public:
     void get_tensor(const ggml_tensor &, void *, std::size_t, std::size_t size) override;
     void clear(uint8_t value) override;
 protected:
+    struct alignas(TENSOR_ALIGNMENT) aligned_uint8_t { uint8_t val; };
     const std::size_t m_size;
-    uint8_t* m_data;
+    aligned_uint8_t* m_data;
 };
 
 class extra_buffer_type {
diff --git a/ggml/src/ggml-metal/ggml-metal.cpp b/ggml/src/ggml-metal/ggml-metal.cpp
index 9382ce53b3..3bf48b5b57 100644
--- a/ggml/src/ggml-metal/ggml-metal.cpp
+++ b/ggml/src/ggml-metal/ggml-metal.cpp
@@ -590,9 +590,7 @@ ggml_backend_t ggml_backend_metal_init(void) {
         return NULL;
     }
 
-    ggml_backend_t backend = (ggml_backend_t) malloc(sizeof(ggml_backend));
-
-    *backend = {
+    auto * backend = new ggml_backend {
         /* .guid      = */ ggml_backend_metal_guid(),
         /* .interface = */ ggml_backend_metal_i,
         /* .device    = */ dev,
@@ -684,9 +682,7 @@ static ggml_backend_t ggml_backend_metal_device_init_backend(ggml_backend_dev_t
         return NULL;
     }
 
-    ggml_backend_t backend = (ggml_backend_t) malloc(sizeof(ggml_backend));
-
-    *backend = {
+    auto * backend = new ggml_backend {
         /* .guid      = */ ggml_backend_metal_guid(),
         /* .interface = */ ggml_backend_metal_i,
         /* .device    = */ dev,
diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp b/ggml/src/ggml-webgpu/ggml-webgpu.cpp
index 128b7dc3de..9761c3ea34 100644
--- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp
+++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp
@@ -2946,8 +2946,7 @@ static ggml_backend_t ggml_backend_webgpu_backend_init(ggml_backend_dev_t dev, c
     backend_ctx->webgpu_ctx = initialize_webgpu_context(dev);
 
     // See GGML Backend Interface section
-    auto * backend = new ggml_backend();
-    *backend = {
+    auto * backend = new ggml_backend {
         /* .guid      = */ ggml_backend_webgpu_guid(),
         /* .interface = */ ggml_backend_webgpu_i,
         /* .device    = */ dev,
diff --git a/ggml/src/ggml_cpp_wrapper.cpp b/ggml/src/ggml_cpp_wrapper.cpp
index f798eefb78..e4b3ef528d 100644
--- a/ggml/src/ggml_cpp_wrapper.cpp
+++ b/ggml/src/ggml_cpp_wrapper.cpp
@@ -7,9 +7,11 @@
 #include
 #include
 
+static_assert(__cplusplus >= 201703L, "This file expects a C++17 compatible compiler.");
+
 namespace ggml::cpp::backend {
 
-// TODO: voir si on ne cree pas une fontion static plutot que friend.
+// maybe better as a static method than a friend function.
 ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t device) {
     auto& ctx = *((ggml::cpp::backend::device*) (device->context));
     if (ctx.m_ggml_extra_buffers_type.size() == 0) { // need init of extra buffer wrappers
@@ -22,14 +24,14 @@ ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t devic
     return ctx.m_ggml_extra_buffers_type.data();
 }
 
-namespace { // unnamed namespace
+    namespace { // unnamed namespace
     //=========================================================
-    // les wrappper pour ggml_backend_buffer
+    // wrapper for ggml_backend_buffer
     void buffer_free_buffer(ggml_backend_buffer_t buf) {
         auto* ctx = (ggml::cpp::backend::buffer*) (buf->context);
         delete ctx;
-        // delete buf; NO => deleted by the core.
+        // delete buf; NO => deleted by the ggml core: ggml_backend_buffer_free().
     }
     void * buffer_get_base(ggml_backend_buffer_t buf) {
         auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context));
@@ -169,7 +171,6 @@ ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t devic
         return c_wrapper(dev, &ctx.init_backend(params?params:""));
     }
     ggml_backend_buffer_type_t device_get_buffer_type(ggml_backend_dev_t dev) {
-        // Note: nothing to delete it.
         auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
         return c_wrapper(dev, &ctx.get_buffer_type());
     }
@@ -187,7 +188,6 @@ ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t devic
         if (!bft) { return nullptr; }
         auto* buf = bft->register_buffer(ptr, size, max_tensor_size);
         if (!buf) { return nullptr; }
-        // comment / ou memoriser ce wrapper, il n'y a pas de "delete"
         auto * ggml_buf_type = c_wrapper(dev, bft);
         return c_wrapper(ggml_buf_type, buf, size);
     }
@@ -247,25 +247,26 @@ ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t devic
         if (name == "ggml_backend_dev_get_extra_bufts") {
             return (void*) backend_dev_get_extra_bufts;
         }
+        // TODO: add the other entries as needed,
+        //       and see how to expose them optionally if useful.
         return nullptr;
     }
 
     }
 
-    // les destructeurs...
+    // virtual destructors
     buffer::~buffer() {}
    buffer_type::~buffer_type() {}
     event::~event() {}
 
     backend::backend(device& dev): m_device(dev) {}
     backend::~backend() { }
 
-    device::~device() {
-        // TODO: il faut detruire des wrapper des buffer_type???
-    }
+    device::~device() { }
 
     reg::~reg() {}
 
     // non virtual fct:
     void device::register_extra_buffer_type(buffer_type* buft) {
-        GGML_ASSERT(m_ggml_extra_buffers_type.size() == 0);  // pas encore initialisé!
+        // has to be called early, before any app asks for them.
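+        // note (editor, illustrative): a device ctor is expected to call e.g.
+        //     register_extra_buffer_type(new my_repack_buffer_type());   // hypothetical type
+        // before the first ggml_backend_dev_get_extra_bufts() query wraps the list.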
@@ -300,7 +301,7 @@ ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t devic
     typedef std::unique_ptr<ggml_backend_buffer_type> c_buffer_type_ptr;
     ggml_backend_buffer_type_t c_wrapper(ggml_backend_dev_t device, buffer_type* ctx) {
-        // the ctx have to be "static".
+        // the ctx must be effectively static: owned by a device, or a true static.
         static std::map<buffer_type*, c_buffer_type_ptr> map;
         if (!ctx) { return nullptr; }
@@ -417,11 +418,9 @@ ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t devic
             /* .context = */ ctx,
         };
         map[ctx] = c_register_ptr(wrapper);
-        //map[ctx] = wrapper;
         return wrapper;
     }
     return it->second.get();
-    //return it->second;
 }
 }
@@ -431,21 +430,27 @@ ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t devic
 namespace ggml::cpp::backend::cpu {
 
     // buffer
+    template <std::size_t ALIGNMENT>
     class buffer : public ggml::cpp::backend::buffer {
-        uint8_t* m_data = nullptr;
+        // correctly aligned storage for C++17.
+        struct alignas(ALIGNMENT) aligned_uint8_t {
+            uint8_t val;
+        };
+        aligned_uint8_t* m_data = nullptr;
         const std::size_t m_size;
 
     public:
-        buffer(std::size_t size, std::size_t alignment): m_size(size) {
-            m_data = new (std::align_val_t(alignment)) uint8_t[m_size];
+        buffer(std::size_t size): m_size(size) {
+            // sizeof(aligned_uint8_t) == ALIGNMENT, so round the byte count up to elements.
+            m_data = new aligned_uint8_t[(m_size + ALIGNMENT - 1) / ALIGNMENT];
+            GGML_ASSERT(reinterpret_cast<uintptr_t>(m_data) % ALIGNMENT == 0);
         }
         buffer(void* ptr, std::size_t /*size*/): m_size(0) {
-            m_data = (uint8_t*) ptr;
+            m_data = (aligned_uint8_t*) ptr;
         }
 
         virtual ~buffer() {
-            if (m_size>0 && m_data) {
+            if (m_size > 0 && m_data) {
                 delete[] m_data;
             }
             m_data = nullptr;
@@ -481,14 +486,14 @@ ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t devic
     };
 
     // buffer_type
+    template <std::size_t ALIGNMENT>
     class buffer_type : public ggml::cpp::backend::buffer_type {
         const std::string m_name;
-        const std::size_t m_alignment;
         const bool m_from_ptr;
 
     public:
-        buffer_type(const std::string& name, bool from_ptr, std::size_t alignment) :
-            m_name(name), m_alignment(alignment), m_from_ptr(from_ptr)
+        buffer_type(const std::string& name, bool from_ptr) :
+            m_name(name), m_from_ptr(from_ptr)
         {}
 
         virtual ~buffer_type() {}
@@ -497,23 +502,23 @@ ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t devic
             return m_name;
         }
 
-        buffer* alloc_buffer(std::size_t size) override {
+        buffer<ALIGNMENT>* alloc_buffer(std::size_t size) override {
             GGML_ASSERT(!m_from_ptr && "buffer type not for allocatable buffer");
-            return new buffer(size, m_alignment);
+            return new buffer<ALIGNMENT>(size);
         }
 
-        std::size_t get_alignment() override {
-            return m_alignment;
+        std::size_t get_alignment() override {
+            return ALIGNMENT;
         }
 
         bool is_host() override { return true; }
 
-        buffer* register_buffer(void * ptr, std::size_t size, std::size_t /*max_tensor_size*/) override {
+        buffer<ALIGNMENT>* register_buffer(void * ptr, std::size_t size, std::size_t /*max_tensor_size*/) override {
             GGML_ASSERT(m_from_ptr && "buffer type not for ptr memory");
-            GGML_ASSERT((uintptr_t)ptr % m_alignment == 0 && "buffer pointer must be aligned");
-            return new buffer(ptr, size);
+            // GGML_ASSERT((uintptr_t)ptr % ALIGNMENT == 0 && "buffer pointer must be aligned");
+            return new buffer<ALIGNMENT>(ptr, size);
         }
     };
 
@@ -526,7 +531,21 @@ namespace ggml::cpp::backend {
         bool from_ptr,
         std::size_t alignment
     ) {
-        return new ggml::cpp::backend::cpu::buffer_type(name, from_ptr, alignment);
+        // TODO: maybe derive the alignment from the supported SIMD width?
+        if (alignment <= 8) {          // 64 bits
+            return new ggml::cpp::backend::cpu::buffer_type<8>(name, from_ptr);
+        } else if (alignment <= 16) {  // 128 bits (SSE / NEON)
+            return new ggml::cpp::backend::cpu::buffer_type<16>(name, from_ptr);
+        } else if (alignment <= 32) {  // 256 bits (AVX / AVX2)
+            return new ggml::cpp::backend::cpu::buffer_type<32>(name, from_ptr);
+        } else if (alignment <= 64) {  // 512 bits (AVX-512)
+            return new ggml::cpp::backend::cpu::buffer_type<64>(name, from_ptr);
+        } else {                       // do we need more?
+            return new ggml::cpp::backend::cpu::buffer_type<128>(name, from_ptr);
+        }
     }
 }
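The new_cpu_buffer_type() dispatch above turns a runtime alignment request into a compile-time template parameter. A standalone sketch of the same pattern (hypothetical names, not ggml API):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // An alignas-templated allocator: the alignment is a template parameter,
    // so each instantiation produces storage with a compile-time alignment.
    template <std::size_t ALIGNMENT>
    struct fixed_align {
        struct alignas(ALIGNMENT) chunk { std::uint8_t val; };
        static chunk * alloc(std::size_t n_bytes) {
            // one chunk is ALIGNMENT bytes: round the byte count up to chunks
            return new chunk[(n_bytes + ALIGNMENT - 1) / ALIGNMENT];
        }
    };

    // Runtime-to-compile-time dispatch, mirroring new_cpu_buffer_type():
    // the requested alignment selects one instantiation.
    static void * alloc_aligned(std::size_t n_bytes, std::size_t alignment) {
        if (alignment <= 8)  { return fixed_align<8>::alloc(n_bytes);  }
        if (alignment <= 16) { return fixed_align<16>::alloc(n_bytes); }
        if (alignment <= 32) { return fixed_align<32>::alloc(n_bytes); }
        if (alignment <= 64) { return fixed_align<64>::alloc(n_bytes); }
        return fixed_align<128>::alloc(n_bytes);
    }

    int main() {
        void * p = alloc_aligned(1000, 64);
        assert(reinterpret_cast<std::uintptr_t>(p) % 64 == 0);
        std::puts("64-byte aligned");
        // freeing needs the matching chunk type, as the wrapper's buffer class does
        delete[] static_cast<fixed_align<64>::chunk *>(p);
        return 0;
    }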
diff --git a/ggml/src/ggml_cpp_wrapper.h b/ggml/src/ggml_cpp_wrapper.h
index aba8971f5c..217649e7cd 100644
--- a/ggml/src/ggml_cpp_wrapper.h
+++ b/ggml/src/ggml_cpp_wrapper.h
@@ -13,7 +13,7 @@
 
 namespace ggml::cpp::backend {
 
-    class buffer { // ggml_backend_buffer_t
+    class GGML_API buffer { // ggml_backend_buffer_t
     public:
         virtual ~buffer();
 
@@ -30,7 +30,7 @@ namespace ggml::cpp::backend {
         virtual void reset () {}
     };
 
-    class buffer_type { // ggml_backend_buffer_type_t
+    class GGML_API buffer_type { // ggml_backend_buffer_type_t
     public:
         virtual ~buffer_type();
 
@@ -45,7 +45,7 @@ namespace ggml::cpp::backend {
     };
 
     // TODO: manage event
-    class event {
+    class GGML_API event {
     public:
         virtual ~event();
     };
@@ -58,7 +58,7 @@ namespace ggml::cpp::backend {
 
     class device;
 
-    class backend { // ggml_backend_t
+    class GGML_API backend { // ggml_backend_t
         backend() = delete;
     public:
         backend(device& dev);
@@ -92,7 +92,7 @@ namespace ggml::cpp::backend {
         device& m_device;
     };
 
-    class device { // ggml_backend_dev_t
+    class GGML_API device { // ggml_backend_dev_t
     protected:
         friend ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t device);
         std::vector<buffer_type*> m_extra_buffers_type;
@@ -125,11 +125,12 @@ namespace ggml::cpp::backend {
         virtual bool caps_events() { return false; }
 
     protected:
+        // must be called by the device at init time.
         void register_extra_buffer_type(buffer_type* buft);
     };
 
-    class reg { // ggml_backend_reg_t
+    class GGML_API reg { // ggml_backend_reg_t
     public:
         virtual ~reg();
 
@@ -138,14 +139,14 @@ namespace ggml::cpp::backend {
         virtual device& get_device(std::size_t index) = 0;
     };
 
-    ggml_backend_buffer_t c_wrapper(ggml_backend_buffer_type_t buft, buffer* ctx, std::size_t size);
-    ggml_backend_buffer_type_t c_wrapper(ggml_backend_dev_t device, buffer_type* ctx);
-    ggml_backend_t c_wrapper(ggml_backend_dev_t device, backend* ctx);
-    ggml_backend_dev_t c_wrapper(ggml_backend_reg_t reg, device* ctx);
-    ggml_backend_reg_t c_wrapper(reg* ctx);
+    GGML_API ggml_backend_buffer_t c_wrapper(ggml_backend_buffer_type_t buft, buffer* ctx, std::size_t size);
+    GGML_API ggml_backend_buffer_type_t c_wrapper(ggml_backend_dev_t device, buffer_type* ctx);
+    GGML_API ggml_backend_t c_wrapper(ggml_backend_dev_t device, backend* ctx);
+    GGML_API ggml_backend_dev_t c_wrapper(ggml_backend_reg_t reg, device* ctx);
+    GGML_API ggml_backend_reg_t c_wrapper(reg* ctx);
 
-    // for simple cpu buffer:
-    buffer_type* new_cpu_buffer_type(
+    // helper for a simple CPU buffer type:
+    GGML_API buffer_type* new_cpu_buffer_type(
         const std::string& name,
         bool from_ptr=false,
         std::size_t alignment = TENSOR_ALIGNMENT