Merge 44a86ee644 into 9e2e2198b0
commit 227471c595
@@ -70,6 +70,7 @@ if (MSVC)
     add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:/utf-8>")
     add_compile_options("$<$<COMPILE_LANGUAGE:C>:/bigobj>")
    add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:/bigobj>")
+    add_compile_options(/Zc:__cplusplus)
 endif()
 
 if (LLAMA_STANDALONE)
@@ -7,6 +7,7 @@ function(llama_add_compile_flags)
             list(APPEND CXX_FLAGS -Werror)
         elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
             add_compile_options(/WX)
+            add_compile_options(/Zc:__cplusplus)
         endif()
     endif()
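Note: both hunks above add /Zc:__cplusplus for MSVC. Without that switch MSVC keeps reporting __cplusplus as 199711L regardless of the selected standard, so the C++17 guards introduced later in this change would fire. A minimal sketch of the kind of guard that depends on it (the static_assert below is the same one used in the new wrapper sources):

    // Fails on MSVC unless /Zc:__cplusplus is passed, because __cplusplus
    // otherwise stays at 199711L even in C++17 mode.
    static_assert(__cplusplus >= 201703L, "This file expects a C++17 compatible compiler.");

    int main() { return 0; }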
@@ -174,17 +174,34 @@
 //
 
 #ifdef GGML_SHARED
-#    if defined(_WIN32) && !defined(__MINGW32__)
-#        ifdef GGML_BUILD
-#            define GGML_API __declspec(dllexport) extern
-#        else
-#            define GGML_API __declspec(dllimport) extern
-#        endif
+#    ifdef __cplusplus
+#        if defined(_WIN32) && !defined(__MINGW32__)
+#            ifdef GGML_BUILD
+#                define GGML_API __declspec(dllexport)
+#            else
+#                define GGML_API __declspec(dllimport)
+#            endif
+#        else
+//#          define GGML_API [[gnu::visibility ("default")]]
+#            define GGML_API __attribute__ ((visibility ("default")))
+#        endif
 #    else
-#        define GGML_API __attribute__ ((visibility ("default"))) extern
+#        if defined(_WIN32) && !defined(__MINGW32__)
+#            ifdef GGML_BUILD
+#                define GGML_API __declspec(dllexport) extern
+#            else
+#                define GGML_API __declspec(dllimport) extern
+#            endif
+#        else
+#            define GGML_API __attribute__ ((visibility ("default"))) extern
+#        endif
 #    endif
 #else
-#    define GGML_API extern
+#    ifdef __cplusplus
+#        define GGML_API
+#    else
+#        define GGML_API extern
+#    endif
 #endif
 
 // TODO: support for clang
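Note: the restructured macro drops `extern` in the C++ branches. That is safe for the function declarations GGML_API is applied to, since a namespace-scope function declaration in C++ has external linkage by default. A minimal self-contained sketch (demo_* names are illustrative, not from ggml):

    #include <cassert>

    #ifdef __cplusplus
    #  define DEMO_API          // C++ branch: functions are extern by default
    #else
    #  define DEMO_API extern   // C branch keeps the explicit extern
    #endif

    DEMO_API int demo_add(int a, int b);   // same linkage either way

    int demo_add(int a, int b) { return a + b; }

    int main() {
        assert(demo_add(2, 3) == 5);
        return 0;
    }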
@@ -705,7 +722,7 @@ extern "C" {
     typedef uint8_t ggml_guid[16];
     typedef ggml_guid * ggml_guid_t;
 
-    GGML_API bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b);
+    GGML_API bool ggml_guid_matches(const ggml_guid * guid_a, const ggml_guid * guid_b);
 
     // misc
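Note: the signature change works around a typedef pitfall: because ggml_guid_t is a pointer typedef, `const ggml_guid_t` means "const pointer to mutable bytes", not "pointer to const bytes". Spelling the parameter as `const ggml_guid *` is what actually promises the GUID is read-only. A small sketch of the distinction (demo_* names are illustrative):

    #include <cstdint>
    #include <cstring>

    typedef uint8_t demo_guid[16];
    typedef demo_guid * demo_guid_t;

    // `const demo_guid_t` would parse as `demo_guid * const` (const pointer,
    // mutable bytes); `const demo_guid *` is the read-only-bytes spelling.
    static bool demo_guid_matches(const demo_guid * a, const demo_guid * b) {
        return memcmp(a, b, sizeof(demo_guid)) == 0;
    }

    int main() {
        demo_guid g1 = {1, 2, 3};
        demo_guid g2 = {1, 2, 3};
        return demo_guid_matches(&g1, &g2) ? 0 : 1;
    }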
@@ -8,6 +8,10 @@ if (CMAKE_SYSTEM_NAME MATCHES "Linux")
     add_compile_definitions($<$<CONFIG:Debug>:_GLIBCXX_ASSERTIONS>)
 endif()
 
+if (MSVC)
+    add_compile_options(/Zc:__cplusplus)
+endif()
+
 if (NOT MSVC)
     if (GGML_SANITIZE_THREAD)
         add_compile_options(-fsanitize=thread)
@@ -25,6 +29,7 @@ if (NOT MSVC)
     endif()
 endif()
 
+
 if (GGML_FATAL_WARNINGS)
     if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
         list(APPEND C_FLAGS -Werror)
@@ -200,6 +205,8 @@ add_library(ggml-base
             ggml.cpp
             ggml-alloc.c
             ggml-backend.cpp
+            ggml_cpp_wrapper.cpp
+            ggml_cpp_wrapper.h
             ggml-opt.cpp
             ggml-threading.cpp
            ggml-threading.h
@@ -120,7 +120,7 @@ extern "C" {
     };
 
     struct ggml_backend {
-        ggml_guid_t guid;
+        const ggml_guid_t guid;
         struct ggml_backend_i iface;
         ggml_backend_dev_t device;
         void * context;
@@ -5,16 +5,23 @@ endif()
 # set(BLA_SIZEOF_INTEGER 8)
 #endif()
 
+ggml_add_backend_library(ggml-blas
+                         ggml-blas.cpp
+                        )
+
+if (GGML_OPENMP_ENABLED STREQUAL "ON")
+    find_package(OpenMP REQUIRED)
+    add_compile_definitions(GGML_USE_OPENMP)
+    target_link_libraries(ggml-blas PRIVATE OpenMP::OpenMP_C OpenMP::OpenMP_CXX)
+    set(BLA_THREAD OMP)
+endif()
+
 set(BLA_VENDOR ${GGML_BLAS_VENDOR})
 find_package(BLAS)
 
 if (BLAS_FOUND)
     message(STATUS "BLAS found, Libraries: ${BLAS_LIBRARIES}")
 
-    ggml_add_backend_library(ggml-blas
-                             ggml-blas.cpp
-                            )
-
     if (${GGML_BLAS_VENDOR} MATCHES "Apple")
         add_compile_definitions(ACCELERATE_NEW_LAPACK)
         add_compile_definitions(ACCELERATE_LAPACK_ILP64)
@@ -68,7 +75,8 @@ if (BLAS_FOUND)
         endif()
     endif()
 
-    message(STATUS "BLAS found, Includes: ${BLAS_INCLUDE_DIRS}")
+    message(STATUS "BLAS found, Includes dirs : ${BLAS_INCLUDE_DIRS}")
+    message(STATUS "BLAS found, Includes flags: ${BLAS_LINKER_FLAGS}")
 
     target_compile_options(ggml-blas PRIVATE ${BLAS_LINKER_FLAGS})
@@ -84,6 +92,10 @@ if (BLAS_FOUND)
         add_compile_definitions(GGML_BLAS_USE_OPENBLAS)
     endif()
 
+    if ("${GGML_BLAS_VENDOR}" MATCHES "FlexiBLAS")
+        add_compile_definitions(GGML_BLAS_USE_FLEXIBLAS)
+    endif()
+
     if ("${GGML_BLAS_VENDOR}" MATCHES "FLAME" OR "${GGML_BLAS_VENDOR}" MATCHES "AOCL" OR "${GGML_BLAS_VENDOR}" MATCHES "AOCL_mt")
         add_compile_definitions(GGML_BLAS_USE_BLIS)
     endif()
@@ -92,8 +104,64 @@ if (BLAS_FOUND)
         add_compile_definitions(GGML_BLAS_USE_NVPL)
     endif()
 
-    target_link_libraries     (ggml-blas PRIVATE ${BLAS_LIBRARIES})
+    if (MSVC)
+        add_compile_options(/Zc:__cplusplus)
+    endif()
+    target_compile_features   (ggml-blas PRIVATE c_std_11 cxx_std_17)
+    target_link_libraries     (ggml-blas PRIVATE BLAS::BLAS)
     target_include_directories(ggml-blas SYSTEM PRIVATE ${BLAS_INCLUDE_DIRS})
 
+elseif (${GGML_BLAS_VENDOR} MATCHES "OpenBLAS_BUILD")
+    # build it from source
+    message(STATUS "OpenBLAS build")
+
+    add_compile_definitions(GGML_BLAS_USE_OPENBLAS)
+
+    include(FetchContent)
+    FetchContent_Declare(
+        openblas
+        GIT_REPOSITORY https://github.com/OpenMathLib/OpenBLAS.git
+        GIT_TAG        v0.3.31
+    )
+
+    # https://www.openmathlib.org/OpenBLAS/docs/build_system/
+    # https://github.com/OpenMathLib/OpenBLAS/blob/develop/CMakeLists.txt
+    set(BUILD_WITHOUT_LAPACK ON)
+    set(BUILD_TESTING OFF)
+    set(BUILD_STATIC_LIBS ON)
+    set(BUILD_SHARED_LIBS OFF)
+    if (GGML_OPENMP_ENABLED STREQUAL "ON")
+        set(USE_OPENMP 1)
+        set(USE_THREAD 1)
+    else()
+        set(USE_OPENMP 0)
+        set(USE_THREAD 1)
+    endif()
+    set(BUILD_BFLOAT16 1)
+    set(BUILD_HFLOAT16 0)
+    set(BUILD_SINGLE 1)
+    set(ONLY_CBLAS 1)
+    #set(BUILD_DOUBLE 0)
+    #set(BUILD_COMPLEX 0)
+    #set(BUILD_COMPLEX16 0)
+    FetchContent_MakeAvailable(openblas)
+    FetchContent_GetProperties(openblas)
+
+    add_compile_definitions(GGML_BLAS_USE_SBGEMM)
+    #add_compile_definitions(GGML_BLAS_USE_SHGEMM)
+    #add_compile_definitions(GGML_BLAS_USE_SGEMM_BATCHED)
+    #add_compile_definitions(GGML_BLAS_USE_SBGEMM_BATCHED)
+    #[...]
+
+    if (MSVC)
+        add_compile_options(/Zc:__cplusplus)
+    endif()
+    target_compile_features   (ggml-blas PRIVATE c_std_11 cxx_std_17)
+    target_link_directories   (ggml-blas PRIVATE ${openblas_BINARY_DIR}/lib)
+    target_link_libraries     (ggml-blas PRIVATE openblas)
+
+    target_include_directories(ggml-blas SYSTEM PRIVATE ${openblas_SOURCE_DIR} ${openblas_BINARY_DIR})
+
 else()
     message(FATAL_ERROR "BLAS not found, please refer to "
                         "https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors"
(File diff suppressed because it is too large.)
@@ -54,6 +54,10 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
         ggml-cpu/ops.cpp
         )
 
+    if (MSVC)
+        add_compile_options(/Zc:__cplusplus)
+    endif()
+
     target_compile_features(${GGML_CPU_NAME} PRIVATE c_std_11 cxx_std_17)
     target_include_directories(${GGML_CPU_NAME} PRIVATE . ggml-cpu)
@@ -35,111 +35,52 @@ class tensor_traits : public ggml::cpu::tensor_traits {
     }
 };
 
-static ggml::cpu::tensor_traits * get_tensor_traits(ggml_backend_buffer_t, struct ggml_tensor *) {
+static ggml::cpu::tensor_traits * get_tensor_traits(struct ggml_tensor *) {
     static tensor_traits traits;
     return &traits;
 }
 }  // namespace ggml::cpu::amx
 
-// AMX buffer interface
-static void ggml_backend_amx_buffer_free_buffer(ggml_backend_buffer_t buffer) {
-    free(buffer->context);
-}
+namespace ggml::cpu::amx {
 
-static void * ggml_backend_amx_buffer_get_base(ggml_backend_buffer_t buffer) {
-    return (void *) (buffer->context);
-}
+// AMX buffer
+class buffer : public ggml::cpu::buffer {
+  public:
+    buffer(std::size_t size) : ggml::cpu::buffer(size) { }
 
-static enum ggml_status ggml_backend_amx_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
-    tensor->extra = (void *) ggml::cpu::amx::get_tensor_traits(buffer, tensor);
+    virtual ~buffer() { }
 
-    GGML_UNUSED(buffer);
-    return GGML_STATUS_SUCCESS;
-}
+    ggml_status init_tensor(ggml_tensor& tensor) override {
+        tensor.extra = (void *) ggml::cpu::amx::get_tensor_traits(&tensor);
+        return GGML_STATUS_SUCCESS;
+    }
 
-static void ggml_backend_amx_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
-                                                  uint8_t value, size_t offset, size_t size) {
-    memset((char *) tensor->data + offset, value, size);
-
-    GGML_UNUSED(buffer);
-}
-
-static void ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
-                                               const void * data, size_t offset, size_t size) {
-    if (qtype_has_amx_kernels(tensor->type)) {
-        GGML_LOG_DEBUG("%s: amx repack tensor %s of type %s\n", __func__, tensor->name, ggml_type_name(tensor->type));
-        ggml_backend_amx_convert_weight(tensor, data, offset, size);
-    } else {
-        memcpy((char *) tensor->data + offset, data, size);
+    void set_tensor(ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) override {
+        if (qtype_has_amx_kernels(tensor.type)) {
+            GGML_LOG_DEBUG("%s: amx repack tensor %s of type %s\n", __func__, tensor.name, ggml_type_name(tensor.type));
+            ggml_backend_amx_convert_weight(&tensor, data, offset, size);
+        } else {
+            memcpy((char *) tensor.data + offset, data, size);
+        }
     }
-
-    GGML_UNUSED(buffer);
-}
+};
 
-/*
-// need to figure what we need to do with buffer->extra.
-static void ggml_backend_amx_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
-    GGML_ASSERT(!qtype_has_amx_kernels(tensor->type));
-    memcpy(data, (const char *)tensor->data + offset, size);
-
-    GGML_UNUSED(buffer);
-}
-
-static bool ggml_backend_amx_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
-    if (ggml_backend_buffer_is_host(src->buffer)) {
-        if (qtype_has_amx_kernels(src->type)) {
-            ggml_backend_amx_convert_weight(dst, src->data, 0, ggml_nbytes(dst));
-        } else {
-            memcpy(dst->data, src->data, ggml_nbytes(src));
-        }
-        return true;
-    }
-    return false;
-
-    GGML_UNUSED(buffer);
-}
-*/
-
-static void ggml_backend_amx_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
-    memset(buffer->context, value, buffer->size);
-}
-
-static ggml_backend_buffer_i ggml_backend_amx_buffer_interface = {
-    /* .free_buffer   = */ ggml_backend_amx_buffer_free_buffer,
-    /* .get_base      = */ ggml_backend_amx_buffer_get_base,
-    /* .init_tensor   = */ ggml_backend_amx_buffer_init_tensor,
-    /* .memset_tensor = */ ggml_backend_amx_buffer_memset_tensor,
-    /* .set_tensor    = */ ggml_backend_amx_buffer_set_tensor,
-    /* .get_tensor    = */ nullptr,
-    /* .cpy_tensor    = */ nullptr,
-    /* .clear         = */ ggml_backend_amx_buffer_clear,
-    /* .reset         = */ nullptr,
-};
-
-static const char * ggml_backend_amx_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
-    return "AMX";
-
-    GGML_UNUSED(buft);
-}
-
-static ggml_backend_buffer_t ggml_backend_amx_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
-    void * data = ggml_aligned_malloc(size);
-    if (data == NULL) {
-        fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size);
-        return NULL;
-    }
-
-    return ggml_backend_buffer_init(buft, ggml_backend_amx_buffer_interface, data, size);
-}
-
-static size_t ggml_backend_amx_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
-    return TENSOR_ALIGNMENT;
-
-    GGML_UNUSED(buft);
-}
-
-namespace ggml::cpu::amx {
-class extra_buffer_type : ggml::cpu::extra_buffer_type {
+class extra_buffer_type : public ggml::cpu::extra_buffer_type {
+
+    const std::string& get_name() override {
+        static const std::string name {"AMX"};
+        return name;
+    }
+
+    ggml::cpp::backend::buffer* alloc_buffer(std::size_t size) override {
+        return new buffer(size);
+    }
+
+    std::size_t get_alloc_size(const ggml_tensor& tensor) override {
+        return ggml_backend_amx_get_alloc_size(&tensor);
+    }
+
     bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override {
         if (op->op != GGML_OP_MUL_MAT) {
             return false;
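Note: this is the core shape of the refactor, repeated below for kleidiai, repack, and riscv64_spacemit: a table of C callbacks plus GGML_UNUSED boilerplate becomes a small class that overrides only the hooks it needs, with storage owned by the base class. A minimal sketch of the idiom (demo_* types stand in for ggml::cpu::buffer and its subclasses):

    #include <cstddef>
    #include <cstring>

    // stand-in for ggml::cpu::buffer: owns the storage, exposes virtual hooks
    class demo_buffer {
      public:
        explicit demo_buffer(std::size_t size) : m_size(size), m_data(new unsigned char[size]) {}
        virtual ~demo_buffer() { delete [] m_data; }

        // default upload is a plain copy; backends override what differs
        virtual void set_tensor(void * dst, const void * src, std::size_t size) {
            std::memcpy(dst, src, size);
        }

      protected:
        std::size_t     m_size;
        unsigned char * m_data;
    };

    // stand-in for the AMX buffer: would repack weights on upload instead
    class demo_amx_buffer : public demo_buffer {
      public:
        using demo_buffer::demo_buffer;
        void set_tensor(void * dst, const void * src, std::size_t size) override {
            std::memcpy(dst, src, size);  // a real backend converts/repacks here
        }
    };

    int main() {
        demo_amx_buffer buf(64);
        unsigned char a[4] = {1, 2, 3, 4}, b[4] = {};
        buf.set_tensor(b, a, sizeof(a));
        return b[3] == 4 ? 0 : 1;
    }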
@@ -198,12 +139,6 @@ class extra_buffer_type : ggml::cpu::extra_buffer_type {
 };
 }  // namespace ggml::cpu::amx
 
-static size_t ggml_backend_amx_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
-    return ggml_backend_amx_get_alloc_size(tensor);
-
-    GGML_UNUSED(buft);
-}
-
 #define ARCH_GET_XCOMP_PERM     0x1022
 #define ARCH_REQ_XCOMP_PERM     0x1023
 #define XFEATURE_XTILECFG       17
@@ -224,24 +159,11 @@ static bool ggml_amx_init() {
 }
 
 ggml_backend_buffer_type_t ggml_backend_amx_buffer_type() {
-    static struct ggml_backend_buffer_type ggml_backend_buffer_type_amx = {
-        /* .iface = */ {
-            /* .get_name       = */ ggml_backend_amx_buffer_type_get_name,
-            /* .alloc_buffer   = */ ggml_backend_amx_buffer_type_alloc_buffer,
-            /* .get_alignment  = */ ggml_backend_amx_buffer_type_get_alignment,
-            /* .get_max_size   = */ nullptr, // defaults to SIZE_MAX
-            /* .get_alloc_size = */ ggml_backend_amx_buffer_type_get_alloc_size,
-            /* .is_host        = */ nullptr,
-        },
-        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
-        /* .context = */ new ggml::cpu::amx::extra_buffer_type(),
-    };
-
+    static auto* buffer_type = ggml::cpu::c_wrapper(new ggml::cpu::amx::extra_buffer_type());
     if (!ggml_amx_init()) {
         return nullptr;
     }
 
-    return &ggml_backend_buffer_type_amx;
+    return buffer_type;
 }
 
 #endif // defined(__AMX_INT8__) && defined(__AVX512VNNI__)
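Note: the file-scope static struct is replaced by a function-local static built through the new ggml::cpu::c_wrapper() factory. Function-local statics are initialized on first use and, since C++11, in a thread-safe way; the wrapped object then lives for the rest of the process, like the old global did. A minimal sketch of the pattern (make_wrapper is a hypothetical stand-in for c_wrapper):

    #include <string>

    struct demo_buffer_type { std::string name; };

    // hypothetical factory standing in for ggml::cpu::c_wrapper()
    static demo_buffer_type * make_wrapper(const char * name) {
        return new demo_buffer_type{name};
    }

    demo_buffer_type * demo_amx_buffer_type() {
        // built once, on first call, thread-safely (C++11 magic statics);
        // intentionally never freed, like the old file-scope static
        static auto * buffer_type = make_wrapper("AMX");
        return buffer_type;
    }

    int main() {
        return demo_amx_buffer_type() == demo_amx_buffer_type() ? 0 : 1;
    }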
@@ -1337,111 +1337,98 @@ public:
     }
 };
 
-static ggml::cpu::tensor_traits * get_tensor_traits(ggml_backend_buffer_t, struct ggml_tensor *) {
+static ggml::cpu::tensor_traits * get_tensor_traits(struct ggml_tensor *) {
     static tensor_traits traits;
     return &traits;
 }
 }  // namespace ggml::cpu::kleidiai
 
-static enum ggml_status ggml_backend_cpu_kleidiai_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
-    tensor->extra = (void *) ggml::cpu::kleidiai::get_tensor_traits(buffer, tensor);
+// kleidiai buffer
+class buffer : public ggml::cpu::buffer {
+  public:
+    buffer(std::size_t size) : ggml::cpu::buffer(size) { }
 
-    return GGML_STATUS_SUCCESS;
-    GGML_UNUSED(buffer);
-}
+    virtual ~buffer() { }
 
-static void ggml_backend_cpu_kleidiai_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
-                                                        const void * data, size_t offset, size_t size) {
-    GGML_ASSERT(offset == 0);
-    GGML_ASSERT(size == ggml_nbytes(tensor));
-
-    auto tensor_traits = (ggml::cpu::kleidiai::tensor_traits *) tensor->extra;
-    auto OK = tensor_traits->repack(tensor, data, size);
-
-    GGML_ASSERT(OK == 0);
-    GGML_UNUSED(buffer);
-}
-
-static const char * ggml_backend_cpu_kleidiai_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
-    GGML_UNUSED(buft);
-    return "CPU_KLEIDIAI";
-}
-
-static ggml_backend_buffer_t ggml_backend_cpu_kleidiai_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
-    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
-
-    if (buffer == nullptr) {
-        return nullptr;
+    ggml_status init_tensor(ggml_tensor& tensor) override {
+        tensor.extra = (void *) ggml::cpu::kleidiai::get_tensor_traits(&tensor);
+        return GGML_STATUS_SUCCESS;
     }
 
-    buffer->buft              = buft;
-    buffer->iface.init_tensor = ggml_backend_cpu_kleidiai_buffer_init_tensor;
-    buffer->iface.set_tensor  = ggml_backend_cpu_kleidiai_buffer_set_tensor;
-    buffer->iface.get_tensor  = nullptr;
-    buffer->iface.cpy_tensor  = nullptr;
-    return buffer;
-}
+    void set_tensor(ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) override {
+        GGML_ASSERT(offset == 0);
+        GGML_ASSERT(size == ggml_nbytes(&tensor));
 
-static size_t ggml_backend_cpu_kleidiai_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
-    GGML_UNUSED(buft);
-    return TENSOR_ALIGNMENT;
-}
+        auto tensor_traits = (ggml::cpu::kleidiai::tensor_traits *) tensor.extra;
+        auto OK = tensor_traits->repack(&tensor, data, size);
 
-static size_t ggml_backend_cpu_kleidiai_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) {
-    GGML_UNUSED(buft);
-
-    if (tensor->type != GGML_TYPE_Q4_0 && tensor->type != GGML_TYPE_Q8_0) {
-        return ggml_nbytes(tensor);
+        GGML_ASSERT(OK == 0);
     }
+};
 
-    const size_t n = tensor->ne[1];
-    const size_t k = tensor->ne[0];
+class extra_buffer_type : public ggml::cpu::extra_buffer_type {
 
-    size_t cursor = sizeof(kleidiai_weight_header);
-    cursor = align_up(cursor, GGML_KLEIDIAI_PACK_ALIGN);
+    const std::string& get_name() override {
+        static const std::string name {"CPU_KLEIDIAI"};
+        return name;
+    }
 
-    std::array<ggml_kleidiai_kernels *, GGML_KLEIDIAI_MAX_KERNEL_SLOTS> kernel_chain;
-    const bool want_q8    = tensor->type == GGML_TYPE_Q8_0;
-    const int  slot_total = want_q8 ? kleidiai_collect_q8_chain(kernel_chain)
-                                    : kleidiai_collect_q4_chain(kernel_chain);
-    const bool allow_fallback = kleidiai_pack_fallback_allowed();
+    ggml::cpp::backend::buffer* alloc_buffer(std::size_t size) override {
+        return new buffer(size);
+    }
 
-    size_t slot_count = 0;
-    for (int slot = 0; slot < slot_total; ++slot) {
-        if (!allow_fallback && slot > 0) {
-            break;
-        }
-        ggml_kleidiai_kernels * kernels = kernel_chain[slot];
-        if (!kernels) {
-            continue;
-        }
-        kernel_info * kernel = &kernels->gemm;
-        rhs_packing_info * rhs_info = &kernels->rhs_info;
-        if (!kernel || !rhs_info || !rhs_info->packed_size_ex) {
-            continue;
-        }
+    std::size_t get_alloc_size(const ggml_tensor& tensor) override {
+        if (tensor.type != GGML_TYPE_Q4_0 && tensor.type != GGML_TYPE_Q8_0) {
+            return ggml_nbytes(&tensor);
+        }
 
-        const ggml_type rhs_type = kernels->rhs_type;
-        const size_t block_len = rhs_type == GGML_TYPE_Q4_0 ? QK4_0 :
-                                 rhs_type == GGML_TYPE_Q8_0 ? QK8_0 : 0;
-        if (block_len == 0) {
-            continue;
-        }
+        const size_t n = tensor.ne[1];
+        const size_t k = tensor.ne[0];
 
-        cursor = align_up(cursor, GGML_KLEIDIAI_PACK_ALIGN);
-        cursor += rhs_info->packed_size_ex(n, k, kernel->get_nr(), kernel->get_kr(), block_len);
-        ++slot_count;
-    }
+        size_t cursor = sizeof(kleidiai_weight_header);
+        cursor = align_up(cursor, GGML_KLEIDIAI_PACK_ALIGN);
 
-    if (slot_count == 0) {
-        return ggml_nbytes(tensor);
-    }
+        std::array<ggml_kleidiai_kernels *, GGML_KLEIDIAI_MAX_KERNEL_SLOTS> kernel_chain;
+        const bool want_q8    = tensor.type == GGML_TYPE_Q8_0;
+        const int  slot_total = want_q8 ? kleidiai_collect_q8_chain(kernel_chain)
+                                        : kleidiai_collect_q4_chain(kernel_chain);
+        const bool allow_fallback = kleidiai_pack_fallback_allowed();
 
-    return std::max(cursor, ggml_nbytes(tensor));
-}
+        size_t slot_count = 0;
+        for (int slot = 0; slot < slot_total; ++slot) {
+            if (!allow_fallback && slot > 0) {
+                break;
+            }
+            ggml_kleidiai_kernels * kernels = kernel_chain[slot];
+            if (!kernels) {
+                continue;
+            }
+            kernel_info * kernel = &kernels->gemm;
+            rhs_packing_info * rhs_info = &kernels->rhs_info;
+            if (!kernel || !rhs_info || !rhs_info->packed_size_ex) {
+                continue;
+            }
 
-namespace ggml::cpu::kleidiai {
-class extra_buffer_type : ggml::cpu::extra_buffer_type {
+            const ggml_type rhs_type = kernels->rhs_type;
+            const size_t block_len = rhs_type == GGML_TYPE_Q4_0 ? QK4_0 :
+                                     rhs_type == GGML_TYPE_Q8_0 ? QK8_0 : 0;
+            if (block_len == 0) {
+                continue;
+            }
+
+            cursor = align_up(cursor, GGML_KLEIDIAI_PACK_ALIGN);
+            cursor += rhs_info->packed_size_ex(n, k, kernel->get_nr(), kernel->get_kr(), block_len);
+            ++slot_count;
+        }
+
+        if (slot_count == 0) {
+            return ggml_nbytes(&tensor);
+        }
+
+        return std::max(cursor, ggml_nbytes(&tensor));
+    }
+
     bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override {
         std::array<ggml_kleidiai_kernels *, GGML_KLEIDIAI_MAX_KERNEL_SLOTS> kernel_chain;
         const int slot_total = kleidiai_collect_kernel_chain(op, kernel_chain);
@@ -1481,7 +1468,7 @@ class extra_buffer_type : ggml::cpu::extra_buffer_type {
                 (op->src[1]->nb[1] * op->src[1]->ne[1] != op->src[1]->nb[2])) {
                 return nullptr;
             }
-            return ggml::cpu::kleidiai::get_tensor_traits(NULL, NULL);
+            return ggml::cpu::kleidiai::get_tensor_traits(nullptr);
         }
     }
 }
@@ -1491,21 +1478,7 @@ class extra_buffer_type : ggml::cpu::extra_buffer_type {
 }  // namespace ggml::cpu::kleidiai
 
 ggml_backend_buffer_type_t ggml_backend_cpu_kleidiai_buffer_type(void) {
-    static ggml::cpu::kleidiai::extra_buffer_type ctx;
-    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_kleidiai = {
-        /* .iface = */ {
-            /* .get_name       = */ ggml_backend_cpu_kleidiai_buffer_type_get_name,
-            /* .alloc_buffer   = */ ggml_backend_cpu_kleidiai_buffer_type_alloc_buffer,
-            /* .get_alignment  = */ ggml_backend_cpu_kleidiai_buffer_type_get_alignment,
-            /* .get_max_size   = */ nullptr, // defaults to SIZE_MAX
-            /* .get_alloc_size = */ ggml_backend_cpu_kleidiai_buffer_type_get_alloc_size,
-            /* .is_host        = */ nullptr,
-        },
-        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
-        /* .context = */ &ctx,
-    };
-
+    static auto* buffer_type = ggml::cpu::c_wrapper(new ggml::cpu::kleidiai::extra_buffer_type());
     init_kleidiai_context();
 
-    return &ggml_backend_cpu_buffer_type_kleidiai;
+    return buffer_type;
 }
@@ -4720,54 +4720,42 @@ static const ggml::cpu::tensor_traits * ggml_repack_get_optimal_repack_type(cons
     return nullptr;
 }
 
-static enum ggml_status ggml_backend_cpu_repack_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
-    tensor->extra = (void *) const_cast<ggml::cpu::tensor_traits *>(ggml_repack_get_optimal_repack_type(tensor));
+namespace ggml::cpu::repack {
 
-    GGML_UNUSED(buffer);
-    return GGML_STATUS_SUCCESS;
-}
+class buffer : public ggml::cpu::buffer {
+  public:
+    buffer(std::size_t size) : ggml::cpu::buffer(size) { }
 
-static void ggml_backend_cpu_repack_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
-                                                      const void * data, size_t offset, size_t size) {
-    GGML_ASSERT(offset == 0);
-    GGML_ASSERT(size == ggml_nbytes(tensor));
+    virtual ~buffer() { }
 
-    auto tensor_traits = (ggml::cpu::repack::tensor_traits_base *) tensor->extra;
-    auto OK = tensor_traits->repack(tensor, data, size);
-
-    GGML_ASSERT(OK == 0);
-    GGML_UNUSED(buffer);
-}
-
-static const char * ggml_backend_cpu_repack_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
-    return "CPU_REPACK";
-
-    GGML_UNUSED(buft);
-}
-
-static ggml_backend_buffer_t ggml_backend_cpu_repack_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
-    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
-
-    if (buffer == nullptr) {
-        return nullptr;
+    ggml_status init_tensor(ggml_tensor & tensor) override {
+        tensor.extra = (void *) const_cast<ggml::cpu::tensor_traits *>(ggml_repack_get_optimal_repack_type(&tensor));
+        return GGML_STATUS_SUCCESS;
     }
 
-    buffer->buft              = buft;
-    buffer->iface.init_tensor = ggml_backend_cpu_repack_buffer_init_tensor;
-    buffer->iface.set_tensor  = ggml_backend_cpu_repack_buffer_set_tensor;
-    buffer->iface.get_tensor  = nullptr;
-    buffer->iface.cpy_tensor  = nullptr;
-    return buffer;
-}
+    void set_tensor(ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) override {
+        GGML_ASSERT(offset == 0);
+        GGML_ASSERT(size == ggml_nbytes(&tensor));
 
-static size_t ggml_backend_cpu_repack_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
-    return TENSOR_ALIGNMENT;
+        auto tensor_traits = (ggml::cpu::repack::tensor_traits_base *) tensor.extra;
+        auto OK = tensor_traits->repack(&tensor, data, size);
 
-    GGML_UNUSED(buft);
-}
+        GGML_ASSERT(OK == 0);
+    }
+};
 
-namespace ggml::cpu::repack {
-class extra_buffer_type : ggml::cpu::extra_buffer_type {
+class extra_buffer_type : public ggml::cpu::extra_buffer_type {
+  public:
+
+    const std::string& get_name() override {
+        static const std::string name {"CPU_REPACK"};
+        return name;
+    }
+
+    ggml::cpp::backend::buffer* alloc_buffer(std::size_t size) override {
+        return new buffer(size);
+    }
+
     bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override {
         if (    op->op == GGML_OP_MUL_MAT &&
                 op->src[0]->buffer &&
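Note: in every one of these backends, init_tensor() stores a tensor_traits pointer in tensor->extra and the compute path later casts it back to dispatch the specialized kernel. A minimal sketch of that extra-pointer dispatch (demo_* names are illustrative):

    #include <cassert>

    // stand-in for ggml::cpu::tensor_traits
    struct demo_traits {
        virtual ~demo_traits() = default;
        virtual int compute() = 0;
    };

    struct demo_repack_traits : demo_traits {
        int compute() override { return 42; }  // a real backend runs the repacked kernel
    };

    struct demo_tensor {
        void * extra = nullptr;  // like ggml_tensor::extra: an opaque per-backend slot
    };

    int main() {
        static demo_repack_traits traits;  // shared and stateless, like the static traits above
        demo_tensor t;
        t.extra = &traits;                              // what init_tensor() does
        int r = ((demo_traits *) t.extra)->compute();   // what the compute path does
        assert(r == 42);
        return 0;
    }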
@@ -4816,18 +4804,6 @@ class extra_buffer_type : ggml::cpu::extra_buffer_type {
 }  // namespace ggml::cpu::repack
 
 ggml_backend_buffer_type_t ggml_backend_cpu_repack_buffer_type(void) {
-    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_repack = {
-        /* .iface = */ {
-            /* .get_name       = */ ggml_backend_cpu_repack_buffer_type_get_name,
-            /* .alloc_buffer   = */ ggml_backend_cpu_repack_buffer_type_alloc_buffer,
-            /* .get_alignment  = */ ggml_backend_cpu_repack_buffer_type_get_alignment,
-            /* .get_max_size   = */ nullptr, // defaults to SIZE_MAX
-            /* .get_alloc_size = */ nullptr, // defaults to ggml_nbytes
-            /* .is_host        = */ nullptr,
-        },
-        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
-        /* .context = */ new ggml::cpu::repack::extra_buffer_type(),
-    };
-
-    return &ggml_backend_cpu_buffer_type_repack;
+    static auto* buffer_type = ggml::cpu::c_wrapper(new ggml::cpu::repack::extra_buffer_type());
+    return buffer_type;
 }
@@ -864,98 +864,80 @@ static const ggml::cpu::tensor_traits * ggml_riscv64_spacemit_get_optimal_repack
     return nullptr;
 }
 
-static enum ggml_status ggml_backend_riscv64_spacemit_buffer_init_tensor(ggml_backend_buffer_t buffer,
-                                                                         struct ggml_tensor * tensor) {
-    tensor->extra =
-        (void *) const_cast<ggml::cpu::tensor_traits *>(ggml_riscv64_spacemit_get_optimal_repack_type(tensor));
-
-    GGML_UNUSED(buffer);
-
-    return GGML_STATUS_SUCCESS;
-}
-
-static void ggml_backend_riscv64_spacemit_buffer_set_tensor(ggml_backend_buffer_t buffer,
-                                                            struct ggml_tensor * tensor,
-                                                            const void * data,
-                                                            size_t offset,
-                                                            size_t size) {
-    GGML_ASSERT(offset == 0);
-    GGML_ASSERT(size == ggml_nbytes(tensor));
-
-    auto tensor_traits = (ggml::cpu::riscv64_spacemit::tensor_traits_base *) tensor->extra;
-    if (tensor_traits) {
-        auto OK = tensor_traits->repack(tensor, data, size);
-        GGML_ASSERT(OK == 0);
-    }
-
-    GGML_UNUSED(buffer);
-}
-
-static const char * ggml_backend_cpu_riscv64_spacemit_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
-    return "CPU_RISCV64_SPACEMIT";
-
-    GGML_UNUSED(buft);
-}
-
-static ggml_backend_buffer_t ggml_backend_cpu_riscv64_spacemit_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft,
-                                                                                        size_t size) {
-    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
-
-    if (buffer == nullptr) {
-        return nullptr;
-    }
-
-    buffer->buft              = buft;
-    buffer->iface.init_tensor = ggml_backend_riscv64_spacemit_buffer_init_tensor;
-    buffer->iface.set_tensor  = ggml_backend_riscv64_spacemit_buffer_set_tensor;
-    buffer->iface.get_tensor  = nullptr;
-    buffer->iface.cpy_tensor  = nullptr;
-    return buffer;
-}
-
-static size_t ggml_backend_cpu_riscv64_spacemit_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
-    return 64;
-
-    GGML_UNUSED(buft);
-}
-
-static size_t ggml_backend_cpu_riscv64_spacemit_nbytes(ggml_backend_buffer_type_t buft,
-                                                       const struct ggml_tensor * tensor) {
-    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
-        if (tensor->ne[i] <= 0) {
-            return 0;
-        }
-    }
-
-    size_t nbytes;
-    const size_t blck_size = ggml_blck_size(tensor->type);
-    if (blck_size == 1) {
-        nbytes = ggml_type_size(tensor->type);
-        for (int i = 0; i < GGML_MAX_DIMS; ++i) {
-            nbytes += (tensor->ne[i] - 1) * tensor->nb[i];
-        }
-    } else {
-        nbytes = tensor->ne[0] * tensor->nb[0] / blck_size;
-        if (tensor->type == GGML_TYPE_Q4_K) {
-            GGML_ASSERT(nbytes % sizeof(block_q4_K) == 0);
-            nbytes = (nbytes / sizeof(block_q4_K)) * sizeof(block_q4_1) * 8;
-            for (int i = 1; i < GGML_MAX_DIMS; ++i) {
-                nbytes += (tensor->ne[i] - 1) * (tensor->nb[i] / sizeof(block_q4_K)) * sizeof(block_q4_1) * 8;
-            }
-        } else {
-            for (int i = 1; i < GGML_MAX_DIMS; ++i) {
-                nbytes += (tensor->ne[i] - 1) * tensor->nb[i];
-            }
-        }
-    }
-
-    GGML_UNUSED(buft);
-    return nbytes;
-}
-
 namespace ggml::cpu::riscv64_spacemit {
 
-class extra_buffer_type : ggml::cpu::extra_buffer_type {
+class buffer : public ggml::cpu::buffer {
+  public:
+    buffer(std::size_t size) : ggml::cpu::buffer(size) { }
+
+    virtual ~buffer() { }
+
+    ggml_status init_tensor(ggml_tensor& tensor) override {
+        tensor.extra =
+            (void *) const_cast<ggml::cpu::tensor_traits *>(ggml_riscv64_spacemit_get_optimal_repack_type(&tensor));
+        return GGML_STATUS_SUCCESS;
+    }
+
+    void set_tensor(ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) override {
+        GGML_ASSERT(offset == 0);
+        GGML_ASSERT(size == ggml_nbytes(&tensor));
+
+        auto tensor_traits = (ggml::cpu::riscv64_spacemit::tensor_traits_base *) tensor.extra;
+        if (tensor_traits) {
+            auto OK = tensor_traits->repack(&tensor, data, size);
+            GGML_ASSERT(OK == 0);
+        }
+    }
+};
+
+class extra_buffer_type : public ggml::cpu::extra_buffer_type {
+
+    const std::string& get_name() override {
+        static const std::string name {"CPU_RISCV64_SPACEMIT"};
+        return name;
+    }
+
+    ggml::cpp::backend::buffer* alloc_buffer(std::size_t size) override {
+        return new buffer(size);
+    }
+
+    std::size_t get_alignment() override {
+        return 64;
+    }
+
+    std::size_t get_alloc_size(const ggml_tensor& tensor) override {
+        for (int i = 0; i < GGML_MAX_DIMS; ++i) {
+            if (tensor.ne[i] <= 0) {
+                return 0;
+            }
+        }
+
+        size_t nbytes;
+        const size_t blck_size = ggml_blck_size(tensor.type);
+        if (blck_size == 1) {
+            nbytes = ggml_type_size(tensor.type);
+            for (int i = 0; i < GGML_MAX_DIMS; ++i) {
+                nbytes += (tensor.ne[i] - 1) * tensor.nb[i];
+            }
+        } else {
+            nbytes = tensor.ne[0] * tensor.nb[0] / blck_size;
+            if (tensor.type == GGML_TYPE_Q4_K) {
+                GGML_ASSERT(nbytes % sizeof(block_q4_K) == 0);
+                nbytes = (nbytes / sizeof(block_q4_K)) * sizeof(block_q4_1) * 8;
+                for (int i = 1; i < GGML_MAX_DIMS; ++i) {
+                    nbytes += (tensor.ne[i] - 1) * (tensor.nb[i] / sizeof(block_q4_K)) * sizeof(block_q4_1) * 8;
+                }
+            } else {
+                for (int i = 1; i < GGML_MAX_DIMS; ++i) {
+                    nbytes += (tensor.ne[i] - 1) * tensor.nb[i];
+                }
+            }
+        }
+
+        return nbytes;
+    }
+
     bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override {
         switch (op->op) {
             case GGML_OP_MUL_MAT:
@@ -1005,21 +987,6 @@ class extra_buffer_type : ggml::cpu::extra_buffer_type {
 }  // namespace ggml::cpu::riscv64_spacemit
 
 ggml_backend_buffer_type_t ggml_backend_cpu_riscv64_spacemit_buffer_type(void) {
-    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_riscv64_spacemit = {
-        /* .iface = */
-        {
-            /* .get_name       = */ ggml_backend_cpu_riscv64_spacemit_buffer_type_get_name,
-            /* .alloc_buffer   = */ ggml_backend_cpu_riscv64_spacemit_buffer_type_alloc_buffer,
-            /* .get_alignment  = */ ggml_backend_cpu_riscv64_spacemit_buffer_type_get_alignment,
-            /* .get_max_size   = */ nullptr,
-            /* .get_alloc_size = */ ggml_backend_cpu_riscv64_spacemit_nbytes,
-            /* .is_host        = */ nullptr,
-        },
-        /* .device = */
-        ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
-        /* .context = */
-        new ggml::cpu::riscv64_spacemit::extra_buffer_type(),
-    };
-
-    return &ggml_backend_cpu_buffer_type_riscv64_spacemit;
+    static auto* buffer_type = ggml::cpu::c_wrapper(new ggml::cpu::riscv64_spacemit::extra_buffer_type());
+    return buffer_type;
 }
@@ -2,11 +2,88 @@
 
 #include "ggml-backend-impl.h"
 #include "ggml-backend.h"
 #include "ggml-cpu.h"
 
+#include <new>
+
+static_assert(__cplusplus >= 201703L, "This file expects a C++17 compatible compiler.");
+
 namespace ggml::cpu {
 
+buffer::buffer(std::size_t size) : m_size(size) {
+    m_data = new aligned_uint8_t[m_size];
+    GGML_ASSERT(m_data);
+    GGML_ASSERT(reinterpret_cast<uintptr_t>(m_data) % TENSOR_ALIGNMENT == 0);
+}
+
+buffer::~buffer() {
+    delete [] m_data;
+}
+
+void* buffer::get_base() {
+    return m_data;
+}
+
+void buffer::memset_tensor(ggml_tensor & tensor, uint8_t value, std::size_t offset, std::size_t size) {
+    GGML_ASSERT(value == 0);
+    memset((uint8_t *) tensor.data + offset, value, size);
+}
+
+void buffer::get_tensor(const ggml_tensor &, void *, std::size_t, std::size_t size) {
+    GGML_ASSERT(size == 0);
+}
+
+void buffer::clear(uint8_t value) {
+    memset(m_data, value, m_size);
+}
+
 tensor_traits::~tensor_traits() {}
 
 extra_buffer_type::~extra_buffer_type() {}
 
+namespace {
+    const char *buffer_type_get_name (ggml_backend_buffer_type_t buft) {
+        auto& ctx = *((extra_buffer_type*) (buft->context));
+        return ctx.get_name().c_str();
+    }
+    std::size_t buffer_type_get_alignment (ggml_backend_buffer_type_t buft) {
+        auto& ctx = *((extra_buffer_type*) (buft->context));
+        return ctx.get_alignment();
+    }
+    std::size_t buffer_type_get_max_size (ggml_backend_buffer_type_t buft) {
+        auto& ctx = *((extra_buffer_type*) (buft->context));
+        return ctx.get_max_size();
+    }
+    std::size_t buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
+        auto& ctx = *((extra_buffer_type*) (buft->context));
+        return ctx.get_alloc_size(*tensor);
+    }
+    bool buffer_type_is_host(ggml_backend_buffer_type_t /*buft*/) {
+        return false;
+    }
+    ggml_backend_buffer_t buffer_type_alloc_buffer (ggml_backend_buffer_type_t buft, std::size_t size) {
+        auto& ctx = *((extra_buffer_type*) (buft->context));
+        return c_wrapper(buft, ctx.alloc_buffer(size), size);
+    }
+}
+
+ggml_backend_buffer_type_t c_wrapper(extra_buffer_type* ctx) {
+    if (!ctx) { return nullptr; }
+    return new ggml_backend_buffer_type {
+        /* .iface = */ {
+            /* .get_name       = */ buffer_type_get_name,
+            /* .alloc_buffer   = */ buffer_type_alloc_buffer,
+            /* .get_alignment  = */ buffer_type_get_alignment,
+            /* .get_max_size   = */ buffer_type_get_max_size,
+            /* .get_alloc_size = */ buffer_type_get_alloc_size,
+            /* .is_host        = */ buffer_type_is_host,
+        },
+        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
+        /* .context = */ ctx,
+    };
+}
+
 } // namespace ggml::cpu
 
 bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) {
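Note: the free functions in the unnamed namespace above are C trampolines: each recovers the C++ object from the context pointer and forwards to a virtual method, so ggml's C structs of function pointers can drive a C++ class hierarchy. A condensed sketch of the idiom (demo_* names are illustrative):

    #include <cassert>

    // the C side: a vtable-like struct of function pointers plus an opaque context
    struct demo_c_iface  { int (*get_value)(void * context); };
    struct demo_c_handle { demo_c_iface iface; void * context; };

    // the C++ side
    struct demo_cpp_base {
        virtual ~demo_cpp_base() = default;
        virtual int get_value() = 0;
    };
    struct demo_cpp_impl : demo_cpp_base {
        int get_value() override { return 7; }
    };

    // the trampoline: recover the C++ object from context and forward
    static int demo_get_value(void * context) {
        return ((demo_cpp_base *) context)->get_value();
    }

    int main() {
        demo_cpp_impl impl;
        demo_c_handle h { { demo_get_value }, &impl };
        assert(h.iface.get_value(h.context) == 7);  // the C call lands in the C++ override
        return 0;
    }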
@@ -4,7 +4,9 @@
 #include "ggml.h"
 
 #ifdef __cplusplus
+#    include "ggml_cpp_wrapper.h"
 #    include <vector>
+#    include <string>
 extern "C" {
 #endif
@@ -24,12 +26,38 @@ class tensor_traits {
     virtual bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) = 0;
 };
 
+// a simple buffer for cpu
+class buffer : public ggml::cpp::backend::buffer {
+  public:
+    buffer(std::size_t size);
+    virtual ~buffer();
+    void* get_base() override;
+    void  memset_tensor(ggml_tensor & tensor, uint8_t value, std::size_t offset, std::size_t size) override;
+    void  get_tensor(const ggml_tensor &, void *, std::size_t, std::size_t size) override;
+    void  clear(uint8_t value) override;
+  protected:
+    struct alignas(TENSOR_ALIGNMENT) aligned_uint8_t { uint8_t val; };
+    const std::size_t m_size;
+    aligned_uint8_t*  m_data;
+};
+
 class extra_buffer_type {
   public:
     virtual ~extra_buffer_type();
+    // the base buffer_type functions
+    virtual const std::string& get_name() = 0;
+    virtual ggml::cpp::backend::buffer* alloc_buffer(std::size_t size) = 0;
+    virtual std::size_t get_alignment() { return TENSOR_ALIGNMENT; }
+    virtual std::size_t get_max_size() { return SIZE_MAX; }
+    virtual std::size_t get_alloc_size(const ggml_tensor& tensor) { return ggml_nbytes(&tensor); }
+
+    // the extra functions
     virtual bool supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) = 0;
     virtual tensor_traits * get_tensor_traits(const struct ggml_tensor * op) = 0;
 };
 
+ggml_backend_buffer_type_t c_wrapper(extra_buffer_type* ctx);
+
 } // namespace ggml::cpu
 
 // implemented in ggml-cpu.cpp.
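Note: the aligned_uint8_t member is how the buffer gets over-aligned storage from a plain new[]: wrapping a byte in an alignas(TENSOR_ALIGNMENT) struct makes each array element, and hence the array base, carry that alignment (C++17 aligned allocation). One consequence worth noting is that sizeof(aligned_uint8_t) equals the alignment, so new aligned_uint8_t[n] reserves n times the alignment in bytes. A minimal sketch, assuming a 64-byte requirement:

    #include <cassert>
    #include <cstdint>

    constexpr std::size_t kAlign = 64;  // stands in for TENSOR_ALIGNMENT

    // each element is 64-byte aligned (and 64 bytes big), so the array base is too
    struct alignas(kAlign) aligned_byte { std::uint8_t val; };

    int main() {
        auto * data = new aligned_byte[4];  // C++17 new[] honors extended alignment
        assert(reinterpret_cast<std::uintptr_t>(data) % kAlign == 0);
        delete [] data;
        return 0;
    }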
@@ -4,6 +4,7 @@
 #include "ggml.h"
 #include "gguf.h"
+#include "ggml-log.h"
 
 #include <assert.h>
 #include <math.h>
@@ -105,40 +106,6 @@ static inline bool ggml_impl_is_view(const struct ggml_tensor * t) {
 static inline float ggml_compute_softplus_f32(float input) {
     return (input > 20.0f) ? input : logf(1 + expf(input));
 }
-//
-// logging
-//
-
-GGML_ATTRIBUTE_FORMAT(2, 3)
-GGML_API void ggml_log_internal        (enum ggml_log_level level, const char * format, ...);
-GGML_API void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data);
-
-#define GGML_LOG(...)       ggml_log_internal(GGML_LOG_LEVEL_NONE , __VA_ARGS__)
-#define GGML_LOG_INFO(...)  ggml_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
-#define GGML_LOG_WARN(...)  ggml_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
-#define GGML_LOG_ERROR(...) ggml_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
-#define GGML_LOG_DEBUG(...) ggml_log_internal(GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
-#define GGML_LOG_CONT(...)  ggml_log_internal(GGML_LOG_LEVEL_CONT , __VA_ARGS__)
-
-#define GGML_DEBUG 0
-
-#if (GGML_DEBUG >= 1)
-#define GGML_PRINT_DEBUG(...) GGML_LOG_DEBUG(__VA_ARGS__)
-#else
-#define GGML_PRINT_DEBUG(...)
-#endif
-
-#if (GGML_DEBUG >= 5)
-#define GGML_PRINT_DEBUG_5(...) GGML_LOG_DEBUG(__VA_ARGS__)
-#else
-#define GGML_PRINT_DEBUG_5(...)
-#endif
-
-#if (GGML_DEBUG >= 10)
-#define GGML_PRINT_DEBUG_10(...) GGML_LOG_DEBUG(__VA_ARGS__)
-#else
-#define GGML_PRINT_DEBUG_10(...)
-#endif
-
 // tensor params
@@ -0,0 +1,48 @@
+#pragma once
+
+#include "ggml.h"
+
+// GGML internal header
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//
+// logging: implemented in ggml.c
+//
+
+GGML_ATTRIBUTE_FORMAT(2, 3)
+GGML_API void ggml_log_internal        (enum ggml_log_level level, const char * format, ...);
+GGML_API void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data);
+
+#define GGML_LOG(...)       ggml_log_internal(GGML_LOG_LEVEL_NONE , __VA_ARGS__)
+#define GGML_LOG_INFO(...)  ggml_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
+#define GGML_LOG_WARN(...)  ggml_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
+#define GGML_LOG_ERROR(...) ggml_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
+#define GGML_LOG_DEBUG(...) ggml_log_internal(GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
+#define GGML_LOG_CONT(...)  ggml_log_internal(GGML_LOG_LEVEL_CONT , __VA_ARGS__)
+
+#define GGML_DEBUG 0
+
+#if (GGML_DEBUG >= 1)
+#define GGML_PRINT_DEBUG(...) GGML_LOG_DEBUG(__VA_ARGS__)
+#else
+#define GGML_PRINT_DEBUG(...)
+#endif
+
+#if (GGML_DEBUG >= 5)
+#define GGML_PRINT_DEBUG_5(...) GGML_LOG_DEBUG(__VA_ARGS__)
+#else
+#define GGML_PRINT_DEBUG_5(...)
+#endif
+
+#if (GGML_DEBUG >= 10)
+#define GGML_PRINT_DEBUG_10(...) GGML_LOG_DEBUG(__VA_ARGS__)
+#else
+#define GGML_PRINT_DEBUG_10(...)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
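Note: the GGML_PRINT_DEBUG* macros compile away entirely below the chosen GGML_DEBUG level, so debug logging costs nothing in release builds. A minimal sketch of the same gating pattern (demo_* names are illustrative):

    #include <cstdio>

    #define DEMO_DEBUG 1

    #if (DEMO_DEBUG >= 1)
    #define DEMO_PRINT_DEBUG(...) fprintf(stderr, __VA_ARGS__)
    #else
    #define DEMO_PRINT_DEBUG(...)   // expands to nothing when the level is too low
    #endif

    int main() {
        DEMO_PRINT_DEBUG("value = %d\n", 42);  // emitted only because DEMO_DEBUG >= 1
        return 0;
    }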
@@ -590,9 +590,7 @@ ggml_backend_t ggml_backend_metal_init(void) {
         return NULL;
     }
 
-    ggml_backend_t backend = (ggml_backend_t) malloc(sizeof(ggml_backend));
-
-    *backend = {
+    auto * backend = new ggml_backend {
         /* .guid      = */ ggml_backend_metal_guid(),
         /* .interface = */ ggml_backend_metal_i,
         /* .device    = */ dev,

@@ -684,9 +682,7 @@ static ggml_backend_t ggml_backend_metal_device_init_backend(ggml_backend_dev_t
         return NULL;
     }
 
-    ggml_backend_t backend = (ggml_backend_t) malloc(sizeof(ggml_backend));
-
-    *backend = {
+    auto * backend = new ggml_backend {
         /* .guid      = */ ggml_backend_metal_guid(),
         /* .interface = */ ggml_backend_metal_i,
         /* .device    = */ dev,
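Note: replacing malloc plus assignment with a single braced new expression matters twice over: malloc never runs constructors (a problem once ggml_backend has a const guid member, as changed above), and assignment through *backend = { ... } cannot compile with a const member at all. A minimal sketch of the difference (demo_* names are illustrative):

    #include <cstdint>

    struct demo_backend {
        const std::uint8_t * const guid;  // const member: settable only at initialization
        void *                     context;
    };

    static const std::uint8_t demo_guid[16] = {0x42};

    int main() {
        // old style (malloc, then *p = {...}) would need assignment, which the
        // const member forbids; the braced new allocates and initializes at once
        auto * backend = new demo_backend {
            /* .guid    = */ demo_guid,
            /* .context = */ nullptr,
        };
        bool ok = backend->guid == demo_guid;
        delete backend;
        return ok ? 0 : 1;
    }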
@@ -2946,8 +2946,7 @@ static ggml_backend_t ggml_backend_webgpu_backend_init(ggml_backend_dev_t dev, c
     backend_ctx->webgpu_ctx = initialize_webgpu_context(dev);
 
     // See GGML Backend Interface section
-    auto * backend = new ggml_backend();
-    *backend = {
+    auto * backend = new ggml_backend {
         /* .guid      = */ ggml_backend_webgpu_guid(),
         /* .interface = */ ggml_backend_webgpu_i,
         /* .device    = */ dev,
@@ -492,7 +492,7 @@ void ggml_fp32_to_bf16_row(const float * x, ggml_bf16_t * y, int64_t n) {
     }
 }
 
-bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b) {
+bool ggml_guid_matches(const ggml_guid * guid_a, const ggml_guid * guid_b) {
     return memcmp(guid_a, guid_b, sizeof(ggml_guid)) == 0;
 }
@ -0,0 +1,551 @@
|
|||
#include "ggml_cpp_wrapper.h"
|
||||
|
||||
#include "ggml-backend-impl.h"
|
||||
#include "ggml.h"
|
||||
|
||||
#include <cstdlib>
|
||||
#include <map>
|
||||
#include <memory>
|
||||
|
||||
static_assert(__cplusplus >= 201703L, "This file expects a C++17 compatible compiler.");
|
||||
|
||||
namespace ggml::cpp::backend {
|
||||
|
||||
// may be best with a static methode than with a friend function.
|
||||
ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t device) {
|
||||
auto& ctx = *((ggml::cpp::backend::device*) (device->context));
|
||||
if (ctx.m_ggml_extra_buffers_type.size() == 0) { // need init of extra buffer wrappers
|
||||
for (auto* buft : ctx.m_extra_buffers_type) {
|
||||
auto* c_buft = c_wrapper(device, buft);
|
||||
ctx.m_ggml_extra_buffers_type.push_back(c_buft);
|
||||
}
|
||||
ctx.m_ggml_extra_buffers_type.push_back(nullptr);
|
||||
}
|
||||
return ctx.m_ggml_extra_buffers_type.data();
|
||||
}
|
||||
|
||||
namespace { // unamed namespace
|
||||
|
||||
//=========================================================
|
||||
// wrappper for ggml_backend_buffer
|
||||
void buffer_free_buffer(ggml_backend_buffer_t buf) {
|
||||
auto* ctx = (ggml::cpp::backend::buffer*) (buf->context);
|
||||
delete ctx;
|
||||
// delete buf; NO => deleted by the ggml_core: ggml_backend_buffer_free().
|
||||
}
|
||||
void * buffer_get_base(ggml_backend_buffer_t buf) {
|
||||
auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context));
|
||||
return ctx.get_base();
|
||||
}
|
||||
ggml_status buffer_init_tensor(ggml_backend_buffer_t buf, ggml_tensor * tensor) {
|
||||
auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context));
|
||||
return ctx.init_tensor(*tensor);
|
||||
}
|
||||
void buffer_memset_tensor(ggml_backend_buffer_t buf, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
|
||||
auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context));
|
||||
ctx.memset_tensor(*tensor, value, offset, size);
|
||||
}
|
||||
void buffer_set_tensor(ggml_backend_buffer_t buf, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
|
||||
auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context));
|
||||
ctx.set_tensor(*tensor, data, offset, size);
|
||||
}
|
||||
void buffer_get_tensor(ggml_backend_buffer_t buf, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
|
||||
auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context));
|
||||
ctx.get_tensor(*tensor, data, offset, size);
|
||||
}
|
||||
bool buffer_cpy_tensor(ggml_backend_buffer_t buf, const ggml_tensor * src, ggml_tensor * dst) {
|
||||
auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context));
|
||||
return ctx.cpy_tensor(*src, *dst);
|
||||
}
|
||||
void buffer_clear(ggml_backend_buffer_t buf, uint8_t value) {
|
||||
auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context));
|
||||
ctx.clear(value);
|
||||
}
|
||||
void buffer_reset(ggml_backend_buffer_t buf) {
|
||||
auto& ctx = *((ggml::cpp::backend::buffer*) (buf->context));
|
||||
ctx.reset();
|
||||
}
|
||||
|
||||
//=========================================================
|
||||
// wrapppers for ggml_backend_buffer_type
|
||||
const char *buffer_type_get_name (ggml_backend_buffer_type_t buft) {
|
||||
auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context));
|
||||
return ctx.get_name().c_str();
|
||||
}
|
||||
std::size_t buffer_type_get_alignment (ggml_backend_buffer_type_t buft) {
|
||||
auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context));
|
||||
return ctx.get_alignment();
|
||||
}
|
||||
std::size_t buffer_type_get_max_size (ggml_backend_buffer_type_t buft) {
|
||||
auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context));
|
||||
return ctx.get_max_size();
|
||||
}
|
||||
std::size_t buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
|
||||
auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context));
|
||||
return ctx.get_alloc_size(*tensor);
|
||||
}
|
||||
bool buffer_type_is_host (ggml_backend_buffer_type_t buft) {
|
||||
auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context));
|
||||
return ctx.is_host();
|
||||
}
|
||||
ggml_backend_buffer_t buffer_type_alloc_buffer (ggml_backend_buffer_type_t buft, std::size_t size) {
|
||||
auto& ctx = *((ggml::cpp::backend::buffer_type*) (buft->context));
|
||||
return c_wrapper(buft, ctx.alloc_buffer(size), size);
|
||||
}
|
||||
|
||||
//=========================================================
|
||||
// wrapppers for ggml_backend
|
||||
const char * backend_get_name(ggml_backend_t bkd) {
|
||||
auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context));
|
||||
return ctx.get_name().c_str();
|
||||
}
|
||||
void backend_free(ggml_backend_t backend) {
|
||||
auto* ctx = (ggml::cpp::backend::backend*) (backend->context);
|
||||
delete ctx;
|
||||
delete backend;
|
||||
}
|
||||
void backend_set_tensor_async(ggml_backend_t bkd, ggml_tensor * tensor, const void * data, std::size_t offset, std::size_t size) {
|
||||
auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context));
|
||||
ctx.set_tensor_async(*tensor, data, offset, size);
|
||||
}
|
||||
void backend_get_tensor_async(ggml_backend_t bkd, const ggml_tensor * tensor, void * data, std::size_t offset, std::size_t size) {
|
||||
auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context));
|
||||
ctx.get_tensor_async(*tensor, data, offset, size);
|
||||
}
|
||||
bool backend_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const ggml_tensor * src, ggml_tensor * dst) {
|
||||
auto& ctx = *((ggml::cpp::backend::backend*) (backend_dst->context));
|
||||
return ctx.cpy_tensor_async(backend_src, *src, *dst);
|
||||
}
|
||||
void backend_synchronize(ggml_backend_t bkd) {
|
||||
auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context));
|
||||
ctx.synchronize();
|
||||
}
|
||||
enum ggml_status backend_graph_compute(ggml_backend_t bkd, ggml_cgraph * cgraph) {
|
||||
auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context));
|
||||
return ctx.graph_compute(*cgraph);
|
||||
}
|
||||
void backend_event_record(ggml_backend_t bkd, ggml_backend_event_t evt) {
|
||||
auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context));
|
||||
ctx.event_record(*((event*) evt));
|
||||
}
|
||||
void backend_event_wait (ggml_backend_t bkd, ggml_backend_event_t evt) {
|
||||
auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context));
|
||||
ctx.event_wait(*((event*) evt));
|
||||
}
|
||||
void backend_set_n_threads(ggml_backend_t bkd, int n_threads) {
|
||||
auto& ctx = *((ggml::cpp::backend::backend*) (bkd->context));
|
||||
ctx.set_n_threads(n_threads);
|
||||
}
|
||||
|
||||
//=========================================================
|
||||
// wrapppers for ggml_backend_device
|
||||
const char * device_get_name(ggml_backend_dev_t dev) {
|
||||
auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
|
||||
return ctx.get_name().c_str();
|
||||
}
|
||||
const char * device_get_description(ggml_backend_dev_t dev) {
|
||||
auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
|
||||
return ctx.get_description().c_str();
|
||||
}
|
||||
void device_get_memory(ggml_backend_dev_t dev, std::size_t * free, std::size_t * total) {
|
||||
auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
|
||||
ctx.get_memory(*free, *total);
|
||||
}
|
||||
enum ggml_backend_dev_type device_get_type(ggml_backend_dev_t dev) {
|
||||
auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
|
||||
return ctx.get_type();
|
||||
}
|
||||
void device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
    auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
    props->name        = ctx.get_name().c_str();
    props->description = ctx.get_description().c_str();
    ctx.get_memory(props->memory_free, props->memory_total);
    props->type = ctx.get_type();
    props->caps.async                = ctx.caps_async();
    props->caps.host_buffer          = ctx.caps_host_buffer();
    props->caps.buffer_from_host_ptr = ctx.caps_buffer_from_host_ptr();
    props->caps.events               = ctx.caps_events();
}

ggml_backend_t device_init_backend(ggml_backend_dev_t dev, const char * params) {
    auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
    return c_wrapper(dev, &ctx.init_backend(params ? params : ""));
}

ggml_backend_buffer_type_t device_get_buffer_type(ggml_backend_dev_t dev) {
    auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
    return c_wrapper(dev, &ctx.get_buffer_type());
}

ggml_backend_buffer_type_t device_get_host_buffer_type(ggml_backend_dev_t dev) {
    auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
    auto* bft = ctx.get_host_buffer_type();
    if (bft) {
        return c_wrapper(dev, bft);
    }
    return nullptr;
}

ggml_backend_buffer_t device_buffer_from_host_ptr(ggml_backend_dev_t dev, void * ptr, std::size_t size, std::size_t max_tensor_size) {
    auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
    auto* bft = ctx.get_from_host_ptr_buffer_type();
    if (!bft) { return nullptr; }
    auto* buf = bft->register_buffer(ptr, size, max_tensor_size);
    if (!buf) { return nullptr; }
    auto * ggml_buf_type = c_wrapper(dev, bft);
    return c_wrapper(ggml_buf_type, buf, size);
}

bool device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
    auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
    return ctx.supports_op(*op);
}

bool device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
    auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
    return ctx.supports_buft(buft /*->context*/);
}

bool device_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
    auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
    return ctx.offload_op(*op);
}

ggml_backend_event_t device_event_new(ggml_backend_dev_t dev) {
    auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
    auto* evt = ctx.event_new();
    if (!evt) { return nullptr; }
    return new ggml_backend_event {
        dev,
        evt,
    };
}

void device_event_free(ggml_backend_dev_t /*dev*/, ggml_backend_event_t evt_c) {
    auto* evt_cpp = (event*)(evt_c->context);
    delete evt_cpp;
    delete evt_c;
}

void device_event_synchronize(ggml_backend_dev_t dev, ggml_backend_event_t evt_c) {
    auto& ctx = *((ggml::cpp::backend::device*) (dev->context));
    auto* evt_cpp = (event*)(evt_c->context);
    ctx.event_synchronize(*evt_cpp);
}

//=========================================================
// wrappers for ggml_backend_reg
const char * reg_get_name(ggml_backend_reg_t reg) {
    auto& ctx = *((ggml::cpp::backend::reg*) (reg->context));
    return ctx.get_name().c_str();
}

std::size_t reg_get_device_count(ggml_backend_reg_t reg) {
    auto& ctx = *((ggml::cpp::backend::reg*) (reg->context));
    return ctx.get_device_count();
}

ggml_backend_dev_t reg_get_device(ggml_backend_reg_t reg, std::size_t index) {
    auto& ctx = *((ggml::cpp::backend::reg*) (reg->context));
    return c_wrapper(reg, &ctx.get_device(index));
}

void * reg_get_proc_address(ggml_backend_reg_t /*reg*/, const char * cname) {
    const auto name = std::string(cname);
    if (name == "ggml_backend_set_n_threads") {
        return (void *) backend_set_n_threads;
    }
    if (name == "ggml_backend_dev_get_extra_bufts") {
        return (void *) backend_dev_get_extra_bufts;
    }
    // TODO: add the other entry points as needed;
    // see how to make them optional if useful.
    return nullptr;
}
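
// Note: ggml discovers these backend-specific extensions by name through
// ggml_backend_reg_get_proc_address(). A caller-side sketch (hedged; the exact
// call site depends on the application):
//
//     auto set_n_threads_fn = (ggml_backend_set_n_threads_t)
//         ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
//     if (set_n_threads_fn) { set_n_threads_fn(backend, 8); }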

}

// virtual destructors
buffer::~buffer() {}
buffer_type::~buffer_type() {}
event::~event() {}
backend::backend(device& dev): m_device(dev) {}
backend::~backend() {}
device::~device() {}
reg::~reg() {}

// non-virtual functions:
void device::register_extra_buffer_type(buffer_type* buft) {
    // has to be called early, before any app asks for them:
    // once the C wrappers have been handed out, the list must not change.
    GGML_ASSERT(m_ggml_extra_buffers_type.size() == 0);
    m_extra_buffers_type.push_back(buft);
}
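
// A device implementation would typically register its extra buffer types in
// its constructor (sketch; `my_device` and the repacked buffer type name are
// hypothetical):
//
//     my_device::my_device() {
//         register_extra_buffer_type(new_cpu_buffer_type("MyDev_REPACK"));
//     }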

//=========================================================
// the wrappers
ggml_backend_buffer_t c_wrapper(ggml_backend_buffer_type_t buft, buffer* ctx, std::size_t size) {
    if (!ctx) { return nullptr; }
    return new ggml_backend_buffer {
        /* .interface = */ {
            /* .free_buffer   = */ buffer_free_buffer,
            /* .get_base      = */ buffer_get_base,
            /* .init_tensor   = */ buffer_init_tensor,
            /* .memset_tensor = */ buffer_memset_tensor,
            /* .set_tensor    = */ buffer_set_tensor,
            /* .get_tensor    = */ buffer_get_tensor,
            /* .cpy_tensor    = */ buffer_cpy_tensor,
            /* .clear         = */ buffer_clear,
            /* .reset         = */ buffer_reset,
        },
        /* .buft    = */ buft,
        /* .context = */ ctx,
        /* .size    = */ size,
        /* .usage   = */ GGML_BACKEND_BUFFER_USAGE_ANY
    };
}

struct buffer_type_deleter {
    void operator()(ggml_backend_buffer_type* c_buffer_type) {
        delete c_buffer_type;
    }
};
typedef std::unique_ptr<ggml_backend_buffer_type, buffer_type_deleter> c_buffer_type_ptr;

ggml_backend_buffer_type_t c_wrapper(ggml_backend_dev_t device, buffer_type* ctx) {
    // the ctx has to be "~static": owned by a device (or actually static).
    static std::map<buffer_type*, c_buffer_type_ptr> map;
    if (!ctx) { return nullptr; }

    auto it = map.find(ctx);
    // add a new wrapper if not found.
    if (it == map.end()) {
        auto* wrapper = new ggml_backend_buffer_type {
            /* .iface = */ {
                /* .get_name       = */ buffer_type_get_name,
                /* .alloc_buffer   = */ buffer_type_alloc_buffer,
                /* .get_alignment  = */ buffer_type_get_alignment,
                /* .get_max_size   = */ buffer_type_get_max_size,
                /* .get_alloc_size = */ buffer_type_get_alloc_size,
                /* .is_host        = */ buffer_type_is_host,
            },
            /* .device  = */ device,
            /* .context = */ ctx,
        };
        map[ctx] = c_buffer_type_ptr(wrapper);
        return wrapper;
    }
    return it->second.get();
}
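
// Design note: the static map guarantees a single ggml_backend_buffer_type
// wrapper per C++ buffer_type, so identity comparisons on
// ggml_backend_buffer_type_t pointers keep working; the wrappers live until
// process exit, which should be fine since buffer types are expected to be
// (quasi-)static anyway.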

ggml_backend_t c_wrapper(ggml_backend_dev_t device, backend* ctx) {
    if (!ctx) { return nullptr; }
    auto& dev = *((ggml::cpp::backend::device*) (device->context));
    return new ggml_backend {
        /* .guid  = */ const_cast<ggml_guid_t>(ctx->get_guid()),
        /* .iface = */ {
            /* .get_name           = */ backend_get_name,
            /* .free               = */ backend_free,
            /* .set_tensor_async   = */ dev.caps_async() ? backend_set_tensor_async : nullptr,
            /* .get_tensor_async   = */ dev.caps_async() ? backend_get_tensor_async : nullptr,
            /* .cpy_tensor_async   = */ dev.caps_async() ? backend_cpy_tensor_async : nullptr,
            /* .synchronize        = */ dev.caps_async() ? backend_synchronize : nullptr,
            /* .graph_plan_create  = */ nullptr,
            /* .graph_plan_free    = */ nullptr,
            /* .graph_plan_update  = */ nullptr,
            /* .graph_plan_compute = */ nullptr,
            /* .graph_compute      = */ backend_graph_compute,
            /* .event_record       = */ dev.caps_events() ? backend_event_record : nullptr,
            /* .event_wait         = */ dev.caps_events() ? backend_event_wait : nullptr,
            /* .graph_optimize     = */ nullptr,
        },
        /* .device  = */ device,
        /* .context = */ ctx
    };
}

struct device_deleter {
    void operator()(ggml_backend_device* c_device) {
        delete c_device;
    }
};
typedef std::unique_ptr<ggml_backend_device, device_deleter> c_device_ptr;

ggml_backend_dev_t c_wrapper(ggml_backend_reg_t reg, device* ctx) {
    // the ctx has to be "static" / owned by the backend register.
    static std::map<device*, c_device_ptr> map;
    if (!ctx) { return nullptr; }

    auto it = map.find(ctx);
    if (it == map.end()) {
        auto* wrapper = new ggml_backend_device {
            /* .iface = */ {
                /* .get_name             = */ device_get_name,
                /* .get_description      = */ device_get_description,
                /* .get_memory           = */ device_get_memory,
                /* .get_type             = */ device_get_type,
                /* .get_props            = */ device_get_props,
                /* .init_backend         = */ device_init_backend,
                /* .get_buffer_type      = */ device_get_buffer_type,
                /* .get_host_buffer_type = */ ctx->caps_host_buffer() ? device_get_host_buffer_type : nullptr,
                /* .buffer_from_host_ptr = */ ctx->caps_buffer_from_host_ptr() ? device_buffer_from_host_ptr : nullptr,
                /* .supports_op          = */ device_supports_op,
                /* .supports_buft        = */ device_supports_buft,
                /* .offload_op           = */ device_offload_op,
                /* .event_new            = */ ctx->caps_events() ? device_event_new : nullptr,
                /* .event_free           = */ ctx->caps_events() ? device_event_free : nullptr,
                /* .event_synchronize    = */ ctx->caps_events() ? device_event_synchronize : nullptr,
            },
            /* .reg     = */ reg,
            /* .context = */ ctx,
        };
        map[ctx] = c_device_ptr(wrapper);
        return wrapper;
    }
    return it->second.get();
}

struct register_deleter {
    void operator()(ggml_backend_reg_t c_register) {
        delete c_register;
    }
};
typedef std::unique_ptr<ggml_backend_reg, register_deleter> c_register_ptr;

ggml_backend_reg_t c_wrapper(reg* ctx) {
    // the ctx has to be static.
    static std::map<reg*, c_register_ptr> map;
    if (!ctx) { return nullptr; }

    auto it = map.find(ctx);
    if (it == map.end()) {
        auto* wrapper = new ggml_backend_reg {
            /* .api_version = */ GGML_BACKEND_API_VERSION,
            /* .iface       = */ {
                /* .get_name         = */ reg_get_name,
                /* .get_device_count = */ reg_get_device_count,
                /* .get_device       = */ reg_get_device,
                /* .get_proc_address = */ reg_get_proc_address,
            },
            /* .context = */ ctx,
        };
        map[ctx] = c_register_ptr(wrapper);
        return wrapper;
    }
    return it->second.get();
}
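
// Usage sketch: a backend built on these wrappers would expose the usual C
// entry point by wrapping a static registry instance (`my_reg` is a
// hypothetical subclass of ggml::cpp::backend::reg):
//
//     ggml_backend_reg_t ggml_backend_my_reg(void) {
//         static my_reg instance;
//         return ggml::cpp::backend::c_wrapper(&instance);
//     }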

}


// for simple CPU buffer:
namespace ggml::cpp::backend::cpu {

// buffer
template<std::size_t ALIGNMENT>
class buffer : public ggml::cpp::backend::buffer {
    // correctly aligned data for C++17.
    struct alignas(ALIGNMENT) aligned_uint8_t {
        uint8_t val;
    };
    aligned_uint8_t* m_data = nullptr;
    const std::size_t m_size;

public:
    buffer(std::size_t size): m_size(size) {
        m_data = new aligned_uint8_t[m_size];
        GGML_ASSERT(reinterpret_cast<uintptr_t>(m_data) % ALIGNMENT == 0);
    }

    // non-owning view over caller-provided memory; m_size == 0 marks it as such.
    buffer(void* ptr, std::size_t /*size*/): m_size(0) {
        m_data = (aligned_uint8_t*) ptr;
    }

    virtual ~buffer() {
        if (m_size > 0 && m_data) {
            delete[] m_data;
        }
        m_data = nullptr;
    }

    void* get_base() override {
        return m_data;
    }

    void memset_tensor(ggml_tensor & tensor, uint8_t value, std::size_t offset, std::size_t size) override {
        memset((uint8_t *) tensor.data + offset, value, size);
    }

    void set_tensor(ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) override {
        memcpy((uint8_t *) tensor.data + offset, data, size);
    }

    void get_tensor(const ggml_tensor & tensor, void * data, std::size_t offset, std::size_t size) override {
        memcpy(data, (uint8_t *) tensor.data + offset, size);
    }

    bool cpy_tensor(const ggml_tensor & src, ggml_tensor & dst) override {
        if (ggml_backend_buffer_is_host(src.buffer)) {
            memcpy(dst.data, src.data, ggml_nbytes(&src));
            return true;
        }
        return false;
    }

    void clear(uint8_t value) override {
        memset(m_data, value, m_size);
    }
};
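
// Design note: alignment is enforced at the type level with alignas, so plain
// new[]/delete[] can be used instead of platform-specific aligned allocation;
// this relies on C++17 over-aligned new (operator new with std::align_val_t),
// hence the GGML_ASSERT double-checking the returned address.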

// buffer_type
template<std::size_t ALIGNMENT>
class buffer_type : public ggml::cpp::backend::buffer_type {
    const std::string m_name;
    const bool m_from_ptr;

public:
    buffer_type(const std::string& name, bool from_ptr) :
        m_name(name), m_from_ptr(from_ptr)
    {}

    virtual ~buffer_type() {}

    const std::string& get_name() override {
        return m_name;
    }

    buffer<ALIGNMENT>* alloc_buffer(std::size_t size) override {
        GGML_ASSERT(!m_from_ptr && "buffer type not for allocatable buffer");
        return new buffer<ALIGNMENT>(size);
    }

    std::size_t get_alignment() override {
        return ALIGNMENT;
    }

    bool is_host() override {
        return true;
    }

    buffer<ALIGNMENT>* register_buffer(void * ptr, std::size_t size, std::size_t /*max_tensor_size*/) override {
        GGML_ASSERT(m_from_ptr && "buffer type not for ptr memory");
        // GGML_ASSERT((uintptr_t) ptr % ALIGNMENT == 0 && "buffer pointer must be aligned");
        return new buffer<ALIGNMENT>(ptr, size);
    }
};

}

namespace ggml::cpp::backend {

buffer_type* new_cpu_buffer_type(
    const std::string& name,
    bool from_ptr,
    std::size_t alignment
) {
    // Maybe derive the alignment from the supported SIMD width?
    if (alignment <= 8) {          // 64 bits
        return new ggml::cpp::backend::cpu::buffer_type<8>(name, from_ptr);
    } else if (alignment <= 16) {  // 128 bits (SSE/NEON)
        return new ggml::cpp::backend::cpu::buffer_type<16>(name, from_ptr);
    } else if (alignment <= 32) {  // 256 bits (AVX/AVX2)
        return new ggml::cpp::backend::cpu::buffer_type<32>(name, from_ptr);
    } else if (alignment <= 64) {  // 512 bits (AVX-512)
        return new ggml::cpp::backend::cpu::buffer_type<64>(name, from_ptr);
    } else {                       // do we need more?
        return new ggml::cpp::backend::cpu::buffer_type<128>(name, from_ptr);
    }
}

}
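
// Usage sketch (hedged; `my_device` is hypothetical): a device returning its
// default buffer type could keep one such instance alive for its lifetime:
//
//     buffer_type& my_device::get_buffer_type() {
//         static buffer_type* bt = new_cpu_buffer_type("MyDev", false, 32);
//         return *bt;
//     }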

@ -0,0 +1,155 @@
#pragma once
#ifndef __cplusplus
#error "This header is for C++ only"
#endif

#include "ggml.h"

#include "ggml-impl.h"
#include "ggml-backend.h"

#include <string>
#include <vector>
#include <cstddef>

namespace ggml::cpp::backend {

class GGML_API buffer { // ggml_backend_buffer_t
public:
    virtual ~buffer();

    virtual void* get_base() = 0;
    virtual ggml_status init_tensor(ggml_tensor& /*tensor*/) { return GGML_STATUS_SUCCESS; }

    virtual void memset_tensor(      ggml_tensor & tensor, uint8_t value,     std::size_t offset, std::size_t size) = 0;
    virtual void set_tensor   (      ggml_tensor & tensor, const void * data, std::size_t offset, std::size_t size) = 0;
    virtual void get_tensor   (const ggml_tensor & tensor, void * data,       std::size_t offset, std::size_t size) = 0;

    virtual bool cpy_tensor   (const ggml_tensor & /*src*/, ggml_tensor & /*dst*/) { return false; }

    virtual void clear(uint8_t value) = 0;
    virtual void reset() {}
};

class GGML_API buffer_type { // ggml_backend_buffer_type_t
public:
    virtual ~buffer_type();

    virtual const std::string& get_name() = 0;
    virtual buffer* alloc_buffer(std::size_t size) = 0;
    virtual std::size_t get_alignment() { return TENSOR_ALIGNMENT; }
    virtual std::size_t get_max_size() { return SIZE_MAX; }
    virtual std::size_t get_alloc_size(const ggml_tensor& tensor) { return ggml_nbytes(&tensor); }
    virtual bool is_host() { return false; }
    // for buffers built from an existing memory pointer:
    virtual buffer* register_buffer(void * /*ptr*/, std::size_t /*size*/, std::size_t /*max_tensor_size*/) { return nullptr; }
};

// TODO: manage event
class GGML_API event {
public:
    virtual ~event();
};

// TODO: manage graph
//class graph_plan {
//public:
//    virtual ~graph_plan();
//};

class device;

class GGML_API backend { // ggml_backend_t
    backend() = delete;
public:
    backend(device& dev);
    virtual ~backend();

    virtual const std::string& get_name() = 0;
    virtual const ggml_guid* get_guid() = 0;

    // only used when device::caps_async() returns true:
    virtual void set_tensor_async(      ggml_tensor & tensor, const void * data, size_t offset, size_t size) { ggml_backend_tensor_set(&tensor, data, offset, size); }
    virtual void get_tensor_async(const ggml_tensor & tensor, void * data,       size_t offset, size_t size) { ggml_backend_tensor_get(&tensor, data, offset, size); }
    virtual bool cpy_tensor_async(ggml_backend_t /*backend_src*/, /* ggml_backend_t backend_dst == this, */ const ggml_tensor & /*src*/, ggml_tensor & /*dst*/) { return false; }
    virtual void synchronize() {}

    // TODO: manage graph
    //virtual graph_plan& graph_plan_create(const ggml_cgraph & cgraph);
    //virtual void graph_plan_free(graph_plan& plan);
    //virtual void graph_plan_update(graph_plan& plan, const ggml_cgraph & cgraph);
    //virtual enum ggml_status graph_plan_compute(graph_plan& plan);

    virtual enum ggml_status graph_compute(ggml_cgraph & cgraph) = 0;

    // only used when device::caps_events() returns true:
    virtual void event_record(event & /*event*/) { GGML_ASSERT(false); }
    virtual void event_wait  (event & /*event*/) { GGML_ASSERT(false); }

    // the extra functions:
    virtual void set_n_threads(int /*n_threads*/) {}

protected:
    device& m_device;
};

class GGML_API device { // ggml_backend_dev_t
protected:
    friend ggml_backend_buffer_type_t* backend_dev_get_extra_bufts(ggml_backend_dev_t device);
    std::vector<buffer_type*> m_extra_buffers_type;
    std::vector<ggml_backend_buffer_type_t> m_ggml_extra_buffers_type;

public:
    virtual ~device();

    virtual const std::string& get_name() = 0;
    virtual const std::string& get_description() = 0;
    virtual void get_memory(std::size_t & free, std::size_t & total) = 0;
    virtual enum ggml_backend_dev_type get_type() = 0;
    virtual backend& init_backend(const std::string& params) = 0;
    virtual buffer_type& get_buffer_type() = 0;
    virtual buffer_type* get_host_buffer_type() { return nullptr; }
    virtual buffer_type* get_from_host_ptr_buffer_type() { return nullptr; }

    virtual bool supports_op(const ggml_tensor & op) = 0;
    virtual bool supports_buft(ggml_backend_buffer_type_t buft) = 0;
    virtual bool offload_op(const ggml_tensor & /*op*/) { return false; }

    // events: only used when caps_events() returns true.
    virtual event* event_new() { return nullptr; }
    virtual void event_synchronize(event& /*event*/) { GGML_ASSERT(false); }

    //void get_props(struct ggml_backend_dev_props * props); => ggml_backend_dev_caps
    virtual bool caps_async() { return false; }
    virtual bool caps_host_buffer() { return get_host_buffer_type() != nullptr; }
    virtual bool caps_buffer_from_host_ptr() { return get_from_host_ptr_buffer_type() != nullptr; }
    virtual bool caps_events() { return false; }

protected:
    // has to be called by the device at init.
    void register_extra_buffer_type(buffer_type* buft);
};

class GGML_API reg { // ggml_backend_reg_t
public:
    virtual ~reg();

    virtual const std::string& get_name() = 0;
    virtual std::size_t get_device_count() = 0;
    virtual device& get_device(std::size_t index) = 0;
};

GGML_API ggml_backend_buffer_t      c_wrapper(ggml_backend_buffer_type_t buft, buffer* ctx, std::size_t size);
GGML_API ggml_backend_buffer_type_t c_wrapper(ggml_backend_dev_t device, buffer_type* ctx);
GGML_API ggml_backend_t             c_wrapper(ggml_backend_dev_t device, backend* ctx);
GGML_API ggml_backend_dev_t         c_wrapper(ggml_backend_reg_t reg, device* ctx);
GGML_API ggml_backend_reg_t         c_wrapper(reg* ctx);

// helper for simple cpu buffer type:
GGML_API buffer_type* new_cpu_buffer_type(
    const std::string& name,
    bool from_ptr = false,
    std::size_t alignment = TENSOR_ALIGNMENT
);

}
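
// Implementer sketch (hedged; all names are hypothetical): a minimal backend
// needs a reg, a device, and a backend subclass, and the C API surface then
// falls out of the c_wrapper overloads above:
//
//     class my_backend : public ggml::cpp::backend::backend {
//         // implement get_name / get_guid / graph_compute ...
//     };
//     class my_device : public ggml::cpp::backend::device {
//         // implement get_name / init_backend / get_buffer_type / supports_op ...
//     };
//     class my_reg : public ggml::cpp::backend::reg {
//         // expose one device, backed by a static my_device instance ...
//     };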

@ -268,9 +268,6 @@ static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode s
        }
    }

    // add the device default buffer type
    buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev));

    // add the device extra buffer type (if any)
    ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
    auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)

@ -284,6 +281,9 @@ static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode s
        }
    }

    // add the device default buffer type
    buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev));

    return buft_list;
}