diff --git a/CODEOWNERS b/CODEOWNERS
index 750096d9a1..2622245d41 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -47,6 +47,7 @@
 /ggml/cmake/ @ggerganov
 /ggml/include/ @ggerganov
 /ggml/src/ggml-common.h @ggerganov
+/ggml/src/ggml-blas/ @taronaeo
 /ggml/src/ggml-cpu/ @ggerganov
 /ggml/src/ggml-cpu/spacemit/ @alex-spacemit
 /ggml/src/ggml-cuda/fattn* @JohannesGaessler
diff --git a/ggml/src/ggml-blas/CMakeLists.txt b/ggml/src/ggml-blas/CMakeLists.txt
index 60ce4b1e02..cf226665aa 100644
--- a/ggml/src/ggml-blas/CMakeLists.txt
+++ b/ggml/src/ggml-blas/CMakeLists.txt
@@ -11,9 +11,10 @@ find_package(BLAS)
 if (BLAS_FOUND)
     message(STATUS "BLAS found, Libraries: ${BLAS_LIBRARIES}")
 
-    ggml_add_backend_library(ggml-blas
-                             ggml-blas.cpp
-                            )
+    file(GLOB GGML_SOURCES_BLAS "*.c" "*.cpp")
+    file(GLOB GGML_HEADERS_BLAS "*.h" "*.hpp")
+
+    ggml_add_backend_library(ggml-blas ${GGML_HEADERS_BLAS} ${GGML_SOURCES_BLAS})
 
     if (${GGML_BLAS_VENDOR} MATCHES "Apple")
         add_compile_definitions(ACCELERATE_NEW_LAPACK)
diff --git a/ggml/src/ggml-blas/common.hpp b/ggml/src/ggml-blas/common.hpp
new file mode 100644
index 0000000000..bc1f6f3e93
--- /dev/null
+++ b/ggml/src/ggml-blas/common.hpp
@@ -0,0 +1,67 @@
+#pragma once
+
+#include "ggml.h"
+#include "ggml-impl.h"
+#include "ggml-backend-impl.h"
+
+#include <cstddef>
+#include <future>
+#include <vector>
+
+#if defined(GGML_BLAS_USE_ACCELERATE)
+#   include <Accelerate/Accelerate.h>
+#elif defined(GGML_BLAS_USE_MKL)
+#   include <mkl.h>
+#elif defined(GGML_BLAS_USE_BLIS)
+#   include <blis.h>
+#elif defined(GGML_BLAS_USE_NVPL)
+#   include <nvpl_blas.h>
+#else
+#   include <cblas.h>
+#endif
+
+#define GGML_BLAS_NAME    "BLAS"
+#define GGML_BLAS_VERSION GGML_BACKEND_API_VERSION
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct ggml_backend_blas_buffer {
+    void * data; // dequantized data
+    size_t size; // ggml_nelements * sizeof(float)
+};
+
+struct ggml_backend_blas_buffer_context {
+    void * data;
+    size_t size;
+    std::vector<ggml_backend_blas_buffer *> buffers;
+
+    ~ggml_backend_blas_buffer_context() {
+        ggml_aligned_free(data, size);
+        for (auto * extra : buffers) {
+            ggml_aligned_free(extra->data, extra->size);
+            delete extra;
+        }
+    }
+};
+
+struct ggml_backend_blas_buffer_type_context {
+    int n_threads;
+
+#ifndef GGML_USE_OPENMP
+    std::vector<std::future<void>> tasks;
+#endif
+};
+
+struct ggml_backend_blas_context {
+    int n_threads;
+};
+
+struct ggml_backend_blas_device_context {
+    char _dummy; // Prevent empty struct warning
+};
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/ggml/src/ggml-blas/ggml-blas.cpp b/ggml/src/ggml-blas/ggml-blas.cpp
index 5b888cdd8c..4476cdaf4c 100644
--- a/ggml/src/ggml-blas/ggml-blas.cpp
+++ b/ggml/src/ggml-blas/ggml-blas.cpp
@@ -1,10 +1,18 @@
+#include "ggml.h"
+#include "ggml-backend.h"
 #include "ggml-impl.h"
-#include "ggml-blas.h"
 #include "ggml-backend-impl.h"
+#include "ggml-blas.h"
 
-#include <future>
-#include <vector>
+#include "ggml-blas/common.hpp"
+#include "ggml-blas/mmf.hpp"
+#include "ggml-blas/out-prod.hpp"
+
+#include <cstdint>
 #include <cstring>
+#include <future>
+#include <thread>
+#include <vector>
 
 #if defined(GGML_BLAS_USE_ACCELERATE)
 #   include <Accelerate/Accelerate.h>
@@ -18,78 +26,143 @@
 #   include <cblas.h>
 #endif
 
-struct ggml_backend_blas_context {
-    int n_threads = GGML_DEFAULT_N_THREADS;
-    std::unique_ptr<char[]> work_data;
-    size_t work_size = 0;
-#ifndef GGML_USE_OPENMP
-    std::vector<std::future<void>> tasks;
-#endif
-};
 
-static void ggml_backend_blas_mul_mat(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) {
-    const struct ggml_tensor * src0 = dst->src[0];
-    const struct ggml_tensor * src1 = dst->src[1];
+// BLAS backend - graph compute
 
-    GGML_TENSOR_BINARY_OP_LOCALS
+static void ggml_blas_compute_forward_mul_mat(
+    const ggml_backend_blas_context * ctx,
+    ggml_tensor * dst) {
 
-
const enum ggml_type type = src0->type; + const ggml_tensor * src0 = dst->src[0]; // weights + const ggml_tensor * src1 = dst->src[1]; // inputs - GGML_ASSERT(ne0 == ne01); - GGML_ASSERT(ne1 == ne11); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); + ggml_blas_mul_mat_f(ctx, src0, src1, dst); +} - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == ggml_type_size(type)); - GGML_ASSERT(nb10 == ggml_type_size(src1->type)); +static void ggml_blas_compute_forward_out_prod( + const ggml_backend_blas_context * ctx, + ggml_tensor * dst) { - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); + const ggml_tensor * src0 = dst->src[0]; // inputs + const ggml_tensor * src1 = dst->src[1]; // weights - // broadcast factors - const int64_t r2 = ne12/ne02; - const int64_t r3 = ne13/ne03; + ggml_blas_out_prod_f(ctx, src0, src1, dst); +} - const int64_t ne_plane = ne01*ne00; - const size_t desired_wsize = type == GGML_TYPE_F32 ? 0 : ne03*ne02*ne_plane*sizeof(float); +// BLAS backend - buffer - if (ctx->work_size < desired_wsize) { - ctx->work_data.reset(new char[desired_wsize]); - ctx->work_size = desired_wsize; +static void ggml_backend_blas_buffer_free_buffer(ggml_backend_buffer_t buffer) { + GGML_ASSERT(buffer); + + ggml_backend_blas_buffer_context * ctx = (ggml_backend_blas_buffer_context *)buffer->context; + delete ctx; +} + +static void * ggml_backend_blas_buffer_get_base(ggml_backend_buffer_t buffer) { + GGML_ASSERT(buffer); + + ggml_backend_blas_buffer_context * ctx = (ggml_backend_blas_buffer_context *)buffer->context; + uintptr_t data = (uintptr_t)ctx->data; + + // align the buffer + if (data % TENSOR_ALIGNMENT != 0) { + data = GGML_PAD(data, TENSOR_ALIGNMENT); } - void * wdata = ctx->work_data.get(); - // convert src0 to float - if (type != GGML_TYPE_F32) { - const auto * type_traits = ggml_get_type_traits(type); + return (void *)data; +} + +static enum ggml_status ggml_backend_blas_buffer_init_tensor( + ggml_backend_buffer_t buffer, + ggml_tensor * tensor) { + + if (tensor->view_src != NULL) { + assert(tensor->view_src->buffer->buft == buffer->buft); + return GGML_STATUS_SUCCESS; + } + + if (tensor->type != GGML_TYPE_F32) { + ggml_backend_blas_buffer_context * ctx = (ggml_backend_blas_buffer_context *)buffer->context; + ggml_backend_blas_buffer * extra = new ggml_backend_blas_buffer; + + extra->data = ggml_aligned_malloc(ggml_nelements(tensor) * sizeof(float)); // sizeof(float) because dequantized + extra->size = ggml_nelements(tensor) * sizeof(float); + + tensor->extra = extra; + ctx->buffers.push_back(extra); + } + + return GGML_STATUS_SUCCESS; +} + +static void ggml_backend_blas_buffer_memset_tensor( + ggml_backend_buffer_t buffer, + ggml_tensor * tensor, + uint8_t value, + size_t offset, + size_t size) { + + GGML_ASSERT(tensor); + memset((char *)tensor->data + offset, value, size); + + GGML_UNUSED(buffer); +} + +static void ggml_backend_blas_buffer_set_tensor( + ggml_backend_buffer_t buffer, + ggml_tensor * tensor, + const void * data, + size_t offset, + size_t size) { + + GGML_ASSERT(tensor); + memcpy((char *)tensor->data + offset, data, size); + + ggml_backend_blas_buffer_type_context * buft_ctx = (ggml_backend_blas_buffer_type_context *)buffer->buft->context; + ggml_backend_blas_buffer * extra = (ggml_backend_blas_buffer *)tensor->extra; + + const int64_t ne00 = tensor->ne[0]; + const int64_t ne01 = tensor->ne[1]; + const int64_t ne02 = tensor->ne[2]; + const 
int64_t ne03 = tensor->ne[3]; + + const int64_t nb00 = tensor->nb[0]; + const int64_t nb01 = tensor->nb[1]; + const int64_t nb02 = tensor->nb[2]; + const int64_t nb03 = tensor->nb[3]; + + const int64_t ne_plane = ne01*ne00; + + if (tensor->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS + && tensor->type != GGML_TYPE_F32 + && ggml_get_type_traits(tensor->type)->to_float != NULL) { + + const auto * type_traits = ggml_get_type_traits(tensor->type); ggml_to_float_t const to_float = type_traits->to_float; + GGML_ASSERT(to_float != nullptr); for (int64_t i03 = 0; i03 < ne03; i03++) { for (int64_t i02 = 0; i02 < ne02; i02++) { - const void * x = (char *) src0->data + i02*nb02 + i03*nb03; - float * const wplane = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane; + const void * x = (char *)tensor->data + i02*nb02 + i03*nb03; + float * const wplane = (float *)extra->data + i02*ne_plane + i03*ne02*ne_plane; const int min_cols_per_thread = 4096; - const int min_rows_per_thread = std::max((int)(min_cols_per_thread/ne00), 1); - const int n_threads = std::max(std::min(ctx->n_threads, (int)(ne01/min_rows_per_thread)), 1); + const int min_rows_per_thread = std::max((int)(min_cols_per_thread / ne00), 1); + const int n_threads = std::max(std::min(buft_ctx->n_threads, (int)(ne01 / min_rows_per_thread)), 1); #ifdef GGML_USE_OPENMP #pragma omp parallel for num_threads(n_threads) for (int64_t i01 = 0; i01 < ne01; i01++) { - to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); + to_float((const char *)x + i01*nb01, wplane + i01*ne00, ne00); } #else for (int i = 1; i < n_threads; i++) { - const int64_t start = i*ne01/n_threads; - const int64_t end = (i + 1)*ne01/n_threads; + const int64_t start = (i + 0) * ne01/n_threads; + const int64_t end = (i + 1) * ne01/n_threads; if (start < end) { - ctx->tasks.push_back(std::async(std::launch::async, [=]() { + buft_ctx->tasks.push_back(std::async(std::launch::async, [=]() { for (int64_t i01 = start; i01 < end; i01++) { - to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); + to_float((const char *)x + i01*nb01, wplane + i01*ne00, ne00); } })); } @@ -99,7 +172,7 @@ static void ggml_backend_blas_mul_mat(ggml_backend_blas_context * ctx, struct gg const int64_t start = 0; const int64_t end = ne01/n_threads; for (int64_t i01 = start; i01 < end; i01++) { - to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); + to_float((const char *)x + i01*nb01, wplane + i01*ne00, ne00); } } #endif @@ -108,143 +181,157 @@ static void ggml_backend_blas_mul_mat(ggml_backend_blas_context * ctx, struct gg #ifndef GGML_USE_OPENMP // wait for all tasks to finish - for (auto & task : ctx->tasks) { + for (auto & task : buft_ctx->tasks) { task.get(); } - ctx->tasks.clear(); + buft_ctx->tasks.clear(); #endif } -#if defined(OPENBLAS_VERSION) - openblas_set_num_threads(ctx->n_threads); -#endif - -#if defined(GGML_BLAS_USE_BLIS) - bli_thread_set_num_threads(ctx->n_threads); -#endif - -#if defined(GGML_BLAS_USE_NVPL) - nvpl_blas_set_num_threads(ctx->n_threads); -#endif - - for (int64_t i13 = 0; i13 < ne13; i13++) { - for (int64_t i12 = 0; i12 < ne12; i12++) { - const int64_t i03 = i13/r3; - const int64_t i02 = i12/r2; - - const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03); - const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13); - float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); - - if (type != GGML_TYPE_F32) { - x = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane; - } - - cblas_sgemm(CblasRowMajor, CblasNoTrans, 
CblasTrans, - ne1, ne01, ne10, - 1.0f, y, ne10, - x, ne00, - 0.0f, d, ne01); - } - } + GGML_UNUSED(nb00); } -static void ggml_backend_blas_out_prod(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) { - const struct ggml_tensor * src0 = dst->src[0]; - const struct ggml_tensor * src1 = dst->src[1]; +static void ggml_backend_blas_buffer_get_tensor( + ggml_backend_buffer_t buffer, + const ggml_tensor * tensor, + void * data, + size_t offset, + size_t size) { - GGML_TENSOR_BINARY_OP_LOCALS + GGML_ASSERT(tensor); + memcpy(data, (const char *)tensor->data + offset, size); - GGML_ASSERT(ne0 == ne00); - GGML_ASSERT(ne1 == ne10); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne02 == ne12); - GGML_ASSERT(ne3 == ne13); - GGML_ASSERT(ne03 == ne13); - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == sizeof(float)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - // GGML_ASSERT(nb0 <= nb1); - // GGML_ASSERT(nb1 <= nb2); - // GGML_ASSERT(nb2 <= nb3); - - // Arguments to ggml_compute_forward_out_prod (expressed as major,minor) - // src0: (k,n) - // src1: (k,m) - // dst: (m,n) - // - // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f) - // Also expressed as (major,minor) - // a: (m,k): so src1 transposed - // b: (k,n): so src0 - // c: (m,n) - // - // However, if ggml_is_transposed(src1) is true, then - // src1->data already contains a transposed version, so sgemm mustn't - // transpose it further. - - int n = src0->ne[0]; - int k = src0->ne[1]; - int m = src1->ne[0]; - - CBLAS_TRANSPOSE transposeA; - int lda; - - if (!ggml_is_transposed(src1)) { - transposeA = CblasTrans; - lda = m; - } else { - transposeA = CblasNoTrans; - lda = k; - } - - float * a = (float *) ((char *) src1->data); - float * b = (float *) ((char *) src0->data); - float * c = (float *) ((char *) dst->data); - - cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n); - - GGML_UNUSED(ctx); + GGML_UNUSED(buffer); } -// backend interface +static void ggml_backend_blas_buffer_clear( + ggml_backend_buffer_t buffer, + uint8_t value) { + + GGML_ASSERT(buffer); + + ggml_backend_blas_buffer_context * ctx = (ggml_backend_blas_buffer_context *)buffer->context; + memset(ctx->data, value, ctx->size); +} + +static void ggml_backend_blas_buffer_reset(ggml_backend_buffer_t buffer) { + GGML_ASSERT(buffer); + + ggml_backend_blas_buffer_context * ctx = (ggml_backend_blas_buffer_context *)buffer->context; + for (auto * extra : ctx->buffers) { + ggml_aligned_free(extra->data, extra->size); + delete extra; + } + ctx->buffers.clear(); +} + +static const ggml_backend_buffer_i ggml_backend_blas_buffer_i = { + /* .free_buffer = */ ggml_backend_blas_buffer_free_buffer, + /* .get_base = */ ggml_backend_blas_buffer_get_base, + /* .init_tensor = */ ggml_backend_blas_buffer_init_tensor, + /* .memset_tensor = */ ggml_backend_blas_buffer_memset_tensor, + /* .set_tensor = */ ggml_backend_blas_buffer_set_tensor, + /* .get_tensor = */ ggml_backend_blas_buffer_get_tensor, + /* .cpy_tensor = */ NULL, + /* .clear = */ ggml_backend_blas_buffer_clear, + /* .reset = */ ggml_backend_blas_buffer_reset, +}; + +// BLAS backend buffer type + +static const char * ggml_backend_blas_buffer_type_get_name(ggml_backend_buffer_type_t buft) { + return GGML_BLAS_NAME; + GGML_UNUSED(buft); +} + +static ggml_backend_buffer_t ggml_backend_blas_buffer_type_alloc_buffer( + ggml_backend_buffer_type_t buft, + size_t size) { + + void * data = 
ggml_aligned_malloc(size);
+    if (data == nullptr) {
+        GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu\n", __func__, size);
+        return NULL;
+    }
+
+    ggml_backend_blas_buffer_context * ctx = new ggml_backend_blas_buffer_context;
+    ctx->data = data;
+    ctx->size = size;
+
+    return ggml_backend_buffer_init(buft, ggml_backend_blas_buffer_i, ctx, size);
+}
+
+static size_t ggml_backend_blas_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
+
+    return TENSOR_ALIGNMENT;
+
+    GGML_UNUSED(buft);
+}
+
+static bool ggml_backend_blas_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
+
+    return true;
+
+    GGML_UNUSED(buft);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_blas_buffer_type(void) {
+    static ggml_backend_blas_buffer_type_context buft_ctx = {
+        /* .n_threads = */ (int)std::thread::hardware_concurrency(),
+#ifndef GGML_USE_OPENMP
+        /* .tasks     = */ std::vector<std::future<void>>(),
+#endif
+    };
+
+    static ggml_backend_buffer_type ggml_backend_blas_buffer_type = {
+        /* .iface   = */ {
+            /* .get_name       = */ ggml_backend_blas_buffer_type_get_name,
+            /* .alloc_buffer   = */ ggml_backend_blas_buffer_type_alloc_buffer,
+            /* .get_alignment  = */ ggml_backend_blas_buffer_type_get_alignment,
+            /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
+            /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
+            /* .is_host        = */ ggml_backend_blas_buffer_type_is_host,
+        },
+        /* .device  = */ NULL,
+        /* .context = */ &buft_ctx,
+    };
+
+    return &ggml_backend_blas_buffer_type;
+}
 
 static const char * ggml_backend_blas_get_name(ggml_backend_t backend) {
-    return "BLAS";
+    return GGML_BLAS_NAME;
 
     GGML_UNUSED(backend);
 }
 
 static void ggml_backend_blas_free(ggml_backend_t backend) {
     ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context;
+
     delete ctx;
     delete backend;
 }
 
-static enum ggml_status ggml_backend_blas_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+static ggml_status ggml_backend_blas_graph_compute(
+    ggml_backend_t backend,
+    ggml_cgraph * cgraph) {
+
     ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context;
 
     for (int i = 0; i < cgraph->n_nodes; i++) {
-        struct ggml_tensor * node = cgraph->nodes[i];
+        ggml_tensor * node = cgraph->nodes[i];
+
+        if (ggml_op_is_empty(node->op)) {
+            continue;
+        }
 
         switch (node->op) {
             case GGML_OP_MUL_MAT:
-                ggml_backend_blas_mul_mat(ctx, node);
-                break;
-
+                {
+                    ggml_blas_compute_forward_mul_mat(ctx, node);
+                } break;
             case GGML_OP_OUT_PROD:
-                ggml_backend_blas_out_prod(ctx, node);
-                break;
-
-            case GGML_OP_NONE:
-            case GGML_OP_RESHAPE:
-            case GGML_OP_VIEW:
-            case GGML_OP_PERMUTE:
-            case GGML_OP_TRANSPOSE:
-                break;
+                {
+                    ggml_blas_compute_forward_out_prod(ctx, node);
+                } break;
 
             default:
                 GGML_ABORT("%s: unsupported op %s\n", __func__, ggml_op_desc(node));
@@ -256,21 +343,21 @@ static enum ggml_status ggml_backend_blas_graph_compute(ggml_backend_t backend,
 
     GGML_UNUSED(backend);
 }
 
-static struct ggml_backend_i blas_backend_i = {
-    /* .get_name           = */ ggml_backend_blas_get_name,
-    /* .free               = */ ggml_backend_blas_free,
-    /* .set_tensor_async   = */ NULL,
-    /* .get_tensor_async   = */ NULL,
-    /* .cpy_tensor_async   = */ NULL,
-    /* .synchronize        = */ NULL,
-    /* .graph_plan_create  = */ NULL,
-    /* .graph_plan_free    = */ NULL,
-    /* .graph_plan_update  = */ NULL,
-    /* .graph_plan_compute = */ NULL,
-    /* .graph_compute      = */ ggml_backend_blas_graph_compute,
-    /* .event_record       = */ NULL,
-    /* .event_wait         = */ NULL,
-    /* .graph_optimize     = */ NULL,
+static const ggml_backend_i ggml_backend_blas_i = {
+    /* .get_name           = */ ggml_backend_blas_get_name,
+ /* .free = */ ggml_backend_blas_free, + /* .set_tensor_async = */ NULL, + /* .get_tensor_async = */ NULL, + /* .cpy_tensor_async = */ NULL, + /* .synchronize = */ NULL, + /* .graph_plan_create = */ NULL, + /* .graph_plan_free = */ NULL, + /* .graph_plan_update = */ NULL, + /* .graph_plan_compute = */ NULL, + /* .graph_compute = */ ggml_backend_blas_graph_compute, + /* .event_record = */ NULL, + /* .event_wait = */ NULL, + /* .graph_optimize = */ NULL, }; static ggml_guid_t ggml_backend_blas_guid(void) { @@ -280,10 +367,15 @@ static ggml_guid_t ggml_backend_blas_guid(void) { ggml_backend_t ggml_backend_blas_init(void) { ggml_backend_blas_context * ctx = new ggml_backend_blas_context; + if (ctx == NULL) { + return NULL; + } - ggml_backend_t backend = new ggml_backend { + ctx->n_threads = GGML_DEFAULT_N_THREADS; + + ggml_backend_t blas_backend = new ggml_backend { /* .guid = */ ggml_backend_blas_guid(), - /* .iface = */ blas_backend_i, + /* .iface = */ ggml_backend_blas_i, /* .device = */ ggml_backend_reg_dev_get(ggml_backend_blas_reg(), 0), /* .context = */ ctx, }; @@ -298,24 +390,39 @@ ggml_backend_t ggml_backend_blas_init(void) { GGML_LOG_DEBUG("%s: warning: ggml is using OpenMP, but BLIS was compiled without OpenMP support\n", __func__); #endif - return backend; + if (blas_backend == NULL) { + delete ctx; + return NULL; + } + + return blas_backend; } bool ggml_backend_is_blas(ggml_backend_t backend) { return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_blas_guid()); } -void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads) { - GGML_ASSERT(ggml_backend_is_blas(backend_blas)); +void ggml_backend_blas_set_n_threads(ggml_backend_t backend, int n_threads) { + GGML_ASSERT(ggml_backend_is_blas(backend)); - ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend_blas->context; + ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context; ctx->n_threads = n_threads; + +#if defined(OPENBLAS_VERSION) + openblas_set_num_threads(ctx->n_threads); +#endif + +#if defined(GGML_BLAS_USE_BLIS) + bli_thread_set_num_threads(ctx->n_threads); +#endif + +#if defined(GGML_BLAS_USE_NVPL) + nvpl_blas_set_num_threads(ctx->n_threads); +#endif } -// device interface - static const char * ggml_backend_blas_device_get_name(ggml_backend_dev_t dev) { - return "BLAS"; + return GGML_BLAS_NAME; GGML_UNUSED(dev); } @@ -332,14 +439,13 @@ static const char * ggml_backend_blas_device_get_description(ggml_backend_dev_t #elif defined(OPENBLAS_VERSION) return "OpenBLAS"; #else - return "BLAS"; + return GGML_BLAS_NAME; #endif GGML_UNUSED(dev); } static void ggml_backend_blas_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { - // TODO *free = 0; *total = 0; @@ -352,7 +458,7 @@ static enum ggml_backend_dev_type ggml_backend_blas_device_get_type(ggml_backend GGML_UNUSED(dev); } -static void ggml_backend_blas_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { +static void ggml_backend_blas_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) { props->name = ggml_backend_blas_device_get_name(dev); props->description = ggml_backend_blas_device_get_description(dev); props->type = ggml_backend_blas_device_get_type(dev); @@ -360,7 +466,7 @@ static void ggml_backend_blas_device_get_props(ggml_backend_dev_t dev, struct gg props->caps = { /* .async = */ false, /* .host_buffer = */ false, - /* .buffer_from_host_ptr = */ true, + /* .buffer_from_host_ptr = */ false, /* .events = */ false, 
};
 }
 
@@ -373,75 +479,63 @@ static ggml_backend_t ggml_backend_blas_device_init_backend(ggml_backend_dev_t d
 }
 
 static ggml_backend_buffer_type_t ggml_backend_blas_device_get_buffer_type(ggml_backend_dev_t dev) {
-    return ggml_backend_cpu_buffer_type();
+    return ggml_backend_blas_buffer_type();
 
     GGML_UNUSED(dev);
 }
 
-static ggml_backend_buffer_t ggml_backend_blas_device_buffer_from_host_ptr(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) {
-    return ggml_backend_cpu_buffer_from_ptr(ptr, size);
+static bool ggml_backend_blas_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+    const ggml_tensor * src1 = dst->src[1];
 
-    GGML_UNUSED(dev);
-    GGML_UNUSED(max_tensor_size);
-}
-
-static bool ggml_backend_blas_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
-    const struct ggml_tensor * src0 = op->src[0];
-    const struct ggml_tensor * src1 = op->src[1];
-
-    switch (op->op) {
-        case GGML_OP_NONE:
-        case GGML_OP_RESHAPE:
-        case GGML_OP_VIEW:
-        case GGML_OP_PERMUTE:
-        case GGML_OP_TRANSPOSE:
-            return true;
+    if (ggml_op_is_empty(dst->op)) {
+        return true;
+    }
 
+    switch (dst->op) {
         case GGML_OP_MUL_MAT:
         {
-            // BLAS usually is only faster for large matrices
-            const struct ggml_tensor * src0 = op->src[0];
-            const struct ggml_tensor * src1 = op->src[1];
-
-            const int64_t ne10 = src1->ne[0];
-
-            const int64_t ne0 = op->ne[0];
-            const int64_t ne1 = op->ne[1];
+            const int64_t ne10 = src1->ne[0];
+
+            const int64_t ne0 = dst->ne[0];
+            const int64_t ne1 = dst->ne[1];
 
             // TODO: find the optimal value
-            const int64_t min_batch = 32;
+            const int64_t min_batch = 1024;
 
-            return ggml_is_contiguous(src0) &&
-                   ggml_is_contiguous(src1) &&
-                   src1->type == GGML_TYPE_F32 &&
-                   (ne0 >= min_batch && ne1 >= min_batch && ne10 >= min_batch) &&
-                   (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL);
+            return ggml_is_contiguous(src0)
+                && ggml_is_contiguous(src1)
+                && src1->type == GGML_TYPE_F32
+                // NOTE: llama-bench creates views that somehow do not go through init_tensor
+                //       this prevents the uninitialized views from being used in BLAS
+                && src0->view_src == nullptr && src1->view_src == nullptr
+                && (ne0 >= min_batch && ne1 >= min_batch && ne10 >= min_batch)
+                && (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL);
         }
 
         case GGML_OP_OUT_PROD:
-            return op->src[0]->type == GGML_TYPE_F32 &&
-                   op->src[1]->type == GGML_TYPE_F32 &&
-                   ggml_is_matrix(src0) &&
-                   ggml_is_matrix(src1) &&
-                   ggml_is_contiguous(src0) &&
-                   (ggml_is_contiguous(src1) || ggml_is_transposed(src1)) &&
-                   (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL);
+        {
+            return src0->type == GGML_TYPE_F32
+                && src1->type == GGML_TYPE_F32
+                && ggml_is_matrix(src0)
+                && ggml_is_matrix(src1)
+                && ggml_is_contiguous(src0)
+                && (ggml_is_contiguous(src1) || ggml_is_transposed(src1));
+        }
 
         default:
             return false;
-
     }
 
     GGML_UNUSED(dev);
 }
 
 static bool ggml_backend_blas_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
-    return ggml_backend_buft_is_host(buft);
+    return buft->iface.get_name == ggml_backend_blas_buffer_type_get_name;
 
     GGML_UNUSED(dev);
 }
 
-static const struct ggml_backend_device_i ggml_backend_blas_device_i = {
+static const ggml_backend_device_i ggml_backend_blas_device_i = {
     /* .get_name        = */ ggml_backend_blas_device_get_name,
     /* .get_description = */ ggml_backend_blas_device_get_description,
     /* .get_memory      = */ ggml_backend_blas_device_get_memory,
@@ -450,7 +544,7 @@ static const
struct ggml_backend_device_i ggml_backend_blas_device_i = { /* .init_backend = */ ggml_backend_blas_device_init_backend, /* .get_buffer_type = */ ggml_backend_blas_device_get_buffer_type, /* .get_host_buffer_type = */ NULL, - /* .buffer_from_host_ptr = */ ggml_backend_blas_device_buffer_from_host_ptr, + /* .buffer_from_host_ptr = */ NULL, /* .supports_op = */ ggml_backend_blas_device_supports_op, /* .supports_buft = */ ggml_backend_blas_device_supports_buft, /* .offload_op = */ NULL, @@ -459,10 +553,10 @@ static const struct ggml_backend_device_i ggml_backend_blas_device_i = { /* .event_synchronize = */ NULL, }; -// backend reg interface +// BLAS backend - backend (reg) static const char * ggml_backend_blas_reg_get_name(ggml_backend_reg_t reg) { - return "BLAS"; + return GGML_BLAS_NAME; GGML_UNUSED(reg); } @@ -476,29 +570,27 @@ static size_t ggml_backend_blas_reg_get_device_count(ggml_backend_reg_t reg) { static ggml_backend_dev_t ggml_backend_blas_reg_get_device(ggml_backend_reg_t reg, size_t index) { GGML_ASSERT(index == 0); + static ggml_backend_blas_device_context ctx; static ggml_backend_device ggml_backend_blas_device = { /* .iface = */ ggml_backend_blas_device_i, /* .reg = */ reg, - /* .context = */ nullptr, + /* .context = */ &ctx, }; return &ggml_backend_blas_device; - - GGML_UNUSED(reg); - GGML_UNUSED(index); } static void * ggml_backend_blas_get_proc_address(ggml_backend_reg_t reg, const char * name) { - if (std::strcmp(name, "ggml_backend_set_n_threads") == 0) { + if (strcmp(name, "ggml_backend_set_n_threads") == 0) { return (void *)ggml_backend_blas_set_n_threads; } - return NULL; + + return nullptr; GGML_UNUSED(reg); - GGML_UNUSED(name); } -static const struct ggml_backend_reg_i ggml_backend_blas_reg_i = { +static const ggml_backend_reg_i ggml_backend_blas_reg_i = { /* .get_name = */ ggml_backend_blas_reg_get_name, /* .get_device_count = */ ggml_backend_blas_reg_get_device_count, /* .get_device = */ ggml_backend_blas_reg_get_device, @@ -506,8 +598,8 @@ static const struct ggml_backend_reg_i ggml_backend_blas_reg_i = { }; ggml_backend_reg_t ggml_backend_blas_reg(void) { - static struct ggml_backend_reg ggml_backend_blas_reg = { - /* .api_version = */ GGML_BACKEND_API_VERSION, + static ggml_backend_reg ggml_backend_blas_reg = { + /* .api_version = */ GGML_BLAS_VERSION, /* .iface = */ ggml_backend_blas_reg_i, /* .context = */ NULL, }; diff --git a/ggml/src/ggml-blas/mmf.cpp b/ggml/src/ggml-blas/mmf.cpp new file mode 100644 index 0000000000..d2a0f4633f --- /dev/null +++ b/ggml/src/ggml-blas/mmf.cpp @@ -0,0 +1,59 @@ +#include "ggml.h" +#include "mmf.hpp" + +void ggml_blas_mul_mat_f( + const ggml_backend_blas_context * ctx, + const ggml_tensor * src0, + const ggml_tensor * src1, + ggml_tensor * dst) { + + GGML_TENSOR_BINARY_OP_LOCALS + + const ggml_type type = src0->type; + + GGML_ASSERT(ne0 == ne01); + GGML_ASSERT(ne1 == ne11); + GGML_ASSERT(ne2 == ne12); + GGML_ASSERT(ne3 == ne13); + + // we don't support permuted src0 or src1 + GGML_ASSERT(nb00 == ggml_type_size(type)); + GGML_ASSERT(nb10 == ggml_type_size(src1->type)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + // broadcast factors + const int64_t r2 = ne12/ne02; + const int64_t r3 = ne13/ne03; + const int64_t ne_plane = ne01*ne00; + + const ggml_backend_blas_buffer * extra = (ggml_backend_blas_buffer *)src0->extra; + + for (int64_t i13 = 0; i13 < ne13; i13++) { + for (int64_t i12 = 0; i12 < ne12; i12++) { 
+ const int64_t i03 = i13/r3; + const int64_t i02 = i12/r2; + + const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03); + const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13); + float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); + + // switch to dequantized F32 data + if (type != GGML_TYPE_F32) { + x = (float *)extra->data + i02*ne_plane + i03*ne02*ne_plane; + } + + cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, + ne1, ne01, ne10, + 1.0f, y, ne10, + x, ne00, + 0.0f, d, ne01); + } + } + + GGML_UNUSED(ctx); +} diff --git a/ggml/src/ggml-blas/mmf.hpp b/ggml/src/ggml-blas/mmf.hpp new file mode 100644 index 0000000000..0f8b50e0d4 --- /dev/null +++ b/ggml/src/ggml-blas/mmf.hpp @@ -0,0 +1,9 @@ +#pragma once + +#include "common.hpp" + +void ggml_blas_mul_mat_f( + const ggml_backend_blas_context * ctx, + const ggml_tensor * src0, + const ggml_tensor * src1, + ggml_tensor * dst); diff --git a/ggml/src/ggml-blas/out-prod.cpp b/ggml/src/ggml-blas/out-prod.cpp new file mode 100644 index 0000000000..dfd1cc930d --- /dev/null +++ b/ggml/src/ggml-blas/out-prod.cpp @@ -0,0 +1,65 @@ +#include "ggml.h" +#include "out-prod.hpp" + +void ggml_blas_out_prod_f( + const ggml_backend_blas_context * ctx, + const ggml_tensor * src0, + const ggml_tensor * src1, + ggml_tensor * dst) { + + GGML_TENSOR_BINARY_OP_LOCALS + + GGML_ASSERT(ne0 == ne00); + GGML_ASSERT(ne1 == ne10); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne02 == ne12); + GGML_ASSERT(ne3 == ne13); + GGML_ASSERT(ne03 == ne13); + + // we don't support permuted src0 or src1 + GGML_ASSERT(nb00 == sizeof(float)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + // GGML_ASSERT(nb0 <= nb1); + // GGML_ASSERT(nb1 <= nb2); + // GGML_ASSERT(nb2 <= nb3); + + // Arguments to ggml_compute_forward_out_prod (expressed as major,minor) + // src0: (k,n) + // src1: (k,m) + // dst: (m,n) + // + // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f) + // Also expressed as (major,minor) + // a: (m,k): so src1 transposed + // b: (k,n): so src0 + // c: (m,n) + // + // However, if ggml_is_transposed(src1) is true, then + // src1->data already contains a transposed version, so sgemm mustn't + // transpose it further. 
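+    //
+    // Worked example (editor's illustration, not part of the original patch):
+    // with k = 2, n = 3, m = 4 we have src0 = (2,3), src1 = (2,4), and
+    // dst = (4,3) = src1^T * src0. The sgemm call below then receives
+    // M = m = 4, N = n = 3, K = k = 2, lda = m (src1 not yet transposed),
+    // ldb = n, ldc = n.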
+
+    int n = src0->ne[0];
+    int k = src0->ne[1];
+    int m = src1->ne[0];
+
+    CBLAS_TRANSPOSE transposeA;
+    int lda;
+
+    if (!ggml_is_transposed(src1)) {
+        transposeA = CblasTrans;
+        lda = m;
+    } else {
+        transposeA = CblasNoTrans;
+        lda = k;
+    }
+
+    float * a = (float *) ((char *) src1->data);
+    float * b = (float *) ((char *) src0->data);
+    float * c = (float *) ((char *) dst->data);
+
+    cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n);
+
+    GGML_UNUSED(ctx);
+}
diff --git a/ggml/src/ggml-blas/out-prod.hpp b/ggml/src/ggml-blas/out-prod.hpp
new file mode 100644
index 0000000000..442911f4e7
--- /dev/null
+++ b/ggml/src/ggml-blas/out-prod.hpp
@@ -0,0 +1,9 @@
+#pragma once
+
+#include "common.hpp"
+
+void ggml_blas_out_prod_f(
+    const ggml_backend_blas_context * ctx,
+    const ggml_tensor * src0,
+    const ggml_tensor * src1,
+    ggml_tensor * dst);
diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp
index 6dedd8de58..4607721fc1 100644
--- a/tests/test-backend-ops.cpp
+++ b/tests/test-backend-ops.cpp
@@ -1169,6 +1169,9 @@ struct test_case {
 
     std::vector<ggml_tensor *> sentinels;
 
+    // Track weight tensors for separate buffer allocation with GGML_BACKEND_BUFFER_USAGE_WEIGHTS
+    std::vector<ggml_tensor *> weight_tensors;
+
     std::string current_op_name;
 
     void add_sentinel(ggml_context * ctx) {
@@ -1247,6 +1250,8 @@
                        const char * op_names_filter, printer * output_printer) {
         mode = MODE_TEST;
 
+        weight_tensors.clear();
+        sentinels.clear();
 
         ggml_init_params params = {
             /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
@@ -1297,10 +1302,35 @@
         // post-graph sentinel
         add_sentinel(ctx);
 
-        // allocate
+        // allocate weight tensors in a separate buffer with GGML_BACKEND_BUFFER_USAGE_WEIGHTS
+        ggml_backend_buffer_t weights_buf = nullptr;
+        if (!weight_tensors.empty()) {
+            // Calculate total size needed for weight tensors
+            size_t weight_size = 0;
+            for (ggml_tensor * wt : weight_tensors) {
+                weight_size += ggml_backend_buft_get_alloc_size(ggml_backend_get_default_buffer_type(backend1), wt);
+            }
+            weight_size = GGML_PAD(weight_size, ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend1)));
+
+            weights_buf = ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend1), weight_size);
+            if (weights_buf == NULL) {
+                printf("failed to allocate weight tensors [%s] ", ggml_backend_name(backend1));
+                ggml_free(ctx);
+                return test_status_t::FAIL;
+            }
+            ggml_backend_buffer_set_usage(weights_buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
+
+            // Allocate each weight tensor in the weights buffer
+            ggml_tallocr weights_talloc = ggml_tallocr_new(weights_buf);
+            for (ggml_tensor * wt : weight_tensors) {
+                ggml_tallocr_alloc(&weights_talloc, wt);
+            }
+        }
+
+        // allocate remaining tensors
         ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend1);
-        if (buf == NULL) {
+        if (buf == NULL && weights_buf == NULL) {
             printf("failed to allocate tensors [%s] ", ggml_backend_name(backend1));
             ggml_free(ctx);
             return test_status_t::FAIL;
@@ -1400,6 +1430,9 @@
             run_whole_graph() ?
fused_nodes_to_verify.data() : nullptr, fused_nodes_to_verify.size()); + if (weights_buf) { + ggml_backend_buffer_free(weights_buf); + } ggml_backend_buffer_free(buf); ggml_free(ctx); @@ -1419,6 +1452,7 @@ struct test_case { bool eval_perf(ggml_backend_t backend, const char * op_names_filter, printer * output_printer) { mode = MODE_PERF; + weight_tensors.clear(); static const size_t graph_nodes = 8192; @@ -1447,10 +1481,34 @@ struct test_case { return true; } - // allocate + // allocate weight tensors in a separate buffer with GGML_BACKEND_BUFFER_USAGE_WEIGHTS + ggml_backend_buffer_ptr weights_buf(nullptr); // smart ptr + if (!weight_tensors.empty()) { + // Calculate total size needed for weight tensors + size_t weight_size = 0; + for (ggml_tensor * wt : weight_tensors) { + weight_size += ggml_backend_buft_get_alloc_size(ggml_backend_get_default_buffer_type(backend), wt); + } + weight_size = GGML_PAD(weight_size, ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend))); + + weights_buf.reset(ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend), weight_size)); + if (weights_buf == NULL) { + printf("failed to allocate weight tensors\n"); + return false; + } + ggml_backend_buffer_set_usage(weights_buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS); + + // Allocate each weight tensor in the weights buffer + ggml_tallocr weights_talloc = ggml_tallocr_new(weights_buf.get()); + for (ggml_tensor * wt : weight_tensors) { + ggml_tallocr_alloc(&weights_talloc, wt); + } + } + + // allocate remaining tensors ggml_backend_buffer_ptr buf(ggml_backend_alloc_ctx_tensors(ctx.get(), backend)); // smart ptr - if (buf == NULL) { + if (buf == NULL && weights_buf == NULL) { printf("failed to allocate tensors\n"); return false; } @@ -1549,6 +1607,7 @@ struct test_case { bool eval_support(ggml_backend_t backend, const char * op_names_filter, printer * output_printer) { mode = MODE_SUPPORT; + weight_tensors.clear(); static const size_t graph_nodes = 8192; @@ -1584,6 +1643,7 @@ struct test_case { bool eval_grad(ggml_backend_t backend, const char * op_names_filter, printer * output_printer) { mode = MODE_GRAD; + weight_tensors.clear(); const std::vector expect = grad_expect(); ggml_init_params params = { @@ -1694,9 +1754,35 @@ struct test_case { return true; } - // allocate + // allocate weight tensors in a separate buffer with GGML_BACKEND_BUFFER_USAGE_WEIGHTS + ggml_backend_buffer_ptr weights_buf(nullptr); // smart ptr + if (!weight_tensors.empty()) { + // Calculate total size needed for weight tensors + size_t weight_size = 0; + for (ggml_tensor * wt : weight_tensors) { + weight_size += ggml_backend_buft_get_alloc_size(ggml_backend_get_default_buffer_type(backend), wt); + } + weight_size = GGML_PAD(weight_size, ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend))); + + weights_buf.reset(ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend), weight_size)); + if (weights_buf == NULL) { + test_operation_info info(op_desc(out), vars(), ggml_backend_name(backend)); + info.set_error("weight allocation", ""); + output_printer->print_operation(info); + return false; + } + ggml_backend_buffer_set_usage(weights_buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS); + + // Allocate each weight tensor in the weights buffer + ggml_tallocr weights_talloc = ggml_tallocr_new(weights_buf.get()); + for (ggml_tensor * wt : weight_tensors) { + ggml_tallocr_alloc(&weights_talloc, wt); + } + } + + // allocate remaining tensors ggml_backend_buffer_ptr 
buf(ggml_backend_alloc_ctx_tensors(ctx.get(), backend)); // smart ptr - if (buf == NULL) { + if (buf == NULL && weights_buf == NULL) { test_operation_info info(op_desc(out), vars(), ggml_backend_name(backend)); info.set_error("allocation", ""); output_printer->print_operation(info); @@ -3662,6 +3748,7 @@ struct test_mul_mat : public test_case { a = ggml_new_tensor_4d(ctx, type_a, ne_a[per[0]], ne_a[per[1]], ne_a[per[2]], ne_a[per[3]]); b = ggml_new_tensor_4d(ctx, type_b, ne_b[per[0]], ne_b[per[1]], ne_b[per[2]], ne_b[per[3]]); + weight_tensors.push_back(a); // Track weight tensor for GGML_BACKEND_BUFFER_USAGE_WEIGHTS if (!ggml_is_quantized(type_a)) { if (bs[1] == 1 && nr[1] == 1) { ggml_set_param(a); @@ -3679,6 +3766,7 @@ struct test_mul_mat : public test_case { const int64_t k_physical = k_v == 0 ? k : k_v; a = ggml_new_tensor_4d(ctx, type_a, k_physical, m, bs[0], bs[1]); b = ggml_new_tensor_4d(ctx, type_b, k_physical, n, bs[0]*nr[0], bs[1]*nr[1]); + weight_tensors.push_back(a); // Track weight tensor for GGML_BACKEND_BUFFER_USAGE_WEIGHTS if (!ggml_is_quantized(type_a)) { if (bs[1] == 1 && nr[1] == 1) { @@ -3772,6 +3860,7 @@ struct test_mul_mat_id : public test_case { // C^T = A * B^T: (k, m) * (k, n) => (m, n) ggml_tensor * as = ggml_new_tensor_3d(ctx, type_a, k, m, n_mats); ggml_set_name(as, "as"); + weight_tensors.push_back(as); // Track weight tensor for GGML_BACKEND_BUFFER_USAGE_WEIGHTS ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_mats, n); ggml_set_name(ids, "ids"); @@ -3832,6 +3921,7 @@ struct test_mul_mat_id_fusion : public test_case { // C^T = A * B^T: (k, m) * (k, n) => (m, n) ggml_tensor * as = ggml_new_tensor_3d(ctx, type_a, k, m, n_mats); ggml_set_name(as, "as"); + weight_tensors.push_back(as); // Track weight tensor for GGML_BACKEND_BUFFER_USAGE_WEIGHTS ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_mats, n); ggml_set_name(ids, "ids"); @@ -3848,6 +3938,7 @@ struct test_mul_mat_id_fusion : public test_case { for (uint32_t i = 1; i < o; ++i) { ggml_tensor * a2 = ggml_new_tensor_3d(ctx, type_a, k, m, n_mats); + weight_tensors.push_back(a2); // Track weight tensor for GGML_BACKEND_BUFFER_USAGE_WEIGHTS ggml_tensor * out2 = ggml_mul_mat_id(ctx, a2, b, ids); ggml_set_name(out2, "out2"); out = ggml_add(ctx, out, out2);
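
Editor's note, a bring-up sketch that is not part of the patch. The refactor keeps the public BLAS entry points intact, so a minimal harness only needs the names visible in the diff above (`ggml_backend_blas_init`, `ggml_backend_blas_set_n_threads`, `ggml_backend_blas_reg`, the `"ggml_backend_set_n_threads"` proc string) plus the generic `ggml-backend.h` API; the `main` function, the typedef name, and the thread count are illustrative assumptions:

```cpp
// Editor's sketch: initialize the refactored BLAS backend and set its
// thread count, both directly and via the registry proc-address hook.
#include "ggml-backend.h"
#include "ggml-blas.h"

#include <cstdio>

// assumed to mirror the signature registered under "ggml_backend_set_n_threads"
typedef void (*set_n_threads_fn)(ggml_backend_t backend, int n_threads);

int main(void) {
    ggml_backend_t backend = ggml_backend_blas_init();
    if (backend == NULL) {
        fprintf(stderr, "failed to initialize BLAS backend\n");
        return 1;
    }

    // direct setter; per the diff this now also forwards the thread count to
    // the BLAS vendor (OpenBLAS/BLIS/NVPL) instead of doing so on every matmul
    ggml_backend_blas_set_n_threads(backend, 8);

    // generic lookup, as used by callers that do not link against ggml-blas.h
    set_n_threads_fn fn = (set_n_threads_fn)
        ggml_backend_reg_get_proc_address(ggml_backend_blas_reg(), "ggml_backend_set_n_threads");
    if (fn != NULL) {
        fn(backend, 8);
    }

    ggml_backend_free(backend);
    return 0;
}
```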