diff --git a/CODEOWNERS b/CODEOWNERS
index 8a0c98c968..de80874291 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -47,6 +47,7 @@
 /ggml/cmake/ @ggerganov
 /ggml/include/ @ggerganov
 /ggml/src/ggml-common.h @ggerganov
+/ggml/src/ggml-blas/ @taronaeo
 /ggml/src/ggml-cpu/ @ggerganov
 /ggml/src/ggml-cpu/spacemit/ @alex-spacemit
 /ggml/src/ggml-cuda/fattn* @JohannesGaessler
diff --git a/ggml/src/ggml-blas/ggml-blas-rework.cpp b/ggml/src/ggml-blas/ggml-blas-rework.cpp
new file mode 100644
index 0000000000..9c8be1470c
--- /dev/null
+++ b/ggml/src/ggml-blas/ggml-blas-rework.cpp
@@ -0,0 +1,685 @@
+#include "ggml-impl.h"
+#include "ggml-blas.h"
+#include "ggml-backend-impl.h"
+
+#include <future>
+#include <vector>
+#include <cstring>
+
+#if defined(GGML_BLAS_USE_ACCELERATE)
+# include <Accelerate/Accelerate.h>
+#elif defined(GGML_BLAS_USE_MKL)
+# include <mkl.h>
+#elif defined(GGML_BLAS_USE_BLIS)
+# include <blis.h>
+#elif defined(GGML_BLAS_USE_NVPL)
+# include <nvpl_blas.h>
+#else
+# include <cblas.h>
+#endif
+
+struct ggml_backend_blas_context {
+    int n_threads = GGML_DEFAULT_N_THREADS;
+    std::unique_ptr<char[]> work_data;
+    size_t work_size = 0;
+#ifndef GGML_USE_OPENMP
+    std::vector<std::future<void>> tasks;
+#endif
+};
+
+static void ggml_backend_blas_mul_mat(
+        ggml_backend_blas_context * ctx,
+        ggml_tensor * dst) {
+
+    const ggml_tensor * src0 = dst->src[0];
+    const ggml_tensor * src1 = dst->src[1];
+
+    GGML_TENSOR_BINARY_OP_LOCALS
+
+    const ggml_type type = src0->type;
+
+    GGML_ASSERT(ne0 == ne01);
+    GGML_ASSERT(ne1 == ne11);
+    GGML_ASSERT(ne2 == ne12);
+    GGML_ASSERT(ne3 == ne13);
+
+    // we don't support permuted src0 or src1
+    GGML_ASSERT(nb00 == ggml_type_size(type));
+    GGML_ASSERT(nb10 == ggml_type_size(src1->type));
+
+    // dst cannot be transposed or permuted
+    GGML_ASSERT(nb0 == sizeof(float));
+    GGML_ASSERT(nb0 <= nb1);
+    GGML_ASSERT(nb1 <= nb2);
+    GGML_ASSERT(nb2 <= nb3);
+
+    // broadcast factors
+    const int64_t r2 = ne12/ne02;
+    const int64_t r3 = ne13/ne03;
+
+    const int64_t ne_plane = ne01*ne00;
+    const size_t desired_wsize = type == GGML_TYPE_F32 ?
0 : ne03*ne02*ne_plane*sizeof(float); + + if (ctx->work_size < desired_wsize) { + ctx->work_data.reset(new char[desired_wsize]); + ctx->work_size = desired_wsize; + } + void * wdata = ctx->work_data.get(); + + // convert src0 to float + if (type != GGML_TYPE_F32) { + const auto * type_traits = ggml_get_type_traits(type); + ggml_to_float_t const to_float = type_traits->to_float; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + const void * x = (char *) src0->data + i02*nb02 + i03*nb03; + float * const wplane = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane; + + const int min_cols_per_thread = 4096; + const int min_rows_per_thread = std::max((int)(min_cols_per_thread/ne00), 1); + const int n_threads = std::max(std::min(ctx->n_threads, (int)(ne01/min_rows_per_thread)), 1); + +#ifdef GGML_USE_OPENMP + #pragma omp parallel for num_threads(n_threads) + for (int64_t i01 = 0; i01 < ne01; i01++) { + to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); + } +#else + for (int i = 1; i < n_threads; i++) { + const int64_t start = (i + 0) * ne01/n_threads; + const int64_t end = (i + 1) * ne01/n_threads; + if (start < end) { + ctx->tasks.push_back(std::async(std::launch::async, [=]() { + for (int64_t i01 = start; i01 < end; i01++) { + to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); + } + })); + } + } + { + // reuse the current thread for the first task + const int64_t start = 0; + const int64_t end = ne01/n_threads; + for (int64_t i01 = start; i01 < end; i01++) { + to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); + } + } +#endif + } + } + +#ifndef GGML_USE_OPENMP + // wait for all tasks to finish + for (auto & task : ctx->tasks) { + task.get(); + } + ctx->tasks.clear(); +#endif + } + +#if defined(OPENBLAS_VERSION) + openblas_set_num_threads(ctx->n_threads); +#endif + +#if defined(GGML_BLAS_USE_BLIS) + bli_thread_set_num_threads(ctx->n_threads); +#endif + +#if defined(GGML_BLAS_USE_NVPL) + nvpl_blas_set_num_threads(ctx->n_threads); +#endif + + for (int64_t i13 = 0; i13 < ne13; i13++) { + for (int64_t i12 = 0; i12 < ne12; i12++) { + const int64_t i03 = i13/r3; + const int64_t i02 = i12/r2; + + const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03); + const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13); + float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); + + if (type != GGML_TYPE_F32) { + x = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane; + } + + GGML_LOG_INFO("%s: name='%s' type=%s dequantized first 5 elements: [%.6f, %.6f, %.6f, %.6f, %.6f]\n", + __func__, + src0->name, + ggml_type_name(src0->type), + ((float *)x)[0], + ((float *)x)[1], + ((float *)x)[2], + ((float *)x)[3], + ((float *)x)[4]); + + cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, + ne1, ne01, ne10, + 1.0f, y, ne10, + x, ne00, + 0.0f, d, ne01); + } + } +} + +static void ggml_backend_blas_mul_mat_id( + ggml_backend_blas_context * ctx, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * ids = dst->src[2]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const ggml_type type = src0->type; + + GGML_ASSERT(nb00 == ggml_type_size(type)); + GGML_ASSERT(nb10 == ggml_type_size(src1->type)); + + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(ne03 == 1); + GGML_ASSERT(ne13 == 1); + GGML_ASSERT(ne3 == 1); + + GGML_ASSERT(src1->type == GGML_TYPE_F32); + 
GGML_ASSERT(ids->type == GGML_TYPE_I32); + + const int64_t ne_plane = ne01*ne00; + const size_t desired_wsize = type == GGML_TYPE_F32 + ? 0 + : ne03*ne02*ne_plane*sizeof(float); + + if (ctx->work_size < desired_wsize) { + ctx->work_data.reset(new char[desired_wsize]); + ctx->work_size = desired_wsize; + } + void * wdata = ctx->work_data.get(); + + // convert src0 to float + if (type != GGML_TYPE_F32) { + const auto * type_traits = ggml_get_type_traits(type); + ggml_to_float_t const to_float = type_traits->to_float; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + const void * x = (char *) src0->data + i02*nb02 + i03*nb03; + float * const wplane = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane; + + const int min_cols_per_thread = 4096; + const int min_rows_per_thread = std::max((int)(min_cols_per_thread/ne00), 1); + const int n_threads = std::max(std::min(ctx->n_threads, (int)(ne01/min_rows_per_thread)), 1); + +#ifdef GGML_USE_OPENMP + #pragma omp parallel for num_threads(n_threads) + for (int64_t i01 = 0; i01 < ne01; i01++) { + to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); + } +#else + for (int i = 1; i < n_threads; i++) { + const int64_t start = (i + 0)*ne01/n_threads; + const int64_t end = (i + 1)*ne01/n_threads; + if (start < end) { + ctx->tasks.push_back(std::async(std::launch::async, [=]() { + for (int64_t i01 = start; i01 < end; i01++) { + to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); + } + })); + } + } + { + // reuse the current thread for the first task + const int64_t start = 0; + const int64_t end = ne01/n_threads; + for (int64_t i01 = start; i01 < end; i01++) { + to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); + } + } +#endif + } + } + + GGML_LOG_INFO("%s: name='%s' type=%s dequantized first 5 elements: [%.6f, %.6f, %.6f, %.6f, %.6f]\n", + __func__, + src0->name, + ggml_type_name(src0->type), + ((float *)wdata)[0], + ((float *)wdata)[1], + ((float *)wdata)[2], + ((float *)wdata)[3], + ((float *)wdata)[4]); + +#ifndef GGML_USE_OPENMP + // wait for all tasks to finish + for (auto & task : ctx->tasks) { + task.get(); + } + ctx->tasks.clear(); +#endif + } + +#if defined(OPENBLAS_VERSION) + openblas_set_num_threads(ctx->n_threads); +#endif + +#if defined(GGML_BLAS_USE_BLIS) + bli_thread_set_num_threads(ctx->n_threads); +#endif + +#if defined(GGML_BLAS_USE_NVPL) + nvpl_blas_set_num_threads(ctx->n_threads); +#endif + + const int n_ids = ids->ne[0]; + const int n_tokens = ids->ne[1]; + + for (int t = 0; t < n_tokens; ++t) { + for (int e = 0; e < n_ids; ++e) { + const int32_t expert = *(const int32_t *) ((const char *) ids->data + e*ids->nb[0] + t*ids->nb[1]); + GGML_ASSERT(expert >= 0 && expert < ne02); + + const int e_src1 = e % ne11; + + const float * a = (float *) ((char *) src0->data + expert*nb02); + const float * b = (float *) ((char *) src1->data + e_src1*nb11 + t*nb12); + float * d = (float *) ((char *) dst->data + e*nb1 + t*nb2); + + if (type != GGML_TYPE_F32) { + a = (float *) wdata + expert*ne_plane; + } + + cblas_sgemv(CblasRowMajor, CblasNoTrans, + ne01, ne00, + 1.0f, a, ne00, + b, 1, + 0.0f, d, 1); + } + } +} + +static void ggml_backend_blas_out_prod(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) { + const struct ggml_tensor * src0 = dst->src[0]; + const struct ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + GGML_ASSERT(ne0 == ne00); + GGML_ASSERT(ne1 == ne10); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne02 == ne12); + GGML_ASSERT(ne3 == 
ne13); + GGML_ASSERT(ne03 == ne13); + + // we don't support permuted src0 or src1 + GGML_ASSERT(nb00 == sizeof(float)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + // GGML_ASSERT(nb0 <= nb1); + // GGML_ASSERT(nb1 <= nb2); + // GGML_ASSERT(nb2 <= nb3); + + // Arguments to ggml_compute_forward_out_prod (expressed as major,minor) + // src0: (k,n) + // src1: (k,m) + // dst: (m,n) + // + // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f) + // Also expressed as (major,minor) + // a: (m,k): so src1 transposed + // b: (k,n): so src0 + // c: (m,n) + // + // However, if ggml_is_transposed(src1) is true, then + // src1->data already contains a transposed version, so sgemm mustn't + // transpose it further. + + int n = src0->ne[0]; + int k = src0->ne[1]; + int m = src1->ne[0]; + + CBLAS_TRANSPOSE transposeA; + int lda; + + if (!ggml_is_transposed(src1)) { + transposeA = CblasTrans; + lda = m; + } else { + transposeA = CblasNoTrans; + lda = k; + } + + float * a = (float *) ((char *) src1->data); + float * b = (float *) ((char *) src0->data); + float * c = (float *) ((char *) dst->data); + + cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n); + + GGML_UNUSED(ctx); +} + +// backend interface + +static const char * ggml_backend_blas_get_name(ggml_backend_t backend) { + return "BLAS"; + + GGML_UNUSED(backend); +} + +static void ggml_backend_blas_free(ggml_backend_t backend) { + ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context; + delete ctx; + delete backend; +} + +static enum ggml_status ggml_backend_blas_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { + ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context; + + for (int i = 0; i < cgraph->n_nodes; i++) { + struct ggml_tensor * node = cgraph->nodes[i]; + + switch (node->op) { + case GGML_OP_MUL_MAT: + ggml_backend_blas_mul_mat(ctx, node); + break; + + case GGML_OP_MUL_MAT_ID: + ggml_backend_blas_mul_mat_id(ctx, node); + break; + + case GGML_OP_OUT_PROD: + ggml_backend_blas_out_prod(ctx, node); + break; + + case GGML_OP_NONE: + case GGML_OP_RESHAPE: + case GGML_OP_VIEW: + case GGML_OP_PERMUTE: + case GGML_OP_TRANSPOSE: + break; + + default: + GGML_ABORT("%s: unsupported op %s\n", __func__, ggml_op_desc(node)); + } + } + + return GGML_STATUS_SUCCESS; + + GGML_UNUSED(backend); +} + +static struct ggml_backend_i blas_backend_i = { + /* .get_name = */ ggml_backend_blas_get_name, + /* .free = */ ggml_backend_blas_free, + /* .set_tensor_async = */ NULL, + /* .get_tensor_async = */ NULL, + /* .cpy_tensor_async = */ NULL, + /* .synchronize = */ NULL, + /* .graph_plan_create = */ NULL, + /* .graph_plan_free = */ NULL, + /* .graph_plan_update = */ NULL, + /* .graph_plan_compute = */ NULL, + /* .graph_compute = */ ggml_backend_blas_graph_compute, + /* .event_record = */ NULL, + /* .event_wait = */ NULL, + /* .graph_optimize = */ NULL, +}; + +static ggml_guid_t ggml_backend_blas_guid(void) { + static ggml_guid guid = { 0x12, 0xa8, 0xae, 0xf4, 0xc0, 0x1e, 0x61, 0x97, 0x8f, 0xeb, 0x33, 0x04, 0xa1, 0x33, 0x51, 0x2d }; + return &guid; +} + +ggml_backend_t ggml_backend_blas_init(void) { + ggml_backend_blas_context * ctx = new ggml_backend_blas_context; + + ggml_backend_t backend = new ggml_backend { + /* .guid = */ ggml_backend_blas_guid(), + /* .iface = */ blas_backend_i, + /* .device = */ ggml_backend_reg_dev_get(ggml_backend_blas_reg(), 0), + /* .context = */ ctx, + 
}; + +#if defined(OPENBLAS_VERSION) && defined(GGML_USE_OPENMP) + if (openblas_get_parallel() != OPENBLAS_OPENMP) { + GGML_LOG_DEBUG("%s: warning: ggml is using OpenMP, but OpenBLAS was compiled without OpenMP support\n", __func__); + } +#endif + +#if defined(BLIS_ENABLE_CBLAS) && defined(GGML_USE_OPENMP) && !defined(BLIS_ENABLE_OPENMP) + GGML_LOG_DEBUG("%s: warning: ggml is using OpenMP, but BLIS was compiled without OpenMP support\n", __func__); +#endif + + return backend; +} + +bool ggml_backend_is_blas(ggml_backend_t backend) { + return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_blas_guid()); +} + +void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads) { + GGML_ASSERT(ggml_backend_is_blas(backend_blas)); + + ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend_blas->context; + ctx->n_threads = n_threads; +} + +// device interface + +static const char * ggml_backend_blas_device_get_name(ggml_backend_dev_t dev) { + return "BLAS"; + + GGML_UNUSED(dev); +} + +static const char * ggml_backend_blas_device_get_description(ggml_backend_dev_t dev) { + #if defined(GGML_BLAS_USE_ACCELERATE) + return "Accelerate"; + #elif defined(GGML_BLAS_USE_MKL) + return "MKL"; + #elif defined(GGML_BLAS_USE_BLIS) + return "BLIS"; + #elif defined(GGML_BLAS_USE_NVPL) + return "NVPL"; + #elif defined(OPENBLAS_VERSION) + return "OpenBLAS"; + #else + return "BLAS"; + #endif + + GGML_UNUSED(dev); +} + +static void ggml_backend_blas_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { + // TODO + *free = 0; + *total = 0; + + GGML_UNUSED(dev); +} + +static enum ggml_backend_dev_type ggml_backend_blas_device_get_type(ggml_backend_dev_t dev) { + return GGML_BACKEND_DEVICE_TYPE_ACCEL; + + GGML_UNUSED(dev); +} + +static void ggml_backend_blas_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { + props->name = ggml_backend_blas_device_get_name(dev); + props->description = ggml_backend_blas_device_get_description(dev); + props->type = ggml_backend_blas_device_get_type(dev); + ggml_backend_blas_device_get_memory(dev, &props->memory_free, &props->memory_total); + props->caps = { + /* .async = */ false, + /* .host_buffer = */ false, + /* .buffer_from_host_ptr = */ true, + /* .events = */ false, + }; +} + +static ggml_backend_t ggml_backend_blas_device_init_backend(ggml_backend_dev_t dev, const char * params) { + return ggml_backend_blas_init(); + + GGML_UNUSED(dev); + GGML_UNUSED(params); +} + +static ggml_backend_buffer_type_t ggml_backend_blas_device_get_buffer_type(ggml_backend_dev_t dev) { + return ggml_backend_cpu_buffer_type(); + + GGML_UNUSED(dev); +} + +static ggml_backend_buffer_t ggml_backend_blas_device_buffer_from_host_ptr(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) { + return ggml_backend_cpu_buffer_from_ptr(ptr, size); + + GGML_UNUSED(dev); + GGML_UNUSED(max_tensor_size); +} + +static bool ggml_backend_blas_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) { + const struct ggml_tensor * src0 = op->src[0]; + const struct ggml_tensor * src1 = op->src[1]; + + switch (op->op) { + case GGML_OP_NONE: + case GGML_OP_RESHAPE: + case GGML_OP_VIEW: + case GGML_OP_PERMUTE: + case GGML_OP_TRANSPOSE: + return true; + + case GGML_OP_MUL_MAT: + { + // BLAS usually is only faster for large matrices + const struct ggml_tensor * src0 = op->src[0]; + const struct ggml_tensor * src1 = op->src[1]; + + const int64_t ne10 = src1->ne[0]; + + const int64_t ne0 = 
op->ne[0]; + const int64_t ne1 = op->ne[1]; + + // TODO: find the optimal value + const int64_t min_batch = 32; + + return ggml_is_contiguous(src0) && + ggml_is_contiguous(src1) && + src1->type == GGML_TYPE_F32 && + (ne0 >= min_batch && ne1 >= min_batch && ne10 >= min_batch) && + (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL); + } + + case GGML_OP_MUL_MAT_ID: + { + const ggml_tensor * src0 = op->src[0]; + const ggml_tensor * src1 = op->src[1]; + + return ggml_is_contiguous(src0) && + ggml_is_contiguous(src1) && + src1->type == GGML_TYPE_F32 && + (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL); + } + + case GGML_OP_OUT_PROD: + return op->src[0]->type == GGML_TYPE_F32 && + op->src[1]->type == GGML_TYPE_F32 && + ggml_is_matrix(src0) && + ggml_is_matrix(src1) && + ggml_is_contiguous(src0) && + (ggml_is_contiguous(src1) || ggml_is_transposed(src1)) && + (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL); + + default: + return false; + + } + + GGML_UNUSED(dev); +} + +static bool ggml_backend_blas_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { + return ggml_backend_buft_is_host(buft); + + GGML_UNUSED(dev); +} + +static const struct ggml_backend_device_i ggml_backend_blas_device_i = { + /* .get_name = */ ggml_backend_blas_device_get_name, + /* .get_description = */ ggml_backend_blas_device_get_description, + /* .get_memory = */ ggml_backend_blas_device_get_memory, + /* .get_type = */ ggml_backend_blas_device_get_type, + /* .get_props = */ ggml_backend_blas_device_get_props, + /* .init_backend = */ ggml_backend_blas_device_init_backend, + /* .get_buffer_type = */ ggml_backend_blas_device_get_buffer_type, + /* .get_host_buffer_type = */ NULL, + /* .buffer_from_host_ptr = */ ggml_backend_blas_device_buffer_from_host_ptr, + /* .supports_op = */ ggml_backend_blas_device_supports_op, + /* .supports_buft = */ ggml_backend_blas_device_supports_buft, + /* .offload_op = */ NULL, + /* .event_new = */ NULL, + /* .event_free = */ NULL, + /* .event_synchronize = */ NULL, +}; + +// backend reg interface + +static const char * ggml_backend_blas_reg_get_name(ggml_backend_reg_t reg) { + return "BLAS"; + + GGML_UNUSED(reg); +} + +static size_t ggml_backend_blas_reg_get_device_count(ggml_backend_reg_t reg) { + return 1; + + GGML_UNUSED(reg); +} + +static ggml_backend_dev_t ggml_backend_blas_reg_get_device(ggml_backend_reg_t reg, size_t index) { + GGML_ASSERT(index == 0); + + static ggml_backend_device ggml_backend_blas_device = { + /* .iface = */ ggml_backend_blas_device_i, + /* .reg = */ reg, + /* .context = */ nullptr, + }; + + return &ggml_backend_blas_device; + + GGML_UNUSED(reg); + GGML_UNUSED(index); +} + +static void * ggml_backend_blas_get_proc_address(ggml_backend_reg_t reg, const char * name) { + if (std::strcmp(name, "ggml_backend_set_n_threads") == 0) { + return (void *)ggml_backend_blas_set_n_threads; + } + return NULL; + + GGML_UNUSED(reg); + GGML_UNUSED(name); +} + +static const struct ggml_backend_reg_i ggml_backend_blas_reg_i = { + /* .get_name = */ ggml_backend_blas_reg_get_name, + /* .get_device_count = */ ggml_backend_blas_reg_get_device_count, + /* .get_device = */ ggml_backend_blas_reg_get_device, + /* .get_proc_address = */ ggml_backend_blas_get_proc_address, +}; + +ggml_backend_reg_t ggml_backend_blas_reg(void) { + static struct ggml_backend_reg ggml_backend_blas_reg = { + /* .api_version = */ GGML_BACKEND_API_VERSION, + /* .iface = */ 
ggml_backend_blas_reg_i, + /* .context = */ NULL, + }; + + return &ggml_backend_blas_reg; +} + +GGML_BACKEND_DL_IMPL(ggml_backend_blas_reg) diff --git a/ggml/src/ggml-blas/ggml-blas.cpp b/ggml/src/ggml-blas/ggml-blas.cpp index 5b888cdd8c..a4e3898f13 100644 --- a/ggml/src/ggml-blas/ggml-blas.cpp +++ b/ggml/src/ggml-blas/ggml-blas.cpp @@ -2,9 +2,14 @@ #include "ggml-blas.h" #include "ggml-backend-impl.h" +#include "ggml.h" +#include "ggml-backend.h" + #include +#include #include #include +#include #if defined(GGML_BLAS_USE_ACCELERATE) # include @@ -18,22 +23,262 @@ # include #endif -struct ggml_backend_blas_context { - int n_threads = GGML_DEFAULT_N_THREADS; - std::unique_ptr work_data; - size_t work_size = 0; +struct ggml_backend_blas_buffer { + void * data; // dequantized data + size_t size; // ggml_nelements * sizeof(float) +}; + +struct ggml_backend_blas_buffer_type_context { + int n_threads; + #ifndef GGML_USE_OPENMP std::vector> tasks; #endif }; -static void ggml_backend_blas_mul_mat(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) { - const struct ggml_tensor * src0 = dst->src[0]; - const struct ggml_tensor * src1 = dst->src[1]; +// BLAS backend - buffer + +static void ggml_backend_blas_buffer_free_buffer(ggml_backend_buffer_t buffer) { + GGML_ASSERT(buffer); + + ggml_backend_blas_buffer * buf_ctx = (ggml_backend_blas_buffer *)buffer->context; + ggml_aligned_free(buf_ctx->data, buf_ctx->size); + ggml_aligned_free(buffer->context, buffer->size); +} + +static void * ggml_backend_blas_buffer_get_base(ggml_backend_buffer_t buffer) { + GGML_ASSERT(buffer); + uintptr_t data = (uintptr_t)buffer->context; + + // align the buffer + if (data % TENSOR_ALIGNMENT != 0) { + data = GGML_PAD(data, TENSOR_ALIGNMENT); + } + + return (void *)data; +} + +static enum ggml_status ggml_backend_blas_buffer_init_tensor( + ggml_backend_buffer_t buffer, + ggml_tensor * tensor) { + + if (tensor->view_src != NULL) { + assert(tensor->view_src->buffer->buft == buffer->buft); + return GGML_STATUS_SUCCESS; + } + + if (tensor->type != GGML_TYPE_F32) { + ggml_backend_blas_buffer * extra = new ggml_backend_blas_buffer; + extra->data = ggml_aligned_malloc(ggml_nelements(tensor) * sizeof(float)); // sizeof(float) because dequantized + extra->size = ggml_nelements(tensor) * sizeof(float); + + tensor->extra = extra; + } + + return GGML_STATUS_SUCCESS; + + GGML_UNUSED(buffer); +} + +static void ggml_backend_blas_buffer_memset_tensor( + ggml_backend_buffer_t buffer, + ggml_tensor * tensor, + uint8_t value, + size_t offset, + size_t size) { + + GGML_ASSERT(tensor); + memset((char *)tensor->data + offset, value, size); + + GGML_UNUSED(buffer); +} + +static void ggml_backend_blas_buffer_set_tensor( + ggml_backend_buffer_t buffer, + ggml_tensor * tensor, + const void * data, + size_t offset, + size_t size) { + + GGML_ASSERT(tensor); + memcpy((char *)tensor->data + offset, data, size); + + ggml_backend_blas_buffer_type_context * buft_ctx = (ggml_backend_blas_buffer_type_context *)buffer->buft->context; + ggml_backend_blas_buffer * extra = (ggml_backend_blas_buffer *)tensor->extra; + + const int64_t ne00 = tensor->ne[0]; + const int64_t ne01 = tensor->ne[1]; + const int64_t ne02 = tensor->ne[2]; + const int64_t ne03 = tensor->ne[3]; + + const int64_t nb00 = tensor->nb[0]; + const int64_t nb01 = tensor->nb[1]; + const int64_t nb02 = tensor->nb[2]; + const int64_t nb03 = tensor->nb[3]; + + const int64_t ne_plane = ne01*ne00; + + if (tensor->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS + && tensor->type != 
GGML_TYPE_F32 + && ggml_get_type_traits(tensor->type)->to_float != NULL) { + + const auto * type_traits = ggml_get_type_traits(tensor->type); + ggml_to_float_t const to_float = type_traits->to_float; + GGML_ASSERT(to_float != nullptr); + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + const void * x = (char *)tensor->data + i02*nb02 + i03*nb03; + float * const wplane = (float *)extra->data + i02*ne_plane + i03*ne02*ne_plane; + + const int min_cols_per_thread = 4096; + const int min_rows_per_thread = std::max((int)(min_cols_per_thread / ne00), 1); + const int n_threads = std::max(std::min(buft_ctx->n_threads, (int)(ne01 / min_rows_per_thread)), 1); + +#ifdef GGML_USE_OPENMP + #pragma omp parallel for num_threads(n_threads) + for (int64_t i01 = 0; i01 < ne01; i01++) { + to_float((const char *)x + i01*nb01, wplane + i01*ne00, ne00); + } +#else + for (int i = 1; i < n_threads; i++) { + const int64_t start = (i + 0) * ne01/n_threads; + const int64_t end = (i + 1) * ne01/n_threads; + if (start < end) { + buft_ctx->tasks.push_back(std::async(std::launch::async, [=]() { + for (int64_t i01 = start; i01 < end; i01++) { + to_float((const char *)x + i01*nb01, wplane + i01*ne00, ne00); + } + })); + } + } + { + // reuse the current thread for the first task + const int64_t start = 0; + const int64_t end = ne01/n_threads; + for (int64_t i01 = start; i01 < end; i01++) { + to_float((const char *)x + i01*nb01, wplane + i01*ne00, ne00); + } + } +#endif + } + } + +#ifndef GGML_USE_OPENMP + // wait for all tasks to finish + for (auto & task : buft_ctx->tasks) { + task.get(); + } + buft_ctx->tasks.clear(); +#endif + } + + GGML_UNUSED(nb00); +} + +static void ggml_backend_blas_buffer_get_tensor( + ggml_backend_buffer_t buffer, + const ggml_tensor * tensor, + void * data, + size_t offset, + size_t size) { + + GGML_ASSERT(tensor); + memcpy(data, (const char *)tensor->data + offset, size); + + GGML_UNUSED(buffer); +} + +static void ggml_backend_blas_buffer_clear( + ggml_backend_buffer_t buffer, + uint8_t value) { + + GGML_ASSERT(buffer); + memset(buffer->context, value, buffer->size); +} + +static const ggml_backend_buffer_i ggml_backend_blas_buffer_i = { + /* .free_buffer = */ ggml_backend_blas_buffer_free_buffer, + /* .get_base = */ ggml_backend_blas_buffer_get_base, + /* .init_tensor = */ ggml_backend_blas_buffer_init_tensor, + /* .memset_tensor = */ ggml_backend_blas_buffer_memset_tensor, + /* .set_tensor = */ ggml_backend_blas_buffer_set_tensor, + /* .get_tensor = */ ggml_backend_blas_buffer_get_tensor, + /* .cpy_tensor = */ NULL, + /* .clear = */ ggml_backend_blas_buffer_clear, + /* .reset = */ NULL, +}; + +// BLAS backend buffer type + +static const char * ggml_backend_blas_buffer_type_get_name(ggml_backend_buffer_type_t buft) { + return "BLAS"; + GGML_UNUSED(buft); +} + +static ggml_backend_buffer_t ggml_backend_blas_buffer_type_alloc_buffer( + ggml_backend_buffer_type_t buft, + size_t size) { + + void * data = ggml_aligned_malloc(size); + if (data == nullptr) { + GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu\n", __func__, size); + return NULL; + } + + return ggml_backend_buffer_init(buft, ggml_backend_blas_buffer_i, data, size); +} + +static size_t ggml_backend_blas_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { + + return TENSOR_ALIGNMENT; + GGML_UNUSED(buft); +} + +static bool ggml_backend_blas_buffer_type_is_host(ggml_backend_buffer_type_t buft) { + + return true; + GGML_UNUSED(buft); +} + +static ggml_backend_buffer_type_t 
ggml_backend_blas_buffer_type(void) { + static ggml_backend_blas_buffer_type_context buft_ctx = { + /* .n_threads = */ (int)std::thread::hardware_concurrency(), +#ifndef GGML_USE_OPENMP + /* .tasks = */ std::vector>(), +#endif + }; + + static ggml_backend_buffer_type ggml_backend_blas_buffer_type = { + /* .iface = */ { + /* .get_name = */ ggml_backend_blas_buffer_type_get_name, + /* .alloc_buffer = */ ggml_backend_blas_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_blas_buffer_type_get_alignment, + /* .get_max_size = */ NULL, // defaults to SIZE_MAX + /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes + /* .is_host = */ ggml_backend_blas_buffer_type_is_host, + }, + /* .device = */ NULL, + /* .context = */ &buft_ctx, + }; + + return &ggml_backend_blas_buffer_type; +} + +struct ggml_backend_blas_context { + int n_threads; +}; + +static void ggml_backend_blas_mul_mat( + ggml_backend_blas_context * ctx, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; GGML_TENSOR_BINARY_OP_LOCALS - const enum ggml_type type = src0->type; + const ggml_type type = src0->type; GGML_ASSERT(ne0 == ne01); GGML_ASSERT(ne1 == ne11); @@ -53,68 +298,133 @@ static void ggml_backend_blas_mul_mat(ggml_backend_blas_context * ctx, struct gg // broadcast factors const int64_t r2 = ne12/ne02; const int64_t r3 = ne13/ne03; + const int64_t ne_plane = ne01*ne00; - const int64_t ne_plane = ne01*ne00; - const size_t desired_wsize = type == GGML_TYPE_F32 ? 0 : ne03*ne02*ne_plane*sizeof(float); + const ggml_backend_blas_buffer * extra = (ggml_backend_blas_buffer *)src0->extra; - if (ctx->work_size < desired_wsize) { - ctx->work_data.reset(new char[desired_wsize]); - ctx->work_size = desired_wsize; - } - void * wdata = ctx->work_data.get(); + for (int64_t i13 = 0; i13 < ne13; i13++) { + for (int64_t i12 = 0; i12 < ne12; i12++) { + const int64_t i03 = i13/r3; + const int64_t i02 = i12/r2; - // convert src0 to float - if (type != GGML_TYPE_F32) { - const auto * type_traits = ggml_get_type_traits(type); - ggml_to_float_t const to_float = type_traits->to_float; + const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03); + const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13); + float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - const void * x = (char *) src0->data + i02*nb02 + i03*nb03; - float * const wplane = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane; - - const int min_cols_per_thread = 4096; - const int min_rows_per_thread = std::max((int)(min_cols_per_thread/ne00), 1); - const int n_threads = std::max(std::min(ctx->n_threads, (int)(ne01/min_rows_per_thread)), 1); - -#ifdef GGML_USE_OPENMP - #pragma omp parallel for num_threads(n_threads) - for (int64_t i01 = 0; i01 < ne01; i01++) { - to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); - } -#else - for (int i = 1; i < n_threads; i++) { - const int64_t start = i*ne01/n_threads; - const int64_t end = (i + 1)*ne01/n_threads; - if (start < end) { - ctx->tasks.push_back(std::async(std::launch::async, [=]() { - for (int64_t i01 = start; i01 < end; i01++) { - to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00); - } - })); - } - } - { - // reuse the current thread for the first task - const int64_t start = 0; - const int64_t end = ne01/n_threads; - for (int64_t i01 = start; i01 < end; i01++) { - to_float((const char *) x + i01*nb01, wplane + 
i01*ne00, ne00); - } - } -#endif + // switch to dequantized F32 data + if (type != GGML_TYPE_F32) { + x = (float *)extra->data + i02*ne_plane + i03*ne02*ne_plane; } + + cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, + ne1, ne01, ne10, + 1.0f, y, ne10, + x, ne00, + 0.0f, d, ne01); + } + } + + GGML_UNUSED(ctx); +} + + +static const char * ggml_backend_blas_get_name(ggml_backend_t backend) { + return "BLAS"; + + GGML_UNUSED(backend); +} + +static void ggml_backend_blas_free(ggml_backend_t backend) { + ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context; + + delete ctx; + delete backend; +} + +static ggml_status ggml_backend_blas_graph_compute( + ggml_backend_t backend, + ggml_cgraph * cgraph) { + + ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context; + + for (int i = 0; i < cgraph->n_nodes; i++) { + ggml_tensor * node = cgraph->nodes[i]; + + if (ggml_op_is_empty(node->op)) { + continue; } -#ifndef GGML_USE_OPENMP - // wait for all tasks to finish - for (auto & task : ctx->tasks) { - task.get(); + switch (node->op) { + case GGML_OP_MUL_MAT: + { + ggml_backend_blas_mul_mat(ctx, node); + } break; + + default: + GGML_ABORT("%s: unsupported op %s\n", __func__, ggml_op_desc(node)); } - ctx->tasks.clear(); -#endif } + return GGML_STATUS_SUCCESS; + + GGML_UNUSED(backend); +} + +static const ggml_backend_i ggml_backend_blas_i = { + /* .get_name = */ ggml_backend_blas_get_name, + /* .free = */ ggml_backend_blas_free, + /* .set_tensor_async = */ NULL, + /* .get_tensor_async = */ NULL, + /* .cpy_tensor_async = */ NULL, + /* .synchronize = */ NULL, + /* .graph_plan_create = */ NULL, + /* .graph_plan_free = */ NULL, + /* .graph_plan_update = */ NULL, + /* .graph_plan_compute = */ NULL, + /* .graph_compute = */ ggml_backend_blas_graph_compute, + /* .event_record = */ NULL, + /* .event_wait = */ NULL, + /* .graph_optimize = */ NULL, +}; + +static ggml_guid_t ggml_backend_blas_guid(void) { + static ggml_guid guid = { 0x12, 0xa8, 0xae, 0xf4, 0xc0, 0x1e, 0x61, 0x97, 0x8f, 0xeb, 0x33, 0x04, 0xa1, 0x33, 0x51, 0x2d }; + return &guid; +} + +ggml_backend_t ggml_backend_blas_init(void) { + ggml_backend_blas_context * ctx = new ggml_backend_blas_context; + if (ctx == NULL) { + return NULL; + } + + ctx->n_threads = GGML_DEFAULT_N_THREADS; + + ggml_backend_t blas_backend = new ggml_backend { + /* .guid = */ ggml_backend_blas_guid(), + /* .iface = */ ggml_backend_blas_i, + /* .device = */ ggml_backend_reg_dev_get(ggml_backend_blas_reg(), 0), + /* .context = */ ctx, + }; + + if (blas_backend == NULL) { + delete ctx; + return NULL; + } + + return blas_backend; +} + +bool ggml_backend_is_blas(ggml_backend_t backend) { + return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_blas_guid()); +} + +void ggml_backend_blas_set_n_threads(ggml_backend_t backend, int n_threads) { + GGML_ASSERT(ggml_backend_is_blas(backend)); + + ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context; + ctx->n_threads = n_threads; + #if defined(OPENBLAS_VERSION) openblas_set_num_threads(ctx->n_threads); #endif @@ -126,193 +436,9 @@ static void ggml_backend_blas_mul_mat(ggml_backend_blas_context * ctx, struct gg #if defined(GGML_BLAS_USE_NVPL) nvpl_blas_set_num_threads(ctx->n_threads); #endif - - for (int64_t i13 = 0; i13 < ne13; i13++) { - for (int64_t i12 = 0; i12 < ne12; i12++) { - const int64_t i03 = i13/r3; - const int64_t i02 = i12/r2; - - const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03); - const float * y = (float *) 
((char *) src1->data + i12*nb12 + i13*nb13); - float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); - - if (type != GGML_TYPE_F32) { - x = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane; - } - - cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, - ne1, ne01, ne10, - 1.0f, y, ne10, - x, ne00, - 0.0f, d, ne01); - } - } } -static void ggml_backend_blas_out_prod(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) { - const struct ggml_tensor * src0 = dst->src[0]; - const struct ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - GGML_ASSERT(ne0 == ne00); - GGML_ASSERT(ne1 == ne10); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne02 == ne12); - GGML_ASSERT(ne3 == ne13); - GGML_ASSERT(ne03 == ne13); - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == sizeof(float)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - // GGML_ASSERT(nb0 <= nb1); - // GGML_ASSERT(nb1 <= nb2); - // GGML_ASSERT(nb2 <= nb3); - - // Arguments to ggml_compute_forward_out_prod (expressed as major,minor) - // src0: (k,n) - // src1: (k,m) - // dst: (m,n) - // - // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f) - // Also expressed as (major,minor) - // a: (m,k): so src1 transposed - // b: (k,n): so src0 - // c: (m,n) - // - // However, if ggml_is_transposed(src1) is true, then - // src1->data already contains a transposed version, so sgemm mustn't - // transpose it further. - - int n = src0->ne[0]; - int k = src0->ne[1]; - int m = src1->ne[0]; - - CBLAS_TRANSPOSE transposeA; - int lda; - - if (!ggml_is_transposed(src1)) { - transposeA = CblasTrans; - lda = m; - } else { - transposeA = CblasNoTrans; - lda = k; - } - - float * a = (float *) ((char *) src1->data); - float * b = (float *) ((char *) src0->data); - float * c = (float *) ((char *) dst->data); - - cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n); - - GGML_UNUSED(ctx); -} - -// backend interface - -static const char * ggml_backend_blas_get_name(ggml_backend_t backend) { - return "BLAS"; - - GGML_UNUSED(backend); -} - -static void ggml_backend_blas_free(ggml_backend_t backend) { - ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context; - delete ctx; - delete backend; -} - -static enum ggml_status ggml_backend_blas_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { - ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context; - - for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * node = cgraph->nodes[i]; - - switch (node->op) { - case GGML_OP_MUL_MAT: - ggml_backend_blas_mul_mat(ctx, node); - break; - - case GGML_OP_OUT_PROD: - ggml_backend_blas_out_prod(ctx, node); - break; - - case GGML_OP_NONE: - case GGML_OP_RESHAPE: - case GGML_OP_VIEW: - case GGML_OP_PERMUTE: - case GGML_OP_TRANSPOSE: - break; - - default: - GGML_ABORT("%s: unsupported op %s\n", __func__, ggml_op_desc(node)); - } - } - - return GGML_STATUS_SUCCESS; - - GGML_UNUSED(backend); -} - -static struct ggml_backend_i blas_backend_i = { - /* .get_name = */ ggml_backend_blas_get_name, - /* .free = */ ggml_backend_blas_free, - /* .set_tensor_async = */ NULL, - /* .get_tensor_async = */ NULL, - /* .cpy_tensor_async = */ NULL, - /* .synchronize = */ NULL, - /* .graph_plan_create = */ NULL, - /* .graph_plan_free = */ NULL, - /* .graph_plan_update = */ NULL, - /* .graph_plan_compute = */ NULL, - /* .graph_compute = */ ggml_backend_blas_graph_compute, - /* 
.event_record = */ NULL, - /* .event_wait = */ NULL, - /* .graph_optimize = */ NULL, -}; - -static ggml_guid_t ggml_backend_blas_guid(void) { - static ggml_guid guid = { 0x12, 0xa8, 0xae, 0xf4, 0xc0, 0x1e, 0x61, 0x97, 0x8f, 0xeb, 0x33, 0x04, 0xa1, 0x33, 0x51, 0x2d }; - return &guid; -} - -ggml_backend_t ggml_backend_blas_init(void) { - ggml_backend_blas_context * ctx = new ggml_backend_blas_context; - - ggml_backend_t backend = new ggml_backend { - /* .guid = */ ggml_backend_blas_guid(), - /* .iface = */ blas_backend_i, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_blas_reg(), 0), - /* .context = */ ctx, - }; - -#if defined(OPENBLAS_VERSION) && defined(GGML_USE_OPENMP) - if (openblas_get_parallel() != OPENBLAS_OPENMP) { - GGML_LOG_DEBUG("%s: warning: ggml is using OpenMP, but OpenBLAS was compiled without OpenMP support\n", __func__); - } -#endif - -#if defined(BLIS_ENABLE_CBLAS) && defined(GGML_USE_OPENMP) && !defined(BLIS_ENABLE_OPENMP) - GGML_LOG_DEBUG("%s: warning: ggml is using OpenMP, but BLIS was compiled without OpenMP support\n", __func__); -#endif - - return backend; -} - -bool ggml_backend_is_blas(ggml_backend_t backend) { - return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_blas_guid()); -} - -void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads) { - GGML_ASSERT(ggml_backend_is_blas(backend_blas)); - - ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend_blas->context; - ctx->n_threads = n_threads; -} - -// device interface +struct ggml_backend_blas_device_context {}; static const char * ggml_backend_blas_device_get_name(ggml_backend_dev_t dev) { return "BLAS"; @@ -339,7 +465,6 @@ static const char * ggml_backend_blas_device_get_description(ggml_backend_dev_t } static void ggml_backend_blas_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { - // TODO *free = 0; *total = 0; @@ -352,7 +477,7 @@ static enum ggml_backend_dev_type ggml_backend_blas_device_get_type(ggml_backend GGML_UNUSED(dev); } -static void ggml_backend_blas_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { +static void ggml_backend_blas_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) { props->name = ggml_backend_blas_device_get_name(dev); props->description = ggml_backend_blas_device_get_description(dev); props->type = ggml_backend_blas_device_get_type(dev); @@ -360,7 +485,7 @@ static void ggml_backend_blas_device_get_props(ggml_backend_dev_t dev, struct gg props->caps = { /* .async = */ false, /* .host_buffer = */ false, - /* .buffer_from_host_ptr = */ true, + /* .buffer_from_host_ptr = */ false, /* .events = */ false, }; } @@ -373,63 +498,41 @@ static ggml_backend_t ggml_backend_blas_device_init_backend(ggml_backend_dev_t d } static ggml_backend_buffer_type_t ggml_backend_blas_device_get_buffer_type(ggml_backend_dev_t dev) { - return ggml_backend_cpu_buffer_type(); + return ggml_backend_blas_buffer_type(); GGML_UNUSED(dev); } -static ggml_backend_buffer_t ggml_backend_blas_device_buffer_from_host_ptr(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) { - return ggml_backend_cpu_buffer_from_ptr(ptr, size); +static bool ggml_backend_blas_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; - GGML_UNUSED(dev); - GGML_UNUSED(max_tensor_size); -} - -static bool ggml_backend_blas_device_supports_op(ggml_backend_dev_t dev, const struct 
ggml_tensor * op) { - const struct ggml_tensor * src0 = op->src[0]; - const struct ggml_tensor * src1 = op->src[1]; - - switch (op->op) { - case GGML_OP_NONE: - case GGML_OP_RESHAPE: - case GGML_OP_VIEW: - case GGML_OP_PERMUTE: - case GGML_OP_TRANSPOSE: - return true; + if (ggml_op_is_empty(dst->op)) { + return true; + } + switch (dst->op) { case GGML_OP_MUL_MAT: { - // BLAS usually is only faster for large matrices - const struct ggml_tensor * src0 = op->src[0]; - const struct ggml_tensor * src1 = op->src[1]; - const int64_t ne10 = src1->ne[0]; - - const int64_t ne0 = op->ne[0]; - const int64_t ne1 = op->ne[1]; + const int64_t ne0 = dst->ne[0]; + const int64_t ne1 = dst->ne[1]; // TODO: find the optimal value const int64_t min_batch = 32; - return ggml_is_contiguous(src0) && - ggml_is_contiguous(src1) && - src1->type == GGML_TYPE_F32 && - (ne0 >= min_batch && ne1 >= min_batch && ne10 >= min_batch) && - (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL); + return ggml_is_contiguous(src0) + && ggml_is_contiguous(src1) + && src1->type == GGML_TYPE_F32 + // NOTE: llama-bench creates views that somehow does not go through init_tensor + // this prevents the uninitialized views from being used in BLAS + && src0->view_src == nullptr && src1->view_src == nullptr + && (ne0 >= min_batch && ne1 >= min_batch && ne10 >= min_batch) + && (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL); } - case GGML_OP_OUT_PROD: - return op->src[0]->type == GGML_TYPE_F32 && - op->src[1]->type == GGML_TYPE_F32 && - ggml_is_matrix(src0) && - ggml_is_matrix(src1) && - ggml_is_contiguous(src0) && - (ggml_is_contiguous(src1) || ggml_is_transposed(src1)) && - (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL); - default: return false; - } GGML_UNUSED(dev); @@ -441,7 +544,7 @@ static bool ggml_backend_blas_device_supports_buft(ggml_backend_dev_t dev, ggml_ GGML_UNUSED(dev); } -static const struct ggml_backend_device_i ggml_backend_blas_device_i = { +static const ggml_backend_device_i ggml_backend_blas_device_i = { /* .get_name = */ ggml_backend_blas_device_get_name, /* .get_description = */ ggml_backend_blas_device_get_description, /* .get_memory = */ ggml_backend_blas_device_get_memory, @@ -450,7 +553,7 @@ static const struct ggml_backend_device_i ggml_backend_blas_device_i = { /* .init_backend = */ ggml_backend_blas_device_init_backend, /* .get_buffer_type = */ ggml_backend_blas_device_get_buffer_type, /* .get_host_buffer_type = */ NULL, - /* .buffer_from_host_ptr = */ ggml_backend_blas_device_buffer_from_host_ptr, + /* .buffer_from_host_ptr = */ NULL, /* .supports_op = */ ggml_backend_blas_device_supports_op, /* .supports_buft = */ ggml_backend_blas_device_supports_buft, /* .offload_op = */ NULL, @@ -459,7 +562,7 @@ static const struct ggml_backend_device_i ggml_backend_blas_device_i = { /* .event_synchronize = */ NULL, }; -// backend reg interface +// BLAS backend - backend (reg) static const char * ggml_backend_blas_reg_get_name(ggml_backend_reg_t reg) { return "BLAS"; @@ -476,29 +579,27 @@ static size_t ggml_backend_blas_reg_get_device_count(ggml_backend_reg_t reg) { static ggml_backend_dev_t ggml_backend_blas_reg_get_device(ggml_backend_reg_t reg, size_t index) { GGML_ASSERT(index == 0); + static ggml_backend_blas_device_context ctx; static ggml_backend_device ggml_backend_blas_device = { /* .iface = */ ggml_backend_blas_device_i, /* .reg = */ reg, - /* .context = */ nullptr, + /* .context = */ &ctx, }; return 
&ggml_backend_blas_device; - - GGML_UNUSED(reg); - GGML_UNUSED(index); } static void * ggml_backend_blas_get_proc_address(ggml_backend_reg_t reg, const char * name) { - if (std::strcmp(name, "ggml_backend_set_n_threads") == 0) { + if (strcmp(name, "ggml_backend_set_n_threads") == 0) { return (void *)ggml_backend_blas_set_n_threads; } - return NULL; + + return nullptr; GGML_UNUSED(reg); - GGML_UNUSED(name); } -static const struct ggml_backend_reg_i ggml_backend_blas_reg_i = { +static const ggml_backend_reg_i ggml_backend_blas_reg_i = { /* .get_name = */ ggml_backend_blas_reg_get_name, /* .get_device_count = */ ggml_backend_blas_reg_get_device_count, /* .get_device = */ ggml_backend_blas_reg_get_device, @@ -506,7 +607,7 @@ static const struct ggml_backend_reg_i ggml_backend_blas_reg_i = { }; ggml_backend_reg_t ggml_backend_blas_reg(void) { - static struct ggml_backend_reg ggml_backend_blas_reg = { + static ggml_backend_reg ggml_backend_blas_reg = { /* .api_version = */ GGML_BACKEND_API_VERSION, /* .iface = */ ggml_backend_blas_reg_i, /* .context = */ NULL, diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 416218b5b8..1a375c42bc 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -1160,6 +1160,9 @@ struct test_case { std::vector sentinels; + // Track weight tensors for separate buffer allocation with GGML_BACKEND_BUFFER_USAGE_WEIGHTS + std::vector weight_tensors; + std::string current_op_name; void add_sentinel(ggml_context * ctx) { @@ -1238,6 +1241,8 @@ struct test_case { const char * op_names_filter, printer * output_printer) { mode = MODE_TEST; + weight_tensors.clear(); + sentinels.clear(); ggml_init_params params = { /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead(), @@ -1288,10 +1293,35 @@ struct test_case { // post-graph sentinel add_sentinel(ctx); - // allocate + // allocate weight tensors in a separate buffer with GGML_BACKEND_BUFFER_USAGE_WEIGHTS + ggml_backend_buffer_t weights_buf = nullptr; + if (!weight_tensors.empty()) { + // Calculate total size needed for weight tensors + size_t weight_size = 0; + for (ggml_tensor * wt : weight_tensors) { + weight_size += ggml_backend_buft_get_alloc_size(ggml_backend_get_default_buffer_type(backend1), wt); + } + weight_size = GGML_PAD(weight_size, ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend1))); + + weights_buf = ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend1), weight_size); + if (weights_buf == NULL) { + printf("failed to allocate weight tensors [%s] ", ggml_backend_name(backend1)); + ggml_free(ctx); + return test_status_t::FAIL; + } + ggml_backend_buffer_set_usage(weights_buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); + + // Allocate each weight tensor in the weights buffer + ggml_tallocr weights_talloc = ggml_tallocr_new(weights_buf); + for (ggml_tensor * wt : weight_tensors) { + ggml_tallocr_alloc(&weights_talloc, wt); + } + } + + // allocate remaining tensors ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend1); - if (buf == NULL) { + if (buf == NULL && weights_buf == NULL) { printf("failed to allocate tensors [%s] ", ggml_backend_name(backend1)); ggml_free(ctx); return test_status_t::FAIL; @@ -1385,6 +1415,9 @@ struct test_case { const bool cmp_ok = ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud, run_whole_graph() ? 
out : nullptr); + if (weights_buf) { + ggml_backend_buffer_free(weights_buf); + } ggml_backend_buffer_free(buf); ggml_free(ctx); @@ -1404,6 +1437,7 @@ struct test_case { bool eval_perf(ggml_backend_t backend, const char * op_names_filter, printer * output_printer) { mode = MODE_PERF; + weight_tensors.clear(); static const size_t graph_nodes = 8192; @@ -1432,10 +1466,34 @@ struct test_case { return true; } - // allocate + // allocate weight tensors in a separate buffer with GGML_BACKEND_BUFFER_USAGE_WEIGHTS + ggml_backend_buffer_ptr weights_buf(nullptr); // smart ptr + if (!weight_tensors.empty()) { + // Calculate total size needed for weight tensors + size_t weight_size = 0; + for (ggml_tensor * wt : weight_tensors) { + weight_size += ggml_backend_buft_get_alloc_size(ggml_backend_get_default_buffer_type(backend), wt); + } + weight_size = GGML_PAD(weight_size, ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend))); + + weights_buf.reset(ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend), weight_size)); + if (weights_buf == NULL) { + printf("failed to allocate weight tensors\n"); + return false; + } + ggml_backend_buffer_set_usage(weights_buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS); + + // Allocate each weight tensor in the weights buffer + ggml_tallocr weights_talloc = ggml_tallocr_new(weights_buf.get()); + for (ggml_tensor * wt : weight_tensors) { + ggml_tallocr_alloc(&weights_talloc, wt); + } + } + + // allocate remaining tensors ggml_backend_buffer_ptr buf(ggml_backend_alloc_ctx_tensors(ctx.get(), backend)); // smart ptr - if (buf == NULL) { + if (buf == NULL && weights_buf == NULL) { printf("failed to allocate tensors\n"); return false; } @@ -1534,6 +1592,7 @@ struct test_case { bool eval_support(ggml_backend_t backend, const char * op_names_filter, printer * output_printer) { mode = MODE_SUPPORT; + weight_tensors.clear(); static const size_t graph_nodes = 8192; @@ -1569,6 +1628,7 @@ struct test_case { bool eval_grad(ggml_backend_t backend, const char * op_names_filter, printer * output_printer) { mode = MODE_GRAD; + weight_tensors.clear(); const std::vector expect = grad_expect(); ggml_init_params params = { @@ -1679,9 +1739,35 @@ struct test_case { return true; } - // allocate + // allocate weight tensors in a separate buffer with GGML_BACKEND_BUFFER_USAGE_WEIGHTS + ggml_backend_buffer_ptr weights_buf(nullptr); // smart ptr + if (!weight_tensors.empty()) { + // Calculate total size needed for weight tensors + size_t weight_size = 0; + for (ggml_tensor * wt : weight_tensors) { + weight_size += ggml_backend_buft_get_alloc_size(ggml_backend_get_default_buffer_type(backend), wt); + } + weight_size = GGML_PAD(weight_size, ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend))); + + weights_buf.reset(ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend), weight_size)); + if (weights_buf == NULL) { + test_operation_info info(op_desc(out), vars(), ggml_backend_name(backend)); + info.set_error("weight allocation", ""); + output_printer->print_operation(info); + return false; + } + ggml_backend_buffer_set_usage(weights_buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS); + + // Allocate each weight tensor in the weights buffer + ggml_tallocr weights_talloc = ggml_tallocr_new(weights_buf.get()); + for (ggml_tensor * wt : weight_tensors) { + ggml_tallocr_alloc(&weights_talloc, wt); + } + } + + // allocate remaining tensors ggml_backend_buffer_ptr buf(ggml_backend_alloc_ctx_tensors(ctx.get(), backend)); 
// smart ptr - if (buf == NULL) { + if (buf == NULL && weights_buf == NULL) { test_operation_info info(op_desc(out), vars(), ggml_backend_name(backend)); info.set_error("allocation", ""); output_printer->print_operation(info); @@ -3606,6 +3692,7 @@ struct test_mul_mat : public test_case { a = ggml_new_tensor_4d(ctx, type_a, ne_a[per[0]], ne_a[per[1]], ne_a[per[2]], ne_a[per[3]]); b = ggml_new_tensor_4d(ctx, type_b, ne_b[per[0]], ne_b[per[1]], ne_b[per[2]], ne_b[per[3]]); + weight_tensors.push_back(a); // Track weight tensor for GGML_BACKEND_BUFFER_USAGE_WEIGHTS if (!ggml_is_quantized(type_a)) { if (bs[1] == 1 && nr[1] == 1) { ggml_set_param(a); @@ -3623,6 +3710,7 @@ struct test_mul_mat : public test_case { const int64_t k_physical = k_v == 0 ? k : k_v; a = ggml_new_tensor_4d(ctx, type_a, k_physical, m, bs[0], bs[1]); b = ggml_new_tensor_4d(ctx, type_b, k_physical, n, bs[0]*nr[0], bs[1]*nr[1]); + weight_tensors.push_back(a); // Track weight tensor for GGML_BACKEND_BUFFER_USAGE_WEIGHTS if (!ggml_is_quantized(type_a)) { if (bs[1] == 1 && nr[1] == 1) { @@ -3716,6 +3804,7 @@ struct test_mul_mat_id : public test_case { // C^T = A * B^T: (k, m) * (k, n) => (m, n) ggml_tensor * as = ggml_new_tensor_3d(ctx, type_a, k, m, n_mats); ggml_set_name(as, "as"); + weight_tensors.push_back(as); // Track weight tensor for GGML_BACKEND_BUFFER_USAGE_WEIGHTS ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_mats, n); ggml_set_name(ids, "ids"); @@ -3776,6 +3865,7 @@ struct test_mul_mat_id_fusion : public test_case { // C^T = A * B^T: (k, m) * (k, n) => (m, n) ggml_tensor * as = ggml_new_tensor_3d(ctx, type_a, k, m, n_mats); ggml_set_name(as, "as"); + weight_tensors.push_back(as); // Track weight tensor for GGML_BACKEND_BUFFER_USAGE_WEIGHTS ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_mats, n); ggml_set_name(ids, "ids"); @@ -3792,6 +3882,7 @@ struct test_mul_mat_id_fusion : public test_case { for (uint32_t i = 1; i < o; ++i) { ggml_tensor * a2 = ggml_new_tensor_3d(ctx, type_a, k, m, n_mats); + weight_tensors.push_back(a2); // Track weight tensor for GGML_BACKEND_BUFFER_USAGE_WEIGHTS ggml_tensor * out2 = ggml_mul_mat_id(ctx, a2, b, ids); ggml_set_name(out2, "out2"); out = ggml_add(ctx, out, out2);
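Note on the GEMM convention used by ggml_backend_blas_mul_mat above: each batch plane is computed as dst = src1 · src0ᵀ with row-major CBLAS, where src1 is treated as an M×K matrix (M = ne1, K = ne10), src0 (or its dequantized copy) as an N×K matrix passed transposed (N = ne01), and dst as an M×N block with leading dimension ne01. The following is a minimal standalone sketch of that exact call shape, using made-up sizes and values and plain CBLAS rather than ggml tensors, so it is only an illustration of the convention, not part of the patch:

    // Sketch of the row-major C = A * B^T convention used by the BLAS backend.
    // A is M x K (plays the role of src1), B is N x K (src0, passed transposed),
    // C is M x N (dst). All sizes and values are made up for illustration.
    #include <cblas.h>
    #include <cstdio>

    int main() {
        const int M = 2, N = 3, K = 4; // ne1, ne01, ne10 in the backend's terms

        const float A[M*K] = { 1, 2, 3, 4,   // "src1": 2 rows of 4 floats
                               5, 6, 7, 8 };
        const float B[N*K] = { 1, 0, 0, 0,   // "src0": 3 rows of 4 floats
                               0, 1, 0, 0,
                               0, 0, 1, 0 };
        float C[M*N] = { 0 };

        // same argument order as in ggml_backend_blas_mul_mat:
        //   cblas_sgemm(RowMajor, NoTrans, Trans, ne1, ne01, ne10,
        //               1.0f, y, ne10, x, ne00, 0.0f, d, ne01)
        cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                    M, N, K,
                    1.0f, A, K,
                          B, K,
                    0.0f, C, N);

        for (int i = 0; i < M; i++) {
            for (int j = 0; j < N; j++) {
                printf("%6.1f ", C[i*N + j]); // expected: first 3 entries of each row of A
            }
            printf("\n");
        }
        return 0;
    }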
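Note on the test-backend-ops changes: the new weight_tensors tracking exists so that weights land in their own buffer whose usage is set to GGML_BACKEND_BUFFER_USAGE_WEIGHTS, which is the condition under which the reworked BLAS buffer type dequantizes them once in set_tensor. Below is a condensed sketch of that allocation pattern, factored into a hypothetical helper (alloc_weights is not part of the patch; the ggml calls are the same ones the test code uses):

    // Sketch: allocate a list of weight tensors in a dedicated buffer and mark it
    // as a weights buffer, mirroring the pattern added to test-backend-ops.cpp.
    #include "ggml.h"
    #include "ggml-alloc.h"
    #include "ggml-backend.h"
    #include <vector>

    ggml_backend_buffer_t alloc_weights(ggml_backend_t backend,
                                        const std::vector<ggml_tensor *> & weights) {
        ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend);

        // total size of all weight tensors, padded to the buffer type's alignment
        size_t size = 0;
        for (ggml_tensor * w : weights) {
            size += ggml_backend_buft_get_alloc_size(buft, w);
        }
        size = GGML_PAD(size, ggml_backend_buft_get_alignment(buft));

        ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(buft, size);
        if (buf == nullptr) {
            return nullptr;
        }
        // lets buffer implementations (e.g. the reworked BLAS buffer) treat the
        // contents as model weights, e.g. dequantize them at set_tensor time
        ggml_backend_buffer_set_usage(buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);

        // hand each weight tensor an address inside the buffer
        ggml_tallocr talloc = ggml_tallocr_new(buf);
        for (ggml_tensor * w : weights) {
            ggml_tallocr_alloc(&talloc, w);
        }
        return buf;
    }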