ggml: rewrite ggml-blas
Signed-off-by: Aaron Teo <aaron.teo1@ibm.com>
This commit is contained in:
parent 61ee32dec3
commit 9a14a094ac

@@ -2,9 +2,13 @@
#include "ggml-blas.h"
#include "ggml-backend-impl.h"

#include "ggml.h"
#include "ggml-backend.h"

#include <future>
#include <vector>
#include <cstring>
#include <cstdint>

#if defined(GGML_BLAS_USE_ACCELERATE)
#   include <Accelerate/Accelerate.h>

@@ -18,15 +22,234 @@
#   include <cblas.h>
#endif

struct ggml_backend_blas_context {
    int n_threads = GGML_DEFAULT_N_THREADS;
    std::unique_ptr<char[]> work_data;
    size_t work_size = 0;
#ifndef GGML_USE_OPENMP
    std::vector<std::future<void>> tasks;
#endif

struct ggml_backend_blas_buffer {
    void * data; // dequantized data
    size_t size;
};

// BLAS backend - buffer

static void ggml_backend_blas_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    GGML_ASSERT(buffer);

    ggml_backend_blas_buffer * buf_ctx = (ggml_backend_blas_buffer *)buffer->context;
    ggml_aligned_free(buf_ctx->data, buf_ctx->size);
    ggml_aligned_free(buffer->context, buffer->size);
}

static void * ggml_backend_blas_buffer_get_base(ggml_backend_buffer_t buffer) {
    GGML_ASSERT(buffer);
    uintptr_t data = (uintptr_t)buffer->context;

    // align the buffer
    if (data % TENSOR_ALIGNMENT != 0) {
        data = GGML_PAD(data, TENSOR_ALIGNMENT);
    }

    return (void *)data;
}
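
// Illustration (assumed values, for reading the alignment logic above): with
// TENSOR_ALIGNMENT == 32, a context pointer of 0x1004 fails the modulo check
// and GGML_PAD rounds it up to the next multiple of the alignment:
//
//     0x1004 % 32 != 0  ->  GGML_PAD(0x1004, 32) == 0x1020
//
// so the base reported to the allocator may sit up to TENSOR_ALIGNMENT-1
// bytes past the start of the raw allocation.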

static enum ggml_status ggml_backend_blas_buffer_init_tensor(
        ggml_backend_buffer_t buffer,
        ggml_tensor * tensor) {

    if (tensor->view_src != NULL) {
        assert(tensor->view_src->buffer->buft == buffer->buft);
        return GGML_STATUS_SUCCESS;
    }

    void * ctx = buffer->context;

    if (tensor->type != GGML_TYPE_F32) {
        ggml_backend_blas_buffer * extra = new ggml_backend_blas_buffer;
        extra->data = ggml_aligned_malloc(ggml_nelements(tensor) * sizeof(float)); // sizeof(float) because dequantized
        extra->size = ggml_nelements(tensor) * sizeof(float);

        tensor->extra = extra;
    }

    return GGML_STATUS_SUCCESS;
    GGML_UNUSED(ctx);
}
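
// Sizing illustration for the shadow buffer above: it is sized for the
// dequantized copy, not the quantized payload. For example, a 4096 x 4096
// quantized weight has ggml_nelements() == 16,777,216, so extra->size ==
// 16,777,216 * sizeof(float) == 64 MiB, however compact the quantized data is.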

static void ggml_backend_blas_buffer_memset_tensor(
        ggml_backend_buffer_t buffer,
        ggml_tensor * tensor,
        uint8_t value,
        size_t offset,
        size_t size) {

    GGML_ASSERT(tensor);
    memset((char *)tensor->data + offset, value, size);

    GGML_UNUSED(buffer);
}

static void ggml_backend_blas_buffer_set_tensor(
        ggml_backend_buffer_t buffer,
        ggml_tensor * tensor,
        const void * data,
        size_t offset,
        size_t size) {

    GGML_ASSERT(tensor);
    memcpy((char *)tensor->data + offset, data, size);

    // ggml_backend_blas_buffer_context * buf_ctx = (ggml_backend_blas_buffer_context *)buffer->buft->context;
    ggml_backend_blas_buffer * extra = (ggml_backend_blas_buffer *)tensor->extra;

    const int64_t ne00 = tensor->ne[0];
    const int64_t ne01 = tensor->ne[1];
    const int64_t ne02 = tensor->ne[2];
    const int64_t ne03 = tensor->ne[3];

    const int64_t nb00 = tensor->nb[0];
    const int64_t nb01 = tensor->nb[1];
    const int64_t nb02 = tensor->nb[2];
    const int64_t nb03 = tensor->nb[3];

    const int64_t ne_plane = ne01*ne00;

    if (tensor->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS
            && tensor->type != GGML_TYPE_F32
            && ggml_get_type_traits(tensor->type)->to_float != NULL) {

        const auto * type_traits = ggml_get_type_traits(tensor->type);
        ggml_to_float_t const to_float = type_traits->to_float;
        GGML_ASSERT(to_float != nullptr);

        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                const void  * x      = (char *)tensor->data + i02*nb02 + i03*nb03;
                float * const wplane = (float *)extra->data + i02*ne_plane + i03*ne02*ne_plane;

                const int min_cols_per_thread = 4096;
                const int min_rows_per_thread = std::max((int)(min_cols_per_thread / ne00), 1);
                const int n_threads = std::max(std::min(8, (int)(ne01 / min_rows_per_thread)), 1);

#pragma omp parallel for num_threads(n_threads)
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    to_float((const char *)x + i01*nb01, wplane + i01*ne00, ne00);
                }
            }
        }
    }

    GGML_UNUSED(buffer);
}
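
// With this set_tensor, weight tensors are dequantized once at upload time
// into tensor->extra, rather than once per matmul into the backend's work
// buffer. Unlike the conversion loop in ggml_backend_blas_mul_mat below, the
// "#pragma omp parallel for" here is not guarded by GGML_USE_OPENMP (without
// OpenMP the pragma is simply ignored and the rows are converted on the
// calling thread), and the thread count is capped at a hardcoded 8 instead of
// ctx->n_threads.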

static void ggml_backend_blas_buffer_get_tensor(
        ggml_backend_buffer_t buffer,
        const ggml_tensor * tensor,
        void * data,
        size_t offset,
        size_t size) {

    GGML_ASSERT(tensor);
    memcpy(data, (const char *)tensor->data + offset, size);

    GGML_UNUSED(buffer);
}

static void ggml_backend_blas_buffer_clear(
        ggml_backend_buffer_t buffer,
        uint8_t value) {

    GGML_ASSERT(buffer);
    memset(buffer->context, value, buffer->size);
}

static const ggml_backend_buffer_i ggml_backend_blas_buffer_i = {
    /* .free_buffer   = */ ggml_backend_blas_buffer_free_buffer,
    /* .get_base      = */ ggml_backend_blas_buffer_get_base,
    /* .init_tensor   = */ ggml_backend_blas_buffer_init_tensor,
    /* .memset_tensor = */ ggml_backend_blas_buffer_memset_tensor,
    /* .set_tensor    = */ ggml_backend_blas_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_blas_buffer_get_tensor,
    /* .cpy_tensor    = */ NULL,
    /* .clear         = */ ggml_backend_blas_buffer_clear,
    /* .reset         = */ NULL,
};

// BLAS backend buffer type

static const char * ggml_backend_blas_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return "BLAS";
    GGML_UNUSED(buft);
}

static ggml_backend_buffer_t ggml_backend_blas_buffer_type_alloc_buffer(
        ggml_backend_buffer_type_t buft,
        size_t size) {

    // TODO: contains dequantized data
    void * data = ggml_aligned_malloc(size);
    if (data == nullptr) {
        GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu\n", __func__, size);
        return NULL;
    }

    return ggml_backend_buffer_init(buft, ggml_backend_blas_buffer_i, data, size);
}

static size_t ggml_backend_blas_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return TENSOR_ALIGNMENT;
    GGML_UNUSED(buft);
}

static bool ggml_backend_blas_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
    return true;
    GGML_UNUSED(buft);
}

static ggml_backend_buffer_type_t ggml_backend_blas_buffer_type(void) {
    static ggml_backend_buffer_type ggml_backend_blas_buffer_type = {
        /* .iface   = */ {
            /* .get_name       = */ ggml_backend_blas_buffer_type_get_name,
            /* .alloc_buffer   = */ ggml_backend_blas_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_blas_buffer_type_get_alignment,
            /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
            /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
            /* .is_host        = */ ggml_backend_blas_buffer_type_is_host,
        },
        /* .device  = */ NULL,
        /* .context = */ NULL,
    };

    return &ggml_backend_blas_buffer_type;
}
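
// Usage sketch (assumes the public ggml-backend buffer API):
//
//     ggml_backend_buffer_type_t buft = ggml_backend_blas_buffer_type();
//     ggml_backend_buffer_t      buf  = ggml_backend_buft_alloc_buffer(buft, 16u*1024*1024);
//     // tensors placed in buf get their F32 shadow allocated by init_tensor
//     ggml_backend_buffer_free(buf);
//
// The buffer type object is a function-local static, so all BLAS buffers
// share one interface/device entry.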

struct ggml_backend_blas_context {
    int device;

    int n_threads;
    ggml_threadpool_t threadpool;

    uint8_t * work_data;
    size_t    work_size;

    ggml_abort_callback abort_callback;
    void *              abort_callback_data;

    // std::unique_ptr<char[]> work_data;
    // size_t work_size = 0;
    // #ifndef GGML_USE_OPENMP
    //     std::vector<std::future<void>> tasks;
    // #endif
    // ggml_cgraph * gf;
};
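
// The rewritten context mirrors the CPU backend's layout (threadpool, raw
// work buffer, abort callback); the unique_ptr/std::future members of the
// previous context survive only as the commented-out reference below.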

// struct ggml_backend_blas_context {
//     int n_threads = GGML_DEFAULT_N_THREADS;
//     std::unique_ptr<char[]> work_data;
//     size_t work_size = 0;
// #ifndef GGML_USE_OPENMP
//     std::vector<std::future<void>> tasks;
// #endif
// };

static void ggml_backend_blas_mul_mat(
        ggml_backend_blas_context * ctx,
        ggml_tensor * dst) {

@@ -60,63 +283,7 @@ static void ggml_backend_blas_mul_mat(
    const int64_t ne_plane      = ne01*ne00;
    const size_t  desired_wsize = type == GGML_TYPE_F32 ? 0 : ne03*ne02*ne_plane*sizeof(float);

    if (ctx->work_size < desired_wsize) {
        ctx->work_data.reset(new char[desired_wsize]);
        ctx->work_size = desired_wsize;
    }
    void * wdata = ctx->work_data.get();

    // convert src0 to float
    if (type != GGML_TYPE_F32) {
        const auto * type_traits = ggml_get_type_traits(type);
        ggml_to_float_t const to_float = type_traits->to_float;

        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                const void  * x      = (char *)  src0->data + i02*nb02 + i03*nb03;
                float * const wplane = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane;

                const int min_cols_per_thread = 4096;
                const int min_rows_per_thread = std::max((int)(min_cols_per_thread/ne00), 1);
                const int n_threads = std::max(std::min(ctx->n_threads, (int)(ne01/min_rows_per_thread)), 1);

#ifdef GGML_USE_OPENMP
                #pragma omp parallel for num_threads(n_threads)
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00);
                }
#else
                for (int i = 1; i < n_threads; i++) {
                    const int64_t start = (i + 0)*ne01/n_threads;
                    const int64_t end   = (i + 1)*ne01/n_threads;
                    if (start < end) {
                        ctx->tasks.push_back(std::async(std::launch::async, [=]() {
                            for (int64_t i01 = start; i01 < end; i01++) {
                                to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00);
                            }
                        }));
                    }
                }
                {
                    // reuse the current thread for the first task
                    const int64_t start = 0;
                    const int64_t end   = ne01/n_threads;
                    for (int64_t i01 = start; i01 < end; i01++) {
                        to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00);
                    }
                }
#endif
            }
        }

#ifndef GGML_USE_OPENMP
        // wait for all tasks to finish
        for (auto & task : ctx->tasks) {
            task.get();
        }
        ctx->tasks.clear();
#endif
    }
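
    // With weights pre-dequantized into tensor->extra by
    // ggml_backend_blas_buffer_set_tensor(), the per-call conversion into
    // ctx->work_data above becomes redundant for weight tensors: the sgemm
    // below reads the F32 plane straight from extra->data.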
    const ggml_backend_blas_buffer * extra = (ggml_backend_blas_buffer *)src0->extra;

#if defined(OPENBLAS_VERSION)
    openblas_set_num_threads(ctx->n_threads);

@@ -139,210 +306,20 @@ static void ggml_backend_blas_mul_mat(
            const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13);
            float       * d = (float *) ((char *)  dst->data + i12*nb2  + i13*nb3);

            // switch to dequantized F32 data
            if (type != GGML_TYPE_F32) {
                x = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane;
                x = (float *)extra->data + i02*ne_plane + i03*ne02*ne_plane;
            }

            cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                        ne1, ne01, ne10,
                        1.0f, y, ne10,
                              x, ne00,
                        0.0f, d, ne01);
                        0.0f, d, nb1/nb0);
        }
    }
}
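
// Shape mapping for the sgemm call above (row-major throughout): it computes
// d[ne1 x ne01] = y[ne1 x ne10] * x^T, where x is [ne01 x ne00] and
// ne10 == ne00 is the shared K dimension. Passing nb1/nb0 as ldc instead of
// ne01 expresses the leading dimension of dst in elements, which also covers
// destinations whose rows are padded.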

static void ggml_backend_blas_mul_mat_id(
        ggml_backend_blas_context * ctx,
        ggml_tensor * dst) {

    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];
    const ggml_tensor * ids  = dst->src[2];

    GGML_TENSOR_BINARY_OP_LOCALS

    const ggml_type type = src0->type;

    GGML_ASSERT(nb00 == ggml_type_size(type));
    GGML_ASSERT(nb10 == ggml_type_size(src1->type));

    GGML_ASSERT(nb0 == sizeof(float));
    GGML_ASSERT(nb0 <= nb1);
    GGML_ASSERT(nb1 <= nb2);
    GGML_ASSERT(nb2 <= nb3);

    GGML_ASSERT(ne03 == 1);
    GGML_ASSERT(ne13 == 1);
    GGML_ASSERT(ne3  == 1);

    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(ids->type  == GGML_TYPE_I32);

    const int64_t ne_plane      = ne01*ne00;
    const size_t  desired_wsize = type == GGML_TYPE_F32
        ? 0
        : ne03*ne02*ne_plane*sizeof(float);

    if (ctx->work_size < desired_wsize) {
        ctx->work_data.reset(new char[desired_wsize]);
        ctx->work_size = desired_wsize;
    }
    void * wdata = ctx->work_data.get();

    // convert src0 to float
    if (type != GGML_TYPE_F32) {
        const auto * type_traits = ggml_get_type_traits(type);
        ggml_to_float_t const to_float = type_traits->to_float;

        for (int64_t i03 = 0; i03 < ne03; i03++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                const void  * x      = (char *)  src0->data + i02*nb02 + i03*nb03;
                float * const wplane = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane;

                const int min_cols_per_thread = 4096;
                const int min_rows_per_thread = std::max((int)(min_cols_per_thread/ne00), 1);
                const int n_threads = std::max(std::min(ctx->n_threads, (int)(ne01/min_rows_per_thread)), 1);

#ifdef GGML_USE_OPENMP
                #pragma omp parallel for num_threads(n_threads)
                for (int64_t i01 = 0; i01 < ne01; i01++) {
                    to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00);
                }
#else
                for (int i = 1; i < n_threads; i++) {
                    const int64_t start = (i + 0)*ne01/n_threads;
                    const int64_t end   = (i + 1)*ne01/n_threads;
                    if (start < end) {
                        ctx->tasks.push_back(std::async(std::launch::async, [=]() {
                            for (int64_t i01 = start; i01 < end; i01++) {
                                to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00);
                            }
                        }));
                    }
                }
                {
                    // reuse the current thread for the first task
                    const int64_t start = 0;
                    const int64_t end   = ne01/n_threads;
                    for (int64_t i01 = start; i01 < end; i01++) {
                        to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00);
                    }
                }
#endif
            }
        }

#ifndef GGML_USE_OPENMP
        // wait for all tasks to finish
        for (auto & task : ctx->tasks) {
            task.get();
        }
        ctx->tasks.clear();
#endif
    }

#if defined(OPENBLAS_VERSION)
    openblas_set_num_threads(ctx->n_threads);
#endif

#if defined(GGML_BLAS_USE_BLIS)
    bli_thread_set_num_threads(ctx->n_threads);
#endif

#if defined(GGML_BLAS_USE_NVPL)
    nvpl_blas_set_num_threads(ctx->n_threads);
#endif

    const int n_ids    = ids->ne[0];
    const int n_tokens = ids->ne[1];

    for (int t = 0; t < n_tokens; ++t) {
        for (int e = 0; e < n_ids; ++e) {
            const int32_t expert = *(const int32_t *) ((const char *) ids->data + e*ids->nb[0] + t*ids->nb[1]);
            GGML_ASSERT(expert >= 0 && expert < ne02);

            const int e_src1 = e % ne11;

            const float * a = (float *) ((char *) src0->data + expert*nb02);
            const float * b = (float *) ((char *) src1->data + e_src1*nb11 + t*nb12);
            float       * d = (float *) ((char *)  dst->data + e*nb1 + t*nb2);

            if (type != GGML_TYPE_F32) {
                a = (float *) wdata + expert*ne_plane;
            }

            cblas_sgemv(CblasRowMajor, CblasNoTrans,
                        ne01, ne00,
                        1.0f, a, ne00,
                              b, 1,
                        0.0f, d, 1);
        }
    }
}
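
// Per (token, expert) pair this is a matrix-vector product: sgemv computes
// d[ne01] = A[ne01 x ne00] * b[ne00], where A is the selected expert's
// (possibly dequantized) weight plane and b is one row of src1.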

static void ggml_backend_blas_out_prod(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) {
    const struct ggml_tensor * src0 = dst->src[0];
    const struct ggml_tensor * src1 = dst->src[1];

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(ne0  == ne00);
    GGML_ASSERT(ne1  == ne10);
    GGML_ASSERT(ne2  == ne02);
    GGML_ASSERT(ne02 == ne12);
    GGML_ASSERT(ne3  == ne13);
    GGML_ASSERT(ne03 == ne13);

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == sizeof(float));

    // dst cannot be transposed or permuted
    GGML_ASSERT(nb0 == sizeof(float));
    // GGML_ASSERT(nb0 <= nb1);
    // GGML_ASSERT(nb1 <= nb2);
    // GGML_ASSERT(nb2 <= nb3);

    // Arguments to ggml_compute_forward_out_prod (expressed as major,minor)
    // src0: (k,n)
    // src1: (k,m)
    // dst:  (m,n)
    //
    // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f)
    // Also expressed as (major,minor)
    // a: (m,k): so src1 transposed
    // b: (k,n): so src0
    // c: (m,n)
    //
    // However, if ggml_is_transposed(src1) is true, then
    // src1->data already contains a transposed version, so sgemm mustn't
    // transpose it further.
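    //
    // Worked example (assumed sizes): with src0 = (k,n) = (5,4) and
    // src1 = (k,m) = (5,2) not transposed in memory, a is passed with
    // CblasTrans and lda = m = 2, so sgemm reads a as the (m,k) = (2,5)
    // operand and writes c as (m,n) = (2,4) with ldc = n = 4.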

    int n = src0->ne[0];
    int k = src0->ne[1];
    int m = src1->ne[0];

    CBLAS_TRANSPOSE transposeA;
    int lda;

    if (!ggml_is_transposed(src1)) {
        transposeA = CblasTrans;
        lda = m;
    } else {
        transposeA = CblasNoTrans;
        lda = k;
    }

    float * a = (float *) ((char *) src1->data);
    float * b = (float *) ((char *) src0->data);
    float * c = (float *) ((char *)  dst->data);

    cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n);

    GGML_UNUSED(ctx);
}

// backend interface

static const char * ggml_backend_blas_get_name(ggml_backend_t backend) {
    return "BLAS";

@@ -352,35 +329,30 @@ static const char * ggml_backend_blas_get_name(ggml_backend_t backend) {

static void ggml_backend_blas_free(ggml_backend_t backend) {
    ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context;

    delete[] ctx->work_data;
    delete ctx;
    delete backend;
}

static enum ggml_status ggml_backend_blas_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
static ggml_status ggml_backend_blas_graph_compute(
        ggml_backend_t backend,
        ggml_cgraph * cgraph) {

    ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context;

    for (int i = 0; i < cgraph->n_nodes; i++) {
        struct ggml_tensor * node = cgraph->nodes[i];
        ggml_tensor * node = cgraph->nodes[i];

        if (ggml_op_is_empty(node->op)) {
            continue;
        }

        switch (node->op) {
            case GGML_OP_MUL_MAT:
                ggml_backend_blas_mul_mat(ctx, node);
                break;

            case GGML_OP_MUL_MAT_ID:
                ggml_backend_blas_mul_mat_id(ctx, node);
                break;

            case GGML_OP_OUT_PROD:
                ggml_backend_blas_out_prod(ctx, node);
                break;

            case GGML_OP_NONE:
            case GGML_OP_RESHAPE:
            case GGML_OP_VIEW:
            case GGML_OP_PERMUTE:
            case GGML_OP_TRANSPOSE:
                break;
                {
                    ggml_backend_blas_mul_mat(ctx, node);
                } break;

            default:
                GGML_ABORT("%s: unsupported op %s\n", __func__, ggml_op_desc(node));

@@ -392,21 +364,21 @@ static enum ggml_status ggml_backend_blas_graph_compute(ggml_backend_t backend,
    GGML_UNUSED(backend);
}

static struct ggml_backend_i blas_backend_i = {
    /* .get_name           = */ ggml_backend_blas_get_name,
    /* .free               = */ ggml_backend_blas_free,
    /* .set_tensor_async   = */ NULL,
    /* .get_tensor_async   = */ NULL,
    /* .cpy_tensor_async   = */ NULL,
    /* .synchronize        = */ NULL,
    /* .graph_plan_create  = */ NULL,
    /* .graph_plan_free    = */ NULL,
    /* .graph_plan_update  = */ NULL,
    /* .graph_plan_compute = */ NULL,
    /* .graph_compute      = */ ggml_backend_blas_graph_compute,
    /* .event_record       = */ NULL,
    /* .event_wait         = */ NULL,
    /* .graph_optimize     = */ NULL,
static const ggml_backend_i ggml_backend_blas_i = {
    /* .get_name           = */ ggml_backend_blas_get_name,
    /* .free               = */ ggml_backend_blas_free,
    /* .set_tensor_async   = */ NULL,
    /* .get_tensor_async   = */ NULL,
    /* .cpy_tensor_async   = */ NULL,
    /* .synchronize        = */ NULL,
    /* .graph_plan_create  = */ NULL,
    /* .graph_plan_free    = */ NULL,
    /* .graph_plan_update  = */ NULL,
    /* .graph_plan_compute = */ NULL,
    /* .graph_compute      = */ ggml_backend_blas_graph_compute,
    /* .event_record       = */ NULL,
    /* .event_wait         = */ NULL,
    /* .graph_optimize     = */ NULL,
};

static ggml_guid_t ggml_backend_blas_guid(void) {

@@ -416,39 +388,49 @@ static ggml_guid_t ggml_backend_blas_guid(void) {

ggml_backend_t ggml_backend_blas_init(void) {
    ggml_backend_blas_context * ctx = new ggml_backend_blas_context;
    if (ctx == NULL) {
        return NULL;
    }

    ggml_backend_t backend = new ggml_backend {
    ctx->n_threads           = GGML_DEFAULT_N_THREADS;
    ctx->threadpool          = NULL;
    ctx->work_data           = nullptr;
    ctx->work_size           = 0;
    ctx->abort_callback      = NULL;
    ctx->abort_callback_data = nullptr;

    ggml_backend_t blas_backend = new ggml_backend {
        /* .guid    = */ ggml_backend_blas_guid(),
        /* .iface   = */ blas_backend_i,
        /* .iface   = */ ggml_backend_blas_i,
        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_blas_reg(), 0),
        /* .context = */ ctx,
    };

#if defined(OPENBLAS_VERSION) && defined(GGML_USE_OPENMP)
    if (openblas_get_parallel() != OPENBLAS_OPENMP) {
        GGML_LOG_DEBUG("%s: warning: ggml is using OpenMP, but OpenBLAS was compiled without OpenMP support\n", __func__);
    if (blas_backend == NULL) {
        delete ctx;
        return NULL;
    }
#endif

#if defined(BLIS_ENABLE_CBLAS) && defined(GGML_USE_OPENMP) && !defined(BLIS_ENABLE_OPENMP)
    GGML_LOG_DEBUG("%s: warning: ggml is using OpenMP, but BLIS was compiled without OpenMP support\n", __func__);
#endif

    return backend;
    return blas_backend;
}

bool ggml_backend_is_blas(ggml_backend_t backend) {
    return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_blas_guid());
}

void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads) {
    GGML_ASSERT(ggml_backend_is_blas(backend_blas));
void ggml_backend_blas_set_n_threads(ggml_backend_t backend, int n_threads) {
    // TODO: IMPL
    GGML_ASSERT(ggml_backend_is_blas(backend));

    ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend_blas->context;
    ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context;
    ctx->n_threads = n_threads;
}
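
// Usage sketch (entry points declared in ggml-blas.h; error handling omitted):
//
//     ggml_backend_t be = ggml_backend_blas_init();
//     if (be && ggml_backend_is_blas(be)) {
//         ggml_backend_blas_set_n_threads(be, 8);
//     }
//     // ... build a graph, then ggml_backend_graph_compute(be, gf) ...
//     ggml_backend_free(be); // dispatches to ggml_backend_blas_free()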

// device interface
// TODO: maybe implement description?
struct ggml_backend_blas_device_context {
    int blas_device;
    int blas_device_ref_count;
};

static const char * ggml_backend_blas_device_get_name(ggml_backend_dev_t dev) {
    return "BLAS";

@@ -475,7 +457,6 @@ static const char * ggml_backend_blas_device_get_description(ggml_backend_dev_t
}

static void ggml_backend_blas_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
    // TODO
    *free  = 0;
    *total = 0;

@@ -488,7 +469,7 @@ static enum ggml_backend_dev_type ggml_backend_blas_device_get_type(ggml_backend
    GGML_UNUSED(dev);
}

static void ggml_backend_blas_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
static void ggml_backend_blas_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) {
    props->name        = ggml_backend_blas_device_get_name(dev);
    props->description = ggml_backend_blas_device_get_description(dev);
    props->type        = ggml_backend_blas_device_get_type(dev);

@@ -496,7 +477,7 @@ static void ggml_backend_blas_device_get_props(ggml_backend_dev_t dev, struct gg
    props->caps = {
        /* .async                = */ false,
        /* .host_buffer          = */ false,
        /* .buffer_from_host_ptr = */ true,
        /* .buffer_from_host_ptr = */ false,
        /* .events               = */ false,
    };
}

@@ -509,40 +490,25 @@ static ggml_backend_t ggml_backend_blas_device_init_backend(ggml_backend_dev_t d
}

static ggml_backend_buffer_type_t ggml_backend_blas_device_get_buffer_type(ggml_backend_dev_t dev) {
    return ggml_backend_cpu_buffer_type();
    return ggml_backend_blas_buffer_type();

    GGML_UNUSED(dev);
}

static ggml_backend_buffer_t ggml_backend_blas_device_buffer_from_host_ptr(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) {
    return ggml_backend_cpu_buffer_from_ptr(ptr, size);
static bool ggml_backend_blas_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];

    GGML_UNUSED(dev);
    GGML_UNUSED(max_tensor_size);
}

static bool ggml_backend_blas_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
    const struct ggml_tensor * src0 = op->src[0];
    const struct ggml_tensor * src1 = op->src[1];

    switch (op->op) {
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
        case GGML_OP_TRANSPOSE:
            return true;
    if (ggml_op_is_empty(dst->op)) {
        return true;
    }

    switch (dst->op) {
        case GGML_OP_MUL_MAT:
        {
            // BLAS usually is only faster for large matrices
            const struct ggml_tensor * src0 = op->src[0];
            const struct ggml_tensor * src1 = op->src[1];

            const int64_t ne10 = src1->ne[0];

            const int64_t ne0 = op->ne[0];
            const int64_t ne1 = op->ne[1];
            const int64_t ne0 = dst->ne[0];
            const int64_t ne1 = dst->ne[1];

            // TODO: find the optimal value
            const int64_t min_batch = 32;

@@ -554,29 +520,8 @@ static bool ggml_backend_blas_device_supports_op(ggml_backend_dev_t dev, const s
                (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL);
        }

        case GGML_OP_MUL_MAT_ID:
        {
            const ggml_tensor * src0 = op->src[0];
            const ggml_tensor * src1 = op->src[1];

            return ggml_is_contiguous(src0) &&
                   ggml_is_contiguous(src1) &&
                   src1->type == GGML_TYPE_F32 &&
                   (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL);
        }

        case GGML_OP_OUT_PROD:
            return op->src[0]->type == GGML_TYPE_F32 &&
                   op->src[1]->type == GGML_TYPE_F32 &&
                   ggml_is_matrix(src0) &&
                   ggml_is_matrix(src1) &&
                   ggml_is_contiguous(src0) &&
                   (ggml_is_contiguous(src1) || ggml_is_transposed(src1)) &&
                   (src0->type == GGML_TYPE_F32 || ggml_get_type_traits(src0->type)->to_float != NULL);

        default:
            return false;

    }

    GGML_UNUSED(dev);

@@ -588,7 +533,7 @@ static bool ggml_backend_blas_device_supports_buft(ggml_backend_dev_t dev, ggml_
    GGML_UNUSED(dev);
}

static const struct ggml_backend_device_i ggml_backend_blas_device_i = {
static const ggml_backend_device_i ggml_backend_blas_device_i = {
    /* .get_name        = */ ggml_backend_blas_device_get_name,
    /* .get_description = */ ggml_backend_blas_device_get_description,
    /* .get_memory      = */ ggml_backend_blas_device_get_memory,

@@ -597,7 +542,7 @@ static const struct ggml_backend_device_i ggml_backend_blas_device_i = {
    /* .init_backend         = */ ggml_backend_blas_device_init_backend,
    /* .get_buffer_type      = */ ggml_backend_blas_device_get_buffer_type,
    /* .get_host_buffer_type = */ NULL,
    /* .buffer_from_host_ptr = */ ggml_backend_blas_device_buffer_from_host_ptr,
    /* .buffer_from_host_ptr = */ NULL,
    /* .supports_op          = */ ggml_backend_blas_device_supports_op,
    /* .supports_buft        = */ ggml_backend_blas_device_supports_buft,
    /* .offload_op           = */ NULL,

@@ -606,7 +551,7 @@ static const struct ggml_backend_device_i ggml_backend_blas_device_i = {
    /* .event_synchronize = */ NULL,
};

// backend reg interface
// BLAS backend - backend (reg)

static const char * ggml_backend_blas_reg_get_name(ggml_backend_reg_t reg) {
    return "BLAS";

@@ -623,29 +568,27 @@ static size_t ggml_backend_blas_reg_get_device_count(ggml_backend_reg_t reg) {
static ggml_backend_dev_t ggml_backend_blas_reg_get_device(ggml_backend_reg_t reg, size_t index) {
    GGML_ASSERT(index == 0);

    static ggml_backend_blas_device_context ctx;
    static ggml_backend_device ggml_backend_blas_device = {
        /* .iface   = */ ggml_backend_blas_device_i,
        /* .reg     = */ reg,
        /* .context = */ nullptr,
        /* .context = */ &ctx,
    };

    return &ggml_backend_blas_device;

    GGML_UNUSED(reg);
    GGML_UNUSED(index);
}

static void * ggml_backend_blas_get_proc_address(ggml_backend_reg_t reg, const char * name) {
    if (std::strcmp(name, "ggml_backend_set_n_threads") == 0) {
    if (strcmp(name, "ggml_backend_set_n_threads") == 0) {
        return (void *)ggml_backend_blas_set_n_threads;
    }
    return NULL;

    return nullptr;

    GGML_UNUSED(reg);
    GGML_UNUSED(name);
}

static const struct ggml_backend_reg_i ggml_backend_blas_reg_i = {
static const ggml_backend_reg_i ggml_backend_blas_reg_i = {
    /* .get_name         = */ ggml_backend_blas_reg_get_name,
    /* .get_device_count = */ ggml_backend_blas_reg_get_device_count,
    /* .get_device       = */ ggml_backend_blas_reg_get_device,

@@ -653,7 +596,7 @@ static const struct ggml_backend_reg_i ggml_backend_blas_reg_i = {
};

ggml_backend_reg_t ggml_backend_blas_reg(void) {
    static struct ggml_backend_reg ggml_backend_blas_reg = {
    static ggml_backend_reg ggml_backend_blas_reg = {
        /* .api_version = */ GGML_BACKEND_API_VERSION,
        /* .iface       = */ ggml_backend_blas_reg_i,
        /* .context     = */ NULL,