feat: fix llama-bench (#7)
* remove unused functions
* wip
* init from the last device
* move init into constructor
* wip
* add static assert to device table
* make kDeviceCaps constexpr
* get free and total memory
* add optimize flag for qnn backend
This commit is contained in:
parent 8ad86dc703
commit e6dbdacc32
```diff
@@ -9,28 +9,17 @@ extern "C" {
 #endif
 
 #define GGML_QNN_NAME "QNN"
-#define GGML_QNN_MAX_DEVICES 3
+#define GGML_QNN_MAX_DEVICES QNN_BACKEND_COUNT
 
 enum QNNBackend {
     QNN_BACKEND_CPU = 0,
     QNN_BACKEND_GPU,
     QNN_BACKEND_NPU,
-    QNN_BACKEND_GGML, //"fake" QNN backend, used for compare performance between
-                      // QNN and original GGML
+    QNN_BACKEND_COUNT,
 };
 
-/**
- *
- * @param index 0: QNN_BACKEND_CPU 1: QNN_BACKEND_GPU 2:QNN_BACKEND_NPU
- * @param extend_lib_search_path extened lib search path for searching QNN backend dynamic libs
- * @return
- */
-GGML_API ggml_backend_t ggml_backend_qnn_init(size_t index, const char *extend_lib_search_path);
-
 GGML_API bool ggml_backend_is_qnn(ggml_backend_t backend);
 
-GGML_API int ggml_backend_qnn_get_device_count(void);
-
 GGML_API ggml_backend_reg_t ggml_backend_qnn_reg(void);
 
 #ifdef __cplusplus
```
```diff
@@ -969,6 +969,7 @@ if (GGML_QNN)
     message("QNN_SDK_PATH: ${GGML_QNN_SDK_PATH}")
     file(GLOB GGML_SOURCES_QNN "ggml-qnn/*.cpp")
     list(APPEND GGML_SOURCES_QNN "ggml-qnn.cpp")
+    set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3")
    set(GGML_HEADERS_QNN ../include/ggml-qnn.h)
     set(QNN_INC_PATH ${GGML_QNN_SDK_PATH}/include/QNN)
     set(GGML_EXTRA_INCLUDES ${GGML_EXTRA_INCLUDES} ${QNN_INC_PATH} "ggml-qnn")
```
```diff
@@ -53,39 +53,50 @@ struct qnn_device_caps {
     enum ggml_backend_dev_type type;
 
     // TODO: should get this caps from device
-    std::unordered_set<ggml_type> supported_types;
+    uint64_t supported_types;
 };
 
-const qnn_device_caps kDeviceCaps[GGML_QNN_MAX_DEVICES]{
-    {// https://docs.qualcomm.com/bundle/publicresource/topics/80-63442-50/CpuOpDefSupplement.html#matmul
-     "qnn-cpu",
-     "Qualcomm Kryo CPU",
-     "libQnnCpu.so",
-     GGML_BACKEND_DEVICE_TYPE_CPU,
-     {GGML_TYPE_F32, GGML_TYPE_I8}},
-    {// https://docs.qualcomm.com/bundle/publicresource/topics/80-63442-50/GpuOpDefSupplement.html#matmul
-     "qnn-gpu",
-     "Qualcomm Adreno GPU",
-     "libQnnGpu.so",
-     GGML_BACKEND_DEVICE_TYPE_GPU,
-     {GGML_TYPE_F32, GGML_TYPE_F16}},
-    {// https://docs.qualcomm.com/bundle/publicresource/topics/80-63442-50/HtpOpDefSupplement.html#matmul
-     "qnn-npu",
-     "Qualcomm NPU",
-     "libQnnHtp.so",
-     GGML_BACKEND_DEVICE_TYPE_GPU,
-     {GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_I16, GGML_TYPE_I8}},
+constexpr const qnn_device_caps kDeviceCaps[] = {
+    {
+        // https://docs.qualcomm.com/bundle/publicresource/topics/80-63442-50/CpuOpDefSupplement.html#matmul
+        "qnn-cpu",
+        "Qualcomm Kryo CPU",
+        "libQnnCpu.so",
+        GGML_BACKEND_DEVICE_TYPE_CPU,
+        (1 << GGML_TYPE_I8) | (1 << GGML_TYPE_F32),
+    },
+    {
+        // https://docs.qualcomm.com/bundle/publicresource/topics/80-63442-50/GpuOpDefSupplement.html#matmul
+        "qnn-gpu",
+        "Qualcomm Adreno GPU",
+        "libQnnGpu.so",
+        GGML_BACKEND_DEVICE_TYPE_GPU,
+        (1 << GGML_TYPE_F32) | (1 << GGML_TYPE_F16),
+    },
+    {
+        // https://docs.qualcomm.com/bundle/publicresource/topics/80-63442-50/HtpOpDefSupplement.html#matmul
+        "qnn-npu",
+        "Qualcomm NPU",
+        "libQnnHtp.so",
+        GGML_BACKEND_DEVICE_TYPE_ACCEL,
+        (1 << GGML_TYPE_F32) | (1 << GGML_TYPE_F16) | (1 << GGML_TYPE_I16) | (1 << GGML_TYPE_I8),
+    },
 };
 
+static_assert(sizeof(kDeviceCaps) / sizeof(kDeviceCaps[0]) == GGML_QNN_MAX_DEVICES,
+              "The number of qnn devices should be equal to GGML_QNN_MAX_DEVICES");
+static_assert(kDeviceCaps[QNN_BACKEND_NPU].type == GGML_BACKEND_DEVICE_TYPE_ACCEL,
+              "The NPU device should be an accelerator device");
+
 class ggml_backend_qnn_buffer_context {
 public:
     ggml_backend_qnn_buffer_context(QNNBackend device, std::shared_ptr<qnn::qnn_instance> instance, size_t size)
         : _instance(instance), _name(QNN_BACKEND_NAME + std::to_string(device)) {
 
         // TODO: fix this for other platforms
         size_t size_page = sysconf(_SC_PAGESIZE);
 
-        // TODO: for qnn npu, a better way here is to reuse the buffer allocated by qnn rpc, will save an extra copy
+        // TODO: for qnn npu, a better way here is to reuse the buffer allocated by
+        // qnn rpc, will save an extra copy
         _buffer = qnn::align_alloc(size_page, size);
 
         if (!_buffer) {
```
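The device table now encodes supported tensor types as a `uint64_t` bitmask (one bit per `ggml_type` enum value) instead of a `std::unordered_set`, which is what allows the whole table to become `constexpr`. A minimal standalone sketch of the encoding and of the membership test used later in `ggml_qnn_supports_tensor`; the enum values here are stand-ins, not the real ones from `ggml.h`:

```cpp
#include <cstdint>
#include <cstdio>

// Stand-in for ggml.h's ggml_type; illustrative values only.
enum ggml_type : int { GGML_TYPE_F32 = 0, GGML_TYPE_F16 = 1, GGML_TYPE_I8 = 24, GGML_TYPE_I16 = 25 };

// One bit per type, indexed by the enum value; the whole set fits in a
// uint64_t as long as there are fewer than 64 ggml types.
constexpr uint64_t kNpuSupportedTypes =
    (1ULL << GGML_TYPE_F32) | (1ULL << GGML_TYPE_F16) | (1ULL << GGML_TYPE_I16) | (1ULL << GGML_TYPE_I8);

constexpr bool type_supported(uint64_t mask, ggml_type type) { return (mask & (1ULL << type)) != 0; }

// Because the mask is a constant expression, support can even be checked at compile time.
static_assert(type_supported(kNpuSupportedTypes, GGML_TYPE_F16), "F16 expected in the NPU mask");

int main() {
    printf("I16 supported: %s\n", type_supported(kNpuSupportedTypes, GGML_TYPE_I16) ? "yes" : "no");
    return 0;
}
```

The hash-set lookup this replaces involved heap allocation and hashing; the mask is a single AND against a compile-time constant.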
```diff
@@ -192,8 +203,8 @@ ggml_backend_buffer_i ggml_backend_qnn_buffer_interface = {
  * -----------------------------------------------------------------------------------------------
  */
 const char *ggml_backend_qnn_buffer_type_name(ggml_backend_buffer_type_t buft) {
-    GGML_UNUSED(buft);
-    return GGML_QNN_NAME;
+    auto *dev_ctx = get_device_context(buft->device);
+    return qnn::get_backend_name(dev_ctx->device);
 }
 
 ggml_backend_buffer_t ggml_backend_qnn_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
```
```diff
@@ -209,13 +220,14 @@ ggml_backend_buffer_t ggml_backend_qnn_buffer_type_alloc_buffer(ggml_backend_buf
 
 size_t ggml_backend_qnn_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
     GGML_UNUSED(buft);
+    // TODO: fix this
     return 32;
 }
 
-// TODO: this value is an experimental value, works fine with whisper/llm/minicpm-v inference on Android
 size_t ggml_backend_qnn_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
     GGML_UNUSED(buft);
+
+    // TODO: this value is an experimental value, works fine with
+    // whisper/llm/minicpm-v inference on Android
     return (96 * 1024 * 1024);
 }
 
```
```diff
@@ -255,9 +267,12 @@ ggml_backend_buffer_type_t ggml_backend_qnn_buffer_type(ggml_backend_dev_t dev)
         ggml_backend_qnn_buffer_types[i] = {
             /* .iface = */ {
                 /* .get_name = */ ggml_backend_qnn_buffer_type_name,
-                /* .alloc_buffer = */ ggml_backend_qnn_buffer_type_alloc_buffer,
-                /* .get_alignment = */ ggml_backend_qnn_buffer_type_get_alignment,
-                /* .get_max_size = */ ggml_backend_qnn_buffer_type_get_max_size,
+                /* .alloc_buffer = */
+                ggml_backend_qnn_buffer_type_alloc_buffer,
+                /* .get_alignment = */
+                ggml_backend_qnn_buffer_type_get_alignment,
+                /* .get_max_size = */
+                ggml_backend_qnn_buffer_type_get_max_size,
                 /* .get_alloc_size = */ nullptr, // defaults to ggml_nbytes
                 /* .is_host = */ ggml_backend_qnn_buffer_is_host,
             },
```
```diff
@@ -321,17 +336,13 @@ const char *ggml_backend_qnn_device_get_description(ggml_backend_dev_t dev) {
 }
 
 void ggml_backend_qnn_device_get_memory(ggml_backend_dev_t dev, size_t *free, size_t *total) {
-    // TODO: get memory info
-    *free = 0;
-    *total = 0;
-
     GGML_UNUSED(dev);
+    *free = qnn::get_system_free_memory_in_bytes();
+    *total = qnn::get_system_total_memory_in_bytes();
 }
 
 enum ggml_backend_dev_type ggml_backend_qnn_device_get_type(ggml_backend_dev_t dev) {
-    // TODO: for cpu backend, we should return GGML_BACKEND_DEVICE_TYPE_CPU
-    GGML_UNUSED(dev);
-    return GGML_BACKEND_DEVICE_TYPE_GPU;
+    return kDeviceCaps[get_device_context(dev)->device].type;
 }
 
 void ggml_backend_qnn_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props *props) {
```
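With `get_memory` and `get_type` now returning real values, generic code can size allocations per device through the standard device API. A hedged usage sketch, assuming the registry/device accessors from `ggml-backend.h` (they are not part of this diff):

```cpp
#include <cstdio>

#include "ggml-backend.h"
#include "ggml-qnn.h"

int main() {
    ggml_backend_reg_t reg = ggml_backend_qnn_reg();
    for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); ++i) {
        ggml_backend_dev_t dev = ggml_backend_reg_dev_get(reg, i);
        size_t free = 0, total = 0;
        ggml_backend_dev_memory(dev, &free, &total); // reports system memory now, not 0/0
        printf("%s: %zu of %zu MiB free\n", ggml_backend_dev_name(dev), free >> 20, total >> 20);
    }
    return 0;
}
```

Returning the table's `type` also means the NPU is now reported as `GGML_BACKEND_DEVICE_TYPE_ACCEL` rather than hard-coded `GPU`, which is what schedulers use to pick a device.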
```diff
@@ -356,41 +367,43 @@ ggml_guid_t ggml_backend_qnn_guid() {
 ggml_backend_t ggml_backend_qnn_init_with_device_context(ggml_backend_dev_t dev, const char *extend_lib_search_path) {
     if (!extend_lib_search_path) {
         extend_lib_search_path = GGML_QNN_DEFAULT_LIB_SEARCH_PATH;
-        QNN_LOG_WARN("extend_lib_search_path is nullptr, will use " GGML_QNN_DEFAULT_LIB_SEARCH_PATH " as default");
+        QNN_LOG_WARN(
+            "extend_lib_search_path is nullptr, will "
+            "use " GGML_QNN_DEFAULT_LIB_SEARCH_PATH " as default");
     }
 
     auto *dev_ctx = get_device_context(dev);
-    auto device_index = dev_ctx->device;
-    QNN_LOG_DEBUG("device %d", device_index);
+    const auto device = dev_ctx->device;
+    QNN_LOG_DEBUG("device %d", device);
     QNN_LOG_DEBUG("extend_lib_search_path %s", extend_lib_search_path);
     std::string path = extend_lib_search_path;
 
 // TODO: Fix this for other platforms
 #if defined(__ANDROID__) || defined(ANDROID)
-    if (QNN_BACKEND_NPU == device_index) {
-        if (0 == setenv("LD_LIBRARY_PATH",
-                        (path + ":/vendor/dsp/cdsp:/vendor/lib64:/vendor/dsp/"
-                                "dsp:/vendor/dsp/images")
-                            .c_str(),
-                        1)) {
+    if (device == QNN_BACKEND_NPU) {
+        if (setenv("LD_LIBRARY_PATH",
+                   (path + ":/vendor/dsp/cdsp:/vendor/lib64:/vendor/dsp/"
+                           "dsp:/vendor/dsp/images")
+                       .c_str(),
+                   1) == 0) {
             QNN_LOG_INFO("QNN NPU backend setenv successfully");
         } else {
             QNN_LOG_ERROR("QNN NPU backend setenv failure");
         }
-        if (0 == setenv("ADSP_LIBRARY_PATH",
-                        (path + ";/vendor/dsp/cdsp;/vendor/lib/rfsa/adsp;/system/lib/"
-                                "rfsa/adsp;/vendor/dsp/dsp;/vendor/dsp/images;/dsp")
-                            .c_str(),
-                        1)) {
+        if (setenv("ADSP_LIBRARY_PATH",
+                   (path + ";/vendor/dsp/cdsp;/vendor/lib/rfsa/adsp;/system/lib/"
+                           "rfsa/adsp;/vendor/dsp/dsp;/vendor/dsp/images;/dsp")
+                       .c_str(),
+                   1) == 0) {
             QNN_LOG_INFO("QNN NPU backend setenv successfully");
         } else {
             QNN_LOG_ERROR("QNN NPU backend setenv failure");
         }
     } else {
-        if (0 == setenv("LD_LIBRARY_PATH", path.c_str(), 1)) {
-            QNN_LOG_INFO("%s backend setenv successfully\n", qnn::get_backend_name(device_index));
+        if (setenv("LD_LIBRARY_PATH", path.c_str(), 1) == 0) {
+            QNN_LOG_INFO("%s backend setenv successfully\n", qnn::get_backend_name(device));
         } else {
-            QNN_LOG_ERROR("%s backend setenv failure\n", qnn::get_backend_name(device_index));
+            QNN_LOG_ERROR("%s backend setenv failure\n", qnn::get_backend_name(device));
         }
     }
 #endif
```
```diff
@@ -398,8 +411,7 @@ ggml_backend_t ggml_backend_qnn_init_with_device_context(ggml_backend_dev_t dev,
     auto instance = std::make_shared<qnn::qnn_instance>(path, dev_ctx->lib_name, "ggml");
     auto result = instance->qnn_init(nullptr);
     if (result != 0) {
-        QNN_LOG_WARN("init qnn subsystem failed with qnn backend %s, pls check why\n",
-                     qnn::get_backend_name(device_index));
+        QNN_LOG_WARN("init qnn subsystem failed with qnn backend %s, pls check why\n", qnn::get_backend_name(device));
         return nullptr;
     }
     auto qnn_interface = instance->get_qnn_interface();
```
```diff
@@ -408,12 +420,12 @@ ggml_backend_t ggml_backend_qnn_init_with_device_context(ggml_backend_dev_t dev,
         return nullptr;
     }
 
-    std::string device_name = qnn::get_backend_name(device_index);
+    std::string device_name = qnn::get_backend_name(device);
     QNN_LOG_INFO("qnn device name %s", device_name.c_str());
     dev_ctx->instance = instance;
     dev_ctx->qnn_interface = qnn_interface;
     dev_ctx->socinfo = instance->get_soc_info();
-    dev_ctx->supported_types = kDeviceCaps[device_index].supported_types;
+    dev_ctx->supported_types = kDeviceCaps[device].supported_types;
 
     ggml_backend_t qnn_backend = new ggml_backend{
         /* .guid = */ ggml_backend_qnn_guid(),
```
```diff
@@ -479,9 +491,23 @@ struct ggml_backend_qnn_reg_impl : ggml_backend_reg {
     std::array<std::unique_ptr<ggml_backend_qnn_device_context>, GGML_QNN_MAX_DEVICES> device_contexts;
     std::array<ggml_backend_device, GGML_QNN_MAX_DEVICES> devices;
 
-    ggml_backend_qnn_reg_impl(ggml_backend_reg_i interface) {
+    explicit ggml_backend_qnn_reg_impl(ggml_backend_reg_i interface) {
         context = this;
         iface = interface;
+
+        for (int i = 0; i < GGML_QNN_MAX_DEVICES; i++) {
+            const auto device_enum = (QNNBackend)(GGML_QNN_MAX_DEVICES - 1 - i); // init from the last device, i.e. NPU
+            device_contexts[i] = std::make_unique<ggml_backend_qnn_device_context>(
+                /* .device = */ device_enum,
+                /* .threads = */ 1,
+                /* .name = */ qnn::get_backend_name(device_enum),
+                /* .lib_name = */ kDeviceCaps[device_enum].lib_name);
+
+            auto &device = devices[i];
+            device.iface = ggml_backend_qnn_device_interface;
+            device.reg = this;
+            device.context = device_contexts[i].get();
+        }
     }
 };
```
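Note the loop walks the table backwards ("init from the last device"), so the NPU is registered first and callers that simply take the first device get the accelerator. A worked example of the index mapping, assuming the enum layout from the header hunk above is in scope:

```cpp
#include "ggml-qnn.h" // QNNBackend, GGML_QNN_MAX_DEVICES

// With GGML_QNN_MAX_DEVICES == QNN_BACKEND_COUNT == 3, slot i maps to device (3 - 1 - i):
static_assert((QNNBackend)(GGML_QNN_MAX_DEVICES - 1 - 0) == QNN_BACKEND_NPU, "slot 0 is the NPU");
static_assert((QNNBackend)(GGML_QNN_MAX_DEVICES - 1 - 1) == QNN_BACKEND_GPU, "slot 1 is the GPU");
static_assert((QNNBackend)(GGML_QNN_MAX_DEVICES - 1 - 2) == QNN_BACKEND_CPU, "slot 2 is the CPU");
```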
```diff
@@ -512,35 +538,5 @@ const ggml_backend_reg_i ggml_backend_qnn_reg_interface = {
 
 ggml_backend_reg_t ggml_backend_qnn_reg() {
     static ggml_backend_qnn_reg_impl reg{ggml_backend_qnn_reg_interface};
-    static bool initialized = false;
-    static std::mutex mutex;
-
-    {
-        std::lock_guard<std::mutex> lock(mutex);
-        if (!initialized) {
-            for (int i = 0; i < GGML_QNN_MAX_DEVICES; i++) {
-                reg.device_contexts[i] = std::make_unique<ggml_backend_qnn_device_context>(
-                    /* .device = */ (QNNBackend)i,
-                    /* .threads = */ 1,
-                    /* .name = */ qnn::get_backend_name(i),
-                    /* .lib_name = */ kDeviceCaps[i].lib_name);
-
-                auto &device = reg.devices[i];
-                device.iface = ggml_backend_qnn_device_interface;
-                device.reg = &reg;
-                device.context = reg.device_contexts[i].get();
-            }
-            initialized = true;
-        }
-    }
-
     return &reg;
 }
-
-int ggml_backend_qnn_get_device_count() { return GGML_QNN_MAX_DEVICES; }
-
-ggml_backend_t ggml_backend_qnn_init(size_t index, const char *extend_lib_search_path) {
-    auto *reg = ggml_backend_qnn_reg();
-    auto *device = ggml_backend_qnn_reg_get_device(reg, index);
-    return ggml_backend_qnn_device_init(device, extend_lib_search_path);
-}
```
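With the device setup moved into the constructor, the function-local `static reg` is all the synchronization needed: since C++11 the runtime guards the initialization of function-local statics, so the old mutex and `initialized` flag were redundant. A minimal sketch of the pattern, independent of the QNN types:

```cpp
#include <cstdio>

struct registry {
    registry() { puts("constructed exactly once"); }
};

registry &get_registry() {
    // C++11 "magic static": initialization is thread-safe and runs once;
    // concurrent first callers block until the constructor finishes.
    static registry reg;
    return reg;
}

int main() {
    get_registry();
    get_registry(); // no second construction
    return 0;
}
```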
```diff
@@ -549,7 +549,7 @@ bool ggml_qnn_supports_tensor(ggml_backend_qnn_device_context *ctx, const ggml_t
         case GGML_TYPE_F16:
         case GGML_TYPE_Q8_0:
         case GGML_TYPE_Q4_0:
-            if (ctx->supported_types.find(tensor->type) == ctx->supported_types.end()) {
+            if (!(ctx->supported_types & (1 << tensor->type))) {
                 QNN_LOG_DEBUG("unsupported data type %s for backend %d", type_name, (int)ctx->device);
                 return false;
             }
```
```diff
@@ -27,7 +27,7 @@ struct ggml_backend_qnn_device_context {
 
     // initialize in init
     qnn::qcom_socinfo socinfo = {};
-    std::unordered_set<ggml_type> supported_types;
+    uint64_t supported_types;
     std::shared_ptr<qnn::qnn_instance> instance;
     std::shared_ptr<qnn::qnn_interface> qnn_interface;
```
```diff
@@ -7,6 +7,10 @@
 
 #include "qnn-types.hpp"
 
+#ifdef __linux__
+#include <unistd.h>
+#endif
+
 namespace qnn {
 
 qnn_dimension_array_t get_internal_dimension(const ggml_dimension_array_t &dims, uint32_t rank) {
```
```diff
@@ -141,7 +145,7 @@ const char *get_ggml_type_name(ggml_type type) {
     return traits->type_name;
 }
 
-const char *get_backend_name(size_t device_index) {
+const char *get_backend_name(QNNBackend device_index) {
     switch (device_index) {
         case QNN_BACKEND_CPU:
             return "QNN-CPU";
```
```diff
@@ -149,8 +153,7 @@ const char *get_backend_name(size_t device_index) {
             return "QNN-GPU";
         case QNN_BACKEND_NPU:
             return "QNN-NPU";
-        case QNN_BACKEND_GGML:
-            return "ggml"; //"fake" QNN backend, used for compare performance between QNN backend and original GGML
+        case QNN_BACKEND_COUNT:
         default:
             return "unknown";
     }
```
```diff
@@ -295,4 +298,20 @@ const char *get_qnn_error_string(Qnn_ErrorHandle_t error) {
     }
 }
 
+#ifdef __linux__
+
+size_t get_system_total_memory_in_bytes() {
+    auto pages = (size_t)sysconf(_SC_PHYS_PAGES);
+    auto page_size = (size_t)sysconf(_SC_PAGE_SIZE);
+    return pages * page_size;
+}
+
+size_t get_system_free_memory_in_bytes() {
+    auto avail_pages = (size_t)sysconf(_SC_AVPHYS_PAGES);
+    auto page_size = (size_t)sysconf(_SC_PAGE_SIZE);
+    return avail_pages * page_size;
+}
+
+#endif
+
 } // namespace qnn
```
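`_SC_PHYS_PAGES` and `_SC_AVPHYS_PAGES` are glibc extensions rather than POSIX, hence the `#ifdef __linux__` guard; other platforms would need their own implementation (e.g. `sysctl` on Apple systems). A standalone sketch of the same computation:

```cpp
// Linux/glibc only: derive memory sizes from page counts, as the helpers above do.
#include <unistd.h>

#include <cstdio>

int main() {
    size_t total = (size_t)sysconf(_SC_PHYS_PAGES) * (size_t)sysconf(_SC_PAGE_SIZE);
    size_t avail = (size_t)sysconf(_SC_AVPHYS_PAGES) * (size_t)sysconf(_SC_PAGE_SIZE);
    printf("total: %zu MiB, free: %zu MiB\n", total >> 20, avail >> 20);
    return 0;
}
```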
```diff
@@ -11,6 +11,8 @@
 
 #include "ggml.h"
 
+#include "ggml-qnn.h"
+
 #include "QnnTypes.h"
 #include "logger.hpp"
 
```
```diff
@@ -25,7 +27,7 @@ qnn_dimension_array_t get_internal_dimension(const ggml_dimension_array_t &dims,
 
 uint32_t get_ggml_tensor_rank(const ggml_tensor *tensor);
 const char *get_ggml_type_name(ggml_type type);
-const char *get_backend_name(size_t device_index);
+const char *get_backend_name(QNNBackend device_index);
 const char *get_chipset_desc(uint32_t chipset_id);
 const char *get_htparch_desc(size_t htp_arch);
 intptr_t align_to(size_t alignment, intptr_t offset);
```
```diff
@@ -198,6 +200,8 @@ Qnn_DataType_t qnn_datatype_from_ggml_datatype(ggml_type ggml_type);
 ggml_type ggml_datatype_from_qnn_datatype(Qnn_DataType_t qnn_type);
 size_t qnn_datatype_size(Qnn_DataType_t qnn_type);
 const char *qnn_datatype_to_string(Qnn_DataType_t qnn_type);
+size_t get_system_total_memory_in_bytes();
+size_t get_system_free_memory_in_bytes();
 
 #if ENABLE_QNNBACKEND_PERF
 class qnn_perf {
```