Fix llama-bench; Clang-format

Yu, Zijun authored 2025-07-24 11:56:25 +08:00, committed by Mustafa Cavus
parent 6dc4b90635
commit 75eec6265f
3 changed files with 58 additions and 66 deletions


@@ -8,6 +8,8 @@ Cpp11BracedListStyle: true
 AccessModifierOffset: -4
 BinPackArguments: false
 BreakBeforeBraces: Attach
+IndentCaseBlocks: false
+IndentCaseLabels: false
 Language: Cpp
 AlignAfterOpenBracket: Align
@@ -68,8 +70,6 @@ IncludeCategories:
 IncludeIsMainRegex: '([-_](test|unittest))?$'
 IncludeIsMainSourceRegex: ''
 IndentAccessModifiers: false
-IndentCaseBlocks: true
-IndentCaseLabels: true
 IndentExternBlock: NoIndent
 IndentGotoLabels: false
 IndentPPDirectives: AfterHash
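
Net effect of the two .clang-format changes above: case labels now sit at the same depth as the switch, and case blocks no longer get an extra indentation level, with BreakBeforeBraces: Attach keeping the opening brace on the label line. A minimal illustration (names invented for the example):

// Illustrative only: layout clang-format produces with IndentCaseBlocks: false,
// IndentCaseLabels: false, and BreakBeforeBraces: Attach.
switch (op_kind) {
case 1: {            // label at switch depth, brace attached
    handle_case();   // block body indented one level, not two
    break;
}
default:
    break;
}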


@@ -176,7 +176,7 @@ void GgmlOvDecoder::set_input_output(ggml_tensor* node, bool naive) {
         break;
     }
     case GGML_OP_CPY: {
-        if (ggml_is_contiguous(node)) {
+        if (std::string(node->src[1]->name).find("cache_k") == 0) {
             // Write K to cache_k
             m_op_case = 1;
         } else {
@@ -184,7 +184,7 @@ void GgmlOvDecoder::set_input_output(ggml_tensor* node, bool naive) {
             m_op_case = 2;
         }
         break;
-        }
+    }
     case GGML_OP_PERMUTE: {
         if (node->src[0]->view_src == nullptr) {
             // Permute Qcur
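
The GGML_OP_CPY change above is the behavioral part of this commit: the op case is now selected by the destination tensor's name rather than by ggml_is_contiguous(node), which is presumably the llama-bench fix named in the commit title. In ggml, src[1] of a CPY node is its destination view, so the new condition amounts to a name-prefix test (helper name invented for this sketch):

// Hypothetical helper mirroring the new condition: a CPY node is treated as
// a K-cache write iff its destination tensor name starts with "cache_k".
// find() == 0 is the pre-C++20 spelling of starts_with.
static bool writes_to_k_cache(const ggml_tensor* node) {
    return std::string(node->src[1]->name).find("cache_k") == 0;
}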
@@ -198,23 +198,21 @@ void GgmlOvDecoder::set_input_output(ggml_tensor* node, bool naive) {
         }
         break;
     }
-        case GGML_OP_GET_ROWS:
-        {
-            if (node->src[1]->op == GGML_OP_VIEW) {
-                m_op_case = 2;
-            } else {
-                m_op_case = 1;
-            }
-            break;
-        }
-        case GGML_OP_ROPE:
-        {
-            if (node->src[0]->op == GGML_OP_VIEW) {
-                m_op_case = 2;
-            } else {
-                m_op_case = 1;
-            }
-            break;
-        }
+    case GGML_OP_GET_ROWS: {
+        if (node->src[1]->op == GGML_OP_VIEW) {
+            m_op_case = 2;
+        } else {
+            m_op_case = 1;
+        }
+        break;
+    }
+    case GGML_OP_ROPE: {
+        if (node->src[0]->op == GGML_OP_VIEW) {
+            m_op_case = 2;
+        } else {
+            m_op_case = 1;
+        }
+        break;
+    }
     default:
         break;
     }
@@ -405,17 +403,16 @@ std::shared_ptr<ov::Node> GgmlOvDecoder::create_weight_node(ggml_tensor* tensor)
         weight_node = std::make_shared<ov::op::v0::Constant>(node_type, node_shape, data_f16);
         break;
     }
-        case GGML_TYPE_BF16:
-        {
-            const auto* ptr = reinterpret_cast<const uint16_t*>(tensor->data);
-            std::vector<ov::bfloat16> data_bf16;
-            data_bf16.reserve(ne_total);
-            for (int i = 0; i < ne_total; ++i) {
-                data_bf16.push_back(ov::bfloat16::from_bits(ptr[i]));
-            }
-            weight_node = std::make_shared<ov::op::v0::Constant>(node_type, node_shape, data_bf16);
-            break;
-        }
+    case GGML_TYPE_BF16: {
+        const auto* ptr = reinterpret_cast<const uint16_t*>(tensor->data);
+        std::vector<ov::bfloat16> data_bf16;
+        data_bf16.reserve(ne_total);
+        for (int i = 0; i < ne_total; ++i) {
+            data_bf16.push_back(ov::bfloat16::from_bits(ptr[i]));
+        }
+        weight_node = std::make_shared<ov::op::v0::Constant>(node_type, node_shape, data_bf16);
+        break;
+    }
     default:
         throw std::invalid_argument("Unsupported tensor type");
     }
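
The GGML_TYPE_BF16 branch above only changes formatting, but the conversion it performs is worth noting: ggml stores bf16 data as raw 16-bit words, and ov::bfloat16::from_bits reinterprets each word bit-for-bit, with no numeric conversion. A standalone sketch of the same repack (function name and count parameter are stand-ins):

#include <cstdint>
#include <vector>
#include "openvino/core/type/bfloat16.hpp"

// Stand-in helper: copy raw bf16 words into OpenVINO's bfloat16 wrapper.
// from_bits is bit-exact, so this is a type relabeling, not a value cast.
std::vector<ov::bfloat16> repack_bf16(const uint16_t* bits, int64_t count) {
    std::vector<ov::bfloat16> out;
    out.reserve(count);
    for (int64_t i = 0; i < count; ++i) {
        out.push_back(ov::bfloat16::from_bits(bits[i]));
    }
    return out;
}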
@@ -614,8 +611,8 @@ int32_t* GgmlOvDecoder::get_output_op_params(const std::string& name) const {
 
 void GgmlOvDecoder::visit_subgraph(std::function<void(std::shared_ptr<GgmlDecoder>)> node_visitor) const {
     for (const auto& node : m_nodes) {
-        auto decoder = std::make_shared<GgmlOvDecoder>(node, m_cgraph, m_is_static, m_is_first_token, m_context_size,
-                                                       m_num_heads, m_num_heads_kv, m_head_size);
+        auto decoder = std::make_shared<GgmlOvDecoder>(
+            node, m_cgraph, m_is_static, m_is_first_token, m_context_size, m_num_heads, m_num_heads_kv, m_head_size);
         node_visitor(decoder);
     }
 }
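
For orientation, visit_subgraph builds a fresh GgmlOvDecoder per graph node (sharing the cgraph and model hyperparameters) and hands it to the callback. A hypothetical caller, assuming get_op_type is exposed on the GgmlDecoder interface:

// Hypothetical usage: walk the subgraph and record each node's op type.
std::vector<std::string> op_types;
decoder.visit_subgraph([&](std::shared_ptr<GgmlDecoder> d) {
    op_types.push_back(d->get_op_type());
});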
@@ -667,12 +664,12 @@ const std::string& GgmlOvDecoder::get_op_type() const {
     };
     switch (m_node->op) {
-        case GGML_OP_UNARY:
-            return unary_ops.at(ggml_get_unary_op(m_node));
-        case GGML_OP_GLU:
-            return glu_ops.at(ggml_get_glu_op(m_node));
-        default:
-            return ops.at(m_node->op);
+    case GGML_OP_UNARY:
+        return unary_ops.at(ggml_get_unary_op(m_node));
+    case GGML_OP_GLU:
+        return glu_ops.at(ggml_get_glu_op(m_node));
+    default:
+        return ops.at(m_node->op);
     }
     static const std::string unknown_op = "UNKNOWN_GGML_OP";
     return unknown_op;
 }
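
The reflowed switch follows a small dispatch pattern: a primary table keyed by ggml_op, with GGML_OP_UNARY and GGML_OP_GLU deferring to sub-op tables because their actual operation is carried in the node's parameters. A condensed restatement, assuming the three tables map ops to their names:

// Condensed sketch of the lookup (tables abridged). map::at throws
// std::out_of_range for an op missing from its table, so every path either
// returns or throws and the trailing unknown_op fallback is never reached.
switch (node->op) {
case GGML_OP_UNARY:
    return unary_ops.at(ggml_get_unary_op(node));  // sub-op picks the name
case GGML_OP_GLU:
    return glu_ops.at(ggml_get_glu_op(node));
default:
    return ops.at(node->op);
}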


@@ -309,7 +309,7 @@ static bool is_op_unsupported_case(const ggml_tensor* op) {
     return false;
 }
 
-static bool ggml_backend_openvino_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
+static bool ggml_backend_openvino_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor* op) {
     GGML_ASSERT(dev->reg != nullptr);
 
     static const std::set<ggml_type> supported_types{
@@ -327,34 +327,29 @@ static bool ggml_backend_openvino_device_supports_op(ggml_backend_dev_t dev, con
     };
 
     switch (op->op) {
-        case GGML_OP_UNARY:
-        {
-            auto supported = supported_unary_ops.find(ggml_get_unary_op(op)) != supported_unary_ops.end();
-            if (!supported) {
-                GGML_LOG_WARN("OpenVINO backend does not support unary op %s\n",
-                              ggml_unary_op_name(ggml_get_unary_op(op)));
-                return false;
-            }
-            break;
-        }
-        case GGML_OP_GLU:
-        {
-            auto supported = supported_glu_ops.find(ggml_get_glu_op(op)) != supported_glu_ops.end();
-            if (!supported) {
-                GGML_LOG_WARN("OpenVINO backend does not support GLU op %s\n",
-                              ggml_glu_op_name(ggml_get_glu_op(op)));
-                return false;
-            }
-            break;
-        }
-        default:
-        {
-            auto supported = supported_ops.find(op->op) != supported_ops.end();
-            if (!supported) {
-                GGML_LOG_WARN("OpenVINO backend does not support op %s\n", ggml_op_name(op->op));
-                return false;
-            }
-        }
+    case GGML_OP_UNARY: {
+        auto supported = supported_unary_ops.find(ggml_get_unary_op(op)) != supported_unary_ops.end();
+        if (!supported) {
+            GGML_LOG_WARN("OpenVINO backend does not support unary op %s\n", ggml_unary_op_name(ggml_get_unary_op(op)));
+            return false;
+        }
+        break;
+    }
+    case GGML_OP_GLU: {
+        auto supported = supported_glu_ops.find(ggml_get_glu_op(op)) != supported_glu_ops.end();
+        if (!supported) {
+            GGML_LOG_WARN("OpenVINO backend does not support GLU op %s\n", ggml_glu_op_name(ggml_get_glu_op(op)));
+            return false;
+        }
+        break;
+    }
+    default: {
+        auto supported = supported_ops.find(op->op) != supported_ops.end();
+        if (!supported) {
+            GGML_LOG_WARN("OpenVINO backend does not support op %s\n", ggml_op_name(op->op));
+            return false;
+        }
+    }
     }
 
     if (supported_types.find(op->type) == supported_types.end()) {
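
Taken together, the function gates an op on two set-membership tests: the op kind (the switch above, which logs a warning before rejecting) and the tensor's dtype against supported_types. A reduced sketch of that overall shape (helper name and set contents are assumptions):

// Reduced sketch: the backend claims an op only if both its kind and its
// tensor type pass lookup; the contents of both sets are assumed here.
static bool claims_op(const ggml_tensor* op,
                      const std::set<ggml_op>& supported_ops,
                      const std::set<ggml_type>& supported_types) {
    if (supported_ops.find(op->op) == supported_ops.end()) {
        GGML_LOG_WARN("OpenVINO backend does not support op %s\n", ggml_op_name(op->op));
        return false;
    }
    return supported_types.find(op->type) != supported_types.end();
}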