add tmp source code files

This commit is contained in:
zhanmyz 2025-02-25 12:43:12 +08:00 committed by Mustafa Cavus
parent 57582fda39
commit afb8594194
7 changed files with 321 additions and 118 deletions

View File

@ -140,7 +140,7 @@ int main(int argc, char ** argv) {
std::string s(buf, n);
printf("%s", s.c_str());
}
printf("\n");
// prepare a batch for the prompt
llama_batch batch = llama_batch_get_one(prompt_tokens.data(), prompt_tokens.size());

View File

@ -685,8 +685,6 @@ void ggml_backend_openvino_dup_bytes(struct ggml_tensor *dst) {
if (src0->type == dst->type && ne00 == dst->ne[0] && nb00 == element_size && nb0 == element_size) {
// Assume that the data type is f32 and each element is 4 bytes
const size_t element_size = ggml_type_size(src0->type); // 4 bytes
// Logically, the number of valid elements per row is 3072 (src0->ne[0]), and the number of rows is 7 (src0->ne[1])
size_t valid_elems = static_cast<size_t>(src0->ne[0]); // 3072
size_t num_rows = static_cast<size_t>(src0->ne[1]); // 7
@ -740,7 +738,10 @@ void ggml_backend_openvino_dup_bytes(struct ggml_tensor *dst) {
infer_request.set_input_tensor(0, input_tensor);
// Construct output Tensor: dst is contiguous storage, and its logical shape is [3072,7,1,1]
ov::Shape output_shape = { valid_elems, num_rows, 1, 1 };
ov::Shape output_shape = { static_cast<size_t>(dst->ne[0]),
static_cast<size_t>(dst->ne[1]),
static_cast<size_t>(dst->ne[2]),
static_cast<size_t>(dst->ne[3])};
ov::Tensor output_tensor(ov::element::f32, output_shape, dst->data);
infer_request.set_output_tensor(0, output_tensor);
@ -811,7 +812,10 @@ void ggml_backend_openvino_dup_bytes(struct ggml_tensor *dst) {
// gathered has a shape of [21504]
// 5. Reshape gathered to [3072,7,1,1], because 3072*7 = 21504
ov::Shape target_shape = { static_cast<size_t>(dst->ne[0]), static_cast<size_t>(dst->ne[1]), 1, 1 }; // [3072,7,1,1]
ov::Shape target_shape = { static_cast<size_t>(dst->ne[0]),
static_cast<size_t>(dst->ne[1]),
static_cast<size_t>(dst->ne[2]),
static_cast<size_t>(dst->ne[3])}; // [3072,7,1,1]
auto reshape_const = ov::op::v0::Constant::create(ov::element::i64, {4},
std::vector<int64_t>{ static_cast<int64_t>(dst->ne[0]), static_cast<int64_t>(dst->ne[1]), 1, 1 });
auto reshaped = std::make_shared<ov::op::v1::Reshape>(gathered, reshape_const, false);
@ -834,34 +838,6 @@ void ggml_backend_openvino_dup_bytes(struct ggml_tensor *dst) {
// Run inference: the computation graph uses Gather+Reshape to collect each valid element of src0 in a predetermined order and write it directly to dst->data
infer_request.infer();
/*
const size_t rs = ne00 * element_size; // Row size in bytes for dst
// Create OpenVINO tensors for source and destination
// The tensors are reshaped to a 2D structure (num_rows x ne00) for easier iteration and compatibility with the simplified loop.
ov::Tensor src_tensor(ov::element::f32, ov::Shape{ne03 * ne02 * ne01, ne00}, src0->data);
ov::Tensor dst_tensor(ov::element::f32, ov::Shape{ne03 * ne02 * ne01, ne00}, dst->data);
// Perform the copy in a single loop
const size_t num_rows = ne03 * ne02 * ne01;
for (size_t row = 0; row < num_rows; ++row) {
// Calculate the source row pointer based on original strides
// The source row pointer is calculated based on the combined index row and the strides nb03, nb02, and nb01.
const char* src0_ptr = (char*)src_tensor.data() +
// Calculates which block of the i03 dimension the current row belongs to
(row / (ne02 * ne01)) * nb03 + // 0
// Calculates which block of the i02 dimension the current row belongs to within the current i03 block.
((row / ne01) % ne02) * nb02 + // 0, 0,......, 0,384, 384,......, 384,768,......, 2304
// Calculates the position within the current i02 block in terms of the i01 index.
(row % ne01) * nb01; // 0,2688,......,83328, 0, 2688,......,83328, 0,......, 83328
// Destination row pointer is linear
// Since dst is contiguous, its rows are accessed linearly using a single stride rs, simplifying the destination pointer calculation.
char* dst_ptr = (char*)dst_tensor.data() + row * rs;
// Copy row
std::memcpy(dst_ptr, src0_ptr, rs);
}*/
return;
}
std::cout << "Duplication of bytes completed successfully." << std::endl;
@ -939,6 +915,7 @@ void ggml_backend_openvino_cpy(struct ggml_tensor *dst) {
// ov::Shape flat_src0_shape = {80000};
ov::Shape flat_src0_shape = {dst->src[0]->nb[2]};
auto param_src0 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, flat_src0_shape);
// auto param_src00 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, flat_src0_shape);
auto gather_indices_const = ov::op::v0::Constant::create(ov::element::i64, gather_idx_shape, gather_idx);
auto gather_axis_const = ov::op::v0::Constant::create(ov::element::i64, {1}, {0});
@ -951,6 +928,7 @@ void ggml_backend_openvino_cpy(struct ggml_tensor *dst) {
// ov::Shape flat_dst_shape = {200000, 1};
ov::Shape flat_dst_shape = {dst->nb[2], 1};
auto param_dst_base = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, flat_dst_shape);
// auto param_dst_base11 = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, flat_dst_shape);
auto scatter_indices_const = ov::op::v0::Constant::create(ov::element::i64, scatter_idx_shape, scatter_idx);
@ -961,6 +939,8 @@ void ggml_backend_openvino_cpy(struct ggml_tensor *dst) {
);
ov::ParameterVector params = { param_src0, param_dst_base };
// ov::ParameterVector params = { param_src0};
// ov::ParameterVector params = { param_src00, param_dst_base11};
auto model = std::make_shared<ov::Model>(ov::OutputVector{ scatter }, params);
auto compiled_model = core.compile_model(model, "CPU");
@ -1009,16 +989,17 @@ static enum ggml_status ggml_backend_openvino_graph_compute(ggml_backend_t backe
}
}
// openvino_frontend_compute(backend, cgraph);
// Process nodes in order
for (int i = 0; i < cgraph->n_nodes; i++) {
if (std::find(cont_indices.begin(), cont_indices.end(), i) != cont_indices.end()) {
ggml_backend_openvino_dup_bytes(cgraph->nodes[i]);
} else if (std::find(reshape_indices.begin(), reshape_indices.end(), i) != reshape_indices.end()) {
if (std::find(reshape_indices.begin(), reshape_indices.end(), i) != reshape_indices.end()) {
ggml_backend_openvino_reshape(cgraph->nodes[i]);
// } else if (std::find(cont_indices.begin(), cont_indices.end(), i) != cont_indices.end()) {
// ggml_backend_openvino_dup_bytes(cgraph->nodes[i]);
} else if (std::find(view_indices.begin(), view_indices.end(), i) != view_indices.end()) {
ggml_backend_openvino_view(cgraph->nodes[i]);
// } else if (std::find(cpy_indices.begin(), cpy_indices.end(), i) != cpy_indices.end()) {
// ggml_backend_openvino_cpy(cgraph->nodes[i]);
ggml_backend_openvino_view(cgraph->nodes[i]);
} else if (std::find(cpy_indices.begin(), cpy_indices.end(), i) != cpy_indices.end()) {
ggml_backend_openvino_cpy(cgraph->nodes[i]);
} else if (std::find(transpose_indices.begin(), transpose_indices.end(), i) != transpose_indices.end()) {
ggml_backend_openvino_transpose(cgraph->nodes[i]);
} else if (std::find(permute_indices.begin(), permute_indices.end(), i) != permute_indices.end()) {
@ -1029,8 +1010,8 @@ static enum ggml_status ggml_backend_openvino_graph_compute(ggml_backend_t backe
// Process a range of nodes with openvino_frontend_compute
int start_index = i;
while (i < cgraph->n_nodes &&
// std::find(cpy_indices.begin(), cpy_indices.end(), i) == cpy_indices.end() &&
std::find(cont_indices.begin(), cont_indices.end(), i) == cont_indices.end() &&
std::find(cpy_indices.begin(), cpy_indices.end(), i) == cpy_indices.end() &&
//std::find(cont_indices.begin(), cont_indices.end(), i) == cont_indices.end() &&
std::find(mul_mat_indices.begin(), mul_mat_indices.end(), i) == mul_mat_indices.end()) {
i++;
}
@ -1270,7 +1251,7 @@ static const std::set<std::string>& openvino_ops = []() -> const std::set<std::s
switch (ggml_get_unary_op(op))
{
case GGML_UNARY_OP_SILU:
return true;
return false;
case GGML_UNARY_OP_ABS:
case GGML_UNARY_OP_SGN:
case GGML_UNARY_OP_NEG:

View File

@ -2,11 +2,18 @@
#include "openvino/core/node.hpp"
#include "openvino/frontend/decoder.hpp"
#include "openvino/op/parameter.hpp"
namespace ov {
namespace frontend {
namespace ggml {
// Define the tensor_info struct (shape and stride metadata for a tensor)
struct tensor_info {
std::vector<int> shape;
std::vector<int> stride;
};
// TODO: Directly include from openvino
class GgmlDecoder : public DecoderBase {
public:
@ -14,6 +21,8 @@ public:
virtual PartialShape get_input_shape(const std::string& name) const = 0;
virtual std::vector<size_t> get_input_stride(const std::string& name) const = 0;
virtual element::Type get_input_type(const std::string& name) const = 0;
virtual size_t get_input_size() const = 0;
@ -27,6 +36,10 @@ public:
virtual std::vector<std::string> get_input_names() const = 0;
virtual const std::string& get_node_op_name(const std::string& name) const = 0;
// virtual const struct tensor_info get_node_op_info(const std::string& name) const = 0;
virtual PartialShape get_output_shape(const std::string& name) const = 0;
virtual element::Type get_output_type(const std::string& name) const = 0;
@ -53,6 +66,8 @@ public:
virtual bool check_if_continuous() const = 0;
virtual const std::vector<std::shared_ptr<ov::op::v0::Parameter>>& get_params() const = 0;
};
} // namespace ggml

View File

@ -2,9 +2,13 @@
#include <ggml.h>
#include <ggml-impl.h>
#include <ggml-cpu-impl.h>
#include <iomanip>
#include <fstream>
void GgmlOvDecoder::set_input_output(ggml_tensor* node, std::map<std::string, ggml_tensor *>& inputs, std::map<std::string, ggml_tensor *>& outputs) {
m_node_op_name[node->name] = ggml_op_name(node->op);
std::string src0_name = std::string(node->src[0]->name) + "_" + std::to_string(node->src[0]->view_offs) + "_input_" + ggml_op_name(node->src[0]->op);
std::string node_name = std::string(node->name) + "_" + std::to_string(node->view_offs) + "_output_" + ggml_op_name(node->op);
switch (node->op) {
// Unary OPs
case GGML_OP_UNARY:
@ -16,6 +20,7 @@ void GgmlOvDecoder::set_input_output(ggml_tensor* node, std::map<std::string, gg
inputs[node->src[0]->name] = node->src[0];
outputs[node->name] = node;
m_input_names.push_back(node->src[0]->name);
m_node_op_name[node->src[0]->name] = ggml_op_name(node->op);
m_output_names.push_back(node->name);
break;
}
@ -25,76 +30,73 @@ void GgmlOvDecoder::set_input_output(ggml_tensor* node, std::map<std::string, gg
inputs[node->src[0]->name] = node->src[0];
outputs[node->name] = node;
m_input_names.push_back(node->src[0]->name);
m_node_op_name[node->src[0]->name] = ggml_op_name(node->op);
m_output_names.push_back(node->name);
m_continuous = true;
ov::Shape flat_shape = { static_cast<size_t>(ggml_nelements(node)) };
auto input_param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, flat_shape);
m_params.push_back(input_param);
break;
}
if (node->src[0]->type == node->type && node->src[0]->ne[0] == node->ne[0] &&
node->src[0]->nb[0] == ggml_type_size(node->src[0]->type) && node->nb[0] == ggml_type_size(node->src[0]->type)) {
node->src[0]->nb[0] == ggml_type_size(node->src[0]->type) &&
node->nb[0] == ggml_type_size(node->src[0]->type)) {
for (size_t i01 = 0; i01 < node->src[0]->ne[1]; ++i01) {
const char *src_row = reinterpret_cast<const char *>(node->src[0]->data) + i01 * node->src[0]->nb[1];
char *dst_row = reinterpret_cast<char *>(node->data) + i01 * node->nb[1];
std::memcpy(dst_row, src_row, node->src[0]->ne[0] * ggml_type_size(node->src[0]->type));
}
// for (size_t i01 = 0; i01 < node->src[0]->ne[1]; ++i01) {
// const char *src_row = reinterpret_cast<const char *>(node->src[0]->data) + i01 * node->src[0]->nb[1];
// char *dst_row = reinterpret_cast<char *>(node->data) + i01 * node->nb[1];
// std::memcpy(dst_row, src_row, node->src[0]->ne[0] * ggml_type_size(node->src[0]->type));
// }
inputs[node->name] = node;
inputs[node->src[0]->name] = node->src[0];
outputs[node->name] = node;
m_input_names.push_back(node->name);
m_input_names.push_back(node->src[0]->name);
m_node_op_name[node->src[0]->name] = ggml_op_name(node->op);
m_output_names.push_back(node->name);
const size_t element_size = ggml_type_size(node->src[0]->type);
size_t valid_elems = static_cast<size_t>(node->src[0]->ne[0]); // 3072
size_t num_rows = static_cast<size_t>(node->src[0]->ne[1]); // 7
size_t phys_stride = static_cast<size_t>(node->src[0]->nb[1]) / element_size; // 9216
size_t total_phys = (num_rows - 1) * phys_stride + valid_elems; // 6*9216 + 3072 = 58368
ov::Shape flat_input_shape = { total_phys };
auto flat_input_param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, flat_input_shape);
m_params.push_back(flat_input_param);
m_continuous = false;
break;
}
// if (ggml_is_contiguous(node)) {
const size_t rs = node->src[0]->ne[0] * ggml_type_size(node->src[0]->type); // Row size in bytes for dst
// Create OpenVINO tensors for source and destination
// The tensors are reshaped to a 2D structure (num_rows x ne00) for easier iteration and compatibility with the simplified loop.
ov::Tensor src_tensor(ov::element::f32,
ov::Shape{node->src[0]->ne[3] * node->src[0]->ne[2] * node->src[0]->ne[1], node->src[0]->ne[0]},
node->src[0]->data);
ov::Tensor dst_tensor(ov::element::f32,
ov::Shape{node->src[0]->ne[3] * node->src[0]->ne[2] * node->src[0]->ne[1], node->src[0]->ne[0]},
node->data);
// Perform the copy in a single loop
const size_t num_rows = node->src[0]->ne[3] * node->src[0]->ne[2] * node->src[0]->ne[1];
for (size_t row = 0; row < num_rows; ++row) {
// Calculate the source row pointer based on original strides
// The source row pointer is calculated based on the combined index row and the strides nb03, nb02, and nb01.
const char* src0_ptr = (char*)src_tensor.data() +
// Calculates which block of the i03 dimension the current row belongs to
(row / (node->src[0]->ne[2] * node->src[0]->ne[1])) * node->src[0]->nb[3] + // 0
// Calculates which block of the i02 dimension the current row belongs to within the current i03 block.
((row / node->src[0]->ne[1]) % node->src[0]->ne[2]) * node->src[0]->nb[2] + // 0, 0,......, 0,384, 384,......, 384,768,......, 2304
// Calculates the position within the current i02 block in terms of the i01 index.
(row % node->src[0]->ne[1]) * node->src[0]->nb[1]; // 0,2688,......,83328, 0, 2688,......,83328, 0,......, 83328
// Destination row pointer is linear
// Since dst is contiguous, its rows are accessed linearly using a single stride rs, simplifying the destination pointer calculation.
char* dst_ptr = (char*)dst_tensor.data() + row * rs;
// Copy row
std::memcpy(dst_ptr, src0_ptr, rs);
}
inputs[node->name] = node;
outputs[node->name] = node;
m_input_names.push_back(node->name);
m_output_names.push_back(node->name);
m_continuous = false;
break;
//}
}
case GGML_OP_CPY:
{
if (ggml_is_contiguous(node)) {
inputs[node->src[0]->name] = node->src[0];
outputs[node->name] = node;
m_input_names.push_back(node->src[0]->name);
m_node_op_name[node->src[0]->name] = ggml_op_name(node->op);
m_output_names.push_back(node->name);
size_t valid_i = static_cast<size_t>(node->src[0]->ne[0]); // 96
size_t valid_j = static_cast<size_t>(node->src[0]->ne[1]); // 32
size_t valid_k = static_cast<size_t>(node->src[0]->ne[2]); // 7
size_t total_valid = valid_i * valid_j * valid_k; // 96 * 32 * 7 = 21504
ov::Shape flat_input_shape = { total_valid };
auto input_param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, flat_input_shape);
m_params.push_back(input_param);
m_continuous = false;
break;
}
}
case GGML_OP_CPY:
{
if (ggml_is_contiguous(node)) {
inputs[src0_name] = node->src[0];
outputs[node_name] = node;
m_input_names.push_back(src0_name);
m_node_op_name[src0_name] = ggml_op_name(node->op);
m_output_names.push_back(node_name);
m_continuous = true;
break;
} else {
@ -108,12 +110,40 @@ void GgmlOvDecoder::set_input_output(ggml_tensor* node, std::map<std::string, gg
}
}
// inputs[node->src[0]->name] = node->src[0];
inputs[node->name] = node;
outputs[node->name] = node;
m_input_names.push_back(node->name);
m_output_names.push_back(node->name);
inputs[node_name] = node;
outputs[node_name] = node;
m_input_names.push_back(node_name);
m_node_op_name[node_name] = ggml_op_name(node->op);
m_output_names.push_back(node_name);
m_continuous = false;
break;
// inputs[node->src[0]->name] = node->src[0];
// std::string temp_name = node->src[0]->name + std::string("_cpy_tmp");
// inputs[temp_name] = node;
// outputs[node->name] = node;
// m_input_names.push_back(node->src[0]->name);
// m_input_names.push_back(temp_name);
// m_node_op_name[node->src[0]->name] = ggml_op_name(node->op);
// m_node_op_name[temp_name] = ggml_op_name(node->op);
// m_output_names.push_back(node->name);
// ov::Shape flat_src0_shape = {80000};
// auto param_src0 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, flat_src0_shape);
// m_params.push_back(param_src0);
// std::cout << "decoder ADDR-0: " << param_src0.get() << std::endl;
// ov::Shape flat_dst_shape = {200000, 1};
// auto param_dst_base = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, flat_dst_shape);
// m_params.push_back(param_dst_base);
// std::cout << "decoder ADDR-1: " << param_dst_base.get() << std::endl;
// m_continuous = false;
// break;
}
}
// For view, input is node itself
@ -122,49 +152,76 @@ void GgmlOvDecoder::set_input_output(ggml_tensor* node, std::map<std::string, gg
inputs[node->name] = node;
outputs[node->name] = node;
m_input_names.push_back(node->name);
m_node_op_name[node->name] = ggml_op_name(node->op);
m_output_names.push_back(node->name);
break;
}
// SCALE
case GGML_OP_SCALE:
{
inputs[node->src[0]->name] = node->src[0];
outputs[node->name] = node;
m_input_names.push_back(node->name);
m_output_names.push_back(node->name);
inputs[src0_name] = node->src[0];
outputs[node_name] = node;
m_input_names.push_back(node_name);
// m_node_op_name[node_name] = ggml_op_name(node->op);
m_output_names.push_back(node_name);
break;
}
case GGML_OP_MUL_MAT:
{
std::string src1_name = std::string(node->src[1]->name) + "_" + std::to_string(node->src[1]->view_offs) + "_input_" + ggml_op_name(node->src[1]->op);
if (!ggml_is_contiguous(node->src[1]) || node->src[1]->ne[0] * node->src[1]->nb[0] != node->src[1]->nb[1]) {
m_continuous = false;
} else {
m_continuous = true;
}
inputs[src0_name] = node->src[0];
inputs[src1_name] = node->src[1];
outputs[node_name] = node;
m_input_names.push_back(src0_name);
m_node_op_name[src0_name] = ggml_op_name(node->op);
m_input_names.push_back(src1_name);
m_node_op_name[src1_name] = ggml_op_name(node->op);
m_output_names.push_back(node_name);
break;
}
// OPs with 2 inputs
case GGML_OP_ADD:
case GGML_OP_DIV:
case GGML_OP_MUL:
case GGML_OP_MUL_MAT:
case GGML_OP_SUB:
case GGML_OP_GET_ROWS:
case GGML_OP_SOFT_MAX:
{
inputs[node->src[0]->name] = node->src[0];
outputs[node->name] = node;
m_input_names.push_back(node->src[0]->name);
m_output_names.push_back(node->name);
inputs[src0_name] = node->src[0];
outputs[node_name] = node;
m_input_names.push_back(src0_name);
m_node_op_name[src0_name] = ggml_op_name(node->op);
m_output_names.push_back(node_name);
if (node->src[1]) {
inputs[node->src[1]->name] = node->src[1];
m_input_names.push_back(node->src[1]->name);
std::string src1_name = std::string(node->src[1]->name) + "_" + std::to_string(node->src[1]->view_offs) + "_input_" + ggml_op_name(node->src[1]->op);
inputs[src1_name] = node->src[1];
m_node_op_name[src1_name] = ggml_op_name(node->op);
m_input_names.push_back(src1_name);
}
break;
}
// OPs with 3 inputs:
case GGML_OP_ROPE:
{
std::string src1_name = std::string(node->src[1]->name) + "_" + std::to_string(node->src[1]->view_offs) + "_input_" + ggml_op_name(node->src[1]->op);
inputs[node->src[0]->name] = node->src[0];
inputs[node->src[1]->name] = node->src[1];
m_input_names.push_back(node->src[0]->name);
m_node_op_name[node->src[0]->name] = ggml_op_name(node->op);
m_input_names.push_back(node->src[1]->name);
m_node_op_name[node->src[1]->name] = ggml_op_name(node->op);
outputs[node->name] = node;
m_output_names.push_back(node->name);
if (node->src[2]) {
std::string src2_name = std::string(node->src[2]->name) + "_" + std::to_string(node->src[2]->view_offs) + "_input_" + ggml_op_name(node->src[2]->op);
inputs[node->src[2]->name] = node->src[2];
m_input_names.push_back(node->src[2]->name);
m_node_op_name[node->src[2]->name] = ggml_op_name(node->op);
}
break;
}
@ -173,6 +230,77 @@ void GgmlOvDecoder::set_input_output(ggml_tensor* node, std::map<std::string, gg
}
}
void ggml_graph_op_print(const struct ggml_cgraph * cgraph) {
std::ofstream file("2_graph_node_src_op_name.txt");
if (!file.is_open()) {
std::cerr << "Failed to open file" << std::endl;
return;
}
file << "=== GRAPH ===\n";
file << "n_nodes = " << cgraph->n_nodes << "\n";
for (int i = 0; i < cgraph->n_nodes; i++) {
struct ggml_tensor * node = cgraph->nodes[i];
file << " - " << std::setw(3) << i << ": [ "
<< std::setw(5) << node->ne[0] << ", "
<< std::setw(5) << node->ne[1] << ", "
<< std::setw(5) << node->ne[2] << "] "
<< std::left << std::setw(16) << ggml_op_name(node->op) << std::right << " "
<< " " << node->name
<< ((node->flags & GGML_TENSOR_FLAG_PARAM) ? "x" : node->grad ? "g" : " ") << "\n";
if (node->src[0]) {
file << std::setw(10) << " [ "
<< std::setw(5) << node->src[0]->ne[0] << ", "
<< std::setw(5) << node->src[0]->ne[1] << ", "
<< std::setw(5) << node->src[0]->ne[2] << "] "
<< std::setw(12)
<< "0: " << ggml_op_name(node->src[0]->op) << " ";
// // Custom logic to handle '\000'
// const char* name_ptr = node->src[0]->name;
// while (*name_ptr != '\0' || *(name_ptr + 1) != '\0' || *(name_ptr + 2) != '\0') {
// file << *name_ptr;
// name_ptr++;
// }
file << node->src[0]->name;
file << "\n";
}
if (node->src[1]) {
file << std::setw(10) << " [ "
<< std::setw(5) << node->src[1]->ne[0] << ", "
<< std::setw(5) << node->src[1]->ne[1] << ", "
<< std::setw(5) << node->src[1]->ne[2] << "] "
<< std::setw(12)
<< "1: " << ggml_op_name(node->src[1]->op) << " ";
// // Custom logic to handle '\000'
// const char* name_ptr = node->src[1]->name;
// while (*name_ptr != '\0' || *(name_ptr + 1) != '\0' || *(name_ptr + 2) != '\0') {
// file << *name_ptr;
// name_ptr++;
// }
file << node->src[1]->name;
file << "\n";
}
}
file << "n_leafs = " << cgraph->n_leafs << "\n";
for (int i = 0; i < cgraph->n_leafs; i++) {
struct ggml_tensor * node = cgraph->leafs[i];
file << " - " << std::setw(3) << i << ": [ "
<< std::setw(5) << node->ne[0] << ", "
<< std::setw(5) << node->ne[1] << "] "
<< std::setw(8) << ggml_op_name(node->op) << " "
<< std::setw(16) << ggml_get_name(node) << "\n";
}
file << "========================================\n";
file.close();
}
GgmlOvDecoder::GgmlOvDecoder(struct ggml_tensor * node, struct ggml_cgraph * cgraph, const int32_t start_index, const int32_t end_index)
:m_cgraph(cgraph),
m_node(node),
@ -193,7 +321,7 @@ GgmlOvDecoder::GgmlOvDecoder(struct ggml_tensor * node, struct ggml_cgraph * cgr
set_input_output(cur_node, m_inputs, m_outputs);
}
#ifdef GGML_OPENVINO_DEBUG
ggml_graph_print(m_cgraph);
ggml_graph_op_print(m_cgraph);
#endif
}
}
@ -204,6 +332,13 @@ ov::PartialShape GgmlOvDecoder::get_input_shape(const std::string& name) const {
ggml_tensor * node = m_inputs.at(name);
std::vector<size_t> shape;
// [TODO] If the node op is MUL_MAT, flatten the shape to one dimension here
if(m_node_op_name.at(name) == "MUL_MAT") {
shape.push_back(static_cast<size_t>(node->ne[0] * node->ne[1] * node->ne[2]));
input_shape = ov::PartialShape(shape);
return input_shape;
}
for (int i = GGML_MAX_DIMS - 2; i >= 0 ; --i) {
if (node->ne[i] == 0) {
return input_shape;
@ -214,6 +349,15 @@ ov::PartialShape GgmlOvDecoder::get_input_shape(const std::string& name) const {
return input_shape;
}
std::vector<size_t> GgmlOvDecoder::get_input_stride(const std::string& name) const {
std::vector<size_t> stride;
ggml_tensor * node = m_inputs.at(name);
for (int i = GGML_MAX_DIMS - 2; i >= 0 ; --i) {
stride.push_back(static_cast<size_t>(node->nb[i]));
}
return stride;
}
ov::element::Type GgmlOvDecoder::get_input_type(const std::string& name) const {
ov::element::Type type = ov::element::dynamic;
switch (m_inputs.at(name)->type) {
@ -248,6 +392,18 @@ std::vector<std::string> GgmlOvDecoder::get_input_names() const {
return m_input_names;
}
const std::string& GgmlOvDecoder::get_node_op_name(const std::string& name) const {
auto it = m_node_op_name.find(name);
if (it != m_node_op_name.end()) {
return it->second;
}
return "";
}
const std::vector<std::shared_ptr<ov::op::v0::Parameter>>& GgmlOvDecoder::get_params() const {
return m_params;
}
ov::PartialShape GgmlOvDecoder::get_output_shape(const std::string& name) const {
ov::PartialShape output_shape;
// Use input_node->ne

View File

@ -2,6 +2,7 @@
#include "decoder.h"
#include "ggml.h"
#include "openvino/op/parameter.hpp"
class GgmlOvDecoder : public ov::frontend::ggml::GgmlDecoder {
public:
@ -16,6 +17,8 @@ public:
virtual ov::PartialShape get_input_shape(const std::string& name) const override;
virtual std::vector<size_t> get_input_stride(const std::string& name) const override;
virtual ov::element::Type get_input_type(const std::string& name) const override;
virtual size_t get_input_size() const override;
@ -66,13 +69,10 @@ public:
return m_continuous;
}
virtual const std::string& get_node_op_name(const std::string& name) const {
auto it = m_node_op_name.find(name);
if (it != m_node_op_name.end()) {
return it->second;
}
return "";
}
virtual const std::string& get_node_op_name(const std::string& name) const override;
// virtual const std::string& get_node_op_info(const std::string& name) const override;
virtual const std::vector<std::shared_ptr<ov::op::v0::Parameter>>& get_params() const override;
private:
void set_input_output(ggml_tensor* node, std::map<std::string, ggml_tensor *>& inputs, std::map<std::string, ggml_tensor *>& outputs);
@ -85,9 +85,10 @@ private:
ggml_tensor* m_node;
std::vector<ggml_tensor *> m_nodes;
std::vector<std::shared_ptr<GgmlOvDecoder>> m_decoders;
const std::string m_op_name;
std::string m_op_name;
mutable std::string m_name;
bool m_continuous;
std::map<std::string, std::string> m_node_op_name;
std::vector<std::shared_ptr<ov::op::v0::Parameter>> m_params;
};

View File

@ -13,13 +13,58 @@ std::shared_ptr<GgmlOvDecoder> get_ggml_decoder(struct ggml_cgraph * cgraph, con
std::map<std::string, ov::Tensor> get_ggml_graph_input_tensors(std::shared_ptr<GgmlOvDecoder> ggml_decoder) {
std::map<std::string, ov::Tensor> input_tensors;
auto input_names = ggml_decoder->get_input_names();
// auto node_name = ggml_decoder->get_op_name();
for (size_t inp = 0; inp < input_names.size(); ++inp) {
auto name = input_names[inp];
auto node_op_name = ggml_decoder->get_node_op_name(name);
auto input_data = ggml_decoder->get_input_ggml_tensor(name)->data;
#ifdef GGML_OPENVINO_DEBUG
printf("Subgraph input %d: %g\n", inp, *(double*)(input_data));
#endif
ov::Tensor input_tensor = ov::Tensor(ggml_decoder->get_input_type(name), ggml_decoder->get_input_shape(name).to_shape(), input_data);
ov::Tensor input_tensor;
auto input_shape = ggml_decoder->get_input_shape(name).to_shape();
// if (node_op_name == "CPY" && (input_shape[0] != 7)) {
// input_tensor = ov::Tensor(ggml_decoder->get_input_type(name), {80000}, input_data);
// } else if (node_op_name == "CONT" || node_op_name == "MUL_MAT") {
// // auto input_shape = ggml_decoder->get_input_shape(name).to_shape();
// // size_t total_size = 1;
// // for (auto dim : input_shape) {
// // total_size *= dim;
// // }
// // ov::Shape new_shape = {total_size};
// input_tensor = ov::Tensor(ggml_decoder->get_input_type(name), {ggml_decoder->get_input_shape(name).to_shape()[0]}, input_data);
// } else {
if (node_op_name == "CONT" && ggml_decoder->check_if_continuous()) {
ov::Shape flat_shape = { ggml_decoder->get_input_shape(name).to_shape()[0] *
ggml_decoder->get_input_shape(name).to_shape()[1] *
ggml_decoder->get_input_shape(name).to_shape()[2] };
input_tensor = ov::Tensor(ggml_decoder->get_input_type(name), flat_shape, input_data);
} else if ( node_op_name == "CONT" &&
!ggml_decoder->check_if_continuous() &&
input_shape[0] == 1) {
size_t valid_elems = static_cast<size_t>(ggml_decoder->get_input_shape(name).to_shape()[2]); // 3072
size_t num_rows = static_cast<size_t>(ggml_decoder->get_input_shape(name).to_shape()[1]); // 7
ov::element::Type input_type = ggml_decoder->get_input_type(name);
size_t element_size = input_type.size();
std::vector<size_t> strides = ggml_decoder->get_input_stride(name);
size_t phys_stride = static_cast<size_t>(strides[1]) / element_size;
size_t total_phys = (num_rows - 1) * phys_stride + valid_elems;
ov::Shape flat_input_shape = { total_phys };
input_tensor = ov::Tensor(ggml_decoder->get_input_type(name), flat_input_shape, input_data);
} else if (node_op_name == "CONT") {
size_t valid_i = static_cast<size_t>(ggml_decoder->get_input_shape(name).to_shape()[2]); // 96
size_t valid_j = static_cast<size_t>(ggml_decoder->get_input_shape(name).to_shape()[1]); // 32
size_t valid_k = static_cast<size_t>(ggml_decoder->get_input_shape(name).to_shape()[0]); // 7
size_t total_valid = valid_i * valid_j * valid_k; // 96 * 32 * 7 = 21504
ov::Shape flat_input_shape = { total_valid };
input_tensor = ov::Tensor(ggml_decoder->get_input_type(name), flat_input_shape, input_data);
} else {
input_tensor = ov::Tensor(ggml_decoder->get_input_type(name), ggml_decoder->get_input_shape(name).to_shape(), input_data);
}
// input_tensor = ov::Tensor(ggml_decoder->get_input_type(name), ggml_decoder->get_input_shape(name).to_shape(), input_data);
// }
input_tensors[name] = input_tensor;
}
return input_tensors;
@ -80,6 +125,8 @@ enum ggml_status openvino_frontend_compute(ggml_backend_t backend, struct ggml_c
// Convert InputModel -> ov::Model
std::shared_ptr<ov::Model> model = front_end->convert(input_model);
ov::save_model(model, "/home/user/zhan/merge_git_commits/llama.cpp-ov/001_model.xml");
if (!model) {
GGML_LOG_ERROR("Model is not converted \n");
} else {
@ -90,6 +137,7 @@ enum ggml_status openvino_frontend_compute(ggml_backend_t backend, struct ggml_c
// Loading a model to the device
ov::CompiledModel compiled_model = core.compile_model(model);
ov::save_model(compiled_model.get_runtime_model(), "/home/user/zhan/merge_git_commits/llama.cpp-ov/001_compile_model.xml");
// Create infer request
ov::InferRequest infer_request = compiled_model.create_infer_request();

2
setup.sh Executable file
View File

@ -0,0 +1,2 @@
cmake --build build --parallel $(nproc)