CUDA: also store node->src ne/nb for graph equality (#21736)

This commit is contained in:
Aman Gupta 2026-04-11 10:30:30 +08:00 committed by GitHub
parent b136b62cf9
commit a29e4c0b7b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 10 additions and 6 deletions

View File

@@ -1185,7 +1185,9 @@ struct ggml_cuda_graph {
     bool warmup_complete = false;
     struct node_properties {
         ggml_tensor node;
-        void * node_src_data_ptrs[GGML_MAX_SRC];
+        void * node_src_data_ptrs[GGML_MAX_SRC];
+        int64_t node_src_ne[GGML_MAX_SRC][GGML_MAX_DIMS];
+        size_t node_src_nb[GGML_MAX_SRC][GGML_MAX_DIMS];
     };
     std::vector<node_properties> node_props;

View File

@@ -3070,16 +3070,18 @@ static bool ggml_cuda_graph_update_required(ggml_backend_cuda_context * cuda_ctx
             ggml_cuda_graph::node_properties prop = {};
             memcpy(&prop.node, cgraph->nodes[i], sizeof(ggml_tensor));
             // if the backend scheduler is making copies of CPU tensors, the src pointers can be the same but with different data, see:
             // https://github.com/ggml-org/llama.cpp/pull/21472#discussion_r3052235188
             for (int j = 0; j < GGML_MAX_SRC; ++j) {
-                prop.node_src_data_ptrs[j] = cgraph->nodes[i]->src[j] ? cgraph->nodes[i]->src[j]->data : nullptr;
+                if (cgraph->nodes[i]->src[j]) {
+                    prop.node_src_data_ptrs[j] = cgraph->nodes[i]->src[j]->data;
+                    memcpy(prop.node_src_ne[j], cgraph->nodes[i]->src[j]->ne, sizeof(prop.node_src_ne[j]));
+                    memcpy(prop.node_src_nb[j], cgraph->nodes[i]->src[j]->nb, sizeof(prop.node_src_nb[j]));
+                }
             }
-            if (!res && memcmp(&graph->node_props[i], &prop, sizeof(prop)) != 0) {
+            if (res || memcmp(&graph->node_props[i], &prop, sizeof(prop)) != 0) {
+                graph->node_props[i] = prop;
                 res = true;
             }
-            graph->node_props[i] = prop;
         }
         return res;