Enable GDN also for prefill; move TODO for chunked GDN

This commit is contained in:
Oliver Simons 2026-03-11 15:52:23 +01:00
parent 1623bbc906
commit 2068908975
2 changed files with 2 additions and 2 deletions

View File

@ -145,7 +145,7 @@ static void launch_gated_delta_net(
int64_t sb1, int64_t sb2, int64_t sb3,
int64_t neqk1, int64_t rq3,
float scale, cudaStream_t stream) {
//TODO: Add chunked kernel for even faster pre-fill
constexpr uint32_t warp_size = ggml_cuda_get_physical_warp_size();
const int num_warps = 4;
dim3 grid_dims(H, n_seqs, (S_v + num_warps - 1) / num_warps);

View File

@ -5001,7 +5001,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
#else
// KDA is faster using the AR kernel even when n_tokens >= 512
//TODO: Add chunked kernel
return op->src[0]->ne[2] == 1 || op->src[3]->ne[0] == op->src[2]->ne[0];
return true;
#endif // GGML_USE_MUSA
case GGML_OP_FLASH_ATTN_EXT:
return ggml_cuda_flash_attn_ext_supported(dev_ctx->device, op);