- [ ] I have added inline documentation which enables the maintainers to understand the motivation
--- 🔁 Imported from [ROCm/composable_kernel#3553](https://github.com/ROCm/composable_kernel/pull/3553) 🧑‍💻 Originally authored by @chinmaydk99 Co-authored-by: Chinmay_Kuchinad --- example/ck_tile/01_fmha/codegen/ops/fmha_fwd.py | 1 - 1 file changed, 1 deletion(-) diff --git a/example/ck_tile/01_fmha/codegen/ops/fmha_fwd.py b/example/ck_tile/01_fmha/codegen/ops/fmha_fwd.py index b59f442663..f9301878c4 100644 --- a/example/ck_tile/01_fmha/codegen/ops/fmha_fwd.py +++ b/example/ck_tile/01_fmha/codegen/ops/fmha_fwd.py @@ -1219,7 +1219,6 @@ def get_product(receipt: int) -> Product: cond &= kernel_ctx.pipeline.F_vlayout == "row" cond &= kernel_ctx.pipeline.F_bias in ["no", "bias"] cond &= kernel_ctx.pipeline.F_qscale == "no" - cond &= problem_ctx.mode == "batch" cond &= kernel_ctx.pipeline.F_skip == "f" cond &= kernel_ctx.pipeline.F_logits == "f" return cond