From 15f38432dce3aba4fa38896908cef510f47d6b19 Mon Sep 17 00:00:00 2001
From: jefyang1 <146495389+jefyang1@users.noreply.github.com>
Date: Mon, 19 May 2025 14:25:50 -0700
Subject: [PATCH] Fix example_grouped_gemm_multiple_d_xdl_fp16 on gfx950
 (#2203)

* Fix example_grouped_gemm_multiple_d_xdl_fp16 on gfx950

* Run clang format

[ROCm/composable_kernel commit: b8b12bb81e1b370d39ab7b17b0c13654a6e54721]
---
 example/15_grouped_gemm/grouped_gemm_multiple_d_xdl_fp16.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/example/15_grouped_gemm/grouped_gemm_multiple_d_xdl_fp16.cpp b/example/15_grouped_gemm/grouped_gemm_multiple_d_xdl_fp16.cpp
index db162fe444..63a2aea0b3 100644
--- a/example/15_grouped_gemm/grouped_gemm_multiple_d_xdl_fp16.cpp
+++ b/example/15_grouped_gemm/grouped_gemm_multiple_d_xdl_fp16.cpp
@@ -141,8 +141,8 @@ bool run_grouped_gemm(const ProblemSize& problem_size, const ExecutionConfig& co
     a_tensors_device.reserve(group_count);
     b_tensors_device.reserve(group_count);
-    d_tensors_device.reserve(group_count);
     c_tensors_device.reserve(group_count);
+    d_tensors_device.resize(group_count); // reserve and update vector size

     std::size_t flop = 0, num_btype = 0;
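
Note (not part of the patch): the fix hinges on the distinction between
std::vector::reserve and std::vector::resize. reserve only grows capacity
and leaves size() at 0, so subsequent indexed writes such as
d_tensors_device[i] would touch elements that do not exist, which is
undefined behavior; resize actually constructs group_count elements.
Below is a minimal standalone C++ sketch of that distinction; the vector
names are hypothetical stand-ins, not code from the example file.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main()
    {
        const std::size_t group_count = 4;

        std::vector<int> reserved;
        reserved.reserve(group_count); // capacity grows, but size() stays 0;
                                       // writing reserved[i] here would be
                                       // undefined behavior

        std::vector<int> resized;
        resized.resize(group_count); // size() becomes group_count, so indexed
                                     // writes like resized[i] are valid

        assert(reserved.size() == 0);
        assert(resized.size() == group_count);

        for(std::size_t i = 0; i < group_count; ++i)
        {
            resized[i] = static_cast<int>(i); // safe: the elements exist
        }

        return 0;
    }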