Commit f6b533d

Vulkan Flash Attention Coopmat1 Refactor (#19075)
* vulkan: use coopmat for flash attention p*v matrix multiplication
* fix P loading issue
* fix barrier position
* remove reduction that is no longer needed
* move max thread reduction into loop
* remove osh padding
* add bounds checks and padding
* remove unused code
* fix shmem sizes, loop duration and accesses
* don't overwrite Qf, add new shared psh buffer instead
* add missing bounds checks
* use subgroup reductions
* optimize
* move bounds check, reduce barriers
* support other Bc values and other subgroup sizes
* remove D_split
* replace Of register array with shared memory Ofsh array
* parallelize HSV across the rowgroups
* go back to Of in registers, not shmem
* vectorize sfsh
* don't store entire K tile in shmem
* fixes
* load large k tiles to shmem on Nvidia
* adapt shared memory host check function to shader changes
* remove Bc 32 case
* remove unused variable
* fix missing mask reduction tmspsh barrier
* fix mask bounds check
* fix rowmax f16 under/overflow to inf
* fix flash_attn_cm2 BLOCK_SIZE preprocessor directives
1 parent: 72d3b18 · commit: f6b533d

4 files changed: +317 / -173 lines


ggml/src/ggml-vulkan/ggml-vulkan.cpp

Lines changed: 43 additions & 21 deletions
@@ -3162,17 +3162,31 @@ static void ggml_vk_load_shaders(vk_device& device) {
         // For scalar, use 128 (arbitrary)
         // The same D_split value is used for both HSK and HSV, so just base it on the union of the LSBs.
         const uint32_t D = (hsk|hsv);
-        uint32_t wg_size = (path == FA_SCALAR || path == FA_COOPMAT1)
-                           ? scalar_flash_attention_workgroup_size
-                           : ((small_rows && (D % 32) == 0) ? 256 : 128);
         auto rows_cols = fa_rows_cols(path, hsk, hsv, clamp, type, small_rows, small_cache);
 
+        uint32_t wg_size;
+        switch (path) {
+        case FA_COOPMAT2:
+            wg_size = ((small_rows && (D % 32) == 0) ? 256 : 128);
+            break;
+        case FA_COOPMAT1:
+            wg_size = (rows_cols[1] / 16) * device->subgroup_size; // enough subgroups for Bc/MatBc
+            break;
+        default:
+            wg_size = scalar_flash_attention_workgroup_size;
+            break;
+        }
+
         // D_split can't be larger than a subgroup because we use subgroupShuffle to reduce it.
         // D_split can't be larger than the LSB of D divided by 4 due to vectorization in the shader.
         const uint32_t D_lsb = D ^ (D & (D-1));
         uint32_t D_split = std::min(std::min(device->subgroup_size, 8u), D_lsb / 4);
 
-        return {wg_size, rows_cols[0], rows_cols[1], hsk, hsv, clamp, D_split};
+        // Nvidia prefers shared memory use to load large tiles of K
+        // AMD prefers loading K directly from global memory
+        const uint32_t k_load_shmem = device->vendor_id == VK_VENDOR_ID_NVIDIA ? 1 : 0;
+
+        return {wg_size, rows_cols[0], rows_cols[1], hsk, hsv, clamp, D_split, device->subgroup_size, k_load_shmem};
     };
 
 #define CREATE_FA(TYPE, NAMELC, FAPATH, SUFFIX) \
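A note on the D_lsb line kept as context above: D & (D-1) clears the lowest set bit of D, so XOR-ing the result back against D isolates that bit, and dividing by 4 accounts for the f16vec4 vectorization the comment mentions. A minimal standalone sketch of the computation, assuming hypothetical head sizes hsk=64, hsv=96 and a subgroup size of 32 (illustrative values, not taken from this commit):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    // Assumed example values, not values from the commit.
    const uint32_t hsk = 64, hsv = 96;
    const uint32_t subgroup_size = 32;
    const uint32_t D = (hsk|hsv);              // 96 = 0b1100000
    const uint32_t D_lsb = D ^ (D & (D-1));    // lowest set bit of D: 32
    const uint32_t D_split = std::min(std::min(subgroup_size, 8u), D_lsb / 4);
    std::printf("D=%u D_lsb=%u D_split=%u\n", D, D_lsb, D_split); // D=96 D_lsb=32 D_split=8
    return 0;
}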
@@ -3187,15 +3201,15 @@ static void ggml_vk_load_shaders(vk_device& device) {
         if (path == FAPATH) { \
             if (aligned) { \
                 if (f32acc) { \
-                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_aligned_f32acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,0,TYPE,small_rows,small_cache), fa_spec_constants(FAPATH, HSK,HSV,0,TYPE,small_rows,small_cache), fa_align(FAPATH,HSK,HSV,TYPE,small_rows,small_cache), true, FAPATH==FA_COOPMAT1, (FAPATH==FA_COOPMAT1 ? 32 : 0)); \
+                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_aligned_f32acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,0,TYPE,small_rows,small_cache), fa_spec_constants(FAPATH, HSK,HSV,0,TYPE,small_rows,small_cache), fa_align(FAPATH,HSK,HSV,TYPE,small_rows,small_cache), true, FAPATH==FA_COOPMAT1, (FAPATH==FA_COOPMAT1 ? device->subgroup_size : 0)); \
                 } else { \
-                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_aligned_f16acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,0,TYPE,small_rows,small_cache), fa_spec_constants(FAPATH, HSK,HSV,0,TYPE,small_rows,small_cache), fa_align(FAPATH,HSK,HSV,TYPE,small_rows,small_cache), true, FAPATH==FA_COOPMAT1, (FAPATH==FA_COOPMAT1 ? 32 : 0)); \
+                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_aligned_f16acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,0,TYPE,small_rows,small_cache), fa_spec_constants(FAPATH, HSK,HSV,0,TYPE,small_rows,small_cache), fa_align(FAPATH,HSK,HSV,TYPE,small_rows,small_cache), true, FAPATH==FA_COOPMAT1, (FAPATH==FA_COOPMAT1 ? device->subgroup_size : 0)); \
                 } \
             } else { \
                 if (f32acc) { \
-                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_f32acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,1,TYPE,small_rows,small_cache), fa_spec_constants(FAPATH, HSK,HSV,1,TYPE,small_rows,small_cache), 1, true, FAPATH==FA_COOPMAT1, (FAPATH==FA_COOPMAT1 ? 32 : 0)); \
+                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_f32acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,1,TYPE,small_rows,small_cache), fa_spec_constants(FAPATH, HSK,HSV,1,TYPE,small_rows,small_cache), 1, true, FAPATH==FA_COOPMAT1, (FAPATH==FA_COOPMAT1 ? device->subgroup_size : 0)); \
                 } else { \
-                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_f16acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,1,TYPE,small_rows,small_cache), fa_spec_constants(FAPATH, HSK,HSV,1,TYPE,small_rows,small_cache), 1, true, FAPATH==FA_COOPMAT1, (FAPATH==FA_COOPMAT1 ? 32 : 0)); \
+                    ggml_vk_create_pipeline(device, fa.second, "flash_attn_f32_f16_f16acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _len, flash_attn_f32_f16_ ## NAMELC ## _f16acc ## SUFFIX ## _data, "main", 6, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(FAPATH, HSK,HSV,1,TYPE,small_rows,small_cache), fa_spec_constants(FAPATH, HSK,HSV,1,TYPE,small_rows,small_cache), 1, true, FAPATH==FA_COOPMAT1, (FAPATH==FA_COOPMAT1 ? device->subgroup_size : 0)); \
                 } \
             } \
         } \
@@ -8344,41 +8358,49 @@ static bool ggml_vk_flash_attn_scalar_shmem_support(const vk_device& device, con
     const uint32_t total_size = tmpsh + tmpshv4 + masksh + Qf;
     const bool supported = total_size <= device->properties.limits.maxComputeSharedMemorySize;
 
-    VK_LOG_DEBUG("ggml_vk_flash_attn_coopmat_shmem_support(HSK=" << hsk << ", HSV=" << hsv << ", total_size=" << total_size << ", supported=" << supported);
+    VK_LOG_DEBUG("ggml_vk_flash_attn_scalar_shmem_support(HSK=" << hsk << ", HSV=" << hsv << ", total_size=" << total_size << ", supported=" << supported);
 
     return supported;
 }
 
-static bool ggml_vk_flash_attn_coopmat_shmem_support(const vk_device& device, const uint32_t hsk, uint32_t hsv, bool f32acc) {
+static bool ggml_vk_flash_attn_coopmat_shmem_support(const vk_device& device, const uint32_t hsk, uint32_t hsv, bool f32acc, ggml_type kv_type) {
     // Needs to be kept up to date on shader changes
     GGML_UNUSED(hsv);
-    const uint32_t wg_size = scalar_flash_attention_workgroup_size;
-    const uint32_t Br = coopmat1_flash_attention_num_large_rows;
-    const uint32_t Bc = scalar_flash_attention_Bc;
+    const auto rows_cols = fa_rows_cols(FA_COOPMAT1, hsk, hsv, 0, kv_type, false, false);
+    const uint32_t Br = rows_cols[0];
+    const uint32_t Bc = rows_cols[1];
+
+    const uint32_t MatBr = 16, MatBc = 16;
+
+    const uint32_t row_split = Bc / MatBc;
 
     const uint32_t hsk_pad = ROUNDUP_POW2(hsk, 16);
 
     const uint32_t acctype = f32acc ? 4 : 2;
     const uint32_t f16vec4 = 8;
 
-    const uint32_t tmpsh = wg_size * sizeof(float);
-    const uint32_t tmpshv4 = wg_size * 4 * acctype;
+    const uint32_t tmpsh = (Bc / MatBc) * sizeof(float);
 
     const uint32_t qstride = hsk_pad / 4 + 2;
     const uint32_t Qf = Br * qstride * f16vec4;
 
+    const uint32_t psh_stride = Br / 4 + 2;
+    const uint32_t Psh = Bc * psh_stride * f16vec4;
+
     const uint32_t sfshstride = (hsk <= 128) ? (Br + 8) : Br;
     const uint32_t sfsh = Bc * sfshstride * acctype;
 
-    const uint32_t kshstride = hsk_pad / 4 + 2;
-    const uint32_t ksh = Bc * kshstride * f16vec4;
+    const bool k_load_shmem = device->vendor_id == VK_VENDOR_ID_NVIDIA;
+    const uint32_t kshstride = (k_load_shmem ? hsk_pad : MatBr) / 4 + 2;
+    const uint32_t vsh_stride = MatBc / 4 * row_split;
+    const uint32_t ksh = ((kshstride >= vsh_stride) ? (Bc * kshstride) : (Bc * vsh_stride)) * f16vec4;
 
-    const uint32_t slope = Br * sizeof(float);
+    const uint32_t slope = Br * acctype;
 
-    const uint32_t total_size = tmpsh + tmpshv4 + Qf + sfsh + ksh + slope;
+    const uint32_t total_size = tmpsh + Qf + Psh + sfsh + ksh + slope;
     const bool supported = total_size <= device->properties.limits.maxComputeSharedMemorySize;
 
-    VK_LOG_DEBUG("ggml_vk_flash_attn_coopmat_shmem_support(HSK=" << hsk << ", HSV=" << hsv << ", f32acc=" << f32acc << ", total_size=" << total_size << ", supported=" << supported);
+    VK_LOG_DEBUG("ggml_vk_flash_attn_coopmat_shmem_support(HSK=" << hsk << ", HSV=" << hsv << ", f32acc=" << f32acc << ", kv_type=" << kv_type << ", total_size=" << total_size << ", supported=" << supported);
 
     return supported;
 }
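To sanity-check how the new budget adds up, the sketch below re-computes total_size outside the backend. Br=32, Bc=64, hsk=128, f16 accumulation and the Nvidia K-in-shmem path are assumptions chosen for illustration; the real Br and Bc come from fa_rows_cols:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    // All tile parameters below are assumed example values.
    const uint32_t hsk = 128, Br = 32, Bc = 64;
    const uint32_t MatBr = 16, MatBc = 16;
    const uint32_t acctype = 2, f16vec4 = 8;   // f16 accumulators, sizeof(f16vec4)
    const bool     k_load_shmem = true;        // assumed Nvidia path

    const uint32_t hsk_pad    = (hsk + 15) & ~15u;
    const uint32_t row_split  = Bc / MatBc;
    const uint32_t tmpsh      = (Bc / MatBc) * sizeof(float);
    const uint32_t Qf         = Br * (hsk_pad / 4 + 2) * f16vec4;
    const uint32_t Psh        = Bc * (Br / 4 + 2) * f16vec4;
    const uint32_t sfsh       = Bc * ((hsk <= 128) ? (Br + 8) : Br) * acctype;
    const uint32_t kshstride  = (k_load_shmem ? hsk_pad : MatBr) / 4 + 2;
    const uint32_t vsh_stride = MatBc / 4 * row_split;
    const uint32_t ksh        = std::max(Bc * kshstride, Bc * vsh_stride) * f16vec4;
    const uint32_t slope      = Br * acctype;

    const uint32_t total = tmpsh + Qf + Psh + sfsh + ksh + slope; // 36432 for these values
    std::printf("total=%u bytes, fits 48 KiB: %d\n", total, total <= 48u * 1024);
    return 0;
}

With these assumed numbers the budget is dominated by the K/V staging buffer (ksh) and the Q tile (Qf), so the host check is most sensitive to the kshstride choice between hsk_pad and MatBr.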
@@ -8442,7 +8464,7 @@ static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx
     const bool coopmat_shape_supported = (dst->op_params[3] == GGML_PREC_F32 && ctx->device->coopmat_support_16x16x16_f32acc) ||
                                          (dst->op_params[3] != GGML_PREC_F32 && ctx->device->coopmat_support_16x16x16_f16acc);
 
-    const bool coopmat_shmem_supported = ggml_vk_flash_attn_coopmat_shmem_support(ctx->device, HSK, HSV, dst->op_params[3] == GGML_PREC_F32);
+    const bool coopmat_shmem_supported = ggml_vk_flash_attn_coopmat_shmem_support(ctx->device, HSK, HSV, dst->op_params[3] == GGML_PREC_F32, k->type);
 
     if (!coopmat_shape_supported || !coopmat_shmem_supported) {
         path = FA_SCALAR;

ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_base.glsl

Lines changed: 6 additions & 0 deletions
@@ -8,6 +8,8 @@ layout (constant_id = 3) const uint32_t HSK = 32;
 layout (constant_id = 4) const uint32_t HSV = 32;
 layout (constant_id = 5) const uint32_t Clamp = 0;
 layout (constant_id = 6) const uint32_t D_split = 16;
+layout (constant_id = 7) const uint32_t SubGroupSize = 32;
+layout (constant_id = 8) const uint32_t K_LOAD_SHMEM = 0;
 
 // Round up head sizes to a multiple of 16, for coopmat1/coopmat2 paths
 const uint32_t HSK_pad = (HSK + 15) & ~15;
@@ -74,6 +76,10 @@ layout (binding = 1) readonly buffer K_PACKED16 {A_TYPE_PACKED16 k_data_packed16
 layout (binding = 2) readonly buffer V_PACKED16 {A_TYPE_PACKED16 v_data_packed16[];} v_packed;
 #endif
 
+#ifndef BLOCK_SIZE
+#define BLOCK_SIZE 1
+#endif
+
 #if defined(DATA_A_F32)
 #undef BLOCK_SIZE
 #define BLOCK_SIZE 4
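On the host side these two new constants are ordinary Vulkan specialization constants; the spec-constant initializer shown in the first hunk now returns nine values, matching constant_ids 0 through 8. A hedged sketch of the underlying mechanism (FaSpecData and make_spec_info are illustrative helpers, not ggml code, and only the two new entries are shown):

#include <vulkan/vulkan.h>
#include <cstddef>
#include <cstdint>

struct FaSpecData {
    uint32_t subgroup_size; // constant_id = 7 (SubGroupSize)
    uint32_t k_load_shmem;  // constant_id = 8 (K_LOAD_SHMEM)
};

VkSpecializationInfo make_spec_info(const FaSpecData &data,
                                    VkSpecializationMapEntry (&entries)[2]) {
    entries[0] = {7, offsetof(FaSpecData, subgroup_size), sizeof(uint32_t)};
    entries[1] = {8, offsetof(FaSpecData, k_load_shmem),  sizeof(uint32_t)};

    VkSpecializationInfo info = {};
    info.mapEntryCount = 2;
    info.pMapEntries   = entries;
    info.dataSize      = sizeof(FaSpecData);
    info.pData         = &data;   // consumed by VkPipelineShaderStageCreateInfo::pSpecializationInfo
    return info;
}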
