From 7c31a5d52ea6bd3904fb92ebde6994ce54f09f7c Mon Sep 17 00:00:00 2001 From: Vivek Trivedi <5340687+trivedivivek@users.noreply.github.com> Date: Fri, 3 Jan 2025 12:35:58 -0800 Subject: [PATCH 1/6] [ET-VK] Reduced int precision for all int storage in conv pw op to improve performance. Pull Request resolved: https://github.com/pytorch/executorch/pull/7447 This diff reduces the precision of all int storage in the conv pw op to improve performance. The code changes include adding the extension GL_EXT_shader_explicit_arithmetic_types_int16 and changing the data type of ints to uint16. ghstack-source-id: 260166244 @exported-using-ghexport Differential Revision: [D67674212](https://our.internmc.facebook.com/intern/diff/D67674212/) --- .../graph/ops/glsl/conv2d_dw_output_tile.glsl | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/backends/vulkan/runtime/graph/ops/glsl/conv2d_dw_output_tile.glsl b/backends/vulkan/runtime/graph/ops/glsl/conv2d_dw_output_tile.glsl index 21760eca0e0..57ae98eb85b 100644 --- a/backends/vulkan/runtime/graph/ops/glsl/conv2d_dw_output_tile.glsl +++ b/backends/vulkan/runtime/graph/ops/glsl/conv2d_dw_output_tile.glsl @@ -32,12 +32,14 @@ ${layout_declare_ubo(8, "float", "out_min", "float", "out_max")} layout(local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in; +#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require + /* * Computes a depthwise convolution. Each shader invocation calculates the * output at a single output location. */ void main() { - const ivec3 pos = ivec3(gl_GlobalInvocationID); + const u16vec3 pos = u16vec3(gl_GlobalInvocationID); if (any(greaterThanEqual(pos, out_limits))) { return; @@ -45,22 +47,22 @@ void main() { // Compute the index of the top-left element of the overlay region. Negative // indices indicate that the top-left element is in a region added by padding. 
- const ivec2 ipos = pos.xy * stride - padding; + const u16vec2 ipos = pos.xy * u16vec2(stride) - u16vec2(padding); // Compute the start and end of the input indices to load. Padding is assumed // to be constant 0 padding, so any reads from the padding region is skipped. - const ivec2 start = ipos; - const ivec2 end = ipos + overlay_region.xy; + const u16vec2 start = ipos; + const u16vec2 end = ipos + u16vec2(overlay_region.xy); - VEC4_T sum = texelFetch(t_bias, ivec2(pos.z, 0), 0); - int kx = 0; - for (int y = start.y, i = 0; i < TILE_SIZE; y += dilation.y, i++) { - for (int x = start.x, j = 0; j < TILE_SIZE; x += dilation.x, j++) { + VEC4_T sum = texelFetch(t_bias, u16vec2(pos.z, 0), 0); + uint16_t kx = uint16_t(0); + for (uint16_t y = start.y, i = uint16_t(0); i < uint16_t(TILE_SIZE); y += uint16_t(dilation.y), i++) { + for (uint16_t x = start.x, j = uint16_t(0); j < uint16_t(TILE_SIZE); x += uint16_t(dilation.x), j++) { // The weight kernel was rearranged such that every NxN filter is // flattened to fit in one row. Each filter was then stacked on top of // each other vertically. - const vec4 in_texel = texelFetch(t_in, ivec3(x, y, pos.z), 0); - sum = fma(in_texel, texelFetch(t_kernel, ivec2(kx, pos.z), 0), sum); + const vec4 in_texel = texelFetch(t_in, u16vec3(x, y, pos.z), 0); + sum = fma(in_texel, texelFetch(t_kernel, u16vec2(kx, pos.z), 0), sum); kx++; } } From f139e39a81cd3b5be809fa7689ca57a140f2c516 Mon Sep 17 00:00:00 2001 From: Vivek Trivedi <5340687+trivedivivek@users.noreply.github.com> Date: Fri, 3 Jan 2025 12:35:59 -0800 Subject: [PATCH 2/6] [ET-VK] Minor fix to conv 2d op using wg_size from create_conv2d_global_wg_size to determine local wg size. Pull Request resolved: https://github.com/pytorch/executorch/pull/7450 This diff contains changes to the Convolution.cpp file in the Vulkan backend of Executorch. 
The changes involve updating the code to use the create_conv2d_global_wg_size function to determine the local workgroup size for the convolution operation. This is done to ensure that the correct workgroup size is used for the operation, which can improve performance. ghstack-source-id: 260166246 @exported-using-ghexport Differential Revision: [D67676422](https://our.internmc.facebook.com/intern/diff/D67676422/) --- backends/vulkan/runtime/graph/ops/impl/Convolution.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp b/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp index 1cdd7315f16..6e9adf7d5a2 100644 --- a/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp +++ b/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp @@ -370,11 +370,13 @@ void add_conv2d_node( weight_data, clamp_out); + utils::uvec3 wg_size = create_conv2d_global_wg_size(graph, method, out); + graph.execute_nodes().emplace_back(new DispatchNode( graph, shader, - create_conv2d_global_wg_size(graph, method, out), - graph.create_local_wg_size(out), + wg_size, + graph.create_local_wg_size(wg_size), // Inputs and Outputs {{out, vkapi::MemoryAccessType::WRITE}, {{in, arg_weight, arg_bias}, vkapi::MemoryAccessType::READ}}, From 5b1c98046a8975ffffc011bf3ea99dc6157467cf Mon Sep 17 00:00:00 2001 From: Vivek Trivedi <5340687+trivedivivek@users.noreply.github.com> Date: Fri, 3 Jan 2025 12:36:00 -0800 Subject: [PATCH 3/6] [ET-VK] Modify conv 2d pw op shader and dispatch settings to linearly dispatch work accounting for linearity texture to improve performance. Pull Request resolved: https://github.com/pytorch/executorch/pull/7452 This diff modifies the convolution 2D pointwise op shader and dispatch settings to linearly dispatch work accounting for linearity texture to improve performance. 
ghstack-source-id: 260166247 @exported-using-ghexport Differential Revision: [D67683411](https://our.internmc.facebook.com/intern/diff/D67683411/) --- backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl | 7 ++++++- backends/vulkan/runtime/graph/ops/impl/Convolution.cpp | 4 ++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl b/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl index b1950f970e4..9d1f6c3bd91 100644 --- a/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl +++ b/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl @@ -40,7 +40,12 @@ layout(local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in; * size is only 1x1, making it easier to re-use loaded texels from t_kernel. */ void main() { - const u16vec3 gpos = u16vec3(gl_GlobalInvocationID); + const uint16_t out_limits_y_scaled = uint16_t((out_limits.y + TILE_SIZE - 1) / TILE_SIZE); + + const u16vec3 gpos = u16vec3( + gl_GlobalInvocationID.x / (out_limits_y_scaled * out_limits.z), + (gl_GlobalInvocationID.x / out_limits.z) % out_limits_y_scaled, + gl_GlobalInvocationID.x % out_limits.z); // Output position for TILE_SIZE = 2 // +--------+--------+ diff --git a/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp b/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp index 6e9adf7d5a2..4f123cb8337 100644 --- a/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp +++ b/backends/vulkan/runtime/graph/ops/impl/Convolution.cpp @@ -372,6 +372,10 @@ void add_conv2d_node( utils::uvec3 wg_size = create_conv2d_global_wg_size(graph, method, out); + if (method == Conv2dMethod::Pointwise) { + wg_size = {wg_size[0] * wg_size[1] * wg_size[2], 1, 1}; + } + graph.execute_nodes().emplace_back(new DispatchNode( graph, shader, From c913e17af0cf9bbfd5d13a6a10adda39f3b6f223 Mon Sep 17 00:00:00 2001 From: Vivek Trivedi <5340687+trivedivivek@users.noreply.github.com> Date: Fri, 3 Jan 2025 12:36:02 -0800 Subject: [PATCH 4/6] [ET-VK] 
Using vec2 to store output positions to reduce shader register footprint. Pull Request resolved: https://github.com/pytorch/executorch/pull/7474 The diff changes the use of `u16vec3` to `u16vec2` to store output positions in the conv2d_pw op. This change is made to reduce the shader register footprint and improve performance. ghstack-source-id: 260166245 @exported-using-ghexport Differential Revision: [D67726229](https://our.internmc.facebook.com/intern/diff/D67726229/) --- .../vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl b/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl index 9d1f6c3bd91..af7c22bb5ad 100644 --- a/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl +++ b/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl @@ -53,18 +53,18 @@ void main() { // +--------+--------+ // | pos[2] | pos[3] | // +--------+--------+ - u16vec3 pos[TILE_SIZE * TILE_SIZE]; + u16vec2 pos[TILE_SIZE * TILE_SIZE]; for (int y = 0, i = 0; y < TILE_SIZE; ++y) { for (int x = 0; x < TILE_SIZE; ++x) { - pos[i] = u16vec3( - gpos.x * TILE_SIZE + x, gpos.y * TILE_SIZE + y, gpos.z); + pos[i] = u16vec2( + gpos.x * TILE_SIZE + x, gpos.y * TILE_SIZE + y); i++; } } // If the top left position is out of bounds, then this invocation will have // no work to do. 
- if (any(greaterThanEqual(pos[0], out_limits))) { + if (any(greaterThanEqual(u16vec3(pos[0], gpos.z), out_limits))) { return; } @@ -138,8 +138,8 @@ void main() { } for (int i = 0; i < TILE_SIZE * TILE_SIZE; ++i) { - if (all(lessThan(pos[i], out_limits))) { - imageStore(t_out, pos[i], op(sum[i], out_min, out_max)); + if (all(lessThan(u16vec3(pos[i], gpos.z), out_limits))) { + imageStore(t_out, u16vec3(pos[i], gpos.z), op(sum[i], out_min, out_max)); } } } From c80549963e85343958819947bb48ff4cfeb1c4db Mon Sep 17 00:00:00 2001 From: Vivek Trivedi <5340687+trivedivivek@users.noreply.github.com> Date: Fri, 3 Jan 2025 12:36:03 -0800 Subject: [PATCH 5/6] [ET-VK] Using shared variable to store calculated output position to free up registers and improve performance. Pull Request resolved: https://github.com/pytorch/executorch/pull/7475 This diff introduces a shared variable to store calculated output position in conv2d_pw op to free up registers and improve performance. The code changes include adding a shared variable to hold calculated positions and modifying the existing code to use the shared variable. ghstack-source-id: 260166242 Differential Revision: [D67742567](https://our.internmc.facebook.com/intern/diff/D67742567/) --- .../vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl b/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl index af7c22bb5ad..2393ed33450 100644 --- a/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl +++ b/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl @@ -34,6 +34,9 @@ layout(local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in; #extension GL_EXT_shader_explicit_arithmetic_types_int16 : require +// shared memory to hold calculated positions, this would reduce register usage thus improving performance. 
+shared u16vec2 pos_shared[gl_WorkGroupSize.x * gl_WorkGroupSize.y * gl_WorkGroupSize.z * TILE_SIZE * TILE_SIZE]; + /* * Computes a 2D pointwise convolution of an NxN output tile. Calculating an * output tile for pointwise convolution is more efficient because the kernel @@ -41,6 +44,7 @@ layout(local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in; */ void main() { const uint16_t out_limits_y_scaled = uint16_t((out_limits.y + TILE_SIZE - 1) / TILE_SIZE); + const uint shared_mem_stride = gl_WorkGroupSize.x * gl_WorkGroupSize.y * gl_WorkGroupSize.z; const u16vec3 gpos = u16vec3( gl_GlobalInvocationID.x / (out_limits_y_scaled * out_limits.z), @@ -58,6 +62,7 @@ void main() { for (int x = 0; x < TILE_SIZE; ++x) { pos[i] = u16vec2( gpos.x * TILE_SIZE + x, gpos.y * TILE_SIZE + y); + pos_shared[(shared_mem_stride * i) + gl_LocalInvocationIndex] = pos[i]; i++; } } @@ -73,7 +78,7 @@ void main() { // the top-left element is in a region added by padding. u16vec2 ipos[TILE_SIZE * TILE_SIZE]; for (int i = 0; i < TILE_SIZE * TILE_SIZE; ++i) { - ipos[i] = pos[i].xy * u16vec2(stride) - u16vec2(padding); + ipos[i] = pos[i] * u16vec2(stride) - u16vec2(padding); } vec4 sum[TILE_SIZE * TILE_SIZE]; @@ -138,8 +143,9 @@ void main() { } for (int i = 0; i < TILE_SIZE * TILE_SIZE; ++i) { - if (all(lessThan(u16vec3(pos[i], gpos.z), out_limits))) { - imageStore(t_out, u16vec3(pos[i], gpos.z), op(sum[i], out_min, out_max)); + const u16vec2 pos = pos_shared[(shared_mem_stride * i) + gl_LocalInvocationIndex]; + if (all(lessThan(u16vec3(pos, gpos.z), out_limits))) { + imageStore(t_out, u16vec3(pos, gpos.z), op(sum[i], out_min, out_max)); } } } From 1366dbd7c2063ec627bc60b9bfdeb0a62d082326 Mon Sep 17 00:00:00 2001 From: Vivek Trivedi <5340687+trivedivivek@users.noreply.github.com> Date: Fri, 3 Jan 2025 12:36:04 -0800 Subject: [PATCH 6/6] [ET-VK] Changing texture access pattern for conv2d pw op to improve performance. 
Pull Request resolved: https://github.com/pytorch/executorch/pull/7476 This diff changes the texture access pattern for conv2d pw op to iterate first on x axis then y and then z to improve performance. ghstack-source-id: 260166241 @exported-using-ghexport Differential Revision: [D67769100](https://our.internmc.facebook.com/intern/diff/D67769100/) --- backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl b/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl index 2393ed33450..23ad912c11a 100644 --- a/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl +++ b/backends/vulkan/runtime/graph/ops/glsl/conv2d_pw.glsl @@ -43,13 +43,13 @@ shared u16vec2 pos_shared[gl_WorkGroupSize.x * gl_WorkGroupSize.y * gl_WorkGroup * size is only 1x1, making it easier to re-use loaded texels from t_kernel. */ void main() { - const uint16_t out_limits_y_scaled = uint16_t((out_limits.y + TILE_SIZE - 1) / TILE_SIZE); + const uvec2 out_limits_scaled = (out_limits.xy + TILE_SIZE - 1) / TILE_SIZE; const uint shared_mem_stride = gl_WorkGroupSize.x * gl_WorkGroupSize.y * gl_WorkGroupSize.z; const u16vec3 gpos = u16vec3( - gl_GlobalInvocationID.x / (out_limits_y_scaled * out_limits.z), - (gl_GlobalInvocationID.x / out_limits.z) % out_limits_y_scaled, - gl_GlobalInvocationID.x % out_limits.z); + gl_GlobalInvocationID.x % out_limits_scaled.x, + (gl_GlobalInvocationID.x / out_limits_scaled.x) % out_limits_scaled.y, + gl_GlobalInvocationID.x / (out_limits_scaled.x * out_limits_scaled.y)); // Output position for TILE_SIZE = 2 // +--------+--------+