Skip to content

Commit 2d66762

Browse files
trivedivivek authored and
facebook-github-bot committed
Modifying slice op to support all tensor packing. (#9030)
Summary: This diff updates Executorch Vulkan backend's `slice_copy` operation to support width, height and channel packed tensors. It also updates the `op_registry.py` file to register the `slice_copy` operation and adds a new test case to the `cases.py` file to test the operation. Additionally, it updates the `Slice.cpp` file to check for the same packed dimension in the input and output tensors, and updates the `cases.py` file to include the `utils::kWidthPacked`, `utils::kHeightPacked`, and `utils::kChannelsPacked` layouts. Reviewed By: SS-JIA Differential Revision: D70559149
1 parent 7e1ce52 commit 2d66762

File tree

5 files changed

+63
-25
lines changed

5 files changed

+63
-25
lines changed

backends/vulkan/op_registry.py

+12-1
Original file line number | Diff line number | Diff line change
@@ -530,7 +530,6 @@ def register_view_op(features: OpFeatures):
530530
exir_ops.edge.aten.flip.default,
531531
exir_ops.edge.aten.index_select.default,
532532
exir_ops.edge.aten.select_copy.int,
533-
exir_ops.edge.aten.slice_copy.Tensor,
534533
# Tensor combination
535534
exir_ops.edge.aten.cat.default,
536535
exir_ops.edge.aten.split_with_sizes_copy.default,
@@ -556,6 +555,18 @@ def register_ported_op(features: OpFeatures):
556555
)
557556
return features
558557

558+
@update_features(
559+
[
560+
# Indexing and lookup
561+
exir_ops.edge.aten.slice_copy.Tensor,
562+
]
563+
)
564+
def register_ported_op(features: OpFeatures):
565+
features.texture_impl = TextureImplFeatures(
566+
valid_packed_dims=all_packed_dims,
567+
)
568+
return features
569+
559570

560571
# Ported ops that support their own prepacking.
561572
@update_features(

backends/vulkan/runtime/graph/ops/glsl/slice_batch_height_width.glsl

+19-7
Original file line number | Diff line number | Diff line change
@@ -27,8 +27,7 @@ layout(set = 0, binding = 3) uniform PRECISION restrict SliceArg {
2727
int dim;
2828
int offset;
2929
int step;
30-
// Used when dim=batch. Stride is the # of plances for each batch value.
31-
int stride;
30+
int image_in_channel_size;
3231
}
3332
slice_arg;
3433

@@ -45,11 +44,24 @@ void main() {
4544

4645
ivec3 in_pos = pos;
4746

48-
int index = pos[slice_arg.dim] / slice_arg.stride;
49-
int within_stride = pos[slice_arg.dim] % slice_arg.stride;
50-
51-
in_pos[slice_arg.dim] = slice_arg.offset * slice_arg.stride + index * slice_arg.step *
52-
slice_arg.stride + within_stride;
47+
// slice along batch axis
48+
if (slice_arg.dim == 3) {
49+
// index of the channel inside a batch
50+
const int chanl_index = pos.z % slice_arg.image_in_channel_size;
51+
// index of batch
52+
const int batch_index = pos.z / slice_arg.image_in_channel_size;
53+
in_pos.z = (slice_arg.offset + batch_index * slice_arg.step) * slice_arg.image_in_channel_size + chanl_index;
54+
} else if (slice_arg.dim == C_DIM) {
55+
// index of the channel inside a batch
56+
const int chanl_index = pos.z % sizes.z;
57+
// index of batch
58+
const int batch_index = pos.z / sizes.z;
59+
in_pos.z = slice_arg.offset + batch_index * slice_arg.image_in_channel_size + chanl_index * slice_arg.step;
60+
} else if (slice_arg.dim == H_DIM) {
61+
in_pos.y = slice_arg.offset + pos.y * slice_arg.step;
62+
} else {
63+
in_pos.x = slice_arg.offset + pos.x * slice_arg.step;
64+
}
5365

5466
imageStore(image_out, pos, texelFetch(image_in, in_pos, 0));
5567

backends/vulkan/runtime/graph/ops/glsl/slice_channel.glsl

+2-2
Original file line number | Diff line number | Diff line change
@@ -49,10 +49,10 @@ void main() {
4949
for (int i=0;i<4;i++) {
5050
ivec4 user_coor = nchwi_to_tidx(buf_indices[i], out_sizes);
5151

52-
int in_channel = user_coor.z;
52+
int in_dim = user_coor[packed_dim];
5353

5454
ivec4 in_user_coor = user_coor;
55-
in_user_coor.z = slice_arg.offset + in_channel * slice_arg.step;
55+
in_user_coor[packed_dim] = slice_arg.offset + in_dim * slice_arg.step;
5656

5757
ivec4 in_pow_elem = to_texture_elem_pos(
5858
in_user_coor,

backends/vulkan/runtime/graph/ops/impl/Slice.cpp

+25-14
Original file line number | Diff line number | Diff line change
@@ -44,8 +44,7 @@ void add_slice_tensor_copy_node(
4444
vTensorPtr t_in = graph.get_tensor(in);
4545
vTensorPtr t_out = graph.get_tensor(out);
4646

47-
VK_CHECK_COND(check_packed_dim_is(*t_in, WHCN::kChannelsDim));
48-
VK_CHECK_COND(check_packed_dim_is(*t_out, WHCN::kChannelsDim));
47+
VK_CHECK_COND(check_same_packed_dim(*t_in, *t_out));
4948

5049
// Need normalize the dim
5150
int64_t dim = graph.extract_scalar<int64_t>(dim_ref);
@@ -76,7 +75,13 @@ void add_slice_tensor_copy_node(
7675
start = normalize_idx(start, in_sizes[dim], 0);
7776
end = normalize_idx(end, in_sizes[dim], in_sizes[dim]);
7877

79-
if (dim_index == kChannel4D) {
78+
const vkapi::SpecVarList spec_vars = {t_in->packed_dim()};
79+
80+
const auto packed_dim_idx =
81+
static_cast<DimIndex>(DimIndex::DIM_LAST - t_in->packed_dim());
82+
83+
// if slice dim is the same as the packed dim, we can use the channel slice
84+
if (dim_index == packed_dim_idx) {
8085
// slice by channel
8186
std::string kernel_name = "slice_channel";
8287
kernel_name.reserve(kShaderNameReserve);
@@ -99,26 +104,31 @@ void add_slice_tensor_copy_node(
99104
{in, vkapi::MemoryAccessType::READ}},
100105
{t_out->sizes_ubo(),
101106
t_in->sizes_ubo(),
102-
graph.create_params_buffer(params)}));
107+
graph.create_params_buffer(params)},
108+
spec_vars));
103109

104110
} else {
105111
// GPU's coordinate is in x, y, z
106112
int64_t gpu_dim = -1;
107-
int64_t stride = 1;
113+
int64_t in_channel_stride = 1;
108114
if (dim_index == kWidth4D) {
109115
gpu_dim = 0; // width: x dimension in gpu
110116
VK_CHECK_COND(out_sizes[dim] == (1 + (end - start - 1) / step));
111117
} else if (dim_index == kHeight4D) {
112118
gpu_dim = 1; // height: y dimension
113119
VK_CHECK_COND(out_sizes[dim] == (1 + (end - start - 1) / step));
114-
} else if (dim_index == kBatch4D) {
115-
gpu_dim = 2; // batch: z dimension
116-
117-
// Due to channel packing, each batch value is span over stride planes
118-
int64_t n_channels = dim_at(in_sizes, kChannel4D);
119-
stride = utils::div_up_4(n_channels);
120+
} else if (dim_index == kChannel4D) {
121+
gpu_dim = 2; // channel: z dimension
122+
VK_CHECK_COND(out_sizes[dim] == (1 + (end - start - 1) / step));
123+
in_channel_stride = dim_at(in_sizes, kChannel4D);
120124
} else {
121-
VK_THROW("Unexpected ncwh_dim!");
125+
gpu_dim = 3; // batch: w dimension
126+
127+
in_channel_stride = dim_at(in_sizes, kChannel4D);
128+
if (packed_dim_idx == kChannel4D) {
129+
// Due to channel packing, each batch value is span over stride planes
130+
in_channel_stride = utils::div_up_4(in_channel_stride);
131+
}
122132
}
123133

124134
std::string kernel_name = "slice_batch_height_width";
@@ -137,7 +147,7 @@ void add_slice_tensor_copy_node(
137147
static_cast<int32_t>(gpu_dim),
138148
static_cast<int32_t>(start),
139149
static_cast<int32_t>(step),
140-
static_cast<int32_t>(stride),
150+
static_cast<int32_t>(in_channel_stride),
141151
};
142152

143153
graph.execute_nodes().emplace_back(new DispatchNode(
@@ -147,7 +157,8 @@ void add_slice_tensor_copy_node(
147157
local_size,
148158
{{out, vkapi::MemoryAccessType::WRITE},
149159
{in, vkapi::MemoryAccessType::READ}},
150-
{t_out->sizes_ubo(), graph.create_params_buffer(params)}));
160+
{t_out->sizes_ubo(), graph.create_params_buffer(params)},
161+
spec_vars));
151162
}
152163
}
153164

backends/vulkan/test/op_tests/cases.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -585,7 +585,11 @@ def get_slice_out_inputs():
585585
test_suite = VkTestSuite([tuple(tc) for tc in test_cases])
586586

587587
test_suite.dtypes = ["at::kFloat", "at::kHalf"]
588-
test_suite.layouts = ["utils::kChannelsPacked"]
588+
test_suite.layouts = [
589+
"utils::kWidthPacked",
590+
"utils::kHeightPacked",
591+
"utils::kChannelsPacked",
592+
]
589593
test_suite.data_gen = "make_seq_tensor"
590594
return test_suite
591595

0 commit comments

Comments
 (0)