Skip to content

Commit 822dabc

Browse files
committed
channel pooling along any dimension (GPU)
1 parent 6b99f33 commit 822dabc

File tree

2 files changed

+16
-24
lines changed

2 files changed

+16
-24
lines changed

src/cuda/layers/channel-pooling.jl

Lines changed: 16 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -37,9 +37,9 @@ function forward(backend::GPUBackend, pool::StdPoolingFunction,
3737
output = state.blobs[i]
3838

3939
if isa(pool, Pooling.Max)
40-
cuda_max_channel_pooling_forward(backend, input, output, state.etc[i], state.layer)
40+
cuda_max_channel_pooling_forward(backend, input, output, state.etc[i], state.layer, state.op_dims[i])
4141
elseif isa(pool, Pooling.Mean)
42-
cuda_mean_channel_pooling_forward(backend, input, output, state.etc[i], state.layer)
42+
cuda_mean_channel_pooling_forward(backend, input, output, state.etc[i], state.layer, state.op_dims[i])
4343
else
4444
error("Pooling for $pool not implemented yet")
4545
end
@@ -57,9 +57,9 @@ function backward(backend::GPUBackend, pool::StdPoolingFunction, state::ChannelP
5757
diff = diffs[i]
5858
if !isa(diff, NullBlob)
5959
if isa(pool, Pooling.Max)
60-
cuda_max_channel_pooling_backward(backend, diff, state.blobs_diff[i], state.etc[i], state.layer)
60+
cuda_max_channel_pooling_backward(backend, diff, state.blobs_diff[i], state.etc[i], state.layer, state.op_dims[i])
6161
elseif isa(pool, Pooling.Mean)
62-
cuda_mean_channel_pooling_backward(backend, diff, state.blobs_diff[i], state.layer)
62+
cuda_mean_channel_pooling_backward(backend, diff, state.blobs_diff[i], state.layer, state.op_dims[i])
6363
else
6464
error("Pooling for $pool not implemented yet")
6565
end
@@ -70,15 +70,14 @@ function backward(backend::GPUBackend, pool::StdPoolingFunction, state::ChannelP
7070
end
7171

7272
function cuda_mean_channel_pooling_forward{T}(backend::GPUBackend, input::CuTensorBlob{T},
73-
output::CuTensorBlob{T}, integral::CuPtr, layer)
73+
output::CuTensorBlob{T}, integral::CuPtr, layer, op_dim)
7474

75-
width, height, channels, num = size(input)
76-
pooled_chann = size(output, 3)
75+
spatial_dim_T, channels, num = split_dims(input, op_dim)
76+
pooled_chann = size(output, op_dim)
7777
one = convert(T, 1)
7878
neg_one = convert(T, -1)
7979
scale = convert(T, 1.0/layer.kernel)
8080

81-
spatial_dim_T = width*height
8281
spatial_dim = spatial_dim_T * sizeof(T)
8382
fea_dim = spatial_dim * channels
8483
output_fea_dim = spatial_dim * pooled_chann
@@ -116,15 +115,14 @@ function cuda_mean_channel_pooling_forward{T}(backend::GPUBackend, input::CuTens
116115
end
117116

118117
function cuda_mean_channel_pooling_backward{T}(backend::GPUBackend, input::CuTensorBlob{T},
119-
output::CuTensorBlob{T}, layer)
118+
output::CuTensorBlob{T}, layer, op_dim)
120119

121-
width, height, channels, num = size(input)
122-
pooled_chann = size(output, 3)
120+
spatial_dim_T, channels, num = split_dims(input, op_dim)
121+
pooled_chann = size(output, op_dim)
123122
scale = 1/convert(T, layer.kernel)
124123

125124
fill!(input, 0)
126125

127-
spatial_dim_T = width*height
128126
spatial_dim = spatial_dim_T * sizeof(T)
129127
fea_dim = spatial_dim * channels
130128
output_fea_dim = spatial_dim * pooled_chann
@@ -157,11 +155,10 @@ function cuda_geometry_max_chann_pool(sp_dim::Int, num::Int)
157155

158156
end
159157
function cuda_max_channel_pooling_forward{T}(backend::GPUBackend, input::CuTensorBlob{T},
160-
output::CuTensorBlob{T}, mask::CuPtr, layer)
158+
output::CuTensorBlob{T}, mask::CuPtr, layer, op_dim)
161159

162-
width, height, channels, num = size(input)
163-
sp_dim = width*height
164-
pooled_chann = get_chann(output)
160+
sp_dim, channels, num = split_dims(input, op_dim)
161+
pooled_chann = size(output, op_dim)
165162

166163
cuda_dim = cuda_geometry_max_chann_pool(sp_dim, num);
167164
if T == Float32
@@ -177,11 +174,10 @@ function cuda_max_channel_pooling_forward{T}(backend::GPUBackend, input::CuTenso
177174
end
178175

179176
function cuda_max_channel_pooling_backward{T}(backend::GPUBackend, input::CuTensorBlob{T},
180-
output::CuTensorBlob{T}, mask::CuPtr, layer)
177+
output::CuTensorBlob{T}, mask::CuPtr, layer, op_dim)
181178

182-
width, height, channels, num = size(input)
183-
sp_dim = width*height
184-
pooled_chann = get_chann(output)
179+
sp_dim, channels, num = split_dims(input, op_dim)
180+
pooled_chann = size(output, op_dim)
185181

186182
cuda_dim = cuda_geometry_max_chann_pool(sp_dim, num);
187183
if T == Float32

src/layers/channel-pooling.jl

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -105,8 +105,6 @@ function forward(backend::CPUBackend, pool::StdPoolingFunction,
105105
max_channel_pooling_forward(reshape(input,dims_in), reshape(output,dims_out), reshape(state.etc[i],dims_out), state.layer)
106106
elseif isa(pool, Pooling.Mean)
107107
mean_channel_pooling_forward(reshape(input,dims_in), reshape(output,dims_out), state.etc[i], state.layer)
108-
else
109-
error("Pooling for $pool not implemented yet")
110108
end
111109
end
112110
end
@@ -130,8 +128,6 @@ function backward(backend::CPUBackend, pool::StdPoolingFunction, state::ChannelP
130128
reshape(state.etc[i],dims_out), state.layer)
131129
elseif isa(pool, Pooling.Mean)
132130
mean_channel_pooling_backward(reshape(diff.data,dims_in), reshape(state.blobs_diff[i].data,dims_out), state.layer)
133-
else
134-
error("Pooling for $pool not implemented yet")
135131
end
136132
end
137133
end

0 commit comments

Comments (0)