
Commit e9082bb

Resume unit testing.
1 parent 884ce5d commit e9082bb

5 files changed: +33 additions, -37 deletions

paddle/operators/cross_entropy_op.cu

Lines changed: 0 additions & 2 deletions
@@ -23,8 +23,6 @@ template <typename T>
 __global__ void CrossEntropyGradientKernel(T* dX, const T* dY, const T* X,
                                            const int64_t* label, const int N,
                                            const int D) {
-  // TOOD(qingqing) define CUDA_1D_KERNEL_LOOP macro in a common file.
-  // CUDA_1D_KERNEL_LOOP(i, N) {
   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
        i += blockDim.x * gridDim.x) {
     int idx = i * D + label[i];
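The deleted TODO refers to the common grid-stride loop idiom, which the remaining for loop writes out by hand. As an illustration only (no such macro is defined in this commit; its eventual name and home are whatever the TODO's author chooses), a macro of this kind is typically written as:

// Illustrative sketch, not part of this commit: a typical grid-stride loop macro.
#define CUDA_1D_KERNEL_LOOP(i, n)                                 \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);    \
       i += blockDim.x * gridDim.x)

// With such a macro, the loop above could be collapsed to:
//   CUDA_1D_KERNEL_LOOP(i, N) {
//     int idx = i * D + label[i];
//     ...
//   }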

paddle/operators/math/math_function.cu

Lines changed: 3 additions & 3 deletions
@@ -240,7 +240,7 @@ void axpy<platform::GPUPlace, float>(const platform::DeviceContext& context,
   PADDLE_ENFORCE(platform::dynload::cublasSaxpy(
       reinterpret_cast<const platform::CUDADeviceContext&>(context)
           .cublas_handle(),
-      n, alpha, x, 1, y, 1));
+      n, &alpha, x, 1, y, 1));
 }

 template <>
@@ -250,7 +250,7 @@ void axpy<platform::GPUPlace, double>(const platform::DeviceContext& context,
   PADDLE_ENFORCE(platform::dynload::cublasDaxpy(
       reinterpret_cast<const platform::CUDADeviceContext&>(context)
           .cublas_handle(),
-      n, alpha, x, 1, y, 1));
+      n, &alpha, x, 1, y, 1));
 }

 template struct SetConstant<platform::GPUPlace, float>;
@@ -270,7 +270,7 @@ DEFINE_GPU_TRANS(6);

 struct TensorSetConstantGPU {
   TensorSetConstantGPU(const platform::DeviceContext& context,
-                       framework::Tensor* tensor, float value)
+                       framework::Tensor* tensor, float value)
       : context_(context), tensor_(tensor), value_(value) {}

   template <typename T>
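The two axpy fixes pass the scalar by address because the cuBLAS v2 API reads alpha through a pointer (cublasSaxpy and cublasDaxpy take const float* / const double* alpha). A minimal sketch of the corrected call shape against plain cuBLAS, outside Paddle's platform::dynload and PADDLE_ENFORCE wrappers; saxpy_example is a made-up name used only for illustration:

// Sketch only: the corrected axpy call shape against plain cuBLAS.
#include <cublas_v2.h>

cublasStatus_t saxpy_example(cublasHandle_t handle, int n, float alpha,
                             const float* x, float* y) {
  // cuBLAS v2 reads the scalar through a pointer (host or device memory,
  // depending on the handle's pointer mode), hence &alpha rather than alpha.
  return cublasSaxpy(handle, n, &alpha, x, /*incx=*/1, y, /*incy=*/1);
}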

paddle/operators/sequence_conv_op.h

Lines changed: 0 additions & 2 deletions
@@ -65,10 +65,8 @@ class SequenceConvKernel : public framework::OpKernel<T> {
                           padding_trainable, context_start, context_length,
                           context_stride, up_pad, down_pad);

-    context.device_context().Finish();
     math::matmul<Place, T>(context.device_context(), col, false, filter, false,
                            static_cast<T>(1.0), out, static_cast<T>(0.0));
-    context.device_context().Finish();
   }
 };

python/paddle/v2/framework/tests/test_lstm_op.py

Lines changed: 1 addition & 2 deletions
@@ -180,7 +180,6 @@ def test_check_grad(self):
             ['Input', 'Weight', 'Bias'], ['Hidden'], max_relative_error=5e-4)


-"""
 class TestLstmOpHasInitial(TestLstmOp):
     def set_argument(self):
         self.lod = [[0, 2, 5, 7]]
@@ -281,7 +280,7 @@ def set_argument(self):
         self.has_initial_state = False
         self.is_reverse = True
         self.use_peepholes = False
-"""
+

 if __name__ == '__main__':
     unittest.main()

python/paddle/v2/framework/tests/test_seq_conv.py

Lines changed: 29 additions & 28 deletions
@@ -122,7 +122,7 @@ def test_check_grad_padding_data(self):
             max_relative_error=0.05,
             no_grad_set=set(['X', 'Filter']))

-    def not_test_check_grad_Filter(self):
+    def test_check_grad_Filter(self):
         self.check_grad(
             ['Filter'],
             'Out',
@@ -165,33 +165,34 @@ def init_test_case(self):
         self.output_represention = 8  # output feature size


-#class TestSeqProjectCase1(TestSeqProject):
-#    def init_test_case(self):
-#        self.input_row = 11
-#        self.context_start = -1
-#        self.context_length = 3
-#        self.padding_trainable = True
-#        self.context_stride = 1
-#
-#        self.input_size = [self.input_row, 23]
-#        self.lod = [[0, 4, 5, 8, self.input_row]]
-#        self.output_represention = 8  # output feature size
-#
-#
-#class TestSeqProjectCase2(TestSeqProject):
-#    def init_test_case(self):
-#        self.input_row = 25
-#        self.context_start = 2
-#        self.context_length = 3
-#        self.padding_trainable = True
-#        self.context_stride = 1
-#
-#        self.input_size = [self.input_row, 23]
-#        idx = range(self.input_size[0])
-#        del idx[0]
-#        self.lod = [[0] + np.sort(random.sample(idx, 8)).tolist() +
-#                    [self.input_size[0]]]
-#        self.output_represention = 8  # output feature size
+class TestSeqProjectCase1(TestSeqProject):
+    def init_test_case(self):
+        self.input_row = 11
+        self.context_start = -1
+        self.context_length = 3
+        self.padding_trainable = True
+        self.context_stride = 1
+
+        self.input_size = [self.input_row, 23]
+        self.lod = [[0, 4, 5, 8, self.input_row]]
+        self.output_represention = 8  # output feature size
+
+
+class TestSeqProjectCase2(TestSeqProject):
+    def init_test_case(self):
+        self.input_row = 25
+        self.context_start = 2
+        self.context_length = 3
+        self.padding_trainable = True
+        self.context_stride = 1
+
+        self.input_size = [self.input_row, 23]
+        idx = range(self.input_size[0])
+        del idx[0]
+        self.lod = [[0] + np.sort(random.sample(idx, 8)).tolist() +
+                    [self.input_size[0]]]
+        self.output_represention = 8  # output feature size
+

 if __name__ == '__main__':
     unittest.main()
