Skip to content

Commit 0fd4a04

Browse files
committed
Remove debug code
1 parent dd0a4c3 commit 0fd4a04

File tree

4 files changed

+2
-36
lines changed

4 files changed

+2
-36
lines changed

paddle/framework/tensor_impl.h

Lines changed: 2 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -134,17 +134,8 @@ inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
134134
#endif
135135
offset_ = 0;
136136
}
137-
void* buf = reinterpret_cast<void*>(
138-
reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
139-
if (type.hash_code() == typeid(float).hash_code() ||
140-
type.hash_code() == typeid(double).hash_code()) {
141-
float* tmp = (float*)(buf);
142-
for (int64_t i = 0; i < numel(); ++i) {
143-
tmp[i] = NAN;
144-
}
145-
}
146-
147-
return buf;
137+
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
138+
offset_);
148139
}
149140

150141
inline void* Tensor::mutable_data(platform::Place place) {

paddle/operators/fill_constant_op.cc

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -51,7 +51,6 @@ class FillConstantOp : public framework::OperatorBase {
5151

5252
platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
5353
auto &dev_ctx = *pool.Get(dev_place);
54-
VLOG(10) << "FillConstant to " << &out;
5554
math::set_constant(dev_ctx, &out, value);
5655
}
5756
};

paddle/operators/shrink_rnn_memory_op.cc

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -116,7 +116,6 @@ class ShrinkRNNMemoryGradOp : public ArrayOp {
116116
auto height = dout_tensor.dims()[0];
117117
auto slice = dx_tensor.Slice(0, static_cast<int>(height));
118118
framework::CopyFrom(dout_tensor, dout_tensor.place(), dev_ctx, &slice);
119-
VLOG(10) << dx_tensor.dims()[0] << ", " << height;
120119
if (dx_tensor.dims()[0] > height) {
121120
auto rest_tensor = dx_tensor.Slice(
122121
static_cast<int>(height), static_cast<int>(dx_tensor.dims()[0]));

paddle/operators/while_op.cc

Lines changed: 0 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212
See the License for the specific language governing permissions and
1313
limitations under the License. */
1414

15-
#include <cmath>
1615
#include <vector>
1716
#include "paddle/framework/executor.h"
1817
#include "paddle/framework/lod_tensor_array.h"
@@ -195,36 +194,14 @@ class WhileGradOp : public framework::OperatorBase {
195194
}
196195
}
197196

198-
auto check_var_no_nan = [](const framework::Scope &scope,
199-
const std::string &var_name) {
200-
auto *var = scope.FindVar(var_name);
201-
if (var->IsType<LoDTensor>()) {
202-
VLOG(10) << "Checking " << var_name;
203-
PADDLE_ENFORCE(!framework::HasNAN(var->Get<framework::LoDTensor>()),
204-
"%s has NAN", var_name);
205-
if (var->Get<framework::LoDTensor>().type() ==
206-
typeid(float)) { // NOLINT
207-
auto &tensor = var->Get<framework::LoDTensor>();
208-
auto *buf = tensor.data<float>();
209-
for (int64_t i = 0; i < tensor.numel(); ++i) {
210-
PADDLE_ENFORCE(!std::isnan(buf[i]));
211-
}
212-
VLOG(10) << buf[0];
213-
}
214-
}
215-
};
216-
check_var_no_nan(cur_scope, inside_grad_name);
217197
auto new_inside_name = cur_scope.Rename(inside_grad_name);
218-
check_var_no_nan(cur_scope, new_inside_name);
219198
auto sum_op = framework::OpRegistry::CreateOp(
220199
"sum", {{"X", {pg_names[param_id], new_inside_name}}},
221200
{{"Out", {pg_names[param_id]}}}, framework::AttributeMap{});
222201
sum_op->Run(cur_scope, dev_place);
223-
check_var_no_nan(scope, pg_names[param_id]);
224202
cur_scope.Rename(new_inside_name, inside_grad_name);
225203
}
226204
}
227-
VLOG(1) << "Complete WhileOpGrad";
228205
}
229206
};
230207

0 commit comments

Comments (0)