
Commit 98e90e9

co63oc authored and lixcli committed
Clean paddle/fluid/platform/for_range.h [fluid_ops] (PaddlePaddle#66142)
1 parent c903a03 commit 98e90e9


5 files changed: 15 additions & 42 deletions

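The change itself is mechanical: every remaining caller of the old paddle::platform::ForRange alias is pointed at phi::funcs::ForRange and the forwarding header is removed. For callers only the include path and namespace change; the constructor arguments and functor protocol stay the same. A minimal sketch of the migrated call pattern on CPU (ScaleFunctor and ScaleInPlace are illustrative names, not taken from this commit):

// Illustrative use of phi::funcs::ForRange after the migration.
// ScaleFunctor and ScaleInPlace are hypothetical, not part of Paddle.
#include <cstddef>

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/kernels/funcs/for_range.h"

struct ScaleFunctor {
  ScaleFunctor(float* data, float scale) : data_(data), scale_(scale) {}
  // ForRange invokes operator()(i) for every i in [0, limit).
  void operator()(size_t i) const { data_[i] *= scale_; }
  float* data_;
  float scale_;
};

void ScaleInPlace(const phi::CPUContext& dev_ctx, float* data, size_t n) {
  // Previously: paddle::platform::ForRange<phi::CPUContext> (same arguments).
  phi::funcs::ForRange<phi::CPUContext> for_range(dev_ctx, n);
  for_range(ScaleFunctor(data, 0.5f));
}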

paddle/fluid/imperative/reducer.h

Lines changed: 3 additions & 3 deletions
@@ -30,7 +30,7 @@
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/framework/tensor_util.h"
 #include "paddle/fluid/framework/variable.h"
-#include "paddle/fluid/platform/for_range.h"
+#include "paddle/phi/kernels/funcs/for_range.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 
 namespace paddle {
@@ -72,8 +72,8 @@ struct DivNRanksForAllReduce {
   template <typename T>
   void apply() const {
     T* data = in_->mutable_data<T>(ctx_.GetPlace());
-    platform::ForRange<Dex> for_range(static_cast<const Dex&>(ctx_),
-                                      static_cast<size_t>(in_->numel()));
+    phi::funcs::ForRange<Dex> for_range(static_cast<const Dex&>(ctx_),
+                                        static_cast<size_t>(in_->numel()));
     DivNRanksFunctor<T> functor(nranks_, data);
     for_range(functor);
   }

paddle/fluid/platform/for_range.h

Lines changed: 0 additions & 27 deletions
This file was deleted.
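The deleted content is not shown on this page. By this stage of the migration a header like this is typically nothing more than a thin forwarding alias onto the phi implementation; an assumed sketch of that shape (a reconstruction for context, not the verbatim 27 deleted lines):

// Assumed shape of a forwarding header of this kind (not the exact deleted
// file): it only re-exports phi::funcs::ForRange under the old
// paddle::platform name, which is why callers can switch namespaces freely.
#pragma once

#include "paddle/phi/kernels/funcs/for_range.h"

namespace paddle {
namespace platform {

template <typename DeviceContext>
using ForRange = phi::funcs::ForRange<DeviceContext>;

}  // namespace platform
}  // namespace paddle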

test/cpp/fluid/memory/best_fit_allocator_test.cu

Lines changed: 2 additions & 2 deletions
@@ -22,7 +22,7 @@
 #include "paddle/fluid/memory/allocation/best_fit_allocator.h"
 #include "paddle/fluid/memory/allocation/cuda_allocator.h"
 #include "paddle/fluid/memory/memcpy.h"
-#include "paddle/fluid/platform/for_range.h"
+#include "paddle/phi/kernels/funcs/for_range.h"
 namespace paddle {
 namespace memory {
 namespace allocation {
@@ -62,7 +62,7 @@ TEST(BestFitAllocator, concurrent_cuda) {
   size_t* data = reinterpret_cast<size_t*>(allocation->ptr());
 
   ForEachFill fill(data);
-  platform::ForRange<phi::GPUContext> for_range(dev_ctx, allocate_size);
+  phi::funcs::ForRange<phi::GPUContext> for_range(dev_ctx, allocate_size);
   for_range(fill);
 
   memory::Copy(phi::CPUPlace(),

test/cpp/fluid/test_leaky_relu_grad_grad_functor.h

Lines changed: 3 additions & 3 deletions
@@ -19,7 +19,7 @@
 
 #include "gtest/gtest.h"
 #include "paddle/fluid/operators/activation_op.h"
-#include "paddle/fluid/platform/for_range.h"
+#include "paddle/phi/kernels/funcs/for_range.h"
 
 namespace paddle {
 namespace operators {
@@ -98,13 +98,13 @@ static bool TestLeakyReluGradGradMain(const phi::DDim &dim,
   if (phi::is_gpu_place(place)) {
     auto &cuda_dev_ctx = dynamic_cast<phi::GPUContext &>(dev_ctx);
     functor(cuda_dev_ctx, &x, out, &ddx, &ddout, dout, dx);
-    platform::ForRange<phi::GPUContext> for_range(cuda_dev_ctx, limit);
+    phi::funcs::ForRange<phi::GPUContext> for_range(cuda_dev_ctx, limit);
     for_range(actual_functor);
   } else {
 #endif
     auto &cpu_dev_ctx = dynamic_cast<phi::CPUContext &>(dev_ctx);
     functor(cpu_dev_ctx, &x, out, &ddx, &ddout, dout, dx);
-    platform::ForRange<phi::CPUContext> for_range(cpu_dev_ctx, limit);
+    phi::funcs::ForRange<phi::CPUContext> for_range(cpu_dev_ctx, limit);
     for_range(actual_functor);
 #if defined(__NVCC__) || defined(__HIPCC__)
   }

test/deprecated/custom_op/custom_raw_op_kernel_op.h

Lines changed: 7 additions & 7 deletions
@@ -18,7 +18,7 @@
 #include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/platform/device_context.h"
-#include "paddle/fluid/platform/for_range.h"
+#include "paddle/phi/kernels/funcs/for_range.h"
 
 namespace custom_raw_op {
 
@@ -50,12 +50,12 @@ struct ReluFunctor {
 
   const auto &dev_ctx = *phi::DeviceContextPool::Instance().Get(place);
 
-#define LAUNCH_RELU_KERNEL(DevCtxT)                                  \
-  do {                                                               \
-    auto &__dev_ctx = dynamic_cast<const DevCtxT &>(dev_ctx);        \
-    paddle::platform::ForRange<DevCtxT> for_range(__dev_ctx, n);     \
-    Impl<T> functor(x_data, y_data);                                 \
-    for_range(functor);                                              \
+#define LAUNCH_RELU_KERNEL(DevCtxT)                                \
+  do {                                                             \
+    auto &__dev_ctx = dynamic_cast<const DevCtxT &>(dev_ctx);      \
+    phi::funcs::ForRange<DevCtxT> for_range(__dev_ctx, n);         \
+    Impl<T> functor(x_data, y_data);                               \
+    for_range(functor);                                            \
   } while (0)
 
 #if defined(__NVCC__) || defined(__HIPCC__)
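LAUNCH_RELU_KERNEL above switches between CPU and GPU purely through the ForRange template argument, so the same Impl<T> functor must compile for both. A hedged sketch of what such a device-agnostic functor usually looks like (illustrative only; the commit does not show Impl<T> itself):

// Illustrative device-agnostic functor for phi::funcs::ForRange. HOSTDEVICE
// expands to __host__ __device__ under NVCC/HIPCC and to nothing in CPU-only
// builds, so the same operator() serves ForRange<phi::CPUContext> and
// ForRange<phi::GPUContext>.
#include <cstddef>

#include "paddle/phi/core/hostdevice.h"

template <typename T>
struct ReluImpl {
  ReluImpl(const T* x, T* y) : x_(x), y_(y) {}

  HOSTDEVICE void operator()(size_t i) const {
    y_[i] = x_[i] > static_cast<T>(0) ? x_[i] : static_cast<T>(0);
  }

  const T* x_;
  T* y_;
};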
