@@ -42,7 +42,7 @@ limitations under the License. */
4242
4343#include "paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h"
4444#include "paddle/fluid/operators/reduce_ops/reduce_op.cu.h"
45- #include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
45+ #include "paddle/phi/backends/gpu/gpu_device_function.h"
4646#include "paddle/phi/backends/gpu/gpu_primitives.h"
4747#include "paddle/phi/kernels/gpu/elementwise_grad.h"
4848
@@ -982,7 +982,7 @@ static __global__ void FusedElemwiseAndActGradBroadcast1CUDAKernel(
982982#pragma  unroll
983983    for  (int  i = BLOCK_X >> 1 ; i > 0 ; i >>= 1 ) {
984984      //  reduce sum with wrap
985-       val += platform::CudaShuffleXorSync(0xFFFFFFFF, val, i);
985+       val += phi::backends::gpu::CudaShuffleXorSync(0xFFFFFFFF, val, i);
986986    }
987987
988988    size_t  idx_j = j + threadIdx.y ;
@@ -1004,7 +1004,8 @@ static __global__ void FusedElemwiseAndActGradBroadcast1CUDAKernel(
10041004#pragma  unroll
10051005        for  (int  i = BLOCK_X >> 1 ; i > 0 ; i >>= 1 ) {
10061006          //  reduce sum with wrap
1007-           inter_val += platform::CudaShuffleXorSync(0xFFFFFFFF, inter_val, i);
1007+           inter_val +=
1008+               phi::backends::gpu::CudaShuffleXorSync(0xFFFFFFFF, inter_val, i);
10081009        }
10091010        if  (threadIdx.x  == 0  && (idx_j < w)) d_intermediate[idx_j] = inter_val;
10101011      }
@@ -1160,22 +1161,22 @@ static __global__ void FusedElemwiseAndActGradBroadcast2CUDAKernel(
11601161  h = h > ELEMWISE_MAX_BLOCK_DIM ? ELEMWISE_MAX_BLOCK_DIM : h;
11611162  if  (BcastY) {
11621163    if  (dy) {
1163-       val = paddle::platform::reduceSum(val, tid, h);
1164+       val = phi::backends::gpu::reduceSum(val, tid, h);
11641165      if  (threadIdx.x  == 0 ) {
11651166        dy[j] = val;
11661167      }
11671168    }
11681169  } else  {
11691170    if  (dx) {
1170-       val = paddle::platform::reduceSum(val, tid, h);
1171+       val = phi::backends::gpu::reduceSum(val, tid, h);
11711172      if  (threadIdx.x  == 0 ) {
11721173        dx[j] = val;
11731174      }
11741175    }
11751176  }
11761177  if  (!SameShapeOfIntermediateOutAndOut) {
11771178    if  (d_intermediate) {
1178-       inter_val = paddle::platform::reduceSum(inter_val, tid, h);
1179+       inter_val = phi::backends::gpu::reduceSum(inter_val, tid, h);
11791180      if  (threadIdx.x  == 0 ) {
11801181        d_intermediate[j] = inter_val;
11811182      }
0 commit comments