@@ -14,9 +14,9 @@ limitations under the License. */
 
 #include <algorithm>
 
-#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_launch_config.h"
+#include "paddle/phi/backends/gpu/gpu_primitives.h"
 #include "paddle/phi/kernels/funcs/gather.cu.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 #include "paddle/phi/kernels/funcs/segment_pooling.h"
@@ -60,7 +60,7 @@ __global__ void SegmentSumIdsKernel(const Index* segment_ids,
       }
       if (j > 0) {
         if (last_segment_id == first_segment_id) {
-          paddle::platform::CudaAtomicAdd(summed_ids + last_segment_id, sum);
+          phi::CudaAtomicAdd(summed_ids + last_segment_id, sum);
         } else {
           *(summed_ids + last_segment_id) = sum;
         }
@@ -70,7 +70,7 @@ __global__ void SegmentSumIdsKernel(const Index* segment_ids,
       sum += T(1);
       last_segment_id = current_segment_id;
     }
-    paddle::platform::CudaAtomicAdd(summed_ids + last_segment_id, sum);
+    phi::CudaAtomicAdd(summed_ids + last_segment_id, sum);
   }
 }
 
@@ -111,8 +111,8 @@ __global__ void SegmentMeanKernel(const Index* segment_ids,
               last_segment_id * inner_dim_size + segment_offset;
 
           if (last_segment_id == first_segment_id) {
-            paddle::platform::CudaAtomicAdd(
-                output + output_index, sum / *(summed_ids + last_segment_id));
+            phi::CudaAtomicAdd(output + output_index,
+                               sum / *(summed_ids + last_segment_id));
           } else {
             *(output + output_index) = sum / *(summed_ids + last_segment_id);
           }
@@ -123,8 +123,8 @@ __global__ void SegmentMeanKernel(const Index* segment_ids,
       last_segment_id = current_segment_id;
     }
     Index output_index = last_segment_id * inner_dim_size + segment_offset;
-    paddle::platform::CudaAtomicAdd(output + output_index,
-                                    sum / *(summed_ids + last_segment_id));
+    phi::CudaAtomicAdd(output + output_index,
+                       sum / *(summed_ids + last_segment_id));
   }
 }
 
@@ -215,7 +215,7 @@ class MaxPool {
   DEVICE inline T initial() { return static_cast<T>(-FLT_MAX); }
   DEVICE inline void compute(const T& x, T* y) { *y = *y > x ? *y : x; }
   DEVICE inline T atomic(T* address, const T val) {
-    return paddle::platform::CudaAtomicMax(address, val);
+    return phi::CudaAtomicMax(address, val);
   }
 };
 
@@ -225,7 +225,7 @@ class MinPool {
   DEVICE inline T initial() { return static_cast<T>(FLT_MAX); }
   DEVICE inline void compute(const T& x, T* y) { *y = *y < x ? *y : x; }
   DEVICE inline T atomic(T* address, const T val) {
-    return paddle::platform::CudaAtomicMin(address, val);
+    return phi::CudaAtomicMin(address, val);
   }
 };
 
@@ -235,7 +235,7 @@ class SumPool {
   DEVICE inline T initial() { return static_cast<T>(0); }
   DEVICE inline void compute(const T& x, T* y) { *y = *y + x; }
   DEVICE inline T atomic(T* address, const T val) {
-    return paddle::platform::CudaAtomicAdd(address, val);
+    return phi::CudaAtomicAdd(address, val);
   }
 };
 
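For context, the kernels touched above combine per-stripe partial sums with an atomic add because neighbouring stripes can end on the same segment id. A minimal standalone sketch of that accumulation pattern, using plain CUDA atomicAdd in place of the phi::CudaAtomicAdd wrapper (the kernel name, sizes, and data below are hypothetical, for illustration only):

// segment_sum_sketch.cu -- hypothetical illustration, not part of this PR.
#include <cstdio>
#include <cuda_runtime.h>

// Each thread owns one input row; rows sharing a (sorted) segment id
// accumulate into the same output slot, so the add must be atomic.
__global__ void SegmentSumSketch(const int* segment_ids,
                                 const float* values,
                                 float* out,
                                 int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    atomicAdd(out + segment_ids[i], values[i]);  // stands in for phi::CudaAtomicAdd
  }
}

int main() {
  const int n = 8, num_segments = 3;
  int h_ids[n] = {0, 0, 1, 1, 1, 2, 2, 2};
  float h_vals[n] = {1, 2, 3, 4, 5, 6, 7, 8};
  int* d_ids;
  float *d_vals, *d_out;
  cudaMalloc(&d_ids, n * sizeof(int));
  cudaMalloc(&d_vals, n * sizeof(float));
  cudaMalloc(&d_out, num_segments * sizeof(float));
  cudaMemcpy(d_ids, h_ids, n * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_vals, h_vals, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemset(d_out, 0, num_segments * sizeof(float));
  SegmentSumSketch<<<1, 32>>>(d_ids, d_vals, d_out, n);
  float h_out[num_segments];
  cudaMemcpy(h_out, d_out, num_segments * sizeof(float), cudaMemcpyDeviceToHost);
  printf("%g %g %g\n", h_out[0], h_out[1], h_out[2]);  // expected: 3 12 21
  cudaFree(d_ids);
  cudaFree(d_vals);
  cudaFree(d_out);
  return 0;
}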