
Commit 24a2330

co63oc authored and lixcli committed
Replace platform::DeviceContextPool [fluid_ops] (PaddlePaddle#65983)
1 parent f75b244 commit 24a2330
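
This commit is part of the fluid_ops cleanup: call sites that reached device contexts and place checks through the legacy platform:: aliases (platform::DeviceContextPool, platform::is_cpu_place / is_gpu_place, paddle::platform::errors) now spell the phi:: namespace directly. A minimal sketch of the call-site pattern, with a hypothetical AllocOnCpu helper and includes omitted since the commit does not touch them:

// Sketch only: AllocOnCpu is a hypothetical wrapper, not code from this commit;
// the pool lookup and the Alloc call mirror the hunks below.
void AllocOnCpu(phi::DenseTensor* tensor) {
  // Old spelling: platform::DeviceContextPool::Instance().Get(phi::CPUPlace())
  // New spelling: the pool is addressed through the phi namespace.
  auto* cpu_ctx = static_cast<phi::CPUContext*>(
      phi::DeviceContextPool::Instance().Get(phi::CPUPlace()));
  cpu_ctx->Alloc<float>(tensor);  // allocate float storage via the CPU context
}

Behavior is unchanged; each hunk below is a one-for-one textual substitution.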

43 files changed: +95 -107 lines


paddle/fluid/framework/ir/auto_mixed_precision_pass.cc

Lines changed: 3 additions & 3 deletions
@@ -227,9 +227,9 @@ void AutoMixedPrecisionPass::Init(Graph* graph) const {
         phi::CustomRegisteredDeviceMap::Instance()
             .GetOrRegisterGlobalDeviceTypeId(device_type));
 #else
-    PADDLE_THROW(paddle::platform::errors::Unavailable(
-        "Paddle is not compiled with CustomDevice. "
-        "Cannot enable custom_device_mixed."));
+    PADDLE_THROW(
+        phi::errors::Unavailable("Paddle is not compiled with CustomDevice. "
+                                 "Cannot enable custom_device_mixed."));
 #endif
   }

paddle/fluid/framework/ir/conv2d_trans_filter_dilations_nxn_to_1x1_pass.cc

Lines changed: 1 addition & 1 deletion
@@ -125,7 +125,7 @@ void Conv2dTransFilterDilationsNxNTo1x1Pass::conv2d_dilation_trans(
       scope->Var(new_weights_name)->GetMutable<phi::DenseTensor>();
   new_weights->Resize({weights_shape[0], weights_shape[1], new_kh, new_kw});
   auto* cpu_ctx = static_cast<phi::CPUContext*>(
-      platform::DeviceContextPool::Instance().Get(phi::CPUPlace()));
+      phi::DeviceContextPool::Instance().Get(phi::CPUPlace()));
   if (weights->dtype() == phi::DataType::FLOAT32) {
     auto weights_data = weights->data<float>();
     auto* new_weights_data = cpu_ctx->Alloc<float>(new_weights);

paddle/fluid/framework/ir/cudnn_placement_pass.cc

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ bool CUDNNPlacementPass::IsSupport(const Node* op) const {
     return false;
   }
   for (auto& kernel_pair : it->second) {
-    if (platform::is_gpu_place(kernel_pair.first.place_) &&
+    if (phi::is_gpu_place(kernel_pair.first.place_) &&
         (kernel_pair.first.library_type_ == LibraryType::kCUDNN)) {
       return true;
     }

paddle/fluid/framework/ir/delete_cast_op_pass.cc

Lines changed: 1 addition & 1 deletion
@@ -692,7 +692,7 @@ int DeleteCastOpPass::ApplyCastLookupTablePass(ir::Graph* graph) const {
     lookup_table_w->Var()->SetDataType(proto::VarType::FP16);
     if (w_tensor->dtype() != phi::DataType::FLOAT16) {
       auto* cpu_ctx = static_cast<phi::CPUContext*>(
-          platform::DeviceContextPool::Instance().Get(phi::CPUPlace()));
+          phi::DeviceContextPool::Instance().Get(phi::CPUPlace()));
       phi::DenseTensor w_fp32_tensor;
       w_fp32_tensor.Resize(w_tensor->dims());
       w_fp32_tensor.set_type(w_tensor->dtype());

paddle/fluid/framework/ir/delete_cast_op_pass_test.cc

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ void AddVarToScope(Scope* param_scope,
   auto* tensor = param_scope->Var(name)->GetMutable<phi::DenseTensor>();
   tensor->Resize(dims);
   auto* cpu_ctx = static_cast<phi::CPUContext*>(
-      platform::DeviceContextPool::Instance().Get(phi::CPUPlace()));
+      phi::DeviceContextPool::Instance().Get(phi::CPUPlace()));
   cpu_ctx->Alloc<float>(tensor);
 }

paddle/fluid/framework/ir/delete_quant_dequant_filter_op_pass.cc

Lines changed: 1 addition & 1 deletion
@@ -151,7 +151,7 @@ void DeleteQuantDequantFilterOpPass::ApplyImpl(ir::Graph* graph) const {
   const phi::DenseTensor& channel_scale_tensor =
       scope->FindVar(scales_name[0])->Get<phi::DenseTensor>();
   PADDLE_ENFORCE(
-      paddle::platform::is_cpu_place(channel_scale_tensor.place()),
+      phi::is_cpu_place(channel_scale_tensor.place()),
       platform::errors::InvalidArgument(
           "Channel scale tensor's place should be CPU."));
   // compute the channel wise abs max of the weight tensor

paddle/fluid/framework/ir/delete_quant_dequant_linear_op_pass.cc

Lines changed: 4 additions & 5 deletions
@@ -139,11 +139,10 @@ void DeleteQuantDequantLinearOpPass::ApplyImpl(ir::Graph* graph) const {
     const phi::DenseTensor& input_scale_tensor =
         scope->GetVar(quantize_linear_op_scale->Name())
             ->Get<phi::DenseTensor>();
-    PADDLE_ENFORCE_EQ(
-        paddle::platform::is_cpu_place(input_scale_tensor.place()),
-        true,
-        platform::errors::InvalidArgument(
-            "Input scale tensor's place should be CPU."));
+    PADDLE_ENFORCE_EQ(phi::is_cpu_place(input_scale_tensor.place()),
+                      true,
+                      platform::errors::InvalidArgument(
+                          "Input scale tensor's place should be CPU."));

     float input_scale = NAN;
     if (input_scale_tensor.dtype() == phi::DataType::FLOAT32) {

paddle/fluid/framework/ir/delete_quant_dequant_op_pass.cc

Lines changed: 4 additions & 5 deletions
@@ -70,11 +70,10 @@ void DeleteQuantDequantOpPass::ApplyImpl(ir::Graph* graph) const {
           "Scope in DeleteQuantDequantOpPass should not be null."));
   const phi::DenseTensor& input_scale_tensor =
       scope->FindVar(input_scale_var_name)->Get<phi::DenseTensor>();
-  PADDLE_ENFORCE_EQ(
-      paddle::platform::is_cpu_place(input_scale_tensor.place()),
-      true,
-      platform::errors::InvalidArgument(
-          "Input scale tensor's place should be CPU."));
+  PADDLE_ENFORCE_EQ(phi::is_cpu_place(input_scale_tensor.place()),
+                    true,
+                    platform::errors::InvalidArgument(
+                        "Input scale tensor's place should be CPU."));
   const float* input_scale_data = input_scale_tensor.data<float>();
   float input_scale = input_scale_data[0];

paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass_tester.cc

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@ void AddVarToScope(Scope* param_scope,
   auto* tensor = param_scope->Var(name)->GetMutable<phi::DenseTensor>();
   tensor->Resize(dims);
   auto* dev_ctx = static_cast<phi::CPUContext*>(
-      platform::DeviceContextPool::Instance().Get(phi::CPUPlace()));
+      phi::DeviceContextPool::Instance().Get(phi::CPUPlace()));
   dev_ctx->HostAlloc<T>(tensor, tensor->numel() * sizeof(T));
 }

paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc

Lines changed: 4 additions & 4 deletions
@@ -293,9 +293,9 @@ bool FuseOptimizerOpPass::OpWithKernelSupportCPUAndGPU(
       kernel_factory.SelectKernelMap(phi::TransToPhiKernelName(op_type));
   bool has_op_kernel = !kernel_key_map.empty() ? true : false;
   for (auto &kernel : kernel_key_map) {
-    if (platform::is_gpu_place(phi::TransToPhiPlace(kernel.first.backend()))) {
+    if (phi::is_gpu_place(phi::TransToPhiPlace(kernel.first.backend()))) {
       support_gpu = true;
-    } else if (platform::is_cpu_place(
+    } else if (phi::is_cpu_place(
                    phi::TransToPhiPlace(kernel.first.backend()))) {
       support_cpu = true;
     }
@@ -308,9 +308,9 @@ bool FuseOptimizerOpPass::OpWithKernelSupportCPUAndGPU(
   if (it != all_kernels.end()) {
     has_op_kernel = true;
     for (auto &kernel_pair : it->second) {
-      if (platform::is_cpu_place(kernel_pair.first.place_)) {
+      if (phi::is_cpu_place(kernel_pair.first.place_)) {
         support_cpu = true;
-      } else if (platform::is_gpu_place(kernel_pair.first.place_)) {
+      } else if (phi::is_gpu_place(kernel_pair.first.place_)) {
         support_gpu = true;
       }
     }
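
The same substitution covers the kernel place checks: platform::is_cpu_place and platform::is_gpu_place become phi::is_cpu_place and phi::is_gpu_place, while PADDLE_ENFORCE_EQ and the InvalidArgument error builder stay as they were in this commit. A hedged sketch of the resulting call shape, with a hypothetical CheckScaleOnCpu wrapper standing in for the pass code:

// Sketch only: CheckScaleOnCpu is illustrative, not part of the commit;
// the enforce pattern mirrors the delete_quant_dequant_* hunks above.
void CheckScaleOnCpu(const phi::DenseTensor& scale_tensor) {
  PADDLE_ENFORCE_EQ(phi::is_cpu_place(scale_tensor.place()),
                    true,
                    platform::errors::InvalidArgument(
                        "Input scale tensor's place should be CPU."));
}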
