Skip to content

Commit 91818c2

Browse files
authored
Replace platform::errors [fluid_ops] part6 (#66150)
1 parent 0ba68a4 commit 91818c2

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

45 files changed

+521
-533
lines changed

paddle/fluid/eager/auto_code_generator/eager_generator.cc

Lines changed: 19 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -344,7 +344,7 @@ static std::string AttrTypeToString(const proto::AttrType& type) {
344344
break;
345345
}
346346
default: {
347-
PADDLE_THROW(platform::errors::Fatal(
347+
PADDLE_THROW(phi::errors::Fatal(
348348
"AttrType of type paddle::variant only supports specific data types."
349349
"However, detected unrecognized AttrType: %d",
350350
type));
@@ -455,7 +455,7 @@ static std::pair<std::string, std::string> GetAttrType(
455455
break;
456456
}
457457
default: {
458-
PADDLE_THROW(platform::errors::Fatal(
458+
PADDLE_THROW(phi::errors::Fatal(
459459
"AttrType of type paddle::variant only supports specific data types."
460460
"However, detected unrecognized AttrType: %d",
461461
variant_pos));
@@ -501,7 +501,7 @@ static void SlotNameMatching(
501501
if (grad_var == fwd_var) {
502502
if (grad_fwd_slotname_map.count(grad_slot_name) &&
503503
grad_fwd_slotname_map[grad_slot_name] != fwd_slot_name) {
504-
PADDLE_THROW(platform::errors::Fatal(
504+
PADDLE_THROW(phi::errors::Fatal(
505505
"Detected mismatched slot names."
506506
"grad_slot_name %s matches both %s and %s fwd_slot_name",
507507
grad_slot_name,
@@ -515,7 +515,7 @@ static void SlotNameMatching(
515515
if (fwd_var->GetGradVar() && grad_var == fwd_var->GetGradVar()) {
516516
if (grad_grad_slotname_map.count(grad_slot_name) &&
517517
grad_grad_slotname_map[grad_slot_name] != fwd_slot_name) {
518-
PADDLE_THROW(platform::errors::Fatal(
518+
PADDLE_THROW(phi::errors::Fatal(
519519
"Detected mismatched slot names."
520520
"grad_slot_name %s matches both %s and %s fwd_slot_name",
521521
grad_slot_name,
@@ -536,7 +536,7 @@ static void SlotNameMatching(
536536
if (grad_var == fwd_var) {
537537
if (grad_fwd_slotname_map.count(grad_slot_name) &&
538538
grad_fwd_slotname_map[grad_slot_name] != fwd_slot_name) {
539-
PADDLE_THROW(platform::errors::Fatal(
539+
PADDLE_THROW(phi::errors::Fatal(
540540
"Detected mismatched slot names"
541541
"grad_slot_name %s matches both %s and %s fwd_slot_name",
542542
grad_slot_name,
@@ -550,7 +550,7 @@ static void SlotNameMatching(
550550
if (fwd_var->GetGradVar() && grad_var == fwd_var->GetGradVar()) {
551551
if (grad_grad_slotname_map.count(grad_slot_name) &&
552552
grad_grad_slotname_map[grad_slot_name] != fwd_slot_name) {
553-
PADDLE_THROW(platform::errors::Fatal(
553+
PADDLE_THROW(phi::errors::Fatal(
554554
"Detected mismatched slot names."
555555
"grad_slot_name %s matches both %s and %s fwd_slot_name",
556556
grad_slot_name,
@@ -565,7 +565,7 @@ static void SlotNameMatching(
565565
}
566566

567567
if (!found_matching) {
568-
PADDLE_THROW(platform::errors::Fatal(
568+
PADDLE_THROW(phi::errors::Fatal(
569569
"Detected mismatched slot names."
570570
"Found no matching fwd_slot_name for grad_slot_name: %s",
571571
grad_slot_name));
@@ -2290,10 +2290,10 @@ static std::string GenerateSingleOpBase(
22902290
can_be_inplaced_name);
22912291
}
22922292
} else {
2293-
PADDLE_THROW(platform::errors::Fatal(
2294-
"Detected mismatched slot names."
2295-
"Unable to find forward slot name that matches %s",
2296-
grad_input_name));
2293+
PADDLE_THROW(
2294+
phi::errors::Fatal("Detected mismatched slot names."
2295+
"Unable to find forward slot name that matches %s",
2296+
grad_input_name));
22972297
}
22982298
}
22992299
if (!ins_contents_str.empty())
@@ -2438,10 +2438,10 @@ static std::string GenerateSingleOpBase(
24382438
}
24392439
}
24402440
} else {
2441-
PADDLE_THROW(platform::errors::Fatal(
2442-
"Detected mismatched slot names."
2443-
"Unable to find forward slot name that matches %s",
2444-
grad_output_name));
2441+
PADDLE_THROW(
2442+
phi::errors::Fatal("Detected mismatched slot names."
2443+
"Unable to find forward slot name that matches %s",
2444+
grad_output_name));
24452445
}
24462446
}
24472447

@@ -2495,10 +2495,10 @@ static std::string GenerateSingleOpBase(
24952495
}
24962496
}
24972497
} else {
2498-
PADDLE_THROW(platform::errors::Fatal(
2499-
"Detected mismatched slot names."
2500-
"Unable to find forward slot name that matches %s",
2501-
grad_output_name));
2498+
PADDLE_THROW(
2499+
phi::errors::Fatal("Detected mismatched slot names."
2500+
"Unable to find forward slot name that matches %s",
2501+
grad_output_name));
25022502
}
25032503
}
25042504

paddle/fluid/imperative/all_reduce.cc

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -45,7 +45,7 @@ static const phi::Place &GetVarPlace(const framework::Variable &src) {
4545
return src.Get<phi::SelectedRows>().value().place();
4646
#endif
4747
} else {
48-
PADDLE_THROW(platform::errors::InvalidArgument(
48+
PADDLE_THROW(phi::errors::InvalidArgument(
4949
"Cannot get unsupported variable type %s for imperative allreduce, "
5050
"only "
5151
"LoDTensor and SelectedRows are supported.",
@@ -61,7 +61,7 @@ static void AllReduce(const phi::DenseTensor &src,
6161
PADDLE_ENFORCE_EQ(
6262
phi::is_gpu_place(place),
6363
true,
64-
platform::errors::Unimplemented(
64+
phi::errors::Unimplemented(
6565
"Imperative mode does not support multi-CPU training yet."));
6666

6767
const void *src_ptr = src.data();
@@ -90,7 +90,7 @@ static void AllReduce(const phi::SelectedRows &src,
9090
PADDLE_ENFORCE_EQ(
9191
phi::is_gpu_place(place),
9292
true,
93-
platform::errors::Unimplemented(
93+
phi::errors::Unimplemented(
9494
"Imperative mode does not support multi-CPU training yet."));
9595

9696
auto dtype = framework::TransToProtoVarType(src_tensor.dtype());
@@ -259,7 +259,7 @@ void AllReduce(const framework::Variable &src,
259259
}
260260
#endif
261261
} else {
262-
PADDLE_THROW(platform::errors::InvalidArgument(
262+
PADDLE_THROW(phi::errors::InvalidArgument(
263263
"Unsupported variable type %s for imperative allreduce, only "
264264
"LoDTensor and SelectedRows are supported.",
265265
platform::demangle(framework::ToTypeName(src.Type()))));

paddle/fluid/imperative/amp_auto_cast.cc

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -58,7 +58,7 @@ OpSupportedInfos(const std::string& place,
5858
};
5959
PADDLE_ENFORCE_NE(is_target_place.count(query_place),
6060
0,
61-
platform::errors::InvalidArgument(
61+
phi::errors::InvalidArgument(
6262
"The argument `place` should be 'GPU', 'CPU', 'XPU' or "
6363
"other Custom Device, but got '%s'.",
6464
place));

paddle/fluid/imperative/basic_engine.cc

Lines changed: 14 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -48,20 +48,20 @@ void BasicEngine::Init(
4848
PADDLE_ENFORCE_EQ(
4949
tensors.size(),
5050
grad_tensors.size(),
51-
platform::errors::Unavailable(
51+
phi::errors::Unavailable(
5252
"The size of tensors do not equal the size of grad_tensors,"
5353
"the size of tensors is %s, but the size of grad_tensors is %s.",
5454
tensors.size(),
5555
grad_tensors.size()));
5656

5757
PADDLE_ENFORCE_EQ(accumulators_.empty(),
5858
true,
59-
platform::errors::AlreadyExists(
59+
phi::errors::AlreadyExists(
6060
"Accumulators are not empty before preparing it for "
6161
"backward network execution."));
6262
PADDLE_ENFORCE_EQ(accumulators_with_grad_node_.empty(),
6363
true,
64-
platform::errors::AlreadyExists(
64+
phi::errors::AlreadyExists(
6565
"Accumulators with grad_node as the key are not empty "
6666
"before preparing it for backward network execution."));
6767

@@ -74,7 +74,7 @@ void BasicEngine::Init(
7474
PADDLE_ENFORCE_EQ(
7575
var->GradVarBase()->GraphIsFreed(),
7676
false,
77-
platform::errors::Unavailable(
77+
phi::errors::Unavailable(
7878
"%s trying to backward through the same graph a second "
7979
"time, but this graph have already been freed. Please "
8080
"specify Tensor.backward(retain_graph=True) when "
@@ -99,7 +99,7 @@ void BasicEngine::Init(
9999
PADDLE_ENFORCE_EQ(
100100
var->HasGradVar(),
101101
true,
102-
platform::errors::NotFound("Tensor %s has no gradient", var->Name()));
102+
phi::errors::NotFound("Tensor %s has no gradient", var->Name()));
103103

104104
auto& fwd_var = var->Var().Get<phi::DenseTensor>();
105105
auto* grad_var =
@@ -192,7 +192,7 @@ void BasicEngine::PrepareGradAccumulators(
192192
for (auto& grad_pending_node : grad_pending_nodes) {
193193
PADDLE_ENFORCE_NOT_NULL(
194194
grad_pending_node,
195-
platform::errors::NotFound("Grad pending node is nullptr."));
195+
phi::errors::NotFound("Grad pending node is nullptr."));
196196
for (auto& grad_pending_op : *grad_pending_node) {
197197
VLOG(6) << "Determine whether var (" << var->Name()
198198
<< ") is the input var of grad_pending_op ("
@@ -279,8 +279,8 @@ void BasicEngine::PrepareDeps() {
279279
PADDLE_ENFORCE_EQ(
280280
node_deps_.empty(),
281281
true,
282-
platform::errors::AlreadyExists("Op deps are not empty before preparing "
283-
"it for backward network execution."));
282+
phi::errors::AlreadyExists("Op deps are not empty before preparing "
283+
"it for backward network execution."));
284284

285285
std::queue<GradOpNode*> q;
286286
std::unordered_set<GradOpNode*> visited;
@@ -304,7 +304,7 @@ void BasicEngine::PrepareDeps() {
304304
for (auto& grad_pending_node : grad_pending_nodes) {
305305
PADDLE_ENFORCE_NOT_NULL(
306306
grad_pending_node,
307-
platform::errors::NotFound("Grad pending node is nullptr."));
307+
phi::errors::NotFound("Grad pending node is nullptr."));
308308
++node_deps_[grad_pending_node.get()];
309309
if (visited.count(grad_pending_node.get()) == 0) {
310310
visited.insert(grad_pending_node.get());
@@ -489,8 +489,8 @@ void BasicEngine::Execute() {
489489
PADDLE_ENFORCE_EQ(
490490
iter != accumulators_.end(),
491491
true,
492-
platform::errors::NotFound(
493-
"Cannot find gradient of variable %s", var->Name()));
492+
phi::errors::NotFound("Cannot find gradient of variable %s",
493+
var->Name()));
494494
}
495495

496496
// leaf_accumulators_ : hooks and accumulate-grad for leaf tensor,
@@ -549,7 +549,7 @@ void BasicEngine::Execute() {
549549
PADDLE_ENFORCE_EQ(
550550
tensor_version,
551551
wrapper_version_snapshot,
552-
platform::errors::PermissionDenied(
552+
phi::errors::PermissionDenied(
553553
"Tensor '%s' used in gradient computation in grad op '%s' "
554554
"has been "
555555
"modified by an inplace operation. "
@@ -607,7 +607,7 @@ void BasicEngine::Execute() {
607607
throw exception;
608608
} catch (std::exception& ex) {
609609
Clear();
610-
PADDLE_THROW(platform::errors::External("%s", ex.what()));
610+
PADDLE_THROW(phi::errors::External("%s", ex.what()));
611611
}
612612
}
613613

@@ -656,7 +656,7 @@ void BasicEngine::Execute() {
656656
for (auto& grad_pending_node : shared_cur_node->GradPendingNodes()) {
657657
PADDLE_ENFORCE_NOT_NULL(
658658
grad_pending_node,
659-
platform::errors::NotFound("Grad pending node is nullptr."));
659+
phi::errors::NotFound("Grad pending node is nullptr."));
660660
auto iter = node_deps_.find(grad_pending_node.get());
661661
if (iter == node_deps_.end()) {
662662
continue;

0 commit comments

Comments (0)