38 changes: 19 additions & 19 deletions paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -344,7 +344,7 @@ static std::string AttrTypeToString(const proto::AttrType& type) {
       break;
     }
     default: {
-      PADDLE_THROW(platform::errors::Fatal(
+      PADDLE_THROW(phi::errors::Fatal(
           "AttrType of type paddle::variant only supports specific data types."
           "However, detected unrecognized AttrType: %d",
           type));
@@ -455,7 +455,7 @@ static std::pair<std::string, std::string> GetAttrType(
       break;
     }
     default: {
-      PADDLE_THROW(platform::errors::Fatal(
+      PADDLE_THROW(phi::errors::Fatal(
           "AttrType of type paddle::variant only supports specific data types."
           "However, detected unrecognized AttrType: %d",
           variant_pos));
@@ -501,7 +501,7 @@ static void SlotNameMatching(
     if (grad_var == fwd_var) {
       if (grad_fwd_slotname_map.count(grad_slot_name) &&
           grad_fwd_slotname_map[grad_slot_name] != fwd_slot_name) {
-        PADDLE_THROW(platform::errors::Fatal(
+        PADDLE_THROW(phi::errors::Fatal(
            "Detected mismatched slot names."
            "grad_slot_name %s matches both %s and %s fwd_slot_name",
            grad_slot_name,
@@ -515,7 +515,7 @@ static void SlotNameMatching(
       if (fwd_var->GetGradVar() && grad_var == fwd_var->GetGradVar()) {
         if (grad_grad_slotname_map.count(grad_slot_name) &&
             grad_grad_slotname_map[grad_slot_name] != fwd_slot_name) {
-          PADDLE_THROW(platform::errors::Fatal(
+          PADDLE_THROW(phi::errors::Fatal(
              "Detected mismatched slot names."
              "grad_slot_name %s matches both %s and %s fwd_slot_name",
              grad_slot_name,
@@ -536,7 +536,7 @@ static void SlotNameMatching(
     if (grad_var == fwd_var) {
       if (grad_fwd_slotname_map.count(grad_slot_name) &&
           grad_fwd_slotname_map[grad_slot_name] != fwd_slot_name) {
-        PADDLE_THROW(platform::errors::Fatal(
+        PADDLE_THROW(phi::errors::Fatal(
            "Detected mismatched slot names"
            "grad_slot_name %s matches both %s and %s fwd_slot_name",
            grad_slot_name,
@@ -550,7 +550,7 @@ static void SlotNameMatching(
       if (fwd_var->GetGradVar() && grad_var == fwd_var->GetGradVar()) {
         if (grad_grad_slotname_map.count(grad_slot_name) &&
             grad_grad_slotname_map[grad_slot_name] != fwd_slot_name) {
-          PADDLE_THROW(platform::errors::Fatal(
+          PADDLE_THROW(phi::errors::Fatal(
              "Detected mismatched slot names."
              "grad_slot_name %s matches both %s and %s fwd_slot_name",
              grad_slot_name,
@@ -565,7 +565,7 @@ static void SlotNameMatching(
   }

   if (!found_matching) {
-    PADDLE_THROW(platform::errors::Fatal(
+    PADDLE_THROW(phi::errors::Fatal(
        "Detected mismatched slot names."
        "Found no matching fwd_slot_name for grad_slot_name: %s",
        grad_slot_name));
@@ -2290,10 +2290,10 @@ static std::string GenerateSingleOpBase(
             can_be_inplaced_name);
       }
     } else {
-      PADDLE_THROW(platform::errors::Fatal(
-          "Detected mismatched slot names."
-          "Unable to find forward slot name that matches %s",
-          grad_input_name));
+      PADDLE_THROW(
+          phi::errors::Fatal("Detected mismatched slot names."
+                             "Unable to find forward slot name that matches %s",
+                             grad_input_name));
     }
   }
   if (!ins_contents_str.empty())
@@ -2438,10 +2438,10 @@ static std::string GenerateSingleOpBase(
         }
       }
     } else {
-      PADDLE_THROW(platform::errors::Fatal(
-          "Detected mismatched slot names."
-          "Unable to find forward slot name that matches %s",
-          grad_output_name));
+      PADDLE_THROW(
+          phi::errors::Fatal("Detected mismatched slot names."
+                             "Unable to find forward slot name that matches %s",
+                             grad_output_name));
     }
   }

@@ -2495,10 +2495,10 @@ static std::string GenerateSingleOpBase(
         }
       }
     } else {
-      PADDLE_THROW(platform::errors::Fatal(
-          "Detected mismatched slot names."
-          "Unable to find forward slot name that matches %s",
-          grad_output_name));
+      PADDLE_THROW(
+          phi::errors::Fatal("Detected mismatched slot names."
+                             "Unable to find forward slot name that matches %s",
+                             grad_output_name));
     }
   }

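For context, the pattern being migrated in this file is Paddle's error-factory idiom: `PADDLE_THROW` raises an error object built by a factory such as `phi::errors::Fatal`, which takes a printf-style format string. Below is a minimal, self-contained sketch of that idiom; the `errors` namespace and `THROW_ERROR` macro here are simplified stand-ins, not Paddle's actual definitions.

```cpp
#include <cstdarg>
#include <cstdio>
#include <stdexcept>
#include <string>

// Simplified stand-in for an error-factory namespace like phi::errors.
namespace errors {
inline std::runtime_error Fatal(const char* fmt, ...) {
  char buf[512];
  va_list args;
  va_start(args, fmt);
  std::vsnprintf(buf, sizeof(buf), fmt, args);  // printf-style formatting
  va_end(args);
  return std::runtime_error(std::string("FatalError: ") + buf);
}
}  // namespace errors

// Stand-in for PADDLE_THROW: raises the error object built by a factory.
#define THROW_ERROR(err) throw(err)

int main() {
  int unrecognized_type = 42;
  try {
    THROW_ERROR(errors::Fatal(
        "AttrType of type paddle::variant only supports specific data types. "
        "However, detected unrecognized AttrType: %d",
        unrecognized_type));
  } catch (const std::exception& ex) {
    std::fprintf(stderr, "%s\n", ex.what());
  }
}
```

Because the factory returns a value rather than throwing directly, switching namespaces (here, `platform::errors` to `phi::errors`) only changes where the factory lives; every call site keeps the same shape.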
8 changes: 4 additions & 4 deletions paddle/fluid/imperative/all_reduce.cc
@@ -45,7 +45,7 @@ static const phi::Place &GetVarPlace(const framework::Variable &src) {
     return src.Get<phi::SelectedRows>().value().place();
 #endif
   } else {
-    PADDLE_THROW(platform::errors::InvalidArgument(
+    PADDLE_THROW(phi::errors::InvalidArgument(
        "Cannot get unsupported variable type %s for imperative allreduce, "
        "only "
        "LoDTensor and SelectedRows are supported.",
@@ -61,7 +61,7 @@ static void AllReduce(const phi::DenseTensor &src,
   PADDLE_ENFORCE_EQ(
       phi::is_gpu_place(place),
       true,
-      platform::errors::Unimplemented(
+      phi::errors::Unimplemented(
          "Imperative mode does not support multi-CPU training yet."));

   const void *src_ptr = src.data();
@@ -90,7 +90,7 @@ static void AllReduce(const phi::SelectedRows &src,
   PADDLE_ENFORCE_EQ(
       phi::is_gpu_place(place),
       true,
-      platform::errors::Unimplemented(
+      phi::errors::Unimplemented(
          "Imperative mode does not support multi-CPU training yet."));

   auto dtype = framework::TransToProtoVarType(src_tensor.dtype());
@@ -259,7 +259,7 @@ void AllReduce(const framework::Variable &src,
   }
 #endif
   } else {
-    PADDLE_THROW(platform::errors::InvalidArgument(
+    PADDLE_THROW(phi::errors::InvalidArgument(
        "Unsupported variable type %s for imperative allreduce, only "
        "LoDTensor and SelectedRows are supported.",
        platform::demangle(framework::ToTypeName(src.Type()))));
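The all_reduce.cc hunks touch the companion `PADDLE_ENFORCE_EQ` idiom: an expected-value check that throws the supplied error object on mismatch. A minimal sketch of that contract follows; the `ENFORCE_EQ` macro, `errors` namespace, and `is_gpu_place` helper below are simplified stand-ins, not Paddle's real implementations.

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>

// Stand-in for an error factory like phi::errors::Unimplemented.
namespace errors {
inline std::runtime_error Unimplemented(const std::string& msg) {
  return std::runtime_error("UnimplementedError: " + msg);
}
}  // namespace errors

// Stand-in for PADDLE_ENFORCE_EQ: throws the given error if lhs != rhs.
#define ENFORCE_EQ(lhs, rhs, err)  \
  do {                             \
    if ((lhs) != (rhs)) throw err; \
  } while (0)

// Illustrative placeholder for phi::is_gpu_place.
bool is_gpu_place(const std::string& place) { return place == "gpu"; }

int main() {
  std::string place = "cpu";
  try {
    // Mirrors the check in AllReduce: only GPU places are accepted.
    ENFORCE_EQ(is_gpu_place(place),
               true,
               errors::Unimplemented(
                   "Imperative mode does not support multi-CPU training yet."));
  } catch (const std::exception& ex) {
    std::fprintf(stderr, "%s\n", ex.what());
  }
}
```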
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/amp_auto_cast.cc
@@ -58,7 +58,7 @@ OpSupportedInfos(const std::string& place,
   };
   PADDLE_ENFORCE_NE(is_target_place.count(query_place),
                     0,
-                    platform::errors::InvalidArgument(
+                    phi::errors::InvalidArgument(
                        "The argument `place` should be 'GPU', 'CPU', 'XPU' or "
                        "other Custom Device, but got '%s'.",
                        place));
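The amp_auto_cast.cc hunk uses the inverse form, `PADDLE_ENFORCE_NE`, which throws when the two values are equal; here it rejects an unrecognized `place` string by checking set membership. A hedged sketch of an equivalent check, with the macro and names below as illustrative stand-ins:

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>
#include <unordered_set>

// Stand-in for phi::errors::InvalidArgument.
namespace errors {
inline std::runtime_error InvalidArgument(const std::string& msg) {
  return std::runtime_error("InvalidArgumentError: " + msg);
}
}  // namespace errors

// Stand-in for PADDLE_ENFORCE_NE: throws the given error if lhs == rhs.
#define ENFORCE_NE(lhs, rhs, err)  \
  do {                             \
    if ((lhs) == (rhs)) throw err; \
  } while (0)

int main() {
  std::unordered_set<std::string> is_target_place{"GPU", "CPU", "XPU"};
  std::string place = "TPU";
  try {
    // count() is 0 when the place is unsupported, so ENFORCE_NE fires.
    ENFORCE_NE(is_target_place.count(place),
               0u,
               errors::InvalidArgument(
                   "The argument `place` should be 'GPU', 'CPU', 'XPU' or "
                   "other Custom Device, but got '" + place + "'."));
  } catch (const std::exception& ex) {
    std::fprintf(stderr, "%s\n", ex.what());
  }
}
```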
28 changes: 14 additions & 14 deletions paddle/fluid/imperative/basic_engine.cc
@@ -48,20 +48,20 @@ void BasicEngine::Init(
   PADDLE_ENFORCE_EQ(
       tensors.size(),
       grad_tensors.size(),
-      platform::errors::Unavailable(
+      phi::errors::Unavailable(
          "The size of tensors do not equal the size of grad_tensors,"
          "the size of tensors is %s, but the size of grad_tensors is %s.",
          tensors.size(),
          grad_tensors.size()));

   PADDLE_ENFORCE_EQ(accumulators_.empty(),
                     true,
-                    platform::errors::AlreadyExists(
+                    phi::errors::AlreadyExists(
                        "Accumulators are not empty before preparing it for "
                        "backward network execution."));
   PADDLE_ENFORCE_EQ(accumulators_with_grad_node_.empty(),
                     true,
-                    platform::errors::AlreadyExists(
+                    phi::errors::AlreadyExists(
                        "Accumulators with grad_node as the key are not empty "
                        "before preparing it for backward network execution."));

@@ -74,7 +74,7 @@ void BasicEngine::Init(
     PADDLE_ENFORCE_EQ(
         var->GradVarBase()->GraphIsFreed(),
         false,
-        platform::errors::Unavailable(
+        phi::errors::Unavailable(
            "%s trying to backward through the same graph a second "
            "time, but this graph have already been freed. Please "
            "specify Tensor.backward(retain_graph=True) when "
@@ -99,7 +99,7 @@ void BasicEngine::Init(
     PADDLE_ENFORCE_EQ(
         var->HasGradVar(),
         true,
-        platform::errors::NotFound("Tensor %s has no gradient", var->Name()));
+        phi::errors::NotFound("Tensor %s has no gradient", var->Name()));

     auto& fwd_var = var->Var().Get<phi::DenseTensor>();
     auto* grad_var =
@@ -192,7 +192,7 @@ void BasicEngine::PrepareGradAccumulators(
     for (auto& grad_pending_node : grad_pending_nodes) {
       PADDLE_ENFORCE_NOT_NULL(
           grad_pending_node,
-          platform::errors::NotFound("Grad pending node is nullptr."));
+          phi::errors::NotFound("Grad pending node is nullptr."));
       for (auto& grad_pending_op : *grad_pending_node) {
         VLOG(6) << "Determine whether var (" << var->Name()
                 << ") is the input var of grad_pending_op ("
@@ -279,8 +279,8 @@ void BasicEngine::PrepareDeps() {
   PADDLE_ENFORCE_EQ(
       node_deps_.empty(),
       true,
-      platform::errors::AlreadyExists("Op deps are not empty before preparing "
-                                      "it for backward network execution."));
+      phi::errors::AlreadyExists("Op deps are not empty before preparing "
+                                 "it for backward network execution."));

   std::queue<GradOpNode*> q;
   std::unordered_set<GradOpNode*> visited;
@@ -304,7 +304,7 @@
     for (auto& grad_pending_node : grad_pending_nodes) {
       PADDLE_ENFORCE_NOT_NULL(
           grad_pending_node,
-          platform::errors::NotFound("Grad pending node is nullptr."));
+          phi::errors::NotFound("Grad pending node is nullptr."));
       ++node_deps_[grad_pending_node.get()];
       if (visited.count(grad_pending_node.get()) == 0) {
         visited.insert(grad_pending_node.get());
@@ -489,8 +489,8 @@ void BasicEngine::Execute() {
       PADDLE_ENFORCE_EQ(
           iter != accumulators_.end(),
           true,
-          platform::errors::NotFound(
-              "Cannot find gradient of variable %s", var->Name()));
+          phi::errors::NotFound("Cannot find gradient of variable %s",
+                                var->Name()));
     }

     // leaf_accumulators_ : hooks and accumulate-grad for leaf tensor,
@@ -549,7 +549,7 @@ void BasicEngine::Execute() {
       PADDLE_ENFORCE_EQ(
           tensor_version,
           wrapper_version_snapshot,
-          platform::errors::PermissionDenied(
+          phi::errors::PermissionDenied(
              "Tensor '%s' used in gradient computation in grad op '%s' "
              "has been "
              "modified by an inplace operation. "
@@ -607,7 +607,7 @@ void BasicEngine::Execute() {
       throw exception;
     } catch (std::exception& ex) {
       Clear();
-      PADDLE_THROW(platform::errors::External("%s", ex.what()));
+      PADDLE_THROW(phi::errors::External("%s", ex.what()));
     }
   }

@@ -656,7 +656,7 @@ void BasicEngine::Execute() {
     for (auto& grad_pending_node : shared_cur_node->GradPendingNodes()) {
       PADDLE_ENFORCE_NOT_NULL(
           grad_pending_node,
-          platform::errors::NotFound("Grad pending node is nullptr."));
+          phi::errors::NotFound("Grad pending node is nullptr."));
       auto iter = node_deps_.find(grad_pending_node.get());
       if (iter == node_deps_.end()) {
         continue;
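Finally, basic_engine.cc exercises one more variant, `PADDLE_ENFORCE_NOT_NULL`, which throws the supplied error when a pointer is null. A minimal stand-in sketch of that check, again simplified rather than Paddle's actual macro:

```cpp
#include <cstdio>
#include <memory>
#include <stdexcept>
#include <string>

// Stand-in for phi::errors::NotFound.
namespace errors {
inline std::runtime_error NotFound(const std::string& msg) {
  return std::runtime_error("NotFoundError: " + msg);
}
}  // namespace errors

// Stand-in for PADDLE_ENFORCE_NOT_NULL: throws the error on a null pointer.
#define ENFORCE_NOT_NULL(ptr, err)   \
  do {                               \
    if ((ptr) == nullptr) throw err; \
  } while (0)

struct GradOpNode {};  // illustrative placeholder type

int main() {
  std::shared_ptr<GradOpNode> grad_pending_node;  // deliberately null
  try {
    ENFORCE_NOT_NULL(grad_pending_node,
                     errors::NotFound("Grad pending node is nullptr."));
  } catch (const std::exception& ex) {
    std::fprintf(stderr, "%s\n", ex.what());
  }
}
```

Across all four files the PR is mechanical: every enforce/throw call site keeps its check and message and only swaps the error-factory namespace from `platform::errors` to `phi::errors`, with clang-format reflowing a few argument lists as a side effect.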