18 changes: 9 additions & 9 deletions paddle/phi/kernels/gpu/accuracy_kernel.cu
@@ -88,7 +88,7 @@ void AccuracyKernel(const Context& dev_ctx,
PADDLE_ENFORCE_EQ(
inference.dims().size(),
2,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"Rank(Input) of AccuracyOp must be 2, with shape "
"[sample_number, class_dim], But received rank(Input) is %d",
inference.dims().size()));
@@ -104,18 +104,18 @@ void AccuracyKernel(const Context& dev_ctx,

PADDLE_ENFORCE_GT(label.dims().size(),
0,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"Rank(Label) of AccuracyOp must greater than 0, "
"But received rank(Label) is %d",
label.dims().size()));

-PADDLE_ENFORCE_GE(
-    label.dims()[0],
-    inference.dims()[0],
-    phi::errors::InvalidArgument("num_samples(%d) of Label should less than "
-                                 "or equal to num_samples(%d) of Input",
-                                 label.dims()[0],
-                                 num_samples));
+PADDLE_ENFORCE_GE(label.dims()[0],
+                  inference.dims()[0],
+                  common::errors::InvalidArgument(
+                      "num_samples(%d) of Label should less than "
+                      "or equal to num_samples(%d) of Input",
+                      label.dims()[0],
+                      num_samples));

if (num_samples == 0) {
return;
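Every hunk in this PR makes the same substitution: the error-factory call moves from `phi::errors::` to `common::errors::`, while the surrounding `PADDLE_ENFORCE_*`/`PADDLE_THROW` macro and the message text stay as they were. Where a hunk spans several lines (like the seven-line pair above), the extra churn is clang-format re-wrapping the call around the longer namespace name. Below is a minimal standalone sketch of the enforce/error-factory pattern these call sites follow; the macro and factory are simplified stand-ins, not Paddle's actual implementations.

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>

namespace common {
namespace errors {
// Printf-style factory in the spirit of common::errors::InvalidArgument.
template <typename... Args>
std::runtime_error InvalidArgument(const char* fmt, Args... args) {
  char buf[512];
  std::snprintf(buf, sizeof(buf), fmt, args...);
  return std::runtime_error(std::string("InvalidArgument: ") + buf);
}
}  // namespace errors
}  // namespace common

// Simplified stand-in for PADDLE_ENFORCE_EQ: the error expression is only
// evaluated (and thrown) when the check fails.
#define ENFORCE_EQ(lhs, rhs, error)    \
  do {                                 \
    if ((lhs) != (rhs)) throw (error); \
  } while (0)

int main() {
  const int rank = 3;
  try {
    ENFORCE_EQ(rank, 2,
               common::errors::InvalidArgument(
                   "Rank(Input) of AccuracyOp must be 2, "
                   "but received rank(Input) is %d", rank));
  } catch (const std::exception& e) {
    std::puts(e.what());  // "InvalidArgument: Rank(Input) ... is 3"
  }
  return 0;
}
```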
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/add_n_kernel.cu
@@ -84,7 +84,7 @@ void AddNKernel(const Context &dev_ctx,
PADDLE_ENFORCE_EQ(
x[i]->initialized(),
true,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"This argument is invalid, %d-th tensor is uninitialized.", i));
}

2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/all_to_all_kernel.cu
@@ -73,7 +73,7 @@ void AllToAllKernel(const Context& dev_ctx,
}
comm_ctx->GroupEnd();
#else
-PADDLE_THROW(phi::errors::Unavailable("NCCL version >= 2.7.3 is needed."));
+PADDLE_THROW(common::errors::Unavailable("NCCL version >= 2.7.3 is needed."));
#endif
#else
PADDLE_THROW(
4 changes: 2 additions & 2 deletions paddle/phi/kernels/gpu/allclose_kernel.cu
@@ -63,7 +63,7 @@ void AllCloseKernel(const Context& dev_ctx,
} else if (rtol.dtype() == DataType::FLOAT32) {
rtol_v = rtol.to<float>();
} else {
-PADDLE_THROW(phi::errors::InvalidArgument(
+PADDLE_THROW(common::errors::InvalidArgument(
"Input (Rtol) type must be double or float, but get %s.",
rtol.dtype()));
}
@@ -72,7 +72,7 @@ void AllCloseKernel(const Context& dev_ctx,
} else if (atol.dtype() == DataType::FLOAT32) {
atol_v = atol.to<float>();
} else {
-PADDLE_THROW(phi::errors::InvalidArgument(
+PADDLE_THROW(common::errors::InvalidArgument(
"Input (Atol) type must be double or float, but get %s.",
atol.dtype()));
}
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/arg_min_max_kernel.cu
@@ -214,7 +214,7 @@ void ArgMinMaxOpCUDAKernel(const Context& dev_ctx,
PADDLE_ENFORCE_GT(
x.numel(),
0,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"argmin/argmax input numel must > 0, bug got %d", x.numel()));
if (dtype == DataType::UNDEFINED) {
phi::VisitDataTypeTiny(
12 changes: 6 additions & 6 deletions paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu
@@ -148,7 +148,7 @@ class InplaceHelper {
const gpuStream_t &stream) {
PADDLE_ENFORCE_EQ(x,
y,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"X and Y should be inplaced in inplace mode"));
KeBNRestoreData<<<grid2, block, 0, stream>>>(
layout, x, scale, bias, mean, variance, epsilon, C, M, num, y);
@@ -526,7 +526,7 @@ void BatchNormGradFunctor(const Context &ctx,
PADDLE_ENFORCE_EQ(
x_dims.size() >= 2 && x_dims.size() <= 5,
true,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"The size of input's dimensions should be between 2 and 5."
"But received: the size of input's dimensions is [%d],"
"the dimensions of input is [%s]",
@@ -536,7 +536,7 @@ void BatchNormGradFunctor(const Context &ctx,
PADDLE_ENFORCE_EQ((d_scale == nullptr && d_bias == nullptr) ||
(d_scale != nullptr && d_bias != nullptr),
true,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"Weight and bias's stop_gradient of BatchNorm must be "
"True or False at the same time."));

@@ -574,7 +574,7 @@ void BatchNormGradFunctor(const Context &ctx,
PADDLE_ENFORCE_EQ(
new_scale.dims().size(),
1UL,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"The size of scale's dimensions must equal to 1. But received: "
"the size of scale's dimensions is [%d], the dimensions of scale "
"is [%s].",
@@ -583,7 +583,7 @@ void BatchNormGradFunctor(const Context &ctx,
PADDLE_ENFORCE_EQ(
new_scale.dims()[0],
C,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"The first dimension of scale must equal to Channels[%d]. But "
"received: the first dimension of scale is [%d]",
C,
@@ -1374,7 +1374,7 @@ void BatchNormDoubleGradKernel(
DenseTensor *y_grad_grad) {
PADDLE_ENFORCE_EQ(is_test,
false,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"`is_test = True` CANNOT be used in train program. If "
"you want to use global status in pre_train model, "
"please set `use_global_stats = True`"));
12 changes: 6 additions & 6 deletions paddle/phi/kernels/gpu/batch_norm_kernel.cu
@@ -544,7 +544,7 @@ void BatchNormKernel(const Context &ctx,
PADDLE_ENFORCE_EQ(
x_dims.size() >= 2 && x_dims.size() <= 5,
true,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"The size of input's dimensions should be between 2 and 5"
"But received: the size of input's dimensions is [%d]",
x_dims.size()));
@@ -707,7 +707,7 @@ void BatchNormKernel(const Context &ctx,
PADDLE_ENFORCE_EQ(
est_mean->dims().size(),
1UL,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"The size of mean's dimensions must equal to 1."
"But received: the size of mean's dimensions mean is [%d],"
"the dimensions of mean is [%s].",
@@ -716,7 +716,7 @@ void BatchNormKernel(const Context &ctx,
PADDLE_ENFORCE_EQ(
est_var->dims().size(),
1UL,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"The size of variance's dimensions must equal to 1."
"But received: the size of variance's dimensions is [%d],"
"the dimensions of variance is [%s].",
@@ -725,7 +725,7 @@ void BatchNormKernel(const Context &ctx,
PADDLE_ENFORCE_EQ(
est_mean->dims()[0],
C,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"The first dimension of mean must equal to the number of "
"Channels, which is [%d]. But received: the first dimension"
"of mean is [%d], the dimensions of mean is [%s].",
@@ -735,7 +735,7 @@ void BatchNormKernel(const Context &ctx,
PADDLE_ENFORCE_EQ(
est_var->dims()[0],
C,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"The first dimension of variance must equal to the number"
"of Channels, which is [%d]. But received: the first dimension of"
"variance is [%d], the dimensions of variance is [%s].",
@@ -1156,7 +1156,7 @@ void BatchNormKernel(const Context &ctx,
}
PADDLE_ENFORCE_NOT_NULL(
reserve_space,
-phi::errors::NotFound(
+common::errors::NotFound(
"The argument ReserveSpace of batch_norm op is not found."));
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_GPU_SUCCESS(
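A rename like this typically lands while many `phi::errors::` spellings still exist elsewhere in the tree. One common transition arrangement (whether Paddle keeps such an alias is not visible in this diff) is a namespace alias that lets both spellings compile until every call site is migrated:

```cpp
#include <stdexcept>
#include <string>

namespace common {
namespace errors {
inline std::runtime_error InvalidArgument(const std::string& msg) {
  return std::runtime_error("InvalidArgument: " + msg);
}
}  // namespace errors
}  // namespace common

namespace phi {
// Old call sites written as phi::errors::... resolve to the new factories.
namespace errors = ::common::errors;
}  // namespace phi

int main() {
  // Both spellings name the same function during the migration window.
  auto a = phi::errors::InvalidArgument("old spelling");
  auto b = common::errors::InvalidArgument("new spelling");
  (void)a;
  (void)b;
  return 0;
}
```

With an alias in place, directories can be converted batch by batch, as this PR does for `paddle/phi/kernels/gpu/`, with no flag-day change.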
4 changes: 2 additions & 2 deletions paddle/phi/kernels/gpu/bincount_kernel.cu
@@ -85,7 +85,7 @@ void BincountCUDAInner(const Context& dev_ctx,
PADDLE_ENFORCE_GE(
input_min,
static_cast<InputT>(0),
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"The elements in input tensor must be non-negative ints"));

int64_t output_size =
@@ -137,7 +137,7 @@ void BincountKernel(const Context& dev_ctx,
int int_minlength = minlength.to<int>();
PADDLE_ENFORCE_GE(int_minlength,
0,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"The minlength should be greater than or equal to 0."
"But received minlength is %d",
int_minlength));
6 changes: 3 additions & 3 deletions paddle/phi/kernels/gpu/box_coder_kernel.cu
@@ -164,7 +164,7 @@ void BoxCoderKernel(const Context &dev_ctx,
if (prior_box_var) {
PADDLE_ENFORCE_EQ(variance.empty(),
true,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"Input 'PriorBoxVar' and attribute 'variance'"
" of BoxCoder operator should not be used at the "
"same time."));
@@ -174,7 +174,7 @@ void BoxCoderKernel(const Context &dev_ctx,
if (!(variance.empty())) {
PADDLE_ENFORCE_EQ(static_cast<int>(variance.size()),
4,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"Size of attribute 'variance' in BoxCoder operator"
" should be 4. But received size is %d",
variance.size()));
@@ -183,7 +183,7 @@ void BoxCoderKernel(const Context &dev_ctx,
if (target_box.lod().size()) {
PADDLE_ENFORCE_EQ(target_box.lod().size(),
1,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"Input 'TargetBox' of BoxCoder operator only"
" supports LoD with one level."));
}
8 changes: 4 additions & 4 deletions paddle/phi/kernels/gpu/broadcast_kernel.cu
@@ -28,10 +28,10 @@ void BroadcastKernel(const Context& dev_ctx,
const DenseTensor& x,
int root,
DenseTensor* out) {
-PADDLE_ENFORCE_GT(
-    x.numel(),
-    0,
-    phi::errors::InvalidArgument("Tensor need be broadcast must not empty."));
+PADDLE_ENFORCE_GT(x.numel(),
+                  0,
+                  common::errors::InvalidArgument(
+                      "Tensor need be broadcast must not empty."));

#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
dev_ctx.template Alloc<T>(out);
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/c_embedding_grad_kernel.cu
@@ -134,7 +134,7 @@ void CEmbeddingGradKernel(const Context& dev_ctx,
return;
}
}
-PADDLE_THROW(phi::errors::InvalidArgument(
+PADDLE_THROW(common::errors::InvalidArgument(
"The data type of Input(Ids) must be int32 or int64."));
}

2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/c_embedding_kernel.cu
@@ -107,7 +107,7 @@ void CEmbeddingKernel(const Context& ctx,
limit,
vocab_size);
} else {
-PADDLE_THROW(phi::errors::Unavailable(
+PADDLE_THROW(common::errors::Unavailable(
"GPU c_embedding ids only support int32 or int64."));
}
}
6 changes: 3 additions & 3 deletions paddle/phi/kernels/gpu/c_split_kernel.cu
@@ -62,19 +62,19 @@ void CSplitKernel(const Context& ctx,

PADDLE_ENFORCE_GE(rank,
0,
-phi::errors::PreconditionNotMet(
+common::errors::PreconditionNotMet(
"The value of rank (%d) for c_split must be "
"greater than or equal to 0.",
rank));
PADDLE_ENFORCE_GE(nranks,
2,
-phi::errors::PreconditionNotMet(
+common::errors::PreconditionNotMet(
"The value of nranks (%d) for c_split must be "
"greater than or equal to 2.",
nranks));
PADDLE_ENFORCE_LT(rank,
nranks,
-phi::errors::PreconditionNotMet(
+common::errors::PreconditionNotMet(
"The value of rank (%d) for c_split must be "
"less than that of nranks (%d).",
rank,
4 changes: 2 additions & 2 deletions paddle/phi/kernels/gpu/calc_reduced_attn_kernel.cu
@@ -58,13 +58,13 @@ void CalcReducedAttnScoresKernel(const Context& ctx,
#if defined(PADDLE_WITH_FLASHATTN) && !defined(PADDLE_WITH_HIP)
PADDLE_ENFORCE_EQ(q.dims().size(),
4,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"calc_reduced_attention receive input with dim "
"[batch_size, seq_len, num_heads, head_dim]"));

PADDLE_ENFORCE_EQ(k.dims().size(),
4,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"calc_reduced_attention receive input with dim "
"[batch_size, seq_len, num_heads, head_dim]"));

10 changes: 5 additions & 5 deletions paddle/phi/kernels/gpu/check_numerics_kernel.cu
@@ -46,7 +46,7 @@ static void InitMultiGPUOpVarMap() {
int dev_count = phi::backends::gpu::GetGPUDeviceCount();
PADDLE_ENFORCE_GT(dev_count,
0,
-phi::errors::NotFound(
+common::errors::NotFound(
"cuda device must > 0, now dev_count=%d", dev_count));

// https://stackoverflow.com/questions/16465633/how-can-i-use-something-like-stdvectorstdmutex
@@ -279,8 +279,8 @@ inline std::string GetHintString(const std::string& op_type,
PADDLE_ENFORCE_EQ(
(dev_id >= 0 && dev_id < multi_op_var2gpu_str_mutex().size()),
true,
-phi::errors::OutOfRange("GPU dev_id must >=0 and < dev_count=%d",
-                        multi_op_var2gpu_str_mutex().size()));
+common::errors::OutOfRange("GPU dev_id must >=0 and < dev_count=%d",
+                           multi_op_var2gpu_str_mutex().size()));
return op_var;
}

@@ -312,7 +312,7 @@ static char* GetGpuHintStringPtr(const phi::GPUContext& ctx,
auto iter = op_var2gpu_str.find(op_var);
PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(),
true,
-phi::errors::PreconditionNotMet(
+common::errors::PreconditionNotMet(
"op_var=%s should successed insert into "
"op_var2gpu_str, but now failed",
op_var));
@@ -334,7 +334,7 @@ static char* GetGpuHintStringPtr(const phi::GPUContext& ctx,
auto iter = op_var2gpu_str.find(op_var);
PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(),
true,
-phi::errors::PreconditionNotMet(
+common::errors::PreconditionNotMet(
"op_var=%s should be in the op_var2gpu_str, but "
"now can't find it",
op_var));
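check_numerics_kernel.cu also carries a comment linking to the classic std::vector&lt;std::mutex&gt; question: std::mutex is neither copyable nor movable, so a per-device mutex table has to be built in place and never resized. A minimal sketch of that idiom, with a hypothetical `DevMutexes` helper (Paddle's actual storage is only partially visible in these hunks):

```cpp
#include <mutex>
#include <vector>

// std::mutex cannot be copied or moved, so construct the whole vector
// once (count-construction default-builds elements in place) and never
// call push_back/resize on it afterwards. The static local is built on
// the first call only.
std::vector<std::mutex>& DevMutexes(int dev_count) {
  static std::vector<std::mutex> mutexes(dev_count);
  return mutexes;
}

int main() {
  auto& m = DevMutexes(/*dev_count=*/4);
  std::lock_guard<std::mutex> guard(m[0]);  // serialize work on device 0
  return 0;
}
```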
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/clip_by_norm_kernel.cu
@@ -37,7 +37,7 @@ void ClipByNormKernel(const Context& dev_ctx,
dev_ctx.template Alloc<T>(output);

PADDLE_ENFORCE_NOT_NULL(input,
-phi::errors::InvalidArgument(
+common::errors::InvalidArgument(
"Input(X) of ClipByNormOp should not be null. "
"Please check if it is created correctly."));
std::vector<int> reduce_dims;
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/concat_kernel.cu
@@ -54,7 +54,7 @@ void ConcatKernel(const Context& dev_ctx,
PADDLE_ENFORCE_EQ(
x[i]->lod().size(),
lod_size_0,
-phi::errors::Unimplemented(
+common::errors::Unimplemented(
"The lod level of all input LoDTensors should be same. "
"Maybe different lod level of input LoDTensors can concat,"
"it is not supported currently. The lod level of %dth input "
4 changes: 2 additions & 2 deletions paddle/phi/kernels/gpu/contiguous_kernel.cu
@@ -413,7 +413,7 @@ bool LaunchContiguousCazeOneKernel(
input_dims[rank - 1] * input_dims[rank - 2] * input_dims[rank - 3]);
break;
default:
-PADDLE_THROW(phi::errors::InvalidArgument(
+PADDLE_THROW(common::errors::InvalidArgument(
"The rank of input should be less than 9, but received %d.", rank));
}

@@ -470,7 +470,7 @@ void LaunchContiguousDefaultKernel(
input_data, input_stride, input_dims, numel, output_data);
break;
default:
-PADDLE_THROW(phi::errors::InvalidArgument(
+PADDLE_THROW(common::errors::InvalidArgument(
"The rank of input should be less than 9, but received %d.", rank));
}
}
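contiguous_kernel.cu shows the other recurring shape of the change: `PADDLE_THROW(common::errors::...)` as the `default` arm of a rank-dispatch `switch`. A standalone sketch of that dispatch shape, with a hypothetical `LaunchForRank` standing in for the real templated CUDA launches:

```cpp
#include <stdexcept>
#include <string>

// Hypothetical stand-in for a kernel launch specialized on tensor rank;
// Rank being a compile-time constant lets the body unroll per-dimension
// index arithmetic.
template <int Rank>
void LaunchForRank() { /* rank-specialized work */ }

void DispatchByRank(int rank) {
  switch (rank) {
    case 1: LaunchForRank<1>(); break;
    case 2: LaunchForRank<2>(); break;
    case 3: LaunchForRank<3>(); break;
    // ... cases 4 through 8 follow the same pattern ...
    default:
      // Mirrors PADDLE_THROW(common::errors::InvalidArgument(...)).
      throw std::invalid_argument(
          "The rank of input should be less than 9, but received " +
          std::to_string(rank));
  }
}

int main() {
  DispatchByRank(3);  // dispatches to LaunchForRank<3>()
  return 0;
}
```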
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/correlation_grad_kernel.cu
@@ -221,7 +221,7 @@ void CorrelationCUDAGradKernel(const Context &dev_ctx,
PADDLE_ENFORCE_EQ(
is_gpu_place,
true,
-phi::errors::InvalidArgument("Correlation only supports GPU now."));
+common::errors::InvalidArgument("Correlation only supports GPU now."));
const auto *grad_output = &out_grad;

auto *grad_input1 = input1_grad;
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/correlation_kernel.cu
@@ -107,7 +107,7 @@ void CorrelationCUDAKernel(const Context &dev_ctx,
PADDLE_ENFORCE_EQ(
is_gpu_place,
true,
-phi::errors::InvalidArgument("Correlation only supports GPU now."));
+common::errors::InvalidArgument("Correlation only supports GPU now."));

dev_ctx.template Alloc<T>(out);
