Commits (37)
cdb63c9
add LPPool1D forward
WintersMontagne10335 Oct 26, 2023
bdc21ca
Merge remote-tracking branch 'upstream/develop' into winters016
WintersMontagne10335 Oct 26, 2023
cf51ba2
Merge remote-tracking branch 'upstream/develop' into winters016
WintersMontagne10335 Oct 26, 2023
442c0ca
fix bugs
WintersMontagne10335 Oct 30, 2023
9de7d43
Merge remote-tracking branch 'upstream/develop' into winters016
WintersMontagne10335 Oct 30, 2023
9e5a765
fix bugs
WintersMontagne10335 Oct 30, 2023
a8a1e51
fix bugs
WintersMontagne10335 Oct 31, 2023
fb05d44
fix bugs
WintersMontagne10335 Nov 1, 2023
cddebfd
Merge remote-tracking branch 'upstream/develop' into winters016
WintersMontagne10335 Nov 1, 2023
d38e66d
fix bugs
WintersMontagne10335 Nov 1, 2023
25f193d
fix bugs
WintersMontagne10335 Nov 2, 2023
be2138c
Merge remote-tracking branch 'upstream/develop' into winters016
WintersMontagne10335 Nov 2, 2023
d5fadd3
fix bugs
WintersMontagne10335 Nov 2, 2023
54638bd
add LPPool2D backward
WintersMontagne10335 Nov 3, 2023
3e5acf4
Merge remote-tracking branch 'upstream/develop' into winters016
WintersMontagne10335 Nov 13, 2023
f763cea
Merge remote-tracking branch 'upstream/develop' into winters016
WintersMontagne10335 Nov 21, 2023
875c503
Merge remote-tracking branch 'upstream/develop' into winters016
WintersMontagne10335 Nov 21, 2023
8c424dd
modify yaml files
WintersMontagne10335 Nov 21, 2023
c52d76e
fix bugs
WintersMontagne10335 Nov 21, 2023
b0a51b1
Merge remote-tracking branch 'upstream/develop' into winters016
WintersMontagne10335 Nov 21, 2023
d948cd9
fix bugs
WintersMontagne10335 Nov 22, 2023
94213b8
Merge remote-tracking branch 'upstream/develop' into winters016
WintersMontagne10335 Nov 22, 2023
faf33ed
fix bugs
WintersMontagne10335 Nov 25, 2023
f770e49
Merge remote-tracking branch 'upstream/develop' into winters016
WintersMontagne10335 Nov 26, 2023
df0339c
reduce useless parameters
WintersMontagne10335 Nov 26, 2023
69ffd26
reduce useless parameters
WintersMontagne10335 Nov 27, 2023
54ba6b7
fix bugs
WintersMontagne10335 Nov 27, 2023
e0e98c5
fix bugs
WintersMontagne10335 Nov 27, 2023
93a03f6
convert implementation method
WintersMontagne10335 Dec 2, 2023
088ff3c
Merge remote-tracking branch 'upstream/develop' into winters016
WintersMontagne10335 Dec 2, 2023
0ebccc5
Revert "convert implementation method"
WintersMontagne10335 Dec 3, 2023
4d01635
fix bugs
WintersMontagne10335 Dec 3, 2023
f35a169
add static graph unit test case
WintersMontagne10335 Dec 4, 2023
a95232a
add more dygraph unit test cases
WintersMontagne10335 Dec 4, 2023
3dee834
handle boundary situations
WintersMontagne10335 Dec 5, 2023
d3d4b3d
Merge remote-tracking branch 'upstream/develop' into winters016
WintersMontagne10335 Dec 13, 2023
d0cc111
reply to issue
WintersMontagne10335 Dec 13, 2023
11 changes: 11 additions & 0 deletions paddle/phi/api/yaml/backward.yaml
@@ -1429,6 +1429,17 @@
func : logsigmoid_grad
inplace : (out_grad -> x_grad)

- backward_op : lp_pool2d_grad
forward : lp_pool2d(Tensor x, float norm_type, IntArray kernel_size, int[] strides, bool ceil_mode, str data_format) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, float norm_type, IntArray kernel_size, int[] strides, bool ceil_mode, str data_format)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : lp_pool2d_grad
param : [x, out, out_grad, norm_type, kernel_size, strides, ceil_mode, data_format]
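For orientation (this is the conventional L^p pooling definition, stated as background rather than wording taken from the PR): with norm type p and a pooling window W, the forward output is

    out = \left( \sum_{i \in W} |x_i|^{p} \right)^{1/p}

and its gradient with respect to each input element reuses both the input and the output,

    \frac{\partial\, out}{\partial x_i} = \frac{x_i\, |x_i|^{p-2}}{out^{\,p-1}}

which is why lp_pool2d_grad is declared to take x, out, and out_grad instead of recomputing the window sums in the backward pass.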

- backward_op : lu_grad
forward : lu (Tensor x, bool pivot = true) -> Tensor(out), Tensor(pivots), Tensor(infos)
args : (Tensor x, Tensor out, Tensor pivots, Tensor out_grad, bool pivot)
11 changes: 11 additions & 0 deletions paddle/phi/api/yaml/ops.yaml
@@ -1601,6 +1601,17 @@
func : logsigmoid
backward : logsigmoid_grad

- op : lp_pool2d
args : (Tensor x, float norm_type, IntArray kernel_size, int[] strides, bool ceil_mode, str data_format)
output : Tensor(out)
infer_meta :
func : LPPool2DInferMeta
param : [x, kernel_size, strides, ceil_mode, data_format]
kernel :
func : lp_pool2d
param : [x, norm_type, kernel_size, strides, ceil_mode, data_format]
backward : lp_pool2d_grad

- op : lstsq
args : (Tensor x, Tensor y, Scalar rcond=0.0f, str driver="gels")
output : Tensor(solution), Tensor(residuals), Tensor(rank), Tensor(singular_values)
120 changes: 120 additions & 0 deletions paddle/phi/infermeta/unary.cc
@@ -2144,6 +2144,126 @@ void MatrixPowerInferMeta(const MetaTensor& x, int n, MetaTensor* out) {
out->set_dtype(x.dtype());
}

void LPPoolInferMeta(const MetaTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
bool ceil_mode,
const std::string& data_format,
MetaTensor* out,
MetaConfig config) {
std::vector<int> kernel_size_ = kernel_size;

auto x_dims = x.dims();
PADDLE_ENFORCE_EQ(
x_dims.size() == 4 || x_dims.size() == 5,
true,
errors::InvalidArgument(
"the input of Op(pool) should be 4-D or 5-D Tensor. But "
"received: %u-D Tensor and it's shape is [%s].",
x_dims.size(),
x_dims));

PADDLE_ENFORCE_EQ(x_dims.size() - kernel_size_.size(),
2U,
errors::InvalidArgument(
"the dimension of input minus the size of "
"Attr(kernel_size_) must be euqal to 2 in Op(pool). "
"But received: the dimension of input minus the size "
"of Attr(kernel_size_) is %d, the "
"input's dimension is %d, the shape of input "
"is [%s], the Attr(kernel_size_)'s size is %d, the "
"Attr(kernel_size_) is [%s].",
x_dims.size() - kernel_size_.size(),
x_dims.size(),
x_dims,
kernel_size_.size(),
make_ddim(kernel_size_)));

PADDLE_ENFORCE_EQ(
kernel_size_.size(),
strides.size(),
errors::InvalidArgument(
"the size of Attr(kernel_size_) and Attr(strides) in "
"Op(pool) must be equal. "
"But received: Attr(kernel_size_)'s size is %d, Attr(strides)'s "
"size is %d, Attr(kernel_size_) is [%s], Attr(strides)is [%s].",
kernel_size_.size(),
strides.size(),
make_ddim(kernel_size_),
make_ddim(strides)));

// MKL-DNN kernels describe dims in NCHW order, so data_format is
// ignored when an MKL-DNN kernel is being run
const bool channel_last = (config.is_run_mkldnn_kernel == false) &&
(data_format == "NHWC" || data_format == "NDHWC");

// slice out the spatial dims from x_dims
DDim data_dims;
if (channel_last) {
data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1);
} else {
data_dims = slice_ddim(x_dims, 2, x_dims.size());
}

std::vector<int64_t> output_shape;
for (int i = 0; i < data_dims.size(); ++i) {
if ((!config.is_runtime) && (data_dims[i] < 0)) {
output_shape.push_back(data_dims[i]);
} else {
output_shape.push_back(
funcs::PoolOutputSize(static_cast<int>(data_dims[i]),
kernel_size_[i],
0,
0,
strides[i],
ceil_mode));
}
}

// output_N = input_N
output_shape.insert(output_shape.begin(), x_dims[0]);
// output_C = input_C
if (channel_last) {
output_shape.push_back(x_dims[x_dims.size() - 1]);
} else {
output_shape.insert(output_shape.begin() + 1, x_dims[1]);
}

out->set_dims(make_ddim(output_shape));
out->share_lod(x);
out->set_dtype(x.dtype());
}
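The spatial output extents above come from funcs::PoolOutputSize with both paddings passed as 0. A minimal sketch of what that call is expected to compute at this call site (pool_output_size is a hypothetical stand-in written for illustration, not the PHI helper itself):

// Hypothetical stand-in for funcs::PoolOutputSize with paddings fixed at 0:
// plain floor division normally; ceil_mode rounds a trailing partial window up.
int pool_output_size(int input, int kernel, int stride, bool ceil_mode) {
  int numerator = input - kernel + (ceil_mode ? stride - 1 : 0);
  return numerator / stride + 1;
}
// Example: input = 7, kernel = 2, stride = 2 gives 3 windows, or 4 with ceil_mode.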

void LPPool2DInferMeta(const MetaTensor& x,
const IntArray& kernel_size,
const std::vector<int>& strides,
bool ceil_mode,
const std::string& data_format,
MetaTensor* out,
MetaConfig config) {
const bool channel_last = (config.is_run_mkldnn_kernel == false) &&
(data_format == "NHWC" || data_format == "NDHWC");
if (!config.is_runtime && kernel_size.FromTensor()) {
auto x_dims = x.dims();
std::vector<int64_t> output_shape = phi::vectorize(x_dims);
// mark the spatial dims (H and W) as unknown (-1)
output_shape[x_dims.size() - 2] = -1;
if (channel_last) { // for NHWC, NDHWC
output_shape[x_dims.size() - 3] = -1;
} else { // for NCHW
output_shape[x_dims.size() - 1] = -1;
}
out->set_dims(make_ddim(output_shape));
out->share_lod(x);
out->set_dtype(x.dtype());
} else {
std::vector<int> kernel_size_val(kernel_size.GetData().begin(),
kernel_size.GetData().end());
LPPoolInferMeta(
x, kernel_size_val, strides, ceil_mode, data_format, out, config);
}
}
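In short, when kernel_size arrives as a Tensor whose values are not known at compile time, only the spatial extents are marked unknown: an NCHW input of shape [2, 3, 32, 32] is inferred as [2, 3, -1, -1], with batch and channel dims kept. Otherwise shape inference falls through to LPPoolInferMeta above with the concrete kernel sizes.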

void LUInferMeta(const MetaTensor& x,
bool pivot,
MetaTensor* out,
16 changes: 16 additions & 0 deletions paddle/phi/infermeta/unary.h
@@ -320,6 +320,22 @@ void LogsumexpInferMeta(const MetaTensor& input,
bool reduce_all,
MetaTensor* out);

void LPPoolInferMeta(const MetaTensor& x,
const std::vector<int>& kernel_size,
const std::vector<int>& strides,
bool ceil_mode,
const std::string& data_format,
MetaTensor* out,
MetaConfig config = MetaConfig());

void LPPool2DInferMeta(const MetaTensor& x,
const IntArray& kernel_size,
const std::vector<int>& strides,
bool ceil_mode,
const std::string& data_format,
MetaTensor* out,
MetaConfig config = MetaConfig());

void LUInferMeta(const MetaTensor& x,
bool pivot,
MetaTensor* out,
2 changes: 2 additions & 0 deletions paddle/phi/kernels/cpu/pool_grad_kernel.cc
@@ -19,6 +19,8 @@

PD_REGISTER_KERNEL(
pool2d_grad, CPU, ALL_LAYOUT, phi::Pool2dGradKernel, float, double) {}
PD_REGISTER_KERNEL(
lp_pool2d_grad, CPU, ALL_LAYOUT, phi::LPPool2dGradKernel, float, double) {}
PD_REGISTER_KERNEL(pool2d_double_grad,
CPU,
ALL_LAYOUT,
4 changes: 4 additions & 0 deletions paddle/phi/kernels/cpu/pool_kernel.cc
@@ -18,6 +18,10 @@
#include "paddle/phi/kernels/impl/pool_kernel_impl.h"

PD_REGISTER_KERNEL(pool2d, CPU, ALL_LAYOUT, phi::Pool2dKernel, float, double) {}

PD_REGISTER_KERNEL(
lp_pool2d, CPU, ALL_LAYOUT, phi::LPPool2dKernel, float, double) {}

PD_REGISTER_KERNEL(max_pool2d_with_index,
CPU,
ALL_LAYOUT,
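For reference, a small standalone sketch of the forward computation the lp_pool2d kernels above are registered for, written against a plain NCHW float buffer with no padding; the function name, layout handling, and test values here are illustrative assumptions, not the PR's PHI kernel code.

#include <cmath>
#include <cstdio>
#include <vector>

// Illustration-only L^p pooling over an NCHW buffer:
// out[n][c][oh][ow] = (sum over the kh x kw window of |x|^p)^(1/p)
std::vector<float> lp_pool2d_reference(const std::vector<float>& x,
                                       int N, int C, int H, int W,
                                       float p, int kh, int kw,
                                       int sh, int sw) {
  const int OH = (H - kh) / sh + 1;
  const int OW = (W - kw) / sw + 1;
  std::vector<float> out(static_cast<size_t>(N) * C * OH * OW, 0.f);
  for (int n = 0; n < N; ++n)
    for (int c = 0; c < C; ++c)
      for (int oh = 0; oh < OH; ++oh)
        for (int ow = 0; ow < OW; ++ow) {
          float acc = 0.f;
          for (int i = 0; i < kh; ++i)
            for (int j = 0; j < kw; ++j) {
              const int h = oh * sh + i;
              const int w = ow * sw + j;
              acc += std::pow(std::fabs(x[((n * C + c) * H + h) * W + w]), p);
            }
          out[((n * C + c) * OH + oh) * OW + ow] = std::pow(acc, 1.f / p);
        }
  return out;
}

int main() {
  // 1x1x4x4 input, 2x2 window, stride 2, p = 2 (the L2 norm of each window).
  std::vector<float> x = {1, 2, 3, 4, 5, 6, 7, 8,
                          9, 10, 11, 12, 13, 14, 15, 16};
  auto y = lp_pool2d_reference(x, 1, 1, 4, 4, 2.f, 2, 2, 2, 2);
  for (float v : y) std::printf("%.4f ", v);  // first value: sqrt(1+4+25+36) = 8.1240
  std::printf("\n");
  return 0;
}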