Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions paddle/phi/infermeta/unary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -3443,6 +3443,15 @@ void PSendArrayInferMeta(const MetaTensor& x, int peer) {
"The peer (%d) for p_send op must be non-negative.", peer));
}

// Infer meta for the `set` op: the output keeps x's dtype, while its dims and
// strides come from the explicitly provided shape/stride arguments.
void SetInferMeta(const MetaTensor& x,
                  const std::vector<int64_t>& shape,
                  const std::vector<int64_t>& stride,
                  MetaTensor* out) {
  const auto target_dims = common::make_ddim(shape);
  const auto target_strides = common::make_ddim(stride);
  out->set_dtype(x.dtype());
  out->set_dims(target_dims);
  out->set_strides(target_strides);
}

void SendV2InferMeta(const int peer, const int ring_id) {
PADDLE_ENFORCE_GE(
peer,
Expand Down
5 changes: 5 additions & 0 deletions paddle/phi/infermeta/unary.h
Original file line number Diff line number Diff line change
Expand Up @@ -726,6 +726,11 @@ void FillSplitOutDims(const MetaTensor& x,
const std::vector<int64_t>& sections_vec,
std::vector<MetaTensor*>* out);

// Infer meta for the `set` op: output dtype is copied from `x`; dims and
// strides are taken from the explicit `shape`/`stride` arguments (see the
// definition in unary.cc).
void SetInferMeta(const MetaTensor& x,
                  const std::vector<int64_t>& shape,
                  const std::vector<int64_t>& stride,
                  MetaTensor* out);

void SequenceSoftmaxInferMeta(const MetaTensor& x, MetaTensor* out);

void SplitInferMeta(const MetaTensor& x_meta,
Expand Down
77 changes: 77 additions & 0 deletions paddle/phi/kernels/set_kernel.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/set_kernel.h"
#include "paddle/phi/core/kernel_registry.h"

namespace phi {

// Kernel for the inplace `set` op (backs paddle.Tensor.set_): rebinds `out`
// to `source`'s underlying storage with the requested dims/strides/offset.
// `x` is the tensor being modified inplace (out aliases it via the
// `inplace : (x -> out)` mapping in ops.yaml).
template <typename T, typename Context>
void SetKernel(const Context& dev_ctx,
               const DenseTensor& x,
               const DenseTensor& source,
               const std::vector<int64_t>& dims,
               const std::vector<int64_t>& stride,
               int64_t offset,
               DenseTensor* out) {
  // Build the target metadata from the caller-provided layout. NOTE(review):
  // whether `offset` is in bytes or elements is not visible here — it is
  // forwarded verbatim into meta.offset; confirm against DenseTensorMeta.
  auto meta = out->meta();
  meta.dims = DDim(dims.data(), static_cast<int>(dims.size()));
  meta.strides = DDim(stride.data(), static_cast<int>(stride.size()));
  meta.offset = offset;
  if (x.IsSharedWith(source)) {
    // x already shares source's buffer, so only the metadata needs updating;
    // the holder stays as-is.
    out->set_meta(meta);
  } else {
    // reset holder to nullptr
    out->clear();
    // Re-create `out` as a view over source's allocation with the new meta.
    *out = DenseTensor{source.Holder(), meta};
  }
  // Keep the inplace version counter shared with `x` — presumably so the
  // framework's inplace-modification tracking stays consistent; verify.
  out->ShareInplaceVersionCounterWith(x);
}

} // namespace phi

// Register the `set` kernel on CPU for all supported dtypes.
PD_REGISTER_KERNEL(set,
                   CPU,
                   ALL_LAYOUT,
                   phi::SetKernel,
                   bool,
                   uint8_t,
                   int8_t,
                   int16_t,
                   int,
                   int64_t,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   phi::dtype::complex<float>,
                   phi::dtype::complex<double>) {}

// Same registration for GPU builds (CUDA or HIP); the kernel body is
// device-agnostic since it only manipulates tensor metadata and storage.
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_REGISTER_KERNEL(set,
                   GPU,
                   ALL_LAYOUT,
                   phi::SetKernel,
                   bool,
                   uint8_t,
                   int8_t,
                   int16_t,
                   int,
                   int64_t,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   phi::dtype::complex<float>,
                   phi::dtype::complex<double>) {}
#endif
30 changes: 30 additions & 0 deletions paddle/phi/kernels/set_kernel.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

// Inplace `set` kernel: rebinds `out` to `source`'s underlying storage with
// the given dims/strides/offset. See set_kernel.cc for the implementation.
template <typename T, typename Context>
void SetKernel(const Context& dev_ctx,
               const DenseTensor& x,
               const DenseTensor& source,
               const std::vector<int64_t>& dims,
               const std::vector<int64_t>& stride,
               int64_t offset,
               DenseTensor* out);

} // namespace phi
10 changes: 10 additions & 0 deletions paddle/phi/ops/yaml/ops.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4413,6 +4413,16 @@
backward: sequence_pool_grad
interfaces : paddle::dialect::InferSymbolicShapeInterface

# Inplace op behind paddle.Tensor.set_: rebinds x to source's storage with the
# given dims/strides/offset. Declared inplace via (x -> out); no `backward`
# entry is declared for this op.
- op : set
  args : (Tensor x, Tensor source, int64_t[] dims = {}, int64_t[] stride = {}, int64_t offset = 0)
  output : Tensor (out)
  infer_meta :
    func : SetInferMeta
    param : [x, dims, stride]
  kernel :
    func : set
  inplace : (x -> out)

- op : set_value_with_tensor
args : (Tensor x, Tensor values, IntArray starts, IntArray ends, IntArray steps, int64_t[] axes, int64_t[] decrease_axes, int64_t[] none_axes)
output : Tensor(out)
Expand Down
2 changes: 2 additions & 0 deletions python/paddle/tensor/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@
ones,
ones_like,
polar,
set_,
to_tensor,
tril,
tril_,
Expand Down Expand Up @@ -842,6 +843,7 @@
"combinations",
'signbit',
'log_normal_',
'set_',
]

# this list used in math_op_patch.py for magic_method bind
Expand Down
122 changes: 122 additions & 0 deletions python/paddle/tensor/creation.py
Original file line number Diff line number Diff line change
Expand Up @@ -3257,3 +3257,125 @@ def geometric_(
x.uniform_(min=float(tiny), max=float(1))
x.log_().divide_(paddle.log1p(-(probs)))
return x


@inplace_apis_in_dygraph_only
def set_(
    x: paddle.Tensor,
    source: paddle.Tensor | None = None,
    shape: Sequence[int] | None = None,
    stride: Sequence[int] | None = None,
    offset: int = 0,
    name: str | None = None,
) -> paddle.Tensor:
    """
    set x with specified source Tensor's underlying storage, shape, stride and offset.

    Note that the ``x`` will share the same data with ``source`` Tensor.

    Args:
        x (Tensor): An arbitrary Tensor. The data type supports ``bfloat16``, ``float16``, ``float32``, ``float64``,
            ``bool``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64`` or ``complex128``.
        source (Tensor|None, optional): Define the target Tensor to use. The data type supports ``bfloat16``, ``float16``,
            ``float32``, ``float64``, ``bool``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64`` or
            ``complex128``. Default: None, which means to set ``x`` with an empty source tensor.
        shape (list|tuple|None, optional): Define the target shape. Each element of it should be integer. Default: None,
            which means it will use the specified ``source``'s shape as default value.
        stride (list|tuple|None, optional): Define the target stride. Each element of it should be integer. Default: None,
            and when ``shape`` is also None, it will use the specified ``source``'s stride as default value; when ``shape``
            is specified, it will use the default stride corresponding to the specified ``shape``.
        offset (int, optional): Define the target offset from x's holder. Default: 0.
        name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the Tensor with the same data type as ``x``.

    Examples:
        .. code-block:: python

            >>> import paddle

            >>> src = paddle.to_tensor([[11., 22., 33.]])
            >>> src2 = paddle.to_tensor([11., 22., 33., 44., 55., 66.])

            >>> x = paddle.to_tensor([1., 2., 3., 4., 5.])
            >>> x.set_()
            >>> print(x)
            Tensor(shape=[0], dtype=float32, place=Place(cpu), stop_gradient=True,
            [])

            >>> x = paddle.to_tensor([1., 2., 3., 4., 5.])
            >>> x.set_(src)
            >>> print(x)
            Tensor(shape=[1, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[11., 22., 33.]])

            >>> print(x._is_shared_buffer_with(src))
            True

            >>> x = paddle.to_tensor([1., 2., 3., 4., 5.])
            >>> x.set_(src, shape=[2, 1])
            >>> print(x)
            Tensor(shape=[2, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[11.],
             [22.]])

            >>> x = paddle.to_tensor([1., 2., 3., 4., 5.])
            >>> x.set_(src2, shape=[3], stride=[2])
            >>> print(x)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [11., 33., 55.])

            >>> x = paddle.to_tensor([1., 2., 3., 4., 5.])
            >>> x.set_(src2, shape=[5], offset=4)
            >>> print(x)
            Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True,
            [22., 33., 44., 55., 66.])

    """
    if in_dynamic_mode():
        # set_ doesn't have a backward op, so EagerUtils::CheckInplace will not
        # be called in eager_generator.cc. To keep consistent with other
        # inplace ops, manually check whether x is a leaf node that doesn't
        # stop gradient.
        if x.is_leaf and not x.stop_gradient:
            raise ValueError(
                f"(InvalidArgument) Leaf Tensor {x.name} that doesn't stop gradient can't use "
                "inplace strategy."
            )
        if source is None:
            # No source given: rebind x to an empty storage of the same dtype.
            source = paddle.empty([0], dtype=x.dtype)
            shape = [0]
            stride = [0]
        else:
            if not isinstance(source, (Variable, core.eager.Tensor)):
                raise ValueError(
                    f"Input (source) should be paddle.Tensor but received {type(source)}"
                )
            check_dtype(
                source.dtype,
                'source',
                [
                    'bool',
                    'float16',
                    'uint16',
                    'float32',
                    'float64',
                    'int8',
                    'int16',
                    'int32',
                    'int64',
                    'uint8',
                    'complex64',
                    'complex128',
                ],
                'set',
            )
        if stride is None:
            if shape is None:
                stride = source.strides
            else:
                # Default row-major contiguous strides for `shape`, computed
                # arithmetically instead of allocating a temporary tensor via
                # paddle.empty(shape) just to read its strides.
                default_stride = [1] * len(shape)
                for i in range(len(shape) - 2, -1, -1):
                    default_stride[i] = default_stride[i + 1] * shape[i + 1]
                stride = default_stride
        if shape is None:
            shape = source.shape

        return _C_ops.set_(x, source, shape, stride, offset)
Loading