2 changes: 1 addition & 1 deletion paddle/fluid/framework/infershape_utils.cc

@@ -397,7 +397,7 @@ void CompatMetaTensor::share_lod(const MetaTensor& meta_tensor) {
   if (var == nullptr) return;
   if (var->IsType<phi::DenseTensor>() && meta_tensor.is_dense()) {
     auto* tensor = var->GetMutable<phi::DenseTensor>();
-    phi::DenseTensorUtils::GetMutableMeta(tensor)->lod =
+    phi::DenseTensorUtils::GetMutableMeta(tensor)->legacy_lod =
         static_cast<const CompatMetaTensor&>(meta_tensor).GetRuntimeLoD();
   } else {
     // NOTE(chenweihang): do nothing

2 changes: 1 addition & 1 deletion paddle/fluid/operators/controlflow/feed_op.cc

@@ -75,7 +75,7 @@ class FeedOp : public framework::OperatorWithKernel {
     meta.dims = feed_tensor.dims();
     meta.dtype = feed_tensor.dtype();
     meta.layout = feed_tensor.layout();
-    meta.lod = feed_tensor.lod();
+    meta.legacy_lod = feed_tensor.lod();
     meta.strides = feed_tensor.strides();
     if (meta.strides.size() == -1) {
       meta.strides = meta.calc_strides(meta.dims);

6 changes: 4 additions & 2 deletions paddle/phi/core/dense_tensor.cc

@@ -214,7 +214,7 @@ void DenseTensor::set_meta(const DenseTensorMeta& meta) {
   meta_.dtype = meta.dtype;
   meta_.is_scalar = meta.is_scalar;
   meta_.layout = meta.layout;
-  meta_.lod = meta.lod;
+  meta_.legacy_lod = meta.legacy_lod;
   meta_.offset = meta.offset;
   meta_.use_gpudnn = meta.use_gpudnn;
   if (meta.strides.size() == -1) {
@@ -255,7 +255,9 @@ void DenseTensor::ResizeAndAllocate(const DDim& dims) {
   }
 }

-void DenseTensor::ResetLoD(const LoD& lod) { meta_.lod = lod; }
+void DenseTensor::ResetLoD(const LoD& legacy_lod) {
+  meta_.legacy_lod = legacy_lod;
+}

 #define DATA_MEMBER_FUNC_INSTANTIATION(dtype) \
   template TEST_API const dtype* DenseTensor::data() const; \

6 changes: 3 additions & 3 deletions paddle/phi/core/dense_tensor.h

@@ -95,7 +95,7 @@ class TEST_API DenseTensor : public TensorBase,

   /// \brief Returns the lod of the tensor.
   /// \return The lod of the tensor.
-  const LoD& lod() const noexcept { return meta_.lod; }
+  const LoD& lod() const noexcept { return meta_.legacy_lod; }

   /// \brief Returns the data type of the tensor.
   /// \return The data type of the tensor.
@@ -153,8 +153,8 @@ class TEST_API DenseTensor : public TensorBase,
   DenseTensor& Resize(const DDim& dims);

   /// \brief Change the lod information in the metadata.
-  /// \param lod The new lod of the dense tensor.
-  void ResetLoD(const LoD& lod);
+  /// \param legacy_lod The new lod of the dense tensor.
+  void ResetLoD(const LoD& legacy_lod);

   /// \brief Returns the actual allocation size occupied by tensor, may be
   /// larger

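The public DenseTensor surface is unchanged by this rename: lod() and ResetLoD() keep their names and signatures, and only the backing DenseTensorMeta field becomes legacy_lod. A minimal caller-side sketch, assuming nothing beyond the declarations above (variable names are illustrative, not from this PR):

    // Call sites compile unchanged: only the meta field was renamed.
    phi::DenseTensor t;
    phi::LoD lod{{0, 2, 5}};        // one level: rows [0, 2) and [2, 5)
    t.ResetLoD(lod);                // internally assigns meta_.legacy_lod
    const phi::LoD& got = t.lod();  // accessor name stays lod()
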
17 changes: 11 additions & 6 deletions paddle/phi/core/dense_tensor_impl.cc

@@ -227,11 +227,15 @@ LEGACY_DATA_MEMBER_FUNC_INSTANTIATION(::phi::dtype::complex<double>)
 /* From phi::DenseTensor */
 /* ------------------------------ */

-DenseTensor::DenseTensor(const LoD& lod) : DenseTensor() { meta_.lod = lod; }
+DenseTensor::DenseTensor(const LoD& legacy_lod) : DenseTensor() {
+  meta_.legacy_lod = legacy_lod;
+}

-void DenseTensor::set_lod(const LoD& lod) { meta_.lod = lod; }
+void DenseTensor::set_lod(const LoD& legacy_lod) {
+  meta_.legacy_lod = legacy_lod;
+}

-LoD* DenseTensor::mutable_lod() { return &meta_.lod; }
+LoD* DenseTensor::mutable_lod() { return &meta_.legacy_lod; }

 std::pair<size_t, size_t> DenseTensor::lod_element(size_t level,
                                                    size_t elem) const {
@@ -254,10 +258,11 @@ std::pair<size_t, size_t> DenseTensor::lod_element(size_t level,
       elem,
       NumElements(level)));

-  return std::make_pair((meta_.lod)[level][elem], (meta_.lod)[level][elem + 1]);
+  return std::make_pair((meta_.legacy_lod)[level][elem],
+                        (meta_.legacy_lod)[level][elem + 1]);
 }

-size_t DenseTensor::NumLevels() const { return meta_.lod.size(); }
+size_t DenseTensor::NumLevels() const { return meta_.legacy_lod.size(); }

 size_t DenseTensor::NumElements(size_t level) const {
   PADDLE_ENFORCE_LT(
@@ -270,7 +275,7 @@ size_t DenseTensor::NumElements(size_t level) const {
       NumLevels()));

   // the last offset is the end of last element
-  return (meta_.lod)[level].size() - 1;
+  return (meta_.legacy_lod)[level].size() - 1;
 }

 DenseTensor& DenseTensor::Resize(const DDim& dims) {

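For context on the accessors above: each LoD level stores begin/end offsets, so a level {0, 2, 5} describes two elements spanning rows [0, 2) and [2, 5). A small sketch of those semantics, using only the constructor and methods touched in this file (values are illustrative):

    phi::DenseTensor t(phi::LoD{{0, 2, 5}});  // stores meta_.legacy_lod
    size_t levels = t.NumLevels();            // 1 level
    size_t elems = t.NumElements(0);          // offsets.size() - 1 == 2
    auto span = t.lod_element(0, 1);          // {2, 5}: the second element
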
16 changes: 8 additions & 8 deletions paddle/phi/core/meta_tensor.cc

@@ -207,37 +207,37 @@ void MetaTensor::share_lod(const MetaTensor& meta_tensor) {
     return;
   }
   if (phi::DenseTensor::classof(tensor_)) {
-    DenseTensorUtils::GetMutableMeta(static_cast<DenseTensor*>(tensor_))->lod =
-        meta_tensor.lod();
+    DenseTensorUtils::GetMutableMeta(static_cast<DenseTensor*>(tensor_))
+        ->legacy_lod = meta_tensor.lod();
   } else if (phi::SelectedRows::classof(tensor_)) {
     DenseTensorUtils::GetMutableMeta(
         static_cast<SelectedRows*>(tensor_)->mutable_value())
-        ->lod = meta_tensor.lod();
+        ->legacy_lod = meta_tensor.lod();
   } else {
     PADDLE_THROW(common::errors::Unimplemented(
         "Unsupported sharing lod inplace for `%s`.",
         tensor_->type_info().name()));
   }
 }

-void MetaTensor::share_lod(const LoD& lod) {
+void MetaTensor::share_lod(const LoD& legacy_lod) {
   ValidCheck(*this);
   if (phi::SparseCooTensor::classof(tensor_) ||
       phi::SparseCsrTensor::classof(tensor_) ||
       phi::distributed::DistTensor::classof(tensor_)) {
     return;
   }
-  if (lod.empty()) {
+  if (legacy_lod.empty()) {
     // no need share
     return;
   }
   if (phi::DenseTensor::classof(tensor_)) {
-    DenseTensorUtils::GetMutableMeta(static_cast<DenseTensor*>(tensor_))->lod =
-        lod;
+    DenseTensorUtils::GetMutableMeta(static_cast<DenseTensor*>(tensor_))
+        ->legacy_lod = legacy_lod;
   } else if (phi::SelectedRows::classof(tensor_)) {
     DenseTensorUtils::GetMutableMeta(
         static_cast<SelectedRows*>(tensor_)->mutable_value())
-        ->lod = lod;
+        ->legacy_lod = legacy_lod;
   } else {
     PADDLE_THROW(common::errors::Unimplemented(
         "Unsupported sharing lod inplace for `%s`.",

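Both share_lod overloads keep their external signatures, so call sites remain source-compatible; the LoD overload additionally returns early when the incoming LoD is empty. A hedged sketch of a typical caller (the function and tensor names are hypothetical):

    // Propagate the input's LoD to the output inside an InferMeta routine.
    void MyInferMeta(const phi::MetaTensor& x, phi::MetaTensor* out) {
      out->share_lod(x);  // copies into DenseTensorMeta::legacy_lod
    }
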
2 changes: 1 addition & 1 deletion paddle/phi/core/meta_tensor.h

@@ -75,7 +75,7 @@ class TEST_API MetaTensor {
   virtual void set_strides(const DDim& strides);

   virtual void share_lod(const MetaTensor& meta_tensor);
-  void share_lod(const LoD& lod);
+  void share_lod(const LoD& legacy_lod);
   void share_lod(const MetaTensor& meta_tensor, int64_t index);
   virtual void share_meta(const MetaTensor& meta_tensor);
   virtual void share_dims(const MetaTensor& meta_tensor);

14 changes: 9 additions & 5 deletions paddle/phi/core/tensor_meta.cc

@@ -148,9 +148,13 @@ DenseTensorMeta::DenseTensorMeta(DataType dtype,
 DenseTensorMeta::DenseTensorMeta(DataType dtype,
                                  const DDim& dims,
                                  DataLayout layout,
-                                 const LoD& lod,
+                                 const LoD& legacy_lod,
                                  size_t offset)
-    : dims(dims), dtype(dtype), layout(layout), lod(lod), offset(offset) {
+    : dims(dims),
+      dtype(dtype),
+      layout(layout),
+      legacy_lod(legacy_lod),
+      offset(offset) {
   strides = calc_strides(dims);
   use_gpudnn = true;
 }
@@ -161,7 +165,7 @@ DenseTensorMeta::DenseTensorMeta(const DenseTensorMeta& other) {
   dims = other.dims;
   dtype = other.dtype;
   layout = other.layout;
-  lod = other.lod;
+  legacy_lod = other.legacy_lod;
   offset = other.offset;
   if (other.strides.size() == -1) {
     strides = calc_strides(dims);
@@ -176,7 +180,7 @@ DenseTensorMeta& DenseTensorMeta::operator=(const DenseTensorMeta& other) {
   dims = other.dims;
   dtype = other.dtype;
   layout = other.layout;
-  lod = other.lod;
+  legacy_lod = other.legacy_lod;
   offset = other.offset;
   if (other.strides.size() == -1) {
     strides = calc_strides(dims);
@@ -193,7 +197,7 @@ DenseTensorMeta& DenseTensorMeta::operator=( // NOLINT
   dims = other.dims;
   dtype = other.dtype;
   layout = other.layout;
-  lod = std::move(other.lod);
+  legacy_lod = std::move(other.legacy_lod);
   offset = other.offset;
   if (other.strides.size() == -1) {
     strides = calc_strides(dims);

6 changes: 3 additions & 3 deletions paddle/phi/core/tensor_meta.h

@@ -57,7 +57,7 @@ struct TEST_API DenseTensorMeta {
   DenseTensorMeta(DataType dtype,
                   const DDim& dims,
                   DataLayout layout,
-                  const LoD& lod,
+                  const LoD& legacy_lod,
                   size_t offset = 0);

   DenseTensorMeta(const DenseTensorMeta& other);
@@ -80,15 +80,15 @@ struct TEST_API DenseTensorMeta {
   DDim dims;
   DataType dtype{DataType::UNDEFINED};
   DataLayout layout{DataLayout::NCHW};
-  LoD lod;
+  LoD legacy_lod;
   size_t offset{0};
   DDim strides;
 };

 inline bool operator==(const DenseTensorMeta& lhs, const DenseTensorMeta& rhs) {
   return (lhs.is_scalar == rhs.is_scalar) && lhs.use_gpudnn == rhs.use_gpudnn &&
          (lhs.dims == rhs.dims) && (lhs.dtype == rhs.dtype) &&
-         (lhs.layout == rhs.layout) && (lhs.lod == rhs.lod) &&
+         (lhs.layout == rhs.layout) && (lhs.legacy_lod == rhs.legacy_lod) &&
          (lhs.offset == rhs.offset) && (lhs.strides == rhs.strides);
 }

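A minimal construction sketch against the renamed field, mirroring the constructor declared above (the dtype and dims values are illustrative):

    phi::LoD lod{{0, 2, 5}};
    phi::DenseTensorMeta meta(phi::DataType::FLOAT32,
                              common::make_ddim({5, 3}),
                              phi::DataLayout::NCHW,
                              lod);                  // parameter is now legacy_lod
    bool has_lod = !meta.legacy_lod.empty();         // member renamed from lod
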
4 changes: 2 additions & 2 deletions paddle/phi/kernels/cpu/match_matrix_tensor_kernel.cc

@@ -112,14 +112,14 @@ void CPUMatchMatrixTensorOPKernel(const Context& dev_ctx,
   phi::DenseTensorMeta new_out_meta(out_meta.dtype,
                                     common::make_ddim(out_dims_vec),
                                     out_meta.layout,
-                                    out_meta.lod);
+                                    out_meta.legacy_lod);
   out->set_meta(new_out_meta);

   auto& tmp_meta = tmp->meta();
   phi::DenseTensorMeta new_tmp_meta(tmp_meta.dtype,
                                     common::make_ddim(tmp_dims_vec),
                                     tmp_meta.layout,
-                                    tmp_meta.lod);
+                                    tmp_meta.legacy_lod);
   tmp->set_meta(new_tmp_meta);

   int64_t dim_in = x->dims()[1];

2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/common_shape.h

@@ -29,7 +29,7 @@ inline void SetXShape(const DenseTensor &x, DenseTensor *xshape) {
     xshape_dims[i + 1] = in_dims[i];
   }
   xshape->ResizeAndAllocate(common::make_ddim(xshape_dims));
-  xshape->ResetLoD(x.meta().lod);
+  xshape->ResetLoD(x.meta().legacy_lod);
 }

 inline void GetBroadcastDimsArrays(const DDim &x_dims,

12 changes: 6 additions & 6 deletions test/cpp/phi/core/test_dense_tensor.cc

@@ -108,13 +108,13 @@ TEST(dense_tensor, meta) {
                         "lod. Expected layout: %s, but got: %s",
                         layout,
                         meta_3.layout));
-  PADDLE_ENFORCE_EQ(meta_3.lod,
+  PADDLE_ENFORCE_EQ(meta_3.legacy_lod,
                     lod,
                     common::errors::InvalidArgument(
                         "Fail in DenseTensorMeta with dtype, dims, layout and "
                         "lod. Expected lod: %s, but got: %s",
                         lod,
-                        meta_3.lod));
+                        meta_3.legacy_lod));
   PADDLE_ENFORCE_EQ(meta_3.valid(),
                     true,
                     common::errors::InvalidArgument(
@@ -145,12 +145,12 @@ TEST(dense_tensor, meta) {
                         layout,
                         meta_4.layout));
   PADDLE_ENFORCE_EQ(
-      meta_4.lod,
+      meta_4.legacy_lod,
       lod,
      common::errors::InvalidArgument(
           "Fail in copy DenseTensorMeta. Expected lod: %s, but got: %s",
           lod,
-          meta_4.lod));
+          meta_4.legacy_lod));
   PADDLE_ENFORCE_EQ(
       meta_4.valid(),
       true,
@@ -181,12 +181,12 @@ TEST(dense_tensor, meta) {
                         layout,
                         meta_5.layout));
   PADDLE_ENFORCE_EQ(
-      meta_5.lod,
+      meta_5.legacy_lod,
       lod,
       common::errors::InvalidArgument(
           "Fail in copy DenseTensorMeta. Expected lod: %s, but got: %s",
           lod,
-          meta_5.lod));
+          meta_5.legacy_lod));
   PADDLE_ENFORCE_EQ(
       meta_5.valid(),
       true,