@@ -95,9 +95,6 @@ void InterpreterCoreEventGarbageCollector::Add(Variable* var,
                  OrderedMultiDeviceDenseTensorBlockingQueueHolder>()) { // NOLINT
     // TODO(xiongkun03) in old executor, this type of variable is not support
     // eager deletion. so we just leave it here ?
-  } else if (var->IsType<LoDRankTable>()) {
-    // TODO(xiongkun03) in old executor, this type of variable is not support
-    // eager deletion. so we just leave it here ?
   } else if (var->IsType<phi::SelectedRows>()) {
     Add(var->GetMutable<phi::SelectedRows>()
             ->mutable_value()
@@ -40,9 +40,6 @@ void InterpreterCoreFastGarbageCollector::Add(Variable* var) {
                  OrderedMultiDeviceDenseTensorBlockingQueueHolder>()) { // NOLINT
     // TODO(xiongkun03) in old executor, this type of variable is not support
     // eager deletion. so we just leave it here ?
-  } else if (var->IsType<LoDRankTable>()) {
-    // TODO(xiongkun03) in old executor, this type of variable is not support
-    // eager deletion. so we just leave it here ?
   } else if (var->IsType<phi::SelectedRows>()) {
     Add(var->GetMutable<phi::SelectedRows>()
             ->mutable_value()
@@ -55,9 +55,6 @@ void InterpreterCoreNoEventGarbageCollector::Add(
                  OrderedMultiDeviceDenseTensorBlockingQueueHolder>()) { // NOLINT
     // TODO(xiongkun03) in old executor, this type of variable is not support
     // eager deletion. so we just leave it here ?
-  } else if (var->IsType<LoDRankTable>()) {
-    // TODO(xiongkun03) in old executor, this type of variable is not support
-    // eager deletion. so we just leave it here ?
   } else if (var->IsType<phi::SelectedRows>()) {
     Add(var->GetMutable<phi::SelectedRows>()
             ->mutable_value()
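Note: all three interpreter-core garbage collectors dispatch on the concrete payload of a framework::Variable via IsType<T>() / GetMutable<T>(), so dropping LoDRankTable removes one branch from each chain and nothing else. The following self-contained sketch only illustrates that type-erased dispatch pattern; MiniVariable, FakeTensor, and FakeRows are invented stand-ins, not Paddle types.

    #include <any>
    #include <iostream>
    #include <vector>

    // Schematic stand-in for framework::Variable: a type-erased holder that
    // answers IsType<T>() queries and lazily constructs the payload on
    // GetMutable<T>(), mirroring the calls visible in the hunks above.
    class MiniVariable {
     public:
      template <typename T>
      bool IsType() const {
        return holder_.has_value() && holder_.type() == typeid(T);
      }

      template <typename T>
      T* GetMutable() {
        if (!holder_.has_value()) holder_ = T{};
        return std::any_cast<T>(&holder_);
      }

     private:
      std::any holder_;
    };

    // Invented payload types standing in for DenseTensor / SelectedRows.
    struct FakeTensor { std::vector<float> data; };
    struct FakeRows { std::vector<int> rows; };

    // Garbage-collector-style dispatch: branch on the stored type and hand
    // the reclaimable buffer to the collector; unknown payloads are skipped,
    // which mirrors the empty TODO branches above.
    void Collect(MiniVariable* var) {
      if (var->IsType<FakeTensor>()) {
        std::cout << "collect tensor buffer of size "
                  << var->GetMutable<FakeTensor>()->data.size() << "\n";
      } else if (var->IsType<FakeRows>()) {
        std::cout << "collect rows buffer of size "
                  << var->GetMutable<FakeRows>()->rows.size() << "\n";
      }
    }

    int main() {
      MiniVariable var;
      var.GetMutable<FakeTensor>()->data.resize(8);
      Collect(&var);  // prints: collect tensor buffer of size 8
    }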
4 changes: 0 additions & 4 deletions paddle/fluid/framework/var_type.h
@@ -14,7 +14,6 @@ limitations under the License. */
 
 #pragma once
 #include "paddle/fluid/framework/dense_tensor_array.h"
-#include "paddle/fluid/framework/lod_rank_table.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/selected_rows_utils.h"
 #include "paddle/fluid/framework/var_type_traits.h"
@@ -51,9 +50,6 @@ inline void VisitVarType(const framework::Variable& var, Visitor visitor) {
     case proto::VarType::DENSE_TENSOR:
      visitor(var.Get<phi::DenseTensor>());
      return;
-    case proto::VarType::LOD_RANK_TABLE:
-      visitor(var.Get<LoDRankTable>());
-      return;
     case proto::VarType::DENSE_TENSOR_ARRAY:
      visitor(var.Get<phi::TensorArray>());
      return;
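Note: VisitVarType pairs the proto::VarType tag with a visitor whose operator() is overloaded per payload type, so the deleted LOD_RANK_TABLE case also drops one overload that visitors had to provide. Below is a minimal self-contained sketch of that tag-plus-visitor dispatch; the enum, payload structs, and SizePrinter are invented for illustration and do not mirror Paddle's actual signatures.

    #include <iostream>
    #include <variant>
    #include <vector>

    // Illustrative payloads and the storage holding them (stand-ins for the
    // DenseTensor / SelectedRows payloads handled by the real switch).
    struct Dense { std::vector<float> values; };
    struct Rows  { std::vector<int> rows; };
    using VarValue = std::variant<Dense, Rows>;

    enum class VarKind { kDense, kRows };

    // Schematic VisitVarType: switch on the tag and call the matching
    // visitor overload. Dropping a type removes exactly one case here.
    template <typename Visitor>
    void VisitVar(VarKind kind, const VarValue& value, Visitor visitor) {
      switch (kind) {
        case VarKind::kDense:
          visitor(std::get<Dense>(value));
          return;
        case VarKind::kRows:
          visitor(std::get<Rows>(value));
          return;
      }
    }

    // Example visitor: one operator() overload per supported payload.
    struct SizePrinter {
      void operator()(const Dense& d) const {
        std::cout << "dense: " << d.values.size() << " values\n";
      }
      void operator()(const Rows& r) const {
        std::cout << "rows: " << r.rows.size() << " row ids\n";
      }
    };

    int main() {
      VarValue v = Dense{{1.f, 2.f, 3.f}};
      VisitVar(VarKind::kDense, v, SizePrinter{});  // prints: dense: 3 values
    }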
1 change: 0 additions & 1 deletion paddle/fluid/framework/var_type_traits.cc
@@ -15,7 +15,6 @@
 #include "paddle/fluid/framework/var_type_traits.h"
 
 #include "paddle/common/macros.h"
-#include "paddle/fluid/framework/lod_rank_table.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/phi/core/framework/reader.h"
 #include "paddle/phi/core/operators/reader/dense_tensor_blocking_queue.h"
3 changes: 0 additions & 3 deletions paddle/fluid/framework/var_type_traits.h
@@ -76,7 +76,6 @@ class BKCLCommunicator;
 } // namespace platform
 
 namespace framework {
-class LoDRankTable;
 class Scope;
 class ReaderHolder;
 class Scope;
@@ -180,7 +179,6 @@ using VarTypeRegistry = detail::VarTypeRegistryImpl<
     phi::SparseCooTensor,
     phi::SparseCsrTensor,
     std::vector<Scope *>,
-    LoDRankTable,
     Strings,
     phi::TensorArray,
     phi::PlaceList,
@@ -240,7 +238,6 @@ struct VarTypeTrait {
 REG_PROTO_VAR_TYPE_TRAIT(phi::DenseTensor, proto::VarType::DENSE_TENSOR);
 REG_PROTO_VAR_TYPE_TRAIT(phi::SelectedRows, proto::VarType::SELECTED_ROWS);
 REG_PROTO_VAR_TYPE_TRAIT(std::vector<Scope *>, proto::VarType::STEP_SCOPES);
-REG_PROTO_VAR_TYPE_TRAIT(LoDRankTable, proto::VarType::LOD_RANK_TABLE);
 REG_PROTO_VAR_TYPE_TRAIT(phi::TensorArray, proto::VarType::DENSE_TENSOR_ARRAY);
 REG_PROTO_VAR_TYPE_TRAIT(phi::PlaceList, proto::VarType::PLACE_LIST);
 REG_PROTO_VAR_TYPE_TRAIT(ReaderHolder, proto::VarType::READER);
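Note: VarTypeRegistry assigns every registered payload type an id, and REG_PROTO_VAR_TYPE_TRAIT pins a subset of those types to explicit proto::VarType values, which is why unregistering LoDRankTable does not renumber anything else. The stripped-down sketch below shows how such a compile-time type-to-id trait table can be built; the names, macro, and id values are invented and only echo the registration pattern visible above.

    #include <cstdio>

    // Illustrative proto-style ids (not Paddle's real enum values).
    enum ProtoVarTypeStub : int { kDenseTensorId = 7, kSelectedRowsId = 8 };

    struct DenseTensorStub {};
    struct SelectedRowsStub {};

    // Primary template: types without an explicit registration carry no
    // proto-visible id.
    template <typename T>
    struct VarTypeTraitStub {
      static constexpr int kId = -1;  // -1 == "not a proto-visible type"
    };

    // Registration macro in the spirit of REG_PROTO_VAR_TYPE_TRAIT: a full
    // specialization pins the C++ type to its proto id at compile time.
    #define REG_STUB_VAR_TYPE_TRAIT(type, proto_id) \
      template <>                                   \
      struct VarTypeTraitStub<type> {               \
        static constexpr int kId = proto_id;        \
      }

    REG_STUB_VAR_TYPE_TRAIT(DenseTensorStub, kDenseTensorId);
    REG_STUB_VAR_TYPE_TRAIT(SelectedRowsStub, kSelectedRowsId);

    int main() {
      // Removing one specialization leaves every other mapping untouched.
      static_assert(VarTypeTraitStub<DenseTensorStub>::kId == kDenseTensorId);
      static_assert(VarTypeTraitStub<SelectedRowsStub>::kId == kSelectedRowsId);
      std::printf("dense id = %d\n", VarTypeTraitStub<DenseTensorStub>::kId);
    }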
3 changes: 0 additions & 3 deletions paddle/fluid/framework/variable_helper.cc
@@ -16,7 +16,6 @@ limitations under the License. */
 
 #include "paddle/fluid/framework/dense_tensor_array.h"
 #include "paddle/fluid/framework/feed_fetch_type.h"
-#include "paddle/fluid/framework/lod_rank_table.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/selected_rows_utils.h"
@@ -37,8 +36,6 @@ void InitializeVariable(Variable *var, proto::VarType::Type var_type) {
     var->GetMutable<FetchList>();
   } else if (var_type == proto::VarType::STEP_SCOPES) {
     var->GetMutable<std::vector<framework::Scope *>>();
-  } else if (var_type == proto::VarType::LOD_RANK_TABLE) {
-    var->GetMutable<LoDRankTable>();
   } else if (var_type == proto::VarType::DENSE_TENSOR_ARRAY) {
     var->GetMutable<phi::TensorArray>();
   } else if (var_type == proto::VarType::STRINGS) {
3 changes: 0 additions & 3 deletions paddle/fluid/imperative/var_helper.cc
@@ -18,7 +18,6 @@
 #include "paddle/fluid/framework/convert_utils.h"
 #include "paddle/fluid/framework/dense_tensor_array.h"
 #include "paddle/fluid/framework/feed_fetch_type.h"
-#include "paddle/fluid/framework/lod_rank_table.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/tensor.h"
@@ -55,8 +54,6 @@ void InitializeVariable(paddle::framework::Variable *var,
     var->GetMutable<paddle::framework::FetchList>();
   } else if (var_type == paddle::framework::proto::VarType::STEP_SCOPES) {
     var->GetMutable<std::vector<paddle::framework::Scope *>>();
-  } else if (var_type == paddle::framework::proto::VarType::LOD_RANK_TABLE) {
-    var->GetMutable<paddle::framework::LoDRankTable>();
   } else if (var_type ==
              paddle::framework::proto::VarType::DENSE_TENSOR_ARRAY) {
     var->GetMutable<phi::TensorArray>();
1 change: 0 additions & 1 deletion paddle/fluid/pybind/compiled_program.cc
@@ -50,7 +50,6 @@
 #include "paddle/fluid/framework/ir/cost_model.h"
 #include "paddle/fluid/framework/ir/generate_pass.h"
 #include "paddle/fluid/framework/ir/pass_builder.h"
-#include "paddle/fluid/framework/lod_rank_table.h"
 #include "paddle/fluid/framework/new_executor/executor_statistics.h"
 #include "paddle/fluid/framework/new_executor/standalone_executor.h"
 #include "paddle/fluid/framework/op_info.h"
1 change: 0 additions & 1 deletion paddle/fluid/pybind/place.cc
@@ -49,7 +49,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/cost_model.h"
 #include "paddle/fluid/framework/ir/generate_pass.h"
 #include "paddle/fluid/framework/ir/pass_builder.h"
-#include "paddle/fluid/framework/lod_rank_table.h"
 #include "paddle/fluid/framework/new_executor/executor_statistics.h"
 #include "paddle/fluid/framework/new_executor/standalone_executor.h"
 #include "paddle/fluid/framework/op_info.h"
14 changes: 0 additions & 14 deletions paddle/fluid/pybind/pybind.cc
@@ -54,7 +54,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/cost_model.h"
 #include "paddle/fluid/framework/ir/generate_pass.h"
 #include "paddle/fluid/framework/ir/pass_builder.h"
-#include "paddle/fluid/framework/lod_rank_table.h"
 #include "paddle/fluid/framework/new_executor/collect_shape_manager.h"
 #include "paddle/fluid/framework/new_executor/executor_statistics.h"
 #include "paddle/fluid/framework/new_executor/interpreter/job.h"
@@ -1511,10 +1510,6 @@ All parameter, weight, gradient are variables in Paddle.
           "get_map_tensor",
           [](Variable &self) { return self.GetMutable<Vocab>(); },
           py::return_value_policy::reference)
-      .def(
-          "get_lod_rank_table",
-          [](Variable &self) { return self.GetMutable<LoDRankTable>(); },
-          py::return_value_policy::reference)
       .def(
           "get_selected_rows",
           [](Variable &self) -> phi::SelectedRows * {
@@ -2497,15 +2492,6 @@ All parameter, weight, gradient are variables in Paddle.
   BindAutoParallel(&m);
   BindJitProperty(&m);
 
-  py::class_<framework::LoDRankTable>(m, "LodRankTable")
-      .def("items", [](framework::LoDRankTable &table) {
-        std::vector<std::pair<size_t, size_t>> res;
-        for (auto &item : table.items()) {
-          res.push_back({item.index, item.length});
-        }
-        return res;
-      });
-
   py::class_<phi::TensorArray> pydensetensorarray(m, "DenseTensorArray", R"DOC(
     DenseTensorArray is array of DenseTensor, it supports operator[], len() and for-loop iteration.
 
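Note: the deleted pybind.cc code is an ordinary pybind11 class binding plus a Variable accessor, so nothing framework-specific is lost on the binding side. For reference, a self-contained module using the same py::class_/.def pattern could look like the sketch below; the RankTable type and the rank_table_demo module name are invented, and only the binding idiom mirrors the removed code.

    #include <pybind11/pybind11.h>
    #include <pybind11/stl.h>  // converts std::vector / std::pair to Python lists / tuples

    #include <cstddef>
    #include <utility>
    #include <vector>

    namespace py = pybind11;

    // Invented stand-in for the removed LoDRankTable: a list of (index, length).
    struct RankTable {
      struct Item { std::size_t index; std::size_t length; };
      std::vector<Item> items;
    };

    PYBIND11_MODULE(rank_table_demo, m) {
      // Same shape as the removed binding: expose the class and an "items"
      // method that flattens the entries into (index, length) pairs.
      py::class_<RankTable>(m, "RankTable")
          .def(py::init<>())
          .def("items", [](RankTable &table) {
            std::vector<std::pair<std::size_t, std::size_t>> res;
            for (auto &item : table.items) {
              res.push_back({item.index, item.length});
            }
            return res;
          });
    }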
1 change: 0 additions & 1 deletion paddle/fluid/pybind/tensor.cc
@@ -49,7 +49,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/cost_model.h"
 #include "paddle/fluid/framework/ir/generate_pass.h"
 #include "paddle/fluid/framework/ir/pass_builder.h"
-#include "paddle/fluid/framework/lod_rank_table.h"
 #include "paddle/fluid/framework/new_executor/executor_statistics.h"
 #include "paddle/fluid/framework/new_executor/standalone_executor.h"
 #include "paddle/fluid/framework/op_info.h"
28 changes: 0 additions & 28 deletions test/cpp/fluid/framework/operator_test.cc
@@ -414,34 +414,6 @@ REGISTER_OP_CPU_KERNEL(
     indicate_other_data_type_test,
     paddle::framework::EmptyTestKernel<phi::CPUContext, int>);
 
-TEST(IndicateVarDataTypeTest, other) {
-  paddle::framework::InitDevices();
-  paddle::framework::proto::OpDesc op_desc;
-  op_desc.set_type("indicate_other_data_type_test");
-  BuildVar("Other", {"lod_rank_table_1"}, op_desc.add_inputs());
-
-  phi::CPUPlace cpu_place;
-  paddle::framework::Scope scope;
-
-  auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
-  auto* var = scope.Var("lod_rank_table_1");
-  var->GetMutable<paddle::framework::LoDRankTable>();
-
-  bool caught = false;
-  try {
-    op->Run(scope, cpu_place);
-  } catch (paddle::platform::EnforceNotMet& err) {
-    caught = true;
-    std::string ex_msg = err.what();
-    EXPECT_TRUE(ex_msg.find("The Input Variable(Other) of "
-                            "(indicate_other_data_type_test) Operator used to "
-                            "determine kernel data type "
-                            "is empty or not phi::DenseTensor or SelectedRows "
-                            "or DenseTensorArray.") != std::string::npos);
-  }
-  ASSERT_TRUE(caught);
-}
-
 TEST(ExecutionContextAttrAndInOut, new_api) {
   paddle::framework::InitDevices();
   paddle::framework::proto::OpDesc op_desc;
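Note: the deleted test covered the "unsupported input variable type" error path by catching EnforceNotMet and substring-matching its message; that coverage disappears together with the type. A generic, self-contained sketch of the same catch-and-match testing pattern is shown below, with std::runtime_error standing in for EnforceNotMet and an invented InferKernelDataTypeOrThrow as the function under test.

    #include <gtest/gtest.h>

    #include <stdexcept>
    #include <string>

    // Invented stand-in for the operator-run path that rejects unsupported
    // variable types when inferring the kernel data type.
    void InferKernelDataTypeOrThrow(bool input_is_supported) {
      if (!input_is_supported) {
        throw std::runtime_error(
            "The Input Variable(Other) used to determine kernel data type "
            "is empty or not a supported tensor type.");
      }
    }

    // Same structure as the removed test: run, catch, and check that the
    // message names the offending input.
    TEST(IndicateVarDataTypeSketch, UnsupportedInputType) {
      bool caught = false;
      try {
        InferKernelDataTypeOrThrow(/*input_is_supported=*/false);
      } catch (const std::runtime_error& err) {
        caught = true;
        std::string msg = err.what();
        EXPECT_TRUE(msg.find("Input Variable(Other)") != std::string::npos);
      }
      ASSERT_TRUE(caught);
    }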
3 changes: 0 additions & 3 deletions test/cpp/fluid/framework/var_type_traits_test.cc
@@ -16,7 +16,6 @@
 
 #include <gtest/gtest.h>
 
-#include "paddle/fluid/framework/lod_rank_table.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/selected_rows_utils.h"
 #include "paddle/phi/core/framework/reader.h"
@@ -94,7 +93,6 @@ TEST(var_type_traits, check_proto_type_id) {
   ASSERT_TRUE(CheckVarId<phi::DenseTensor>(proto::VarType::DENSE_TENSOR));
   ASSERT_TRUE(CheckVarId<phi::SelectedRows>(proto::VarType::SELECTED_ROWS));
   ASSERT_TRUE(CheckVarId<std::vector<Scope *>>(proto::VarType::STEP_SCOPES));
-  ASSERT_TRUE(CheckVarId<LoDRankTable>(proto::VarType::LOD_RANK_TABLE));
   ASSERT_TRUE(CheckVarId<phi::TensorArray>(proto::VarType::DENSE_TENSOR_ARRAY));
   ASSERT_TRUE(CheckVarId<phi::PlaceList>(proto::VarType::PLACE_LIST));
   ASSERT_TRUE(CheckVarId<ReaderHolder>(proto::VarType::READER));
@@ -104,7 +102,6 @@ TEST(var_type_traits, check_proto_type_id) {
   ASSERT_EQ(proto::VarType_Type_DENSE_TENSOR, proto::VarType::DENSE_TENSOR);
   ASSERT_EQ(proto::VarType_Type_SELECTED_ROWS, proto::VarType::SELECTED_ROWS);
   ASSERT_EQ(proto::VarType_Type_STEP_SCOPES, proto::VarType::STEP_SCOPES);
-  ASSERT_EQ(proto::VarType_Type_LOD_RANK_TABLE, proto::VarType::LOD_RANK_TABLE);
   ASSERT_EQ(proto::VarType_Type_DENSE_TENSOR_ARRAY,
             proto::VarType::DENSE_TENSOR_ARRAY);
   ASSERT_EQ(proto::VarType_Type_PLACE_LIST, proto::VarType::PLACE_LIST);
11 changes: 1 addition & 10 deletions test/cpp/imperative/test_eager.cc
@@ -68,10 +68,9 @@ TEST(test_create_node, eager_node) {
       {});
 }
 TEST(test_var_helper, eager_var_helper) {
-  framework::Variable var0, var1, var2, var3, var4, var5, var6, var7, var8;
+  framework::Variable var0, var1, var3, var4, var5, var6, var7, var8;
   InitializeVariable(&var0, paddle::framework::proto::VarType::FEED_MINIBATCH);
   InitializeVariable(&var1, paddle::framework::proto::VarType::STEP_SCOPES);
-  InitializeVariable(&var2, paddle::framework::proto::VarType::LOD_RANK_TABLE);
   InitializeVariable(&var3,
                      paddle::framework::proto::VarType::DENSE_TENSOR_ARRAY);
   InitializeVariable(&var4, paddle::framework::proto::VarType::STRINGS);
@@ -82,12 +81,10 @@ TEST(test_var_helper, eager_var_helper) {
       InitializeVariable(&var8, paddle::framework::proto::VarType::FP64));
 
   auto egr_tensor = std::make_shared<egr::EagerVariable>();
-  auto egr_tensor2 = std::make_shared<egr::EagerVariable>();
   egr_tensor->MutableVar()
       ->GetMutable<phi::SelectedRows>()
       ->mutable_value()
      ->mutable_data<float>(phi::CPUPlace());
-  egr_tensor2->MutableVar()->GetMutable<framework::LoDRankTable>();
   VLOG(6) << "egr_tensor create with ";
   ASSERT_TRUE(phi::is_cpu_place(GetPlace<egr::EagerVariable>(egr_tensor)));
   ASSERT_TRUE(GetDataType<egr::EagerVariable>(egr_tensor) ==
@@ -96,12 +93,6 @@ TEST(test_var_helper, eager_var_helper) {
             phi::KernelKey(phi::Backend::CPU,
                            phi::DataLayout::ALL_LAYOUT,
                            phi::DataType::FLOAT32));
-  SetCachedValue<egr::EagerVariable>(egr_tensor,
-                                     phi::KernelKey(phi::Backend::CPU,
-                                                    phi::DataLayout::ALL_LAYOUT,
-                                                    phi::DataType::FLOAT32),
-                                     egr_tensor2);
-  ASSERT_ANY_THROW(GetPlace<egr::EagerVariable>(egr_tensor2));
   ASSERT_ANY_THROW(SetType<egr::EagerVariable>(
       egr_tensor, paddle::framework::proto::VarType::DENSE_TENSOR_ARRAY));
 }
Expand Down