6 changes: 3 additions & 3 deletions test/cpp/fluid/assign_op_test.cc

@@ -55,12 +55,12 @@ TEST(AssignOp, AssignDenseTensorArray) {
   phi::TensorArray input;
   for (int i = 0; i < 5; ++i) {
     phi::DDim in_dims = common::make_ddim({i + 1, i + 2});
-    phi::DenseTensor lod_tensor;
-    float* in_data = lod_tensor.mutable_data<float>(in_dims, cpu_place);
+    phi::DenseTensor dense_tensor;
+    float* in_data = dense_tensor.mutable_data<float>(in_dims, cpu_place);
     for (int j = 0; j < (i + 1) * (i + 2); ++j) {
       in_data[j] = static_cast<float>(j);
     }
-    input.push_back(lod_tensor);
+    input.push_back(dense_tensor);
   }
 
   assign_functor(input);
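Note: this rename is cosmetic, not behavioral. After the phi refactor, the legacy LoDTensor type survived only as an alias of phi::DenseTensor, so lod_tensor and dense_tensor variables hold exactly the same type. A minimal standalone sketch of that aliasing pattern (stand-in types, not Paddle's actual headers):

    // Stand-in types only; not Paddle's real declarations.
    #include <cstdio>

    struct DenseTensor {};          // plays the role of phi::DenseTensor
    using LoDTensor = DenseTensor;  // the legacy name, kept only as an alias

    int main() {
      LoDTensor lod_tensor;                   // old spelling
      DenseTensor dense_tensor = lod_tensor;  // same type, copy is trivial
      (void)dense_tensor;
      std::puts("LoDTensor and DenseTensor are one type");
      return 0;
    }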
12 changes: 6 additions & 6 deletions test/cpp/fluid/platform/bfloat16_test.cc

@@ -103,8 +103,8 @@ TEST(bfloat16, comparison_cpu) {
   EXPECT_TRUE(bfloat16(2.0f) >= bfloat16(2.0f));
 }
 
-TEST(bfloat16, lod_tensor_cpu) {
-  phi::DenseTensor lod_tensor;
+TEST(bfloat16, dense_tensor_cpu) {
+  phi::DenseTensor dense_tensor;
 
   std::vector<bfloat16> input_data = {
       bfloat16(1.0f), bfloat16(0.5f), bfloat16(0.33333f), bfloat16(0.0f)};
@@ -113,12 +113,12 @@ TEST(bfloat16, lod_tensor_cpu) {
   EXPECT_EQ(input_data[2].x, 0x3eab);
   EXPECT_EQ(input_data[3].x, 0x0000);
 
-  lod_tensor.Resize({4, 1});
-  lod_tensor.set_lod(phi::LoD({{0, 2, 4}}));
-  bfloat16* data_ptr = lod_tensor.mutable_data<bfloat16>(CPUPlace());
+  dense_tensor.Resize({4, 1});
+  dense_tensor.set_lod(phi::LoD({{0, 2, 4}}));
+  bfloat16* data_ptr = dense_tensor.mutable_data<bfloat16>(CPUPlace());
 
   EXPECT_NE(data_ptr, nullptr);
-  EXPECT_EQ(input_data.size(), static_cast<size_t>(lod_tensor.numel()));
+  EXPECT_EQ(input_data.size(), static_cast<size_t>(dense_tensor.numel()));
   for (size_t i = 0; i < input_data.size(); ++i) {
     data_ptr[i] = input_data[i];
     EXPECT_EQ(data_ptr[i].x, input_data[i].x);
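For anyone checking the hex constants and the set_lod call above: bfloat16 is the top 16 bits of the IEEE-754 float32 encoding, and the LoD offsets {0, 2, 4} partition the 4 rows into two sequences. A standalone sketch, assuming round-to-nearest-even conversion (this is not phi's actual implementation, and NaN handling is omitted):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    // float32 -> bfloat16 bits: keep the top 16 bits, round to nearest-even.
    static uint16_t FloatToBfloat16Bits(float f) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      uint32_t lsb = (bits >> 16) & 1u;  // ties-to-even adjustment
      bits += 0x7FFFu + lsb;
      return static_cast<uint16_t>(bits >> 16);
    }

    int main() {
      std::vector<float> rows = {1.0f, 0.5f, 0.33333f, 0.0f};
      // Prints 3f80 3f00 3eab 0000 -- the values the EXPECT_EQs assert.
      for (float r : rows) {
        std::printf("%04x ", static_cast<unsigned>(FloatToBfloat16Bits(r)));
      }
      std::printf("\n");
      // The LoD offsets {0, 2, 4} split the 4 rows into two sequences.
      std::vector<size_t> lod = {0, 2, 4};
      for (size_t s = 0; s + 1 < lod.size(); ++s) {
        std::printf("sequence %zu: rows [%zu, %zu)\n", s, lod[s], lod[s + 1]);
      }
      return 0;
    }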
2 changes: 1 addition & 1 deletion test/cpp/fluid/platform/bfloat16_test.cu

@@ -56,7 +56,7 @@ TEST(bfloat16, convert_bfloat16_to_float32_on_gpu) {
   EXPECT_EQ(static_cast<bool>(bfloat16(true)), true);
 }
 
-TEST(bfloat16, lod_tensor_on_gpu) {
+TEST(bfloat16, dense_tensor_on_gpu) {
   phi::DenseTensor src_tensor;
   phi::DenseTensor gpu_tensor;
   phi::DenseTensor dst_tensor;
2 changes: 1 addition & 1 deletion test/cpp/fluid/platform/float16_test.cu

@@ -317,7 +317,7 @@ TEST(float16, conversion_on_gpu) {
   EXPECT_EQ(v_assign.x, 0x3c00);
 }
 
-TEST(float16, lod_tensor_on_gpu) {
+TEST(float16, dense_tensor_on_gpu) {
   phi::DenseTensor src_tensor;
   phi::DenseTensor gpu_tensor;
   phi::DenseTensor dst_tensor;
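Side note on the 0x3c00 in the context line above: that is 1.0 in IEEE-754 binary16 (sign 0, biased exponent 15, empty fraction field). A quick standalone check, assembling the encoding from its fields:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint16_t sign = 0;       // positive
      uint16_t exponent = 15;  // real exponent 0 plus bias 15
      uint16_t fraction = 0;   // 1.0 has only the implicit leading 1
      uint16_t half =
          static_cast<uint16_t>((sign << 15) | (exponent << 10) | fraction);
      std::printf("0x%04x\n", static_cast<unsigned>(half));  // 0x3c00
      return 0;
    }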
12 changes: 6 additions & 6 deletions test/cpp/imperative/test_layer.cc

@@ -224,15 +224,15 @@ TEST(test_layer, test_debug_string) {
   ASSERT_TRUE(res_ut.find("UNRESOLVED_TYPE") != std::string::npos);
 
   // 4. test uninit lod tensor
-  std::shared_ptr<imperative::VarBase> lod_tensor(
-      new imperative::VarBase(false, "lod_tensor"));
-  auto tensor_l = lod_tensor->MutableVar()->GetMutable<phi::DenseTensor>();
-  std::string res_ui_lod_t = test_func(lod_tensor);
-  ASSERT_TRUE(res_ui_lod_t.find("NOT_INITED") != std::string::npos);
+  std::shared_ptr<imperative::VarBase> dense_tensor(
+      new imperative::VarBase(false, "dense_tensor"));
+  auto tensor_l = dense_tensor->MutableVar()->GetMutable<phi::DenseTensor>();
+  std::string res_ui_dense_t = test_func(dense_tensor);
+  ASSERT_TRUE(res_ui_dense_t.find("NOT_INITED") != std::string::npos);
 
   // 5. test init lod tensor
   tensor_l->mutable_data<float>(place);
-  std::string res_lod_t = test_func(lod_tensor);
+  std::string res_lod_t = test_func(dense_tensor);
   ASSERT_TRUE(res_lod_t.find("DenseTensor") != std::string::npos);
 
   // 6. test uninit selected rows
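The hunk above only renames variables; the behavior under test is unchanged: a variable whose tensor holds no allocation debug-prints as NOT_INITED, and prints DenseTensor once mutable_data allocates it. A hypothetical stand-in (none of these names are Paddle's) showing that pattern:

    #include <cstdio>
    #include <string>
    #include <vector>

    struct FakeDenseTensor {
      std::vector<float> buffer;  // empty until "allocated"
      bool initialized() const { return !buffer.empty(); }
    };

    static std::string DebugString(const FakeDenseTensor& t) {
      return t.initialized() ? "DenseTensor" : "NOT_INITED";
    }

    int main() {
      FakeDenseTensor t;
      std::printf("%s\n", DebugString(t).c_str());  // NOT_INITED
      t.buffer.resize(4);  // stands in for mutable_data<float>(place)
      std::printf("%s\n", DebugString(t).c_str());  // DenseTensor
      return 0;
    }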
4 changes: 2 additions & 2 deletions test/dygraph_to_static/test_len.py

@@ -38,7 +38,7 @@ def len_with_tensor(x):
     return x_len
 
 
-def len_with_lod_tensor_array(x):
+def len_with_dense_tensor_array(x):
     x = paddle.to_tensor(x)
 
     i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
@@ -75,7 +75,7 @@ def test_len(self):
 
 class TestLenWithTensorArray(TestLen):
     def init_func(self):
-        self.func = len_with_lod_tensor_array
+        self.func = len_with_dense_tensor_array
 
 
 # Note: Variable(SelectedRows) is not exposed directly in dygraph.
20 changes: 10 additions & 10 deletions test/legacy_test/test_eager_tensor.py

@@ -236,10 +236,10 @@ def check_with_place(place):
 
            numpy_array = np.random.randn(3, 4)
            # covert core.DenseTensor to paddle.Tensor
-           lod_tensor = paddle.base.core.DenseTensor()
+           dense_tensor = paddle.base.core.DenseTensor()
            place = paddle.base.framework._current_expected_place()
-           lod_tensor.set(numpy_array, place)
-           x = paddle.to_tensor(lod_tensor)
+           dense_tensor.set(numpy_array, place)
+           x = paddle.to_tensor(dense_tensor)
            np.testing.assert_array_equal(x.numpy(), numpy_array)
            self.assertEqual(x.type, core.VarDesc.VarType.DENSE_TENSOR)
            self.assertEqual(str(x.place), str(place))
@@ -349,19 +349,19 @@ def test_to_tensor_change_place(self):
                a = paddle.to_tensor(a, place=paddle.CUDAPinnedPlace())
                self.assertEqual(a.place.__repr__(), "Place(gpu_pinned)")
 
-    def test_to_tensor_with_lodtensor(self):
+    def test_to_tensor_with_densetensor(self):
        if core.is_compiled_with_cuda():
            a_np = np.random.rand(1024, 1024)
            with paddle.base.dygraph.guard(core.CPUPlace()):
-               lod_tensor = core.DenseTensor()
-               lod_tensor.set(a_np, core.CPUPlace())
-               a = paddle.to_tensor(lod_tensor)
+               dense_tensor = core.DenseTensor()
+               dense_tensor.set(a_np, core.CPUPlace())
+               a = paddle.to_tensor(dense_tensor)
                np.testing.assert_array_equal(a_np, a.numpy())
 
            with paddle.base.dygraph.guard(core.CUDAPlace(0)):
-               lod_tensor = core.DenseTensor()
-               lod_tensor.set(a_np, core.CUDAPlace(0))
-               a = paddle.to_tensor(lod_tensor, place=core.CPUPlace())
+               dense_tensor = core.DenseTensor()
+               dense_tensor.set(a_np, core.CUDAPlace(0))
+               a = paddle.to_tensor(dense_tensor, place=core.CPUPlace())
                np.testing.assert_array_equal(a_np, a.numpy())
                self.assertTrue(a.place.__repr__(), "Place(cpu)")
 
2 changes: 1 addition & 1 deletion test/legacy_test/test_fetch_lod_tensor_array.py

@@ -78,7 +78,7 @@ def check_network(self, use_cuda=True):
        self.assertEqual(array_v[2].shape, ())
        np.testing.assert_allclose(loss_v, array_v[2], rtol=1e-05)
 
-    def test_fetch_lod_tensor_array(self):
+    def test_fetch_dense_tensor_array(self):
        if base.core.is_compiled_with_cuda():
            self.check_network(use_cuda=True)
        self.check_network(use_cuda=False)