Skip to content

Commit c368c12

Browse files
authored
Fix (#69622)
1 parent 74e74dc commit c368c12

File tree

8 files changed

+30
-30
lines changed

8 files changed

+30
-30
lines changed

test/cpp/fluid/assign_op_test.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -55,12 +55,12 @@ TEST(AssignOp, AssignDenseTensorArray) {
5555
phi::TensorArray input;
5656
for (int i = 0; i < 5; ++i) {
5757
phi::DDim in_dims = common::make_ddim({i + 1, i + 2});
58-
phi::DenseTensor lod_tensor;
59-
float* in_data = lod_tensor.mutable_data<float>(in_dims, cpu_place);
58+
phi::DenseTensor dense_tensor;
59+
float* in_data = dense_tensor.mutable_data<float>(in_dims, cpu_place);
6060
for (int j = 0; j < (i + 1) * (i + 2); ++j) {
6161
in_data[j] = static_cast<float>(j);
6262
}
63-
input.push_back(lod_tensor);
63+
input.push_back(dense_tensor);
6464
}
6565

6666
assign_functor(input);

test/cpp/fluid/platform/bfloat16_test.cc

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -103,8 +103,8 @@ TEST(bfloat16, comparison_cpu) {
103103
EXPECT_TRUE(bfloat16(2.0f) >= bfloat16(2.0f));
104104
}
105105

106-
TEST(bfloat16, lod_tensor_cpu) {
107-
phi::DenseTensor lod_tensor;
106+
TEST(bfloat16, dense_tensor_cpu) {
107+
phi::DenseTensor dense_tensor;
108108

109109
std::vector<bfloat16> input_data = {
110110
bfloat16(1.0f), bfloat16(0.5f), bfloat16(0.33333f), bfloat16(0.0f)};
@@ -113,12 +113,12 @@ TEST(bfloat16, lod_tensor_cpu) {
113113
EXPECT_EQ(input_data[2].x, 0x3eab);
114114
EXPECT_EQ(input_data[3].x, 0x0000);
115115

116-
lod_tensor.Resize({4, 1});
117-
lod_tensor.set_lod(phi::LoD({{0, 2, 4}}));
118-
bfloat16* data_ptr = lod_tensor.mutable_data<bfloat16>(CPUPlace());
116+
dense_tensor.Resize({4, 1});
117+
dense_tensor.set_lod(phi::LoD({{0, 2, 4}}));
118+
bfloat16* data_ptr = dense_tensor.mutable_data<bfloat16>(CPUPlace());
119119

120120
EXPECT_NE(data_ptr, nullptr);
121-
EXPECT_EQ(input_data.size(), static_cast<size_t>(lod_tensor.numel()));
121+
EXPECT_EQ(input_data.size(), static_cast<size_t>(dense_tensor.numel()));
122122
for (size_t i = 0; i < input_data.size(); ++i) {
123123
data_ptr[i] = input_data[i];
124124
EXPECT_EQ(data_ptr[i].x, input_data[i].x);

test/cpp/fluid/platform/bfloat16_test.cu

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ TEST(bfloat16, convert_bfloat16_to_float32_on_gpu) {
5656
EXPECT_EQ(static_cast<bool>(bfloat16(true)), true);
5757
}
5858

59-
TEST(bfloat16, lod_tensor_on_gpu) {
59+
TEST(bfloat16, dense_tensor_on_gpu) {
6060
phi::DenseTensor src_tensor;
6161
phi::DenseTensor gpu_tensor;
6262
phi::DenseTensor dst_tensor;

test/cpp/fluid/platform/float16_test.cu

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -317,7 +317,7 @@ TEST(float16, conversion_on_gpu) {
317317
EXPECT_EQ(v_assign.x, 0x3c00);
318318
}
319319

320-
TEST(float16, lod_tensor_on_gpu) {
320+
TEST(float16, dense_tensor_on_gpu) {
321321
phi::DenseTensor src_tensor;
322322
phi::DenseTensor gpu_tensor;
323323
phi::DenseTensor dst_tensor;

test/cpp/imperative/test_layer.cc

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -224,15 +224,15 @@ TEST(test_layer, test_debug_string) {
224224
ASSERT_TRUE(res_ut.find("UNRESOLVED_TYPE") != std::string::npos);
225225

226226
// 4. test uninit dense tensor
227-
std::shared_ptr<imperative::VarBase> lod_tensor(
228-
new imperative::VarBase(false, "lod_tensor"));
229-
auto tensor_l = lod_tensor->MutableVar()->GetMutable<phi::DenseTensor>();
230-
std::string res_ui_lod_t = test_func(lod_tensor);
231-
ASSERT_TRUE(res_ui_lod_t.find("NOT_INITED") != std::string::npos);
227+
std::shared_ptr<imperative::VarBase> dense_tensor(
228+
new imperative::VarBase(false, "dense_tensor"));
229+
auto tensor_l = dense_tensor->MutableVar()->GetMutable<phi::DenseTensor>();
230+
std::string res_ui_dense_t = test_func(dense_tensor);
231+
ASSERT_TRUE(res_ui_dense_t.find("NOT_INITED") != std::string::npos);
232232

233233
// 5. test init dense tensor
234234
tensor_l->mutable_data<float>(place);
235-
std::string res_lod_t = test_func(lod_tensor);
235+
std::string res_lod_t = test_func(dense_tensor);
236236
ASSERT_TRUE(res_lod_t.find("DenseTensor") != std::string::npos);
237237

238238
// 6. test uninit selected rows

test/dygraph_to_static/test_len.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ def len_with_tensor(x):
3838
return x_len
3939

4040

41-
def len_with_lod_tensor_array(x):
41+
def len_with_dense_tensor_array(x):
4242
x = paddle.to_tensor(x)
4343

4444
i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0)
@@ -75,7 +75,7 @@ def test_len(self):
7575

7676
class TestLenWithTensorArray(TestLen):
7777
def init_func(self):
78-
self.func = len_with_lod_tensor_array
78+
self.func = len_with_dense_tensor_array
7979

8080

8181
# Note: Variable(SelectedRows) is not exposed directly in dygraph.

test/legacy_test/test_eager_tensor.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -236,10 +236,10 @@ def check_with_place(place):
236236

237237
numpy_array = np.random.randn(3, 4)
238238
# convert core.DenseTensor to paddle.Tensor
239-
lod_tensor = paddle.base.core.DenseTensor()
239+
dense_tensor = paddle.base.core.DenseTensor()
240240
place = paddle.base.framework._current_expected_place()
241-
lod_tensor.set(numpy_array, place)
242-
x = paddle.to_tensor(lod_tensor)
241+
dense_tensor.set(numpy_array, place)
242+
x = paddle.to_tensor(dense_tensor)
243243
np.testing.assert_array_equal(x.numpy(), numpy_array)
244244
self.assertEqual(x.type, core.VarDesc.VarType.DENSE_TENSOR)
245245
self.assertEqual(str(x.place), str(place))
@@ -349,19 +349,19 @@ def test_to_tensor_change_place(self):
349349
a = paddle.to_tensor(a, place=paddle.CUDAPinnedPlace())
350350
self.assertEqual(a.place.__repr__(), "Place(gpu_pinned)")
351351

352-
def test_to_tensor_with_lodtensor(self):
352+
def test_to_tensor_with_densetensor(self):
353353
if core.is_compiled_with_cuda():
354354
a_np = np.random.rand(1024, 1024)
355355
with paddle.base.dygraph.guard(core.CPUPlace()):
356-
lod_tensor = core.DenseTensor()
357-
lod_tensor.set(a_np, core.CPUPlace())
358-
a = paddle.to_tensor(lod_tensor)
356+
dense_tensor = core.DenseTensor()
357+
dense_tensor.set(a_np, core.CPUPlace())
358+
a = paddle.to_tensor(dense_tensor)
359359
np.testing.assert_array_equal(a_np, a.numpy())
360360

361361
with paddle.base.dygraph.guard(core.CUDAPlace(0)):
362-
lod_tensor = core.DenseTensor()
363-
lod_tensor.set(a_np, core.CUDAPlace(0))
364-
a = paddle.to_tensor(lod_tensor, place=core.CPUPlace())
362+
dense_tensor = core.DenseTensor()
363+
dense_tensor.set(a_np, core.CUDAPlace(0))
364+
a = paddle.to_tensor(dense_tensor, place=core.CPUPlace())
365365
np.testing.assert_array_equal(a_np, a.numpy())
366366
self.assertTrue(a.place.__repr__(), "Place(cpu)")
367367

test/legacy_test/test_fetch_lod_tensor_array.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,7 @@ def check_network(self, use_cuda=True):
7878
self.assertEqual(array_v[2].shape, ())
7979
np.testing.assert_allclose(loss_v, array_v[2], rtol=1e-05)
8080

81-
def test_fetch_lod_tensor_array(self):
81+
def test_fetch_dense_tensor_array(self):
8282
if base.core.is_compiled_with_cuda():
8383
self.check_network(use_cuda=True)
8484
self.check_network(use_cuda=False)

0 commit comments

Comments
 (0)