@@ -18,7 +18,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/scope.h"
 
-using LoDTensorArray = paddle::framework::LoDTensorArray;
+using LoDTensorArray = phi::TensorArray;
 using Scope = paddle::framework::Scope;
 using Variable = paddle::framework::Variable;
 using Place = phi::Place;
@@ -34,7 +34,7 @@ TEST(ConditionalBlockGrad, NoNeedRunLoDTensorArray) {
   cond_data[0] = false;
 
   Variable* input_var = scope.Var("input_lod_tensor_array");
-  LoDTensorArray* input_tensors = input_var->GetMutable<LoDTensorArray>();
+  LoDTensorArray* input_tensors = input_var->GetMutable<phi::TensorArray>();
   for (int i = 0; i < 5; ++i) {
     phi::DDim in_dims = common::make_ddim({i + 1, i + 2});
     phi::DenseTensor lod_tensor;
@@ -46,7 +46,7 @@ TEST(ConditionalBlockGrad, NoNeedRunLoDTensorArray) {
   }
 
   Variable* input_grad_var = scope.Var("input_lod_tensor_array@GRAD");
-  LoDTensorArray* grad_tensors = input_grad_var->GetMutable<LoDTensorArray>();
+  LoDTensorArray* grad_tensors = input_grad_var->GetMutable<phi::TensorArray>();
   grad_tensors->resize(5);
 
   paddle::framework::AttributeMap attrs;
@@ -60,7 +60,7 @@ TEST(ConditionalBlockGrad, NoNeedRunLoDTensorArray) {
 
   conditional_grad_op->Run(scope, place);
 
-  const LoDTensorArray& out_tensors = input_grad_var->Get<LoDTensorArray>();
+  const LoDTensorArray& out_tensors = input_grad_var->Get<phi::TensorArray>();
   for (int i = 0; i < 5; ++i) {
     phi::DDim out_dims = out_tensors[i].dims();
     EXPECT_EQ(common::make_ddim({i + 1, i + 2}), out_dims);
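A note on why the mixed spellings in this diff still compile: after the change, LoDTensorArray is an alias for phi::TensorArray, so GetMutable<phi::TensorArray>() and GetMutable<LoDTensorArray>() resolve to the same instantiation. Below is a minimal standalone sketch of that equivalence, not part of the diff itself; it assumes phi::TensorArray is declared in paddle/phi/core/tensor_array.h, as in current Paddle.

// Sketch only: verifies at compile time that the alias introduced by this
// change and phi::TensorArray are the same type. The include path is an
// assumption about the Paddle source layout.
#include <type_traits>
#include "paddle/phi/core/tensor_array.h"

using LoDTensorArray = phi::TensorArray;

static_assert(std::is_same<LoDTensorArray, phi::TensorArray>::value,
              "LoDTensorArray must alias phi::TensorArray");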