Commit 7c41b15

Add Intermediate API layer
1 parent 76a588e commit 7c41b15

40 files changed: +526 additions, -203 deletions

.gitignore

Lines changed: 0 additions & 1 deletion

@@ -4,7 +4,6 @@ paddle/fluid/API_DEV.spec
 paddle/fluid/API_PR.spec
 paddle/fluid/op_use_default_grad_maker_DEV.spec
 paddle/fluid/op_use_default_grad_maker_PR.spec
-tools/__pycache__/static_mode_white_list.cpython-37.pyc

 *.DS_Store
 *.vs

paddle/fluid/framework/operator.cc

Lines changed: 5 additions & 21 deletions

@@ -30,6 +30,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/var_type.h"
 #include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/profiler.h"
+#include "paddle/pten/common/scalar.h"

 namespace paddle {
 namespace framework {
@@ -1080,20 +1081,6 @@ void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
   this->InferShape(&infer_shape_ctx);
 }

-static std::string RuntimeContextDebugString(const RuntimeContext& ctx) {
-  std::stringstream ss;
-  ss << "RuntimeContext(Inputs: ";
-  for (auto& var_pair : ctx.inputs) {
-    ss << var_pair.first << ", ";
-  }
-  ss << "Outputs: ";
-  for (auto& var_pair : ctx.outputs) {
-    ss << var_pair.first << ", ";
-  }
-  ss << ")";
-  return ss.str();
-}
-
 void OperatorWithKernel::RunImpl(const Scope& scope,
                                  const platform::Place& place) const {
   // To reduce the elapsed time of HasAttr, we use bool variable to record the
@@ -1144,7 +1131,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
   // and RCOM backend, the XPU, NPU and MKLDNN will be supported in the second
   // phase
   if (FLAGS_run_pt_kernel &&
-      pten::KernelFactory::Instance().ContainsKernel(type_.c_str())) {
+      pten::KernelFactory::Instance().HasCompatiblePtenKernel(type_)) {
     if (pt_kernel_signature_.get() == nullptr || pt_kernel_.get() == nullptr) {
       ChoosePtenKernel(exe_ctx);
     }
@@ -1651,10 +1638,9 @@ void OperatorWithKernel::ParseInputDataType(
     if (t != nullptr) {
       PADDLE_ENFORCE_EQ(
           t->IsInitialized(), true,
-          platform::errors::InvalidArgument(
-              "The Tensor in the %s Op's Input Variable %s(%s) is "
-              "not initialized.",
-              Type(), name, Inputs().at(name).at(i)));
+          platform::errors::InvalidArgument("The %s Op's Input Variable `%s` "
+                                            "contains uninitialized Tensor.",
+                                            Type(), name));
       proto::VarType::Type tmp = t->type();
       PADDLE_ENFORCE(tmp == *data_type || *data_type == default_data_type,
                      platform::errors::InvalidArgument(
@@ -1789,8 +1775,6 @@ KernelSignature OperatorWithKernel::GetExpectedPtenKernelArgs(

 pten::KernelContext OperatorWithKernel::BuildPtenKernelContext(
     const RuntimeContext& ctx, const platform::DeviceContext& dev_ctx) const {
-  VLOG(1) << RuntimeContextDebugString(ctx);
-
   // TODO(chenweihang): now only work for very simple case,
   // many cases need to be deal with later:
   // 1. the input and output are not tensor

paddle/fluid/framework/operator_test.cc

Lines changed: 5 additions & 6 deletions

@@ -439,9 +439,8 @@ TEST(IndicateVarDataTypeTest, lodtensor) {
     std::string ex_msg = err.what();
     EXPECT_TRUE(
         ex_msg.find(
-            "The Tensor in the indicate_lod_tensor_data_type_test Op's "
-            "Input Variable LoDTensor(lodtensor_1) is not initialized") !=
-        std::string::npos);
+            "The indicate_lod_tensor_data_type_test Op's Input Variable "
+            "`LoDTensor` contains uninitialized Tensor.") != std::string::npos);
   }
   ASSERT_TRUE(caught);
 }
@@ -466,9 +465,9 @@ TEST(IndicateVarDataTypeTest, selectedrows) {
     caught = true;
     std::string ex_msg = err.what();
     EXPECT_TRUE(
-        ex_msg.find("The Tensor in the indicate_selected_rows_data_type_test "
-                    "Op's Input Variable SelectedRows(selected_rows_1) is not "
-                    "initialized") != std::string::npos);
+        ex_msg.find("The indicate_selected_rows_data_type_test Op's "
+                    "Input Variable `SelectedRows` contains uninitialized "
+                    "Tensor.") != std::string::npos);
   }
   ASSERT_TRUE(caught);
 }

paddle/fluid/imperative/prepared_operator.cc

Lines changed: 2 additions & 1 deletion

@@ -18,6 +18,7 @@
 #include "paddle/fluid/framework/details/nan_inf_utils.h"
 #include "paddle/fluid/framework/pten_utils.h"
 #include "paddle/fluid/imperative/infer_shape_context.h"
+#include "paddle/pten/common/scalar.h"
 #include "paddle/utils/small_vector.h"
 #ifdef PADDLE_WITH_XPU
 #include "paddle/fluid/platform/xpu/xpu_op_list.h"
@@ -153,7 +154,7 @@ PreparedOp PrepareImpl(const NameVarMap<VarType>& ins,
   VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

   if (FLAGS_run_pt_kernel &&
-      pten::KernelFactory::Instance().ContainsKernel(op.Type().c_str())) {
+      pten::KernelFactory::Instance().HasCompatiblePtenKernel(op.Type())) {
     auto pt_kernel_signature = op.GetExpectedPtenKernelArgs(dygraph_exe_ctx);

     VLOG(1) << framework::KernelSignatureToString(pt_kernel_signature);

paddle/fluid/operators/mean_op.h

Lines changed: 1 addition & 1 deletion

@@ -49,7 +49,7 @@ using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
  * Currently, only the first two cases are adapted.
  *
  * The principle here is that the implementation in the kernel must reuse the
- * corresponding functions in the Tensor compute library and cannot maintain
+ * corresponding functions in the Tensor Operation library and cannot maintain
  * two copies of the code.
  */
 template <typename DeviceContext, typename T>

paddle/fluid/pybind/op_function_generator.cc

Lines changed: 1 addition & 1 deletion

@@ -557,7 +557,7 @@ GenerateOpFunctions() {
     // since only OperatorWithKernel can run in dygraph mode.
     // if the pten lib contains op kernel, we still generate ops method
     if (!all_kernels.count(op_type) &&
-        !pten::KernelFactory::Instance().ContainsKernel(op_type.c_str())) {
+        !pten::KernelFactory::Instance().HasCompatiblePtenKernel(op_type)) {
       continue;
     }

paddle/pten/api/include/core.h

Lines changed: 0 additions & 1 deletion

@@ -19,5 +19,4 @@ limitations under the License. */
 #include "paddle/pten/core/dense_tensor.h"
 #include "paddle/pten/core/kernel_context.h"
 #include "paddle/pten/core/kernel_factory.h"
-#include "paddle/pten/core/scalar.h"
 #include "paddle/pten/core/tensor_meta.h"

paddle/pten/api/include/creation.h

Lines changed: 15 additions & 0 deletions

@@ -14,5 +14,20 @@

 #pragma once

+#include "paddle/pten/api/include/infershape.h"
 #include "paddle/pten/kernels/cpu/creation.h"
 #include "paddle/pten/kernels/cuda/creation.h"
+
+namespace pten {
+
+template <typename T, typename ContextT>
+DenseTensor FillAnyLike(const ContextT& dev_ctx,
+                        const DenseTensor& x,
+                        const Scalar& val) {
+  auto out_meta = UnchangedInferShape(x.meta());
+  pten::DenseTensor dense_out(out_meta, pten::TensorStatus());
+  FillAnyLike<T>(dev_ctx, x, val, &dense_out);
+  return dense_out;
+}
+
+}  // namespace pten
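
The new intermediate API wrappers added in this commit all follow one pattern: derive the output meta with the matching InferShape function, construct the result DenseTensor, and forward to the existing low-level device kernel. A minimal call-site sketch for FillAnyLike is below; it is illustrative only (not part of this commit) and assumes a CPU device context and an already-initialized input tensor.

// Hypothetical usage sketch, not part of this commit.
#include "paddle/pten/api/include/creation.h"

// Assumes `dev_ctx` is a CPU device context and `x` is an initialized
// pten::DenseTensor; returns a tensor with x's meta, filled with 1.0f.
pten::DenseTensor OnesLike(const paddle::platform::CPUDeviceContext& dev_ctx,
                           const pten::DenseTensor& x) {
  return pten::FillAnyLike<float>(dev_ctx, x, pten::Scalar(1.0f));
}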

paddle/pten/api/include/linalg.h

Lines changed: 15 additions & 0 deletions

@@ -15,5 +15,20 @@
 #pragma once

 // See Note: [ How do we organize the kernel directory ]
+#include "paddle/pten/api/include/infershape.h"
 #include "paddle/pten/kernels/cpu/linalg.h"
 #include "paddle/pten/kernels/cuda/linalg.h"
+
+namespace pten {
+
+template <typename T, typename ContextT>
+DenseTensor Dot(const ContextT& dev_ctx,
+                const DenseTensor& x,
+                const DenseTensor& y) {
+  auto out_meta = DotInferShape(x.meta(), y.meta());
+  pten::DenseTensor dense_out(out_meta, pten::TensorStatus());
+  Dot<T>(dev_ctx, x, y, &dense_out);
+  return dense_out;
+}
+
+}  // namespace pten
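
Dot uses the same construction: DotInferShape produces the output meta and the existing Dot kernel writes into the freshly allocated DenseTensor. A hedged one-line call, under the same assumptions about `dev_ctx` and the inputs as in the sketch above:

// Illustration only: float inputs x and y, CPU device context.
pten::DenseTensor out = pten::Dot<float>(dev_ctx, x, y);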

paddle/pten/api/include/manipulation.h

Lines changed: 16 additions & 0 deletions

@@ -15,5 +15,21 @@
 #pragma once

 // See Note: [ How do we organize the kernel directory ]
+#include "paddle/pten/api/include/infershape.h"
 #include "paddle/pten/kernels/cpu/manipulation.h"
 #include "paddle/pten/kernels/cuda/manipulation.h"
+
+namespace pten {
+
+template <typename T, typename ContextT>
+DenseTensor Flatten(const ContextT& dev_ctx,
+                    const DenseTensor& x,
+                    int start_axis,
+                    int stop_axis) {
+  auto out_meta = FlattenInferShape(x.meta(), start_axis, stop_axis);
+  pten::DenseTensor dense_out(out_meta, pten::TensorStatus());
+  Flatten<T>(dev_ctx, x, start_axis, stop_axis, &dense_out);
+  return dense_out;
+}
+
+}  // namespace pten
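
And likewise for Flatten, where FlattenInferShape folds the dimensions between start_axis and stop_axis into one. The axis values in the hedged snippet below are an arbitrary example, not taken from this commit:

// Illustration only: collapse every dimension of x into a single one.
pten::DenseTensor flat =
    pten::Flatten<float>(dev_ctx, x, /*start_axis=*/0, /*stop_axis=*/-1);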
