
Commit 19b1095

remove tensor signature and backend set member
1 parent 252fb79 commit 19b1095

File tree: 11 files changed (+35 / -85 lines)

.gitignore

Lines changed: 0 additions & 1 deletion
@@ -4,7 +4,6 @@ paddle/fluid/API_DEV.spec
 paddle/fluid/API_PR.spec
 paddle/fluid/op_use_default_grad_maker_DEV.spec
 paddle/fluid/op_use_default_grad_maker_PR.spec
-tools/__pycache__/static_mode_white_list.cpython-37.pyc
 
 *.DS_Store
 *.vs

paddle/fluid/operators/mean_op.h

Lines changed: 1 addition & 1 deletion
@@ -49,7 +49,7 @@ using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
  * Currently, only the first two cases are adapted.
  *
  * The principle here is that the implementation in the kernel must reuse the
- * corresponding functions in the Tensor compute library and cannot maintain
+ * corresponding functions in the Tensor Operation library and cannot maintain
  * two copies of the code.
  */
 template <typename DeviceContext, typename T>
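The reuse principle stated in this comment, as a minimal self-contained sketch: the fluid op kernel wraps its inputs and forwards to the pten kernel, so the math lives in one place. The MakePtenDenseTensor adapter and the pten::Mean signature below are illustrative assumptions, not the exact Paddle code.

// Hedged sketch: hypothetical adapter names; assumes a pten::Mean kernel.
template <typename DeviceContext, typename T>
class MeanKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<framework::Tensor>("X");
    auto* out = context.Output<framework::Tensor>("Out");
    out->mutable_data<T>(context.GetPlace());

    auto& dev_ctx = context.device_context<DeviceContext>();
    // Wrap fluid tensors as pten tensors and delegate; the implementation
    // is reused from the Tensor Operation library, never copied here.
    auto pt_x = paddle::experimental::MakePtenDenseTensor(*x);      // assumed
    auto pt_out = paddle::experimental::MakePtenDenseTensor(*out);  // assumed
    pten::Mean<T>(dev_ctx, *pt_x, pt_out.get());                    // assumed
  }
};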

paddle/pten/common/backend.h

Lines changed: 2 additions & 2 deletions
@@ -28,8 +28,8 @@ namespace experimental {
  * but in order to make the boundary of the kernel clearer and the function
  * more specific, we need to distinguish the calculation method.
  *
- * Such as the kernel for CUDA device, it can be a native CUDA kernel,
- * or a kernel implemented by CUDNN library.
+ * Such as the kernel for CPU device, it can be a native CPU kernel,
+ * or a kernel implemented by MKLDNN library.
  *
  * Note(chenweihang): HIP is not needed now, we can added it if needed
  * in the future
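To make the CPU/MKLDNN distinction above concrete, a minimal sketch; the enum values are assumptions for illustration and may not match the real pten::Backend exactly:

#include <cstdint>

// Assumed backend values, for illustration only.
enum class Backend : uint8_t { UNDEFINED = 0, CPU, CUDA, MKLDNN, CUDNN };

// The same logical operation may have several physical kernels: a native
// CPU kernel and an MKLDNN kernel both run on the CPU device, but keeping
// them apart as distinct Backend keys makes the kernel boundary explicit.
const char* KernelFlavor(Backend b) {
  switch (b) {
    case Backend::CPU:    return "native CPU kernel";
    case Backend::MKLDNN: return "kernel implemented by MKLDNN library";
    case Backend::CUDA:   return "native CUDA kernel";
    case Backend::CUDNN:  return "kernel implemented by CUDNN library";
    default:              return "undefined";
  }
}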

paddle/pten/hapi/include/backend_set.h

Lines changed: 2 additions & 2 deletions
@@ -26,8 +26,8 @@ namespace experimental {
  * We use the backend to form a bit set to assist the runtime kernel selection,
  * and the higher backend bit has a higher priority.
  *
- * A Tensor may belong to multiple backends at the same time, such CUDNN and
- * CUDA. Only one backend value cannot
+ * A Tensor may belong to multiple backends at the same time, such CPU and
+ * MKLDNN. Only one backend value cannot
  */
 class BackendSet final {
  public:
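A minimal self-contained sketch of the bit-set idea this comment describes, reusing the assumed Backend values from the sketch above; the real BackendSet carries more operators than shown here:

#include <cstdint>

enum class Backend : uint8_t { UNDEFINED = 0, CPU, CUDA, MKLDNN, CUDNN };

class BackendSet final {
 public:
  constexpr BackendSet() : bitset_(0) {}
  // Backend b occupies bit (b - 1), so a higher backend enum value lands
  // on a higher bit and therefore wins at kernel selection time.
  explicit constexpr BackendSet(Backend b)
      : bitset_(b == Backend::UNDEFINED
                    ? 0
                    : 1ULL << (static_cast<uint8_t>(b) - 1)) {}

  constexpr uint64_t bitset() const { return bitset_; }

  // Membership test: does this set contain backend b?
  constexpr bool Has(Backend b) const {
    return (bitset_ & BackendSet(b).bitset_) != 0;
  }

  // Union: a Tensor may belong to several backends at once, e.g.
  // BackendSet(Backend::CPU) | BackendSet(Backend::MKLDNN).
  BackendSet operator|(const BackendSet& other) const {
    BackendSet ret;
    ret.bitset_ = bitset_ | other.bitset_;
    return ret;
  }

 private:
  uint64_t bitset_;
};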

paddle/pten/hapi/include/tensor.h

Lines changed: 15 additions & 29 deletions
@@ -19,18 +19,17 @@ limitations under the License. */
 #include <utility>
 
 #include "paddle/pten/core/tensor_base.h"
-#include "paddle/pten/hapi/include/tensor_signature.h"
 
 /**
  * [ Why still include the fluid headers? ]
  *
  * We hope to organize the basic implementation of Tensor and the logic related
  * to Tensor computation into an independent library, which we call
- * [Tensor Compute Library, pten], so we extract or rewrite the original
+ * [Tensor Operation Library, pten], so we extract or rewrite the original
  * Kernels.
  *
  * In the future, the training library, inference library and custom operators
- * will link to this Tensor Compute library.
+ * will link to this Tensor Operation library.
  *
  * However, if we directly split the link relation, we need to make too many
  * changes, which will affect the stability of the framework, so here we still
@@ -47,15 +46,15 @@ namespace experimental {
 
 class Tensor;
 
-class AutogradMetaInterface {
+class AbstractAutogradMeta {
  public:
-  // No AutogradMetaInterface should be created
-  virtual ~AutogradMetaInterface() {}
+  // No AbstractAutogradMeta should be created
+  virtual ~AbstractAutogradMeta() {}
 };
 
 /**
  * Tensor is the API description of the basic data structure in the
- * [ Paddle "Tensor CoMPuTe (pten)" Library ].
+ * [ "Paddle Tensor Operation (pten)" Library ].
  *
  * It is not limited to a simple n-dimensional array.
  * It contains a smart pointer to `TensorImpl`. The data description contained
@@ -97,7 +96,6 @@ class Tensor final {
     if (impl_.get() == nullptr) {
       throw std::runtime_error("TensorImpl with nullptr is not supported");
     }
-    signature_.reset(new TensorSignature(impl_->backend()));
   }
 
   /* Part 2: Dimension, DataType and DataLayout methods */
@@ -140,16 +138,8 @@ class Tensor final {
   /**
    * Backend judgment APIs, shield the concept of Backend.
   */
-  BackendSet backend_set() const { return signature_->backend_set; }
-  void set_backend_set(const BackendSet& backend_set) {
-    if (signature_ == nullptr) {
-      signature_.reset(new TensorSignature());
-    }
-    signature_->backend_set = backend_set;
-  }
-
-  bool is_cpu() const { return signature_->backend_set.Has(Backend::CPU); }
-  bool is_cuda() const { return signature_->backend_set.Has(Backend::CUDA); }
+  bool is_cpu() const { return paddle::platform::is_cpu_place(place()); }
+  bool is_cuda() const { return paddle::platform::is_gpu_place(place()); }
 
   /**
    * Backend convert APIs.
@@ -211,11 +201,11 @@ class Tensor final {
   }
 
   /* Part 7: Autograd methods */
-  AutogradMetaInterface* get_autograd_meta() const {
+  AbstractAutogradMeta* get_autograd_meta() const {
     return autograd_meta_.get();
   }
 
-  void set_autograd_meta(std::shared_ptr<AutogradMetaInterface> autograd_meta) {
+  void set_autograd_meta(std::shared_ptr<AbstractAutogradMeta> autograd_meta) {
     autograd_meta_ = std::move(autograd_meta);
   }
@@ -244,7 +234,7 @@ class Tensor final {
   std::shared_ptr<pten::TensorBase> impl_;
 
   /**
-   * [ Why need abstract AutogradMetaInterface here? ]
+   * [ Why need abstract AbstractAutogradMeta here? ]
   *
   * Dynamic graphs need to hold backward information
   *
@@ -254,17 +244,13 @@ class Tensor final {
   * information, not Tensor data description-related information.
   * 2. Kernel calculation does not require AutogradMeta.
   */
-  std::shared_ptr<AutogradMetaInterface> autograd_meta_{nullptr};
+  std::shared_ptr<AbstractAutogradMeta> autograd_meta_{nullptr};
 
   /**
-   * TensorSignature is used to store auxiliary description information
-   * needed by Tensor.
-   *
-   * The currently stored information includes:
-   * 1. name: used for Debug analysis in the development of new dygraph.
-   * 2. backend_set: used by the API to determine the kernel backend.
+   * Tensor name: used for adapt original execution mechanism and debug analysis
+   * in the development of new dygraph.
   */
-  std::shared_ptr<TensorSignature> signature_{nullptr};
+  std::string name_;
 };
 
 }  // namespace experimental
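The AbstractAutogradMeta rename above keeps Tensor decoupled from any concrete autograd engine: Tensor only holds the abstract pointer. A minimal sketch of how a hypothetical dynamic-graph component could plug in; the subclass and its members are assumptions, not Paddle's real eager types:

#include <memory>

// Abstract base as declared in tensor.h above.
class AbstractAutogradMeta {
 public:
  virtual ~AbstractAutogradMeta() {}
};

// Hypothetical concrete metadata owned by a dynamic-graph engine.
class EagerAutogradMeta : public AbstractAutogradMeta {
 public:
  bool stop_gradient{false};
  // Gradient accumulators, backward nodes, etc. would live here.
};

// Usage sketch: the API layer never sees the concrete type.
//   tensor.set_autograd_meta(std::make_shared<EagerAutogradMeta>());
//   auto* meta =
//       static_cast<EagerAutogradMeta*>(tensor.get_autograd_meta());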

paddle/pten/hapi/include/tensor_signature.h

Lines changed: 0 additions & 45 deletions
This file was deleted.

paddle/pten/hapi/lib/creation.cc

Lines changed: 0 additions & 1 deletion
@@ -56,7 +56,6 @@ Tensor full_like(const Tensor& x,
       std::make_shared<pten::DenseTensor>(out_meta, pten::TensorStatus());
   kernel_context.EmplaceBackOutput(dense_out);
   out.set_impl(dense_out);
-  out.set_backend_set(x.backend_set());
 
   // 6. Call kernel
   kernel(&kernel_context);

paddle/pten/hapi/lib/kernel_dispatch.h

Lines changed: 15 additions & 1 deletion
@@ -24,6 +24,7 @@ limitations under the License. */
 #include "paddle/pten/hapi/include/tensor.h"
 
 // TODO(chenweihang): split KernelName, Key, Kernel, Factory into diff files
+#include "paddle/pten/core/convert_utils.h"
 #include "paddle/pten/core/kernel_factory.h"
 
 // See Note [ Why still include the fluid headers? ]
@@ -39,6 +40,19 @@ using CUDAContext = paddle::platform::CUDADeviceContext;
 #endif
 
 namespace detail {
+BackendSet GetTensorBackendSet(const Tensor& t) {
+  BackendSet backend_set(pten::TransToPtenBackend(t.place()));
+  switch (t.layout()) {
+    case DataLayout::MKLDNN:
+      backend_set = backend_set | BackendSet(Backend::MKLDNN);
+      break;
+    default:
+      // do nothing
+      break;
+  }
+  return backend_set;
+}
+
 std::size_t CountLeadingZeros(uint64_t val) {
   if (val == 0) {
     return 64;
@@ -102,7 +116,7 @@ struct KernelKeyParser : ArgsIterator<KernelKeyParser> {
   // TODO(chenweihang): deal with multiple diff input Tensors
   // TODO(chenweihang): add global device guard method to set backend
   void operator()(const Tensor& x) {
-    key_set.backend_set = key_set.backend_set | x.backend_set();
+    key_set.backend_set = key_set.backend_set | detail::GetTensorBackendSet(x);
     // TODO(chenweihang): selecte multi layout and dtype
     key_set.layout = x.layout();
     key_set.dtype = x.type();
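GetTensorBackendSet above folds place and layout into the bit set; the dispatcher must then recover a single Backend from it. A minimal sketch of how the highest-priority bit can be extracted with the CountLeadingZeros helper defined in this file, under the same assumed bit layout as the BackendSet sketch earlier (backend b on bit b - 1):

#include <cstddef>
#include <cstdint>

enum class Backend : uint8_t { UNDEFINED = 0, CPU, CUDA, MKLDNN, CUDNN };

// Same shape as the helper in kernel_dispatch.h: count leading zero bits.
std::size_t CountLeadingZeros(uint64_t val) {
  if (val == 0) {
    return 64;
  }
  std::size_t zero_bits = 0;
  for (uint64_t mask = 1ULL << 63; (val & mask) == 0; mask >>= 1) {
    ++zero_bits;
  }
  return zero_bits;
}

// The leftmost set bit names the winner, since higher bits have higher
// priority: a CPU|MKLDNN tensor resolves to the MKLDNN backend, and an
// empty set maps back to Backend::UNDEFINED.
Backend HighestPriorityBackend(uint64_t backend_bits) {
  return static_cast<Backend>(64 - CountLeadingZeros(backend_bits));
}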

paddle/pten/hapi/lib/linalg.cc

Lines changed: 0 additions & 1 deletion
@@ -56,7 +56,6 @@ Tensor dot(const Tensor& x, const Tensor& y) {
       std::make_shared<pten::DenseTensor>(out_meta, pten::TensorStatus());
   kernel_context.EmplaceBackOutput(dense_out);
   out.set_impl(dense_out);
-  out.set_backend_set(x.backend_set());
 
   // 6. Call kernel
   kernel(&kernel_context);

paddle/pten/hapi/lib/manipulation.cc

Lines changed: 0 additions & 1 deletion
@@ -50,7 +50,6 @@ Tensor flatten(const Tensor& x, int start_axis, int stop_axis) {
       std::make_shared<pten::DenseTensor>(out_meta, pten::TensorStatus());
   kernel_context.EmplaceBackOutput(dense_out);
   out.set_impl(dense_out);
-  out.set_backend_set(x.backend_set());
 
   // 6. Call kernel
   kernel(&kernel_context);
