621 changes: 539 additions & 82 deletions paddle/fluid/dialect/op_gen.py

Large diffs are not rendered by default.
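The op_gen.py diff carries the bulk of this change: the generator now emits a static `GetOpInfo()` for every generated op, returning the four-field `OpInfoTuple` declared in pd_interface.h below. Since that diff is not rendered, here is a hedged sketch of what the generated code plausibly looks like; the op name and every field value are illustrative, not taken from the PR:

```cpp
// Illustrative only: FeedOp and all constructor arguments are guesses.
// The real generated body lives in the unrendered op_gen.py diff.
OpInfoTuple FeedOp::GetOpInfo() {
  std::vector<paddle::dialect::OpInputInfo> inputs = {};
  std::vector<paddle::dialect::OpAttributeInfo> attributes = {};
  std::vector<paddle::dialect::OpOutputInfo> outputs = {/* "out": Tensor */};
  paddle::dialect::OpRunTimeInfo run_time_info{/* infer_meta/kernel info */};
  return std::make_tuple(inputs, attributes, outputs, run_time_info);
}
```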

27 changes: 11 additions & 16 deletions paddle/fluid/dialect/pd_dialect.cc
@@ -35,13 +35,13 @@ ParameterConvertInterface::ParameterToVariable(ir::Parameter *parameter) {
       std::make_shared<paddle::framework::Variable>();
   phi::DenseTensor *tensor = var->GetMutable<phi::DenseTensor>();
   // Init DenseTensor
-  auto dim = parameter->type().dyn_cast<DenseTensorType>().dim();
+  auto dim = parameter->type().dyn_cast<DenseTensorType>().dims();
   phi::DenseTensorMeta meta(
       TransToPhiDataType(
           parameter->type().dyn_cast<DenseTensorType>().dtype()),
-      phi::DDim(dim.data(), dim.size()),
-      TransToPhiDataLayout(
-          parameter->type().dyn_cast<DenseTensorType>().data_layout()),
+      dim,
+
+      parameter->type().dyn_cast<DenseTensorType>().data_layout(),
       parameter->type().dyn_cast<DenseTensorType>().lod(),
       parameter->type().dyn_cast<DenseTensorType>().offset());
   tensor->set_meta(meta);
@@ -67,17 +67,13 @@ std::unique_ptr<ir::Parameter> ParameterConvertInterface::VariableToParameter(
   // Get Meta
   ir::IrContext *ctx = ir::IrContext::Instance();
   ir::Type data_type = TransToIrDataType(tensor->dtype(), ctx);
-  DenseTensorTypeStorage::Dim dims(tensor->dims().size());
-  std::copy(tensor->dims().Get(),
-            tensor->dims().Get() + tensor->dims().size(),
-            dims.data());
-  DenseTensorTypeStorage::DataLayout data_layout =
-      TransToIrDataLayout(tensor->layout());
-  DenseTensorTypeStorage::LoD lod = tensor->lod();
-  size_t offset = tensor->meta().offset;
   void *data = tensor->data();
-  ir::Type dense_tensor_type =
-      DenseTensorType::get(ctx, data_type, dims, data_layout, lod, offset);
+  ir::Type dense_tensor_type = DenseTensorType::get(ctx,
+                                                    data_type,
+                                                    tensor->dims(),
+                                                    tensor->layout(),
+                                                    tensor->lod(),
+                                                    tensor->meta().offset);
   return std::make_unique<ir::Parameter>(
       data,
       tensor->numel() * phi::SizeOf(tensor->dtype()),
@@ -116,8 +112,7 @@ void PaddleDialect::PrintType(ir::Type type, std::ostream &os) {
   DenseTensorType tensor_type = type.dyn_cast<DenseTensorType>();

   os << "tensor<";
-  auto &dims = tensor_type.dim();
-  for (auto d : dims) {
+  for (auto d : phi::vectorize(tensor_type.dims())) {
     os << d;
     os << "x";
   }
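phi::DDim does not expose iterators, so the printer first converts it to a std::vector<int64_t>. A minimal standalone sketch of that pattern, assuming a 2x3 tensor shape:

```cpp
#include <iostream>

#include "paddle/phi/core/ddim.h"

int main() {
  // phi::vectorize turns a phi::DDim into std::vector<int64_t>,
  // which gives the range-for something it can iterate over.
  phi::DDim dims = phi::make_ddim({2, 3});
  for (auto d : phi::vectorize(dims)) {
    std::cout << d << "x";  // prints "2x3x"
  }
  return 0;
}
```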
15 changes: 6 additions & 9 deletions paddle/fluid/dialect/pd_interface.h
@@ -19,33 +19,30 @@

 using OpInfoTuple = std::tuple<std::vector<paddle::dialect::OpInputInfo>,
                                std::vector<paddle::dialect::OpAttributeInfo>,
-                               std::vector<paddle::dialect::OpOutputInfo>>;
+                               std::vector<paddle::dialect::OpOutputInfo>,
+                               paddle::dialect::OpRunTimeInfo>;

 namespace paddle {
 namespace dialect {
 class GetOpInfoInterface : public ir::OpInterfaceBase<GetOpInfoInterface> {
  public:
   struct Concept {
-    explicit Concept(OpInfoTuple (*get_op_info)(ir::Operation *))
+    explicit Concept(OpInfoTuple (*get_op_info)())
         : get_op_info_(get_op_info) {}
-    OpInfoTuple (*get_op_info_)(ir::Operation *);
+    OpInfoTuple (*get_op_info_)();
   };

   template <class ConcreteOp>
   struct Model : public Concept {
-    static OpInfoTuple GetOpInfo(ir::Operation *op) {
-      ConcreteOp concret_op = op->dyn_cast<ConcreteOp>();
-      if (concret_op == nullptr) throw("concret_op is nullptr");
-      return concret_op.GetOpInfo();
-    }
+    static OpInfoTuple GetOpInfo() { return ConcreteOp::GetOpInfo(); }

     Model() : Concept(GetOpInfo) {}
   };

   GetOpInfoInterface(ir::Operation *op, Concept *impl)
       : ir::OpInterfaceBase<GetOpInfoInterface>(op), impl_(impl) {}

-  OpInfoTuple GetOpInfo() { return impl_->get_op_info_(operation()); }
+  OpInfoTuple GetOpInfo() { return impl_->get_op_info_(); }

  private:
   Concept *impl_;
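Since the function pointer no longer takes an ir::Operation *, op metadata becomes a property of the op class rather than of an instance, and the dyn_cast/null-check/throw path in Model disappears. A hedged usage sketch; `op` and the dyn_cast call are assumed to behave as elsewhere in the ir core:

```cpp
// Sketch, not from the PR: querying static op info through the interface.
// `op` is assumed to be an ir::Operation* whose op class registered
// GetOpInfoInterface with PaddleDialect.
auto iface = op->dyn_cast<paddle::dialect::GetOpInfoInterface>();
OpInfoTuple info = iface.GetOpInfo();     // no Operation* required anymore
auto &outputs = std::get<2>(info);        // std::vector<OpOutputInfo>
auto &run_time_info = std::get<3>(info);  // the new fourth field
```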
22 changes: 0 additions & 22 deletions paddle/fluid/dialect/pd_op.yaml
@@ -11,17 +11,6 @@
     - {typename: Tensor, name: out, optional: false, intermediate: false}
   no_need_buffer: null
   data_transform: null
-  infer_meta:
-    func: null
-    param: null
-  kernel:
-    func: null
-    param: null
-    backend: null
-    layout: null
-    data_type: null
-    dispatch: null
-    force_backend: null
   inplace: null
   backward: null
 - name: fetch
@@ -37,16 +26,5 @@
     - {typename: 'Tensor[]', name: out, optional: false, intermediate: false}
   no_need_buffer: null
   data_transform: null
-  infer_meta:
-    func: null
-    param: null
-  kernel:
-    func: null
-    param: null
-    backend: null
-    layout: null
-    data_type: null
-    dispatch: null
-    force_backend: null
   inplace: null
   backward: null
13 changes: 3 additions & 10 deletions paddle/fluid/dialect/pd_type.cc
@@ -18,20 +18,13 @@ namespace paddle {
 namespace dialect {
 const ir::Type& DenseTensorType::dtype() const { return storage()->dtype_; }

-const paddle::dialect::DenseTensorTypeStorage::Dim& DenseTensorType::dim()
-    const {
-  return storage()->dims_;
-}
+const phi::DDim& DenseTensorType::dims() const { return storage()->dims_; }

-const paddle::dialect::DenseTensorTypeStorage::DataLayout&
-DenseTensorType::data_layout() const {
+const phi::DataLayout& DenseTensorType::data_layout() const {
   return storage()->layout_;
 }

-const paddle::dialect::DenseTensorTypeStorage::LoD& DenseTensorType::lod()
-    const {
-  return storage()->lod_;
-}
+const phi::LoD& DenseTensorType::lod() const { return storage()->lod_; }

 const size_t& DenseTensorType::offset() const { return storage()->offset_; }

7 changes: 3 additions & 4 deletions paddle/fluid/dialect/pd_type.h
@@ -30,12 +30,11 @@ class DenseTensorType : public ir::Type {

   const ir::Type &dtype() const;

-  const paddle::dialect::DenseTensorTypeStorage::Dim &dim() const;
+  const phi::DDim &dims() const;

-  const paddle::dialect::DenseTensorTypeStorage::DataLayout &data_layout()
-      const;
+  const phi::DataLayout &data_layout() const;

-  const paddle::dialect::DenseTensorTypeStorage::LoD &lod() const;
+  const phi::LoD &lod() const;

   const size_t &offset() const;
 };
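Call sites now receive phi types directly instead of the storage's nested aliases; note the rename from dim() to dims(). A minimal sketch of what a caller looks like after this change:

```cpp
// Sketch: reading the type's metadata through the new accessors.
void InspectDenseTensorType(const paddle::dialect::DenseTensorType &t) {
  const phi::DDim &dims = t.dims();                 // previously t.dim()
  const phi::DataLayout &layout = t.data_layout();  // phi enum, by reference
  const phi::LoD &lod = t.lod();
  const size_t &offset = t.offset();
  (void)dims; (void)layout; (void)lod; (void)offset;  // silence unused warnings
}
```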
61 changes: 18 additions & 43 deletions paddle/fluid/dialect/pd_type_storage.h
@@ -18,6 +18,7 @@

 #include "paddle/ir/core/type.h"
 #include "paddle/ir/core/utils.h"
+#include "paddle/phi/core/tensor_meta.h"

 namespace std {
 ///
@@ -46,46 +47,20 @@ namespace dialect {
 /// (3)define HashValue method, (4)overload operator==.
 ///
 struct DenseTensorTypeStorage : public ir::TypeStorage {
-  ///
-  /// \brief It is consistent with the DataLayout defined by Phi operator
-  /// library. See the file for details: paddle/phi/common/layout.h.
-  ///
-  enum class DataLayout : unsigned int {
-    UNDEFINED = 0,
-    NHWC,
-    NCHW,
-    NCDHW,
-    NDHWC,
-    ONEDNN,
-    SPARSE_COO,
-    SPARSE_CSR,
-    PSTRING_UNION,
-
-    NUM_DATA_LAYOUTS,
-
-    // See Note [ Why we need ALL in basic kernel key member? ]
-    ALL_LAYOUT = UNDEFINED,
-
-    // Note: Unify phi DataLayout and fluid::framework::DataLayout,
-    // for compatible with fluid DataLayout, here need prefix `k`
-    kNHWC = NHWC,
-    kNCHW = NCHW,
-    kMKLDNN = ONEDNN,  // all layouts supported by ONEDNN internally
-    kNDHWC = NDHWC,
-    kNCDHW = NCDHW,
-  };
-
-  using Dim = std::vector<int64_t>;
+  using DataLayout = phi::DataLayout;
+  using Dim = phi::DDim;
   using LoD = std::vector<std::vector<size_t>>;

   ///
   /// \brief Declare ParamKey according to parameter type.
   ///
-  using ParamKey = std::tuple<ir::Type, Dim, DataLayout, LoD, size_t>;
-
-  DenseTensorTypeStorage(
-      ir::Type dtype, Dim dims, DataLayout layout, LoD lod, size_t offset)
+  using ParamKey =
+      std::tuple<ir::Type, phi::DDim, phi::DataLayout, phi::LoD, size_t>;
+
+  DenseTensorTypeStorage(ir::Type dtype,
+                         phi::DDim dims,
+                         phi::DataLayout layout,
+                         phi::LoD lod,
+                         size_t offset)
       : dtype_(dtype),
         dims_(dims),
         layout_(layout),
@@ -114,16 +89,16 @@ struct DenseTensorTypeStorage : public ir::TypeStorage {
         ir::hash_combine(hash_value, std::hash<ir::Type>()(std::get<0>(key)));
     // hash dims
     hash_value =
-        ir::hash_combine(hash_value, std::hash<Dim>()(std::get<1>(key)));
+        ir::hash_combine(hash_value, std::hash<phi::DDim>()(std::get<1>(key)));
     // hash layout
     hash_value = ir::hash_combine(
         hash_value,
-        std::hash<std::underlying_type<DataLayout>::type>()(
-            static_cast<std::underlying_type<DataLayout>::type>(
+        std::hash<std::underlying_type<phi::DataLayout>::type>()(
+            static_cast<std::underlying_type<phi::DataLayout>::type>(
                 std::get<2>(key))));
     // hash lod
     hash_value =
-        ir::hash_combine(hash_value, std::hash<LoD>()(std::get<3>(key)));
+        ir::hash_combine(hash_value, std::hash<phi::LoD>()(std::get<3>(key)));
     // hash offset
     hash_value =
         ir::hash_combine(hash_value, std::hash<size_t>()(std::get<4>(key)));
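HashValue folds the five ParamKey fields pairwise with ir::hash_combine, casting the layout enum to its underlying integer type before hashing. The combiner itself lives in paddle/ir/core/utils.h; the boost-style variant below is a hedged sketch of that helper, assumed rather than copied from the PR:

```cpp
#include <cstddef>

// Assumed shape of ir::hash_combine: the classic boost-style combiner.
// 0x9e3779b9 is the 32-bit golden-ratio constant used to spread bits.
inline std::size_t hash_combine(std::size_t seed, std::size_t value) {
  return seed ^ (value + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}
```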
@@ -146,9 +121,9 @@ struct DenseTensorTypeStorage : public ir::TypeStorage {
   /// layout, lod, offset.
   ///
   ir::Type dtype_;
-  Dim dims_;
-  DataLayout layout_;
-  LoD lod_;
+  phi::DDim dims_;
+  phi::DataLayout layout_;
+  phi::LoD lod_;
   size_t offset_;
 };
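With the storage holding phi types end to end, a DenseTensorType can be interned straight from phi metadata. A hedged construction sketch; ir::Float32Type is assumed to be the ir builtin float type, and the uniquing get() hook to match the call in pd_dialect.cc above:

```cpp
// Sketch, not from the PR: interning a 2x3 float32 DenseTensorType.
ir::IrContext *ctx = ir::IrContext::Instance();
ir::Type fp32 = ir::Float32Type::get(ctx);  // assumed ir builtin type
phi::DDim dims = phi::make_ddim({2, 3});
phi::LoD lod = {{0, 2}};
paddle::dialect::DenseTensorType ty = paddle::dialect::DenseTensorType::get(
    ctx, fp32, dims, phi::DataLayout::NCHW, lod, /*offset=*/0);
```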
