Merged
10 changes: 0 additions & 10 deletions _typos.toml
@@ -61,16 +61,6 @@ cann = 'cann'
vart = 'vart'
checkings = 'checkings'
childs = 'childs'
-compability = 'compability'
-compatiblity = 'compatiblity'
-Compitable = 'Compitable'
-compatable = 'compatable'
-compitable = 'compitable'
-compling = 'compling'
-comple = 'comple'
-complition = 'complition'
-complext = 'complext'
-compsite = 'compsite'
comsume = 'comsume'
Continer = 'Continer'
contenst = 'contenst'
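Reviewer note: `_typos.toml` is the allowlist for the repository's typo checker; each entry maps a known misspelling to itself, telling the checker to accept it. With the `compability`/`compitable` family actually fixed in the sources below, these exceptions can simply be deleted, hence 0 additions and 10 deletions.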
4 changes: 2 additions & 2 deletions paddle/cinn/ir/ir.cc
@@ -602,7 +602,7 @@ Expr Store::Make(Expr tensor, Expr value, const std::vector<Expr> &indices) {
node->tensor = tensor;
node->value = value;
node->indices =
-utils::GetCompitableStoreLoadIndices(tensor.as_tensor_ref(), indices);
+utils::GetCompatibleStoreLoadIndices(tensor.as_tensor_ref(), indices);

if (tensor->type() != Void()) {
node->set_type(
@@ -904,7 +904,7 @@ Expr Load::Make(Expr tensor, const std::vector<Expr> &origin_indices) {
true,
::common::errors::InvalidArgument("The tensor type is not valid. "
"A valid tensor type is required."));
-const auto indices = utils::GetCompitableStoreLoadIndices(
+const auto indices = utils::GetCompatibleStoreLoadIndices(
tensor.as_tensor_ref(), origin_indices);
PADDLE_ENFORCE_EQ(
!indices.empty(),
4 changes: 2 additions & 2 deletions paddle/cinn/ir/ir_utils.h
@@ -19,7 +19,7 @@
namespace cinn::ir::utils {

// FIXME(Aurelius84): Return [Expr(1)] for 0D Tensor as the shape.
-static inline std::vector<Expr> GetCompitableShape(
+static inline std::vector<Expr> GetCompatibleShape(
const std::vector<Expr>& shape) {
return shape.empty() ? std::vector<Expr>({Expr(1)}) : shape;
}
@@ -32,7 +32,7 @@ static inline bool MaybeZeroRankTensor(const Tensor& tensor) {
}

// FIXME(Aurelius84): Return [Expr(0)] for 0D Tensor as the indices.
-static inline std::vector<Expr> GetCompitableStoreLoadIndices(
+static inline std::vector<Expr> GetCompatibleStoreLoadIndices(
const Tensor& tensor, const std::vector<Expr>& indices) {
const bool should_fill_zero = indices.empty() && MaybeZeroRankTensor(tensor);
return should_fill_zero ? std::vector<Expr>({Expr(0)}) : indices;
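Reviewer note: the two renamed helpers above exist only to normalize the 0D-tensor edge case (empty shape, empty index list). A minimal standalone sketch of that behavior, using plain `int` in place of `cinn::ir::Expr` and a `bool` in place of the `MaybeZeroRankTensor` query (a hypothetical mirror for illustration, not the real CINN types):

```cpp
#include <cassert>
#include <vector>

// Mirrors GetCompatibleShape: a 0D tensor has an empty shape, so treat it as [1].
std::vector<int> GetCompatibleShape(const std::vector<int>& shape) {
  return shape.empty() ? std::vector<int>({1}) : shape;
}

// Mirrors GetCompatibleStoreLoadIndices: a load/store on a possibly-0D tensor
// with no indices is given the single index [0].
std::vector<int> GetCompatibleStoreLoadIndices(bool maybe_zero_rank,
                                               const std::vector<int>& indices) {
  const bool should_fill_zero = indices.empty() && maybe_zero_rank;
  return should_fill_zero ? std::vector<int>({0}) : indices;
}

int main() {
  assert(GetCompatibleShape({}) == std::vector<int>({1}));        // 0D -> [1]
  assert(GetCompatibleShape({4, 8}) == std::vector<int>({4, 8})); // nD unchanged
  assert(GetCompatibleStoreLoadIndices(true, {}) == std::vector<int>({0}));
  assert(GetCompatibleStoreLoadIndices(false, {2, 3}).size() == 2);
  return 0;
}
```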
2 changes: 1 addition & 1 deletion paddle/cinn/ir/stmt.cc
@@ -80,7 +80,7 @@ Store _Store_::Make(Expr tensor, Expr value, const std::vector<Expr> &indices) {
ref->set_tensor(tensor);
ref->set_value(value);
ref->set_indices(
-utils::GetCompitableStoreLoadIndices(tensor.as_tensor_ref(), indices));
+utils::GetCompatibleStoreLoadIndices(tensor.as_tensor_ref(), indices));

if (tensor->type() != Void()) {
ref->set_type(
12 changes: 6 additions & 6 deletions paddle/cinn/ir/tensor.cc
@@ -51,7 +51,7 @@ Tensor _Tensor_::Make(const std::string &name,
"Required tensor name shall not be empty."));
auto n = make_shared<_Tensor_>();
n->name = name;
-n->shape = utils::GetCompitableShape(shape);
+n->shape = utils::GetCompatibleShape(shape);
n->domain = domain;
n->reduce_axis = reduce_axis;
n->set_type(dtype);
@@ -71,7 +71,7 @@ Tensor _Tensor_::Make(const std::string &name,
"Required tensor name shall not be empty."));
auto n = make_shared<_Tensor_>();
n->name = name;
-n->shape = utils::GetCompitableShape(shape);
+n->shape = utils::GetCompatibleShape(shape);
n->domain = domain;
n->reduce_axis = reduce_axis;
n->operation = PlaceholderOp::Make(n->name, n->shape, Float(32));
@@ -178,14 +178,14 @@ Expr Tensor::operator()(const std::vector<Expr> &indices) const {
::common::errors::PreconditionNotMet(
"Required tensor shall not be tuple type."));
auto *node = operator->();
-const auto compitable_indices =
-utils::GetCompitableStoreLoadIndices(*this, indices);
+const auto compatible_indices =
+utils::GetCompatibleStoreLoadIndices(*this, indices);

-PADDLE_ENFORCE_EQ(compitable_indices.size(),
+PADDLE_ENFORCE_EQ(compatible_indices.size(),
ndims(),
::common::errors::PreconditionNotMet(
"number of indices not match the dimension"));
-return Load::Make(*this, compitable_indices);
+return Load::Make(*this, compatible_indices);
}

Expr _Tensor_::inline_expanded(const std::vector<Expr> &indices) {
4 changes: 2 additions & 2 deletions paddle/cinn/lang/placeholder.cc
@@ -32,7 +32,7 @@ ir::Tensor CreatePlaceHolder(const std::vector<int> &shape,
expr_shape.push_back(Expr(s));
}
return CreatePlaceHolder(
-ir::utils::GetCompitableShape(expr_shape), type, name);
+ir::utils::GetCompatibleShape(expr_shape), type, name);
}

ir::Tensor CreatePlaceHolder(const std::vector<ir::Dim> &shape,
@@ -75,7 +75,7 @@ ir::Tensor CreatePlaceHolder(const std::vector<ir::Dim> &shape,
ir::Tensor CreatePlaceHolder(const std::vector<Expr> &origin_shape,
Type type,
const std::string &name) {
-const auto shape = ir::utils::GetCompitableShape(origin_shape);
+const auto shape = ir::utils::GetCompatibleShape(origin_shape);
if (type.is_float(32)) {
return Placeholder<float>(name, shape);
} else if (type.is_float(64)) {
@@ -109,7 +109,7 @@ static DimUsageRelation CreateOpRelativenessForBroadcast(pir::Operation* op) {
static DimUsageRelation CreateOpRelativenessForReduce(pir::Operation* op) {
const auto& reduce_axis_idx = GetReduceAxisIdx(op);
DimUsageRelation res;
-const size_t input_rank = GetCompitableRank(op->operand_source(0));
+const size_t input_rank = GetCompatibleRank(op->operand_source(0));
int out_idx = 0;
bool keep_dim = GetReduceOpKeepDims(op);
for (size_t i = 0; i < input_rank; i++) {
@@ -134,7 +134,7 @@ FusionItersSignature FusionItersManager::GetItersSignature(pir::Operation* op) {
if (axes.reduce_size > 0) {
PADDLE_ENFORCE_LE(
axes.reduce_size,
-GetCompitableRank(op->operand(0).source()),
+GetCompatibleRank(op->operand(0).source()),
::common::errors::InvalidArgument("The number of reduce_axis should be "
"no more than output value ranks."));
}
@@ -77,11 +77,11 @@ ShardableAxesSignature CreateDefaultSignature(pir::Operation* op) {
ShardableAxesSignature result = ShardableAxesSignature();
for (int i = 0; i < op->num_operands(); ++i) {
result.inputs.emplace_back(
-CreateNewNamesWithRank(GetCompitableRank(op->operand_source(i))));
+CreateNewNamesWithRank(GetCompatibleRank(op->operand_source(i))));
}
for (int i = 0; i < op->num_results(); ++i) {
result.outputs.emplace_back(
-CreateNewNamesWithRank(GetCompitableRank(op->result(i))));
+CreateNewNamesWithRank(GetCompatibleRank(op->result(i))));
}
return result;
}
@@ -109,7 +109,7 @@ ShardableAxesSignature CreateSignatureForReduce(pir::Operation* reduce_op) {
1,
::common::errors::PreconditionNotMet(
"Required reduce_op->num_results() shall be equal 1."));
-const size_t input_rank = GetCompitableRank(reduce_op->operand_source(0));
+const size_t input_rank = GetCompatibleRank(reduce_op->operand_source(0));
auto input_axes = CreateNewNamesWithRank(input_rank);

const std::vector<int64_t> reduce_axis_idx = GetReduceAxisIdx(reduce_op);
@@ -152,20 +152,20 @@ ShardableAxesSignature CreateSignatureForReduce(pir::Operation* reduce_op) {
ShardableAxesSignature CreateSignatureForElementWise(pir::Operation* op) {
ShardableAxesSignature result = ShardableAxesSignature();

-int64_t rank = GetCompitableRank(op->result(0));
+int64_t rank = GetCompatibleRank(op->result(0));
auto same_axes = CreateNewNamesWithRank(rank);

for (int i = 0; i < op->num_operands(); ++i) {
PADDLE_ENFORCE_EQ(rank,
-GetCompitableRank(op->operand_source(i)),
+GetCompatibleRank(op->operand_source(i)),
::common::errors::PreconditionNotMet(
"Required all inputs rank shall be equal output in "
"elementwise op."));
result.inputs.emplace_back(same_axes);
}
for (int i = 0; i < op->num_results(); ++i) {
PADDLE_ENFORCE_EQ(rank,
-GetCompitableRank(op->result(i)),
+GetCompatibleRank(op->result(i)),
::common::errors::PreconditionNotMet(
"Required all outputs rank shall be equal each other "
"in elementwise op."));
@@ -188,7 +188,7 @@ ShardableAxesSignature CreateSignatureForTranspose(pir::Operation* op) {
"Required transpose_op->num_results() shall be equal 1."));

const auto input_axes =
-CreateNewNamesWithRank(GetCompitableRank(op->operand_source(0)));
+CreateNewNamesWithRank(GetCompatibleRank(op->operand_source(0)));

std::vector<int32_t> perm =
GetInt32ArrayAttributeData(op->attributes().at("perm"));
@@ -224,7 +224,7 @@ ShardableAxesSignature CreateSignatureForSlice(
"Required slice_op->num_results() shall be equal 1."));

const auto input_axes =
-CreateNewNamesWithRank(GetCompitableRank(op->operand_source(0)));
+CreateNewNamesWithRank(GetCompatibleRank(op->operand_source(0)));

const auto [slice_axis, keepdim] = GetSliceAxis(op);
const auto output_axes = [&]() -> decltype(auto) {
@@ -266,8 +266,8 @@ ShardableAxesSignature CreateSignatureForBroadcast(
"Required broad_cast_value is not empty."));

const auto& [input_value, output_value] = broad_cast_value.value();
-const int input_rank = GetCompitableRank(input_value);
-const int output_rank = GetCompitableRank(output_value);
+const int input_rank = GetCompatibleRank(input_value);
+const int output_rank = GetCompatibleRank(output_value);
PADDLE_ENFORCE_GE(
output_rank,
input_rank,
Expand All @@ -278,7 +278,7 @@ ShardableAxesSignature CreateSignatureForBroadcast(
// output.
for (int i = 0; i < op->num_operands(); ++i) {
result.inputs.emplace_back(
-CreateNewNamesWithRank(GetCompitableRank(op->operand_source(i))));
+CreateNewNamesWithRank(GetCompatibleRank(op->operand_source(i))));
}

// Create output axes. Compare axis one by one, from back to front.
@@ -309,8 +309,8 @@ ShardableAxesSignature CreateSignatureForReshape(
pir::ShapeConstraintIRAnalysis* shape_analysis) {
const auto input_value = op->operand_source(0);
const auto output_value = op->result(0);
-const auto input_rank = GetCompitableRank(op->operand_source(0));
-const auto output_rank = GetCompitableRank(op->result(0));
+const auto input_rank = GetCompatibleRank(op->operand_source(0));
+const auto output_rank = GetCompatibleRank(op->result(0));
const auto in_shape = GetDimExprsFromValue(input_value);
const auto out_shape = GetDimExprsFromValue(output_value);

@@ -320,7 +320,7 @@

if (op->name() == "pd_op.reshape" && op->num_operands() == 2) {
result.inputs.emplace_back(
-CreateNewNamesWithRank(GetCompitableRank(op->operand_source(1))));
+CreateNewNamesWithRank(GetCompatibleRank(op->operand_source(1))));
}

if (GetRank(input_value) == 0 || GetRank(output_value) == 0) {
@@ -387,7 +387,7 @@ ShardableAxesSignature CreateSignatureForReshape(

ShardableAxesSignature CreateSignatureForConcat(
pir::Operation* op, ShardableAxesInfoManager* axes_manager) {
-size_t rank = GetCompitableRank(op->result(0));
+size_t rank = GetCompatibleRank(op->result(0));
const auto same_axes = CreateNewNamesWithRank(rank - 1);

const auto axis_attr =
@@ -406,7 +406,7 @@ ShardableAxesSignature CreateSignatureForConcat(
ShardableAxesSignature result = ShardableAxesSignature();
for (int i = 0; i < op->num_operands(); ++i) {
PADDLE_ENFORCE_EQ(rank,
-GetCompitableRank(op->operand_source(i)),
+GetCompatibleRank(op->operand_source(i)),
::common::errors::PreconditionNotMet(
"Required all inputs rank shall be equal output in "
"concat op."));
2 changes: 1 addition & 1 deletion paddle/cinn/operator_fusion/utils.cc
@@ -55,7 +55,7 @@ std::vector<int32_t> GetInt32ArrayAttributeData(
}

std::vector<int64_t> GetReduceAxisIdx(pir::Operation* reduce_op) {
-const size_t input_rank = GetCompitableRank(reduce_op->operand_source(0));
+const size_t input_rank = GetCompatibleRank(reduce_op->operand_source(0));
const auto& attr_val = reduce_op->attributes().at("axis");
PADDLE_ENFORCE_EQ(attr_val.isa<::pir::ArrayAttribute>(),
true,
6 changes: 3 additions & 3 deletions paddle/cinn/operator_fusion/utils.h
@@ -61,9 +61,9 @@ static size_t GetRank(pir::Value value) {
return value.type().dyn_cast<pir::DenseTensorType>().dims().size();
}

-// FIXME(Aurelius84): 0D Tensor is not compitable with other rank.
+// FIXME(Aurelius84): 0D Tensor is not compatible with other rank.
// So we need to add a special case for 0D Tensor.
-static size_t GetCompitableRank(pir::Value value) {
+static size_t GetCompatibleRank(pir::Value value) {
size_t rank = GetRank(value);
return rank == 0 ? 1 : rank;
}
@@ -404,7 +404,7 @@ struct ValueDim {

static std::vector<ValueDim> GetAllValueDimFromValue(const pir::Value& v) {
std::vector<ValueDim> value_dims;
-size_t rank = GetCompitableRank(v);
+size_t rank = GetCompatibleRank(v);
for (size_t i = 0; i < rank; ++i) {
value_dims.emplace_back(v, i);
}
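Reviewer note: `GetCompatibleRank` is the rank-side counterpart of the shape/index helpers in `ir_utils.h`, promoting rank 0 to rank 1 so the axis bookkeeping in the fusion passes always has at least one axis to name. A minimal sketch under the same caveat as before (a plain `size_t` stands in for the `pir::Value` introspection; illustration only):

```cpp
#include <cassert>
#include <cstddef>

// Mirrors GetCompatibleRank: a 0D tensor is handled as if it were rank 1.
size_t GetCompatibleRank(size_t real_rank) {
  return real_rank == 0 ? 1 : real_rank;
}

int main() {
  assert(GetCompatibleRank(0) == 1);  // 0D tensor promoted to rank 1
  assert(GetCompatibleRank(3) == 3);  // all other ranks pass through
  return 0;
}
```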
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/op_compat_sensible_pass.cc
@@ -220,7 +220,7 @@ bool OpCompat::Judge(const OpDesc& op_desc, const std::string& pass_name) {
LOG(WARNING) << " Attribute(" << attr_compat.first << ") of Op("
<< op_name_
<< ") is not defined in opProto or is in extra set!"
<< "The compatable check for this attribute is not use."
<< "The compatible check for this attribute is not use."
<< " Please remove it from the precondition of pass: "
<< pass_name.c_str();
}
@@ -298,7 +298,7 @@ OpCompat& OpCompatSensiblePass::AddOpCompat(OpCompat&& op_compat) {
return *(op_compat_judgers_[name]);
}

-//! Tell the Op compability of a subgraph.
+//! Tell the Op compatibility of a subgraph.
bool OpCompatSensiblePass::IsCompat(
const GraphPatternDetector::subgraph_t& subgraph, Graph*) const {
PADDLE_ENFORCE_EQ(op_compat_judgers_.empty(),
8 changes: 4 additions & 4 deletions paddle/fluid/framework/ir/op_compat_sensible_pass.h
@@ -154,7 +154,7 @@ class OpCompat {
/**
* OpCompatSensiblePass is a base class for all the passes thouse is sensitive
* to Op update.
-* There are two methods to help tell the compability of an Op
+* There are two methods to help tell the compatibility of an Op
* bool IsCompat(const GraphPatternDetector::subgraph_t& subgraph, Graph* g);
* bool IsCompat(const OpDesc& op_desc);
*
@@ -172,7 +172,7 @@
* class FcFusePass : public OpCompatSensiblePass {
* public:
* FcFusePass() {
-* // define Mul op compatiblity.
+* // define Mul op compatibility.
* AddOpCompat(OpCompat("Mul"))
* .AddInput("Input").IsTensor().End()
* .AddAttr("in_num_col_dims").IsNumGE(1);
@@ -195,12 +195,12 @@ class OpCompatSensiblePass : public Pass {
/**
* Developer should push the compatibility `teller` for each kind of Op in the
* subgraph.
-* NOTE One should add all the related op compatiblity in the construct so
+* NOTE One should add all the related op compatibility in the construct so
* that all the following methods are valid.
*/
OpCompat& AddOpCompat(OpCompat&& op_compat);

-//! Tell the Op compability of a subgraph.
+//! Tell the Op compatibility of a subgraph.
bool IsCompat(const GraphPatternDetector::subgraph_t& subgraph,
Graph* g) const;

4 changes: 2 additions & 2 deletions paddle/fluid/pybind/tensor_py.h
@@ -196,7 +196,7 @@ struct npy_format_descriptor<phi::dtype::complex<float>> {
// print '{0:14s} : {1:40s}'.format(str(k), v)
return "F";
}
static constexpr auto name = _("complext64");
static constexpr auto name = _("complex64");
};

template <>
@@ -214,7 +214,7 @@ struct npy_format_descriptor<phi::dtype::complex<double>> {
// print '{0:14s} : {1:40s}'.format(str(k), v)
return "D";
}
static constexpr auto name = _("complext128");
static constexpr auto name = _("complex128");
};

template <>
2 changes: 1 addition & 1 deletion paddle/phi/backends/gpu/gpu_resources.cc
@@ -166,7 +166,7 @@ void InitGpuProperties(Place place,
<< get_cudnn_major(cudnn_dso_ver) << "."
<< get_cudnn_minor(cudnn_dso_ver) << ".";

-// Check CUDA/CUDNN version compatiblity
+// Check CUDA/CUDNN version compatibility
auto local_cuda_version =
(*driver_version / 1000) * 10 + (*driver_version % 100) / 10;
auto compile_cuda_version =
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/flash_attn_utils.h
@@ -391,7 +391,7 @@ static void CheckFlashAttnStatus(const bool status) {
static void RaiseNotSupportedError(int version = 2) {
PADDLE_THROW(common::errors::Unimplemented(
"FlashAttentio%d is unsupported, please check "
"the GPU compability and CUDA Version.",
"the GPU compatibility and CUDA Version.",
version));
}

2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/static/engine.py
@@ -810,7 +810,7 @@ def _parallel_pir(self, mode):
# and all the Pass in this Part should be optional to allow consistence in dynamic and static mode.
if self._strategy.auto_mode == "semi-auto":
# TODO(xxxx) Step 2.1 Entire Graph Completion in Pir.
-# dist_program = apply_complition_pass(dist_program)
+# dist_program = apply_completion_pass(dist_program)
pass
elif self._strategy.auto_mode == "random" or "full_random":
# TODO(caozhou) Step 2.3 Basic Random / MCMC Algorithm for Fully Auto Parallel Search.
2 changes: 1 addition & 1 deletion python/paddle/tensor/creation.py
@@ -1394,7 +1394,7 @@ def _check_attr(attr, message):
'int32',
'int64',
'complex64',
-'comple128',
+'complex128',
],
'eye',
)