26 changes: 13 additions & 13 deletions paddle/cinn/adt/equation_solver.cc
@@ -258,9 +258,9 @@ std::unordered_map<Variable, Value> InferValues(const Function* function,
DEFINE_ADT_TAG(tValueInferSuccess);

template <typename OnFailT>
tValueInferSuccess<bool> MergeInferedValuesIntoCtx(const Function* function,
IndexExprInferContext* ctx,
const OnFailT& OnFail) {
tValueInferSuccess<bool> MergeInferredValuesIntoCtx(const Function* function,
IndexExprInferContext* ctx,
const OnFailT& OnFail) {
auto output_variable2value = InferValues(function, ctx);
for (const auto& [variable, unsimplified_value] : output_variable2value) {
Value simplified_value({SimplifyValue(unsimplified_value, *ctx)});
@@ -279,9 +279,9 @@ tValueInferSuccess<bool> MergeInferedValuesIntoCtx(const Function* function,
return tValueInferSuccess<bool>{true};
}

tValueInferSuccess<bool> MergeInferedValuesIntoCtx(const Function* function,
IndexExprInferContext* ctx) {
return MergeInferedValuesIntoCtx(
tValueInferSuccess<bool> MergeInferredValuesIntoCtx(
const Function* function, IndexExprInferContext* ctx) {
return MergeInferredValuesIntoCtx(
function, ctx, [&](const std::optional<Value>& lhs, const Value& rhs) {
if (lhs.has_value()) {
VLOG(1) << "opt_old_value = " << ToTxtString(lhs.value());
@@ -304,7 +304,7 @@ void SolveEquations(
walker.WalkFunction(
starts.begin(), starts.end(), [&](const Function* function) {
tValueInferSuccess<bool> has_unique_value =
MergeInferedValuesIntoCtx(function, ctx);
MergeInferredValuesIntoCtx(function, ctx);
PADDLE_ENFORCE_EQ(
has_unique_value.value(),
true,
@@ -322,8 +322,8 @@ void CheckEquationsSolvable(
const EquationGraphTopoWalker<Variable, const Function*>& walker,
const Variable& start,
IndexExprInferContext* ctx) {
const auto& CheckNoConflictInferedValue = [&](const Function* function) {
MergeInferedValuesIntoCtx(
const auto& CheckNoConflictInferredValue = [&](const Function* function) {
MergeInferredValuesIntoCtx(
function,
ctx,
[&](const auto& opt_old_value, const auto& simplified_value) {
@@ -335,7 +335,7 @@ void CheckEquationsSolvable(
});
};

walker.WalkFunction(start, CheckNoConflictInferedValue);
walker.WalkFunction(start, CheckNoConflictInferredValue);
}

tHasNoConflictValue<bool> TrySolveEquations(
@@ -344,14 +344,14 @@ tHasNoConflictValue<bool> TrySolveEquations(
IndexExprInferContext* ctx) {
bool has_no_conflict_value = true;

const auto& HasConflictInferedValue = [&](const Function* function) {
const auto& HasConflictInferredValue = [&](const Function* function) {
tValueInferSuccess<bool> has_unique_value =
MergeInferedValuesIntoCtx(function, ctx);
MergeInferredValuesIntoCtx(function, ctx);
return !has_unique_value.value();
};

walker.WalkFunction(start, [&](const Function* function) {
if (has_no_conflict_value && HasConflictInferedValue(function)) {
if (has_no_conflict_value && HasConflictInferredValue(function)) {
has_no_conflict_value = false;
}
});
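For readers unfamiliar with the renamed helpers: `MergeInferredValuesIntoCtx` takes the values inferred for a function's output variables, merges them into the inference context, and reports a conflict when a variable already holds a different value. As a rough, hypothetical sketch of that pattern (plain Python, not the CINN implementation):

```python
# Minimal sketch of the merge-with-conflict-detection idea, not the CINN code.
def merge_inferred_values_into_ctx(inferred, ctx, on_conflict):
    """inferred: dict of variable -> newly inferred value; ctx: dict used as the context."""
    all_unique = True
    for variable, value in inferred.items():
        old = ctx.get(variable)
        if old is None:
            ctx[variable] = value        # first inference for this variable
        elif old != value:
            on_conflict(old, value)      # same variable inferred to a different value
            all_unique = False
    return all_unique

ctx = {"i": 3}
ok = merge_inferred_values_into_ctx(
    {"i": 4, "j": 7}, ctx,
    on_conflict=lambda old, new: print(f"conflict: {old} vs {new}"),
)
print(ok, ctx)  # False {'i': 3, 'j': 7}
```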
6 changes: 3 additions & 3 deletions paddle/cinn/adt/schedule_dim.cc
@@ -90,16 +90,16 @@ void FilterReducedIterator(
const List<Iterator>& input_iterators,
std::unordered_set<Iterator>* unused_input_iterators) {
std::unordered_set<Iterator> used{};
bool is_output_infered = true;
bool is_output_inferred = true;
VisitEachOutputIterator(op_ctx, [&](const Iterator& output_iterator) {
if (infer_ctx->HasValue(output_iterator)) {
const auto& iterator_expr = infer_ctx->GetValue(output_iterator);
CollectTensorIndexIterators(iterator_expr, &used);
} else {
is_output_infered = false;
is_output_inferred = false;
}
});
if (!is_output_infered) {
if (!is_output_inferred) {
return;
}
for (const auto& input_iterator : *input_iterators) {
4 changes: 2 additions & 2 deletions paddle/fluid/operators/reshape_op.cc
@@ -143,7 +143,7 @@ class ReshapeOp : public framework::OperatorWithKernel {

for (size_t i = 0; i < shape.size(); ++i) {
if (shape[i] == -1) {
// only one dimension can be set to -1, whose size will be infered.
// only one dimension can be set to -1, whose size will be inferred.
PADDLE_ENFORCE_EQ(
unk_dim_idx,
-1,
@@ -331,7 +331,7 @@ to be copied from the corresponding dimension of Input(X).
Note:

1. One and only one dimension in Attr(shape) can be set -1. In this case,
the actual dimension value will be infered from the total element number of
the actual dimension value will be inferred from the total element number of
Input(X) and remaining dimensions.

2. More than one dimensions in Attr(shape) can be set to 0, which means the real
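For context (not part of the diff), the `-1` rule described in that doc comment can be checked with a small `paddle.reshape` example: the inferred size is the total element count divided by the product of the other target dimensions.

```python
import paddle

# 24 elements reshaped to [2, -1, 3]: the -1 dimension is inferred as 24 / (2 * 3) = 4.
x = paddle.arange(24, dtype="float32")
y = paddle.reshape(x, shape=[2, -1, 3])
print(y.shape)  # [2, 4, 3]
```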
@@ -3028,7 +3028,7 @@ bool ReshapeOpInferSymbolicShape(
}
}

// replace '-1' with infered shape
// replace '-1' with inferred shape

const auto &product_exclude_minus_one =
GetProduct(target_shape, IsPositiveInteger);
4 changes: 2 additions & 2 deletions paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc
@@ -1336,7 +1336,7 @@ phi::KernelKey GetKernelKey(
}

if (kernel_backend == phi::Backend::UNDEFINED) {
VLOG(8) << "Kernel backend cannot be infered from op operands";
VLOG(8) << "Kernel backend cannot be inferred from op operands";
kernel_backend = paddle::experimental::ParseBackend(place);
}

@@ -1348,7 +1348,7 @@ phi::KernelKey GetKernelKey(
#endif
phi::KernelKey res(kernel_backend, kernel_layout, kernel_dtype);

// kernel backend infered incorrectly from memcpy op operands,
// kernel backend inferred incorrectly from memcpy op operands,
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
// case that place from (not GPU) to GPU.
// We handle this special case by following code to fix up the problem.
6 changes: 3 additions & 3 deletions python/paddle/nn/initializer/kaiming.py
@@ -58,7 +58,7 @@ class MSRAInitializer(Initializer):

Args:
uniform (bool, optional): whether to use uniform or normal distribution. Default is True.
fan_in (float32|None, optional): fan_in (in_features) of trainable Tensor, If None, it will be infered automatically. If you don't want to use in_features of the Tensor, you can set the value of 'fan_in' smartly by yourself. Default is None.
fan_in (float32|None, optional): fan_in (in_features) of trainable Tensor, If None, it will be inferred automatically. If you don't want to use in_features of the Tensor, you can set the value of 'fan_in' smartly by yourself. Default is None.
seed (int32, optional): random seed. Default is 0.
negative_slope (float, optional): negative_slope (only used with leaky_relu). Default is 0.0.
nonlinearity(str, optional): the non-linear function. Default is relu.
@@ -270,7 +270,7 @@ class KaimingNormal(MSRAInitializer):
\frac{gain}{\sqrt{{fan\_in}}}

Args:
fan_in (float32|None, optional): fan_in (in_features) of trainable Tensor, If None, it will be infered automatically. If you don't want to use in_features of the Tensor, you can set the value of 'fan_in' smartly by yourself. Default is None.
fan_in (float32|None, optional): fan_in (in_features) of trainable Tensor, If None, it will be inferred automatically. If you don't want to use in_features of the Tensor, you can set the value of 'fan_in' smartly by yourself. Default is None.
negative_slope (float, optional): negative_slope (only used with leaky_relu). Default is 0.0.
nonlinearity(str, optional): the non-linear function. Default is relu.

@@ -321,7 +321,7 @@ class KaimingUniform(MSRAInitializer):
x = gain \times \sqrt{\frac{3}{fan\_in}}

Args:
fan_in (float32|None, optional): fan_in (in_features) of trainable Tensor, If None, it will be infered automatically. If you don't want to use in_features of the Tensor, you can set the value of 'fan_in' smartly by yourself. Default is None.
fan_in (float32|None, optional): fan_in (in_features) of trainable Tensor, If None, it will be inferred automatically. If you don't want to use in_features of the Tensor, you can set the value of 'fan_in' smartly by yourself. Default is None.
negative_slope (float, optional): negative_slope (only used with leaky_relu). Default is 0.0.
nonlinearity(str, optional): the non-linear function. Default is relu.

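For context (not part of the diff), a minimal usage sketch of these initializers; leaving `fan_in` as `None` lets it be inferred from the layer's `in_features`, as the docstrings describe. The layer sizes below are arbitrary example values.

```python
import paddle

# fan_in=None (the default) is inferred from the weight's in_features (2 here).
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.KaimingNormal())
linear = paddle.nn.Linear(2, 4, weight_attr=weight_attr)
print(linear.weight.shape)  # [2, 4]
```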
78 changes: 39 additions & 39 deletions test/auto_parallel/spmd_rules/test_add_n_rule.py
@@ -55,25 +55,25 @@ def test_infer_forward(self):
)
self.y_dist_tensor_spec.set_dims_mapping([0, -1, -1])

infered_dist_attr = self.rule1.infer_forward(
inferred_dist_attr = self.rule1.infer_forward(
[self.x_dist_tensor_spec, self.y_dist_tensor_spec]
)

self.assertEqual(len(infered_dist_attr), 2)
infered_input_dist_attr = infered_dist_attr[0]
infered_output_dist_attr = infered_dist_attr[1]
self.assertEqual(len(inferred_dist_attr), 2)
inferred_input_dist_attr = inferred_dist_attr[0]
inferred_output_dist_attr = inferred_dist_attr[1]

self.assertEqual(len(infered_input_dist_attr), 1)
self.assertEqual(len(infered_input_dist_attr[0]), 2)
self.assertEqual(len(infered_output_dist_attr), 1)
self.assertEqual(len(inferred_input_dist_attr), 1)
self.assertEqual(len(inferred_input_dist_attr[0]), 2)
self.assertEqual(len(inferred_output_dist_attr), 1)

self.assertEqual(
infered_input_dist_attr[0][0].dims_mapping, [0, -1, -1]
inferred_input_dist_attr[0][0].dims_mapping, [0, -1, -1]
)
self.assertEqual(
infered_input_dist_attr[0][1].dims_mapping, [0, -1, -1]
inferred_input_dist_attr[0][1].dims_mapping, [0, -1, -1]
)
self.assertEqual(infered_output_dist_attr[0].dims_mapping, [0, -1, -1])
self.assertEqual(inferred_output_dist_attr[0].dims_mapping, [0, -1, -1])

# [0, -1, -1], [-1, -1, -1] (x, y) partial_dim=[1] -->
# [0, -1, -1], [0, -1, -1] (x, y) partial_dim=[1]
@@ -89,33 +89,33 @@ def test_infer_forward(self):
)
self.y_dist_tensor_spec.set_dims_mapping([-1, -1, -1])

infered_dist_attr = self.rule1.infer_forward(
inferred_dist_attr = self.rule1.infer_forward(
[self.x_dist_tensor_spec, self.y_dist_tensor_spec]
)

self.assertEqual(len(infered_dist_attr), 2)
infered_input_dist_attr = infered_dist_attr[0]
infered_output_dist_attr = infered_dist_attr[1]
self.assertEqual(len(inferred_dist_attr), 2)
inferred_input_dist_attr = inferred_dist_attr[0]
inferred_output_dist_attr = inferred_dist_attr[1]

self.assertEqual(len(infered_input_dist_attr), 1)
self.assertEqual(len(infered_input_dist_attr[0]), 2)
self.assertEqual(len(infered_output_dist_attr), 1)
self.assertEqual(len(inferred_input_dist_attr), 1)
self.assertEqual(len(inferred_input_dist_attr[0]), 2)
self.assertEqual(len(inferred_output_dist_attr), 1)

self.assertEqual(
infered_input_dist_attr[0][0].dims_mapping, [0, -1, -1]
inferred_input_dist_attr[0][0].dims_mapping, [0, -1, -1]
)
self.assertEqual(infered_input_dist_attr[0][0]._is_partial(), True)
self.assertEqual(infered_input_dist_attr[0][0]._partial_dims(), {1})
self.assertEqual(inferred_input_dist_attr[0][0]._is_partial(), True)
self.assertEqual(inferred_input_dist_attr[0][0]._partial_dims(), {1})

self.assertEqual(
infered_input_dist_attr[0][1].dims_mapping, [0, -1, -1]
inferred_input_dist_attr[0][1].dims_mapping, [0, -1, -1]
)
self.assertEqual(infered_input_dist_attr[0][1]._is_partial(), True)
self.assertEqual(infered_input_dist_attr[0][1]._partial_dims(), {1})
self.assertEqual(inferred_input_dist_attr[0][1]._is_partial(), True)
self.assertEqual(inferred_input_dist_attr[0][1]._partial_dims(), {1})

self.assertEqual(infered_output_dist_attr[0].dims_mapping, [0, -1, -1])
self.assertEqual(infered_output_dist_attr[0]._is_partial(), True)
self.assertEqual(infered_output_dist_attr[0]._partial_dims(), {1})
self.assertEqual(inferred_output_dist_attr[0].dims_mapping, [0, -1, -1])
self.assertEqual(inferred_output_dist_attr[0]._is_partial(), True)
self.assertEqual(inferred_output_dist_attr[0]._partial_dims(), {1})

# [0, -1, -1] partial_dim=[0], [-1, -1, -1]partial_dim=[1] (x,y) -->
# [0, -1, -1], [0, -1, -1] (x, y)
@@ -134,30 +134,30 @@ def test_infer_forward(self):
)
self.y_dist_tensor_spec.set_dims_mapping([-1, -1, -1])

infered_dist_attr = self.rule1.infer_forward(
inferred_dist_attr = self.rule1.infer_forward(
[self.x_dist_tensor_spec, self.y_dist_tensor_spec]
)

self.assertEqual(len(infered_dist_attr), 2)
infered_input_dist_attr = infered_dist_attr[0]
infered_output_dist_attr = infered_dist_attr[1]
self.assertEqual(len(inferred_dist_attr), 2)
inferred_input_dist_attr = inferred_dist_attr[0]
inferred_output_dist_attr = inferred_dist_attr[1]

self.assertEqual(len(infered_input_dist_attr), 1)
self.assertEqual(len(infered_input_dist_attr[0]), 2)
self.assertEqual(len(infered_output_dist_attr), 1)
self.assertEqual(len(inferred_input_dist_attr), 1)
self.assertEqual(len(inferred_input_dist_attr[0]), 2)
self.assertEqual(len(inferred_output_dist_attr), 1)

self.assertEqual(
infered_input_dist_attr[0][0].dims_mapping, [0, -1, -1]
inferred_input_dist_attr[0][0].dims_mapping, [0, -1, -1]
)
self.assertEqual(infered_input_dist_attr[0][0]._is_partial(), False)
self.assertEqual(inferred_input_dist_attr[0][0]._is_partial(), False)

self.assertEqual(
infered_input_dist_attr[0][1].dims_mapping, [0, -1, -1]
inferred_input_dist_attr[0][1].dims_mapping, [0, -1, -1]
)
self.assertEqual(infered_input_dist_attr[0][1]._is_partial(), False)
self.assertEqual(inferred_input_dist_attr[0][1]._is_partial(), False)

self.assertEqual(infered_output_dist_attr[0].dims_mapping, [0, -1, -1])
self.assertEqual(infered_output_dist_attr[0]._is_partial(), False)
self.assertEqual(inferred_output_dist_attr[0].dims_mapping, [0, -1, -1])
self.assertEqual(inferred_output_dist_attr[0]._is_partial(), False)


if __name__ == "__main__":