18 changes: 7 additions & 11 deletions _typos.toml
@@ -12,6 +12,13 @@ extend-exclude = [
[default]
# Ignore 1-3 letter words, refer to https://github.com/crate-ci/typos/issues/1079
extend-ignore-words-re = ["^[a-zA-Z]{1,3}$"]
# refer to https://github.com/crate-ci/typos/blob/master/docs/reference.md#example-configurations
extend-ignore-re = [
# Ignore lines by `# typos: disable-line`
"(?Rm)^.*(#|//)\\s*typos:\\s*disable-line$",
# Ignore block by `# typos: off` and `# typos: on`
"(?s)(#|//)\\s*typos:\\s*off.*?\\n\\s*(#|//)\\s*typos:\\s*on"
]

[default.extend-words]
# PaddlePaddle specific words
@@ -69,19 +76,8 @@ fron = 'fron'
fullfill = 'fullfill'
Indexs = 'Indexs'
indexs = 'indexs'
indiates = 'indiates'
indeces = 'indeces'
inferrence = 'inferrence'
Infered = 'Infered'
infered = 'infered'
infering = 'infering'
informations = 'informations'
imformation = 'imformation'
infomation = 'infomation'
Infor = 'Infor'
infor = 'infor'
inheritted = 'inheritted'
initilization = 'initilization'
initilized = 'initilized'
initalized = 'initalized'
initalize = 'initalize'
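Note: the two `extend-ignore-re` patterns added above let a single line or a whole block opt out of the typos checker. A minimal sketch of how they would be used in source code (the file and identifiers are hypothetical, and deliberately misspelled to trigger the checker):

    # example.py -- hypothetical file exercising the new markers
    LEGACY_KEY = 'UnpackBigParamInfor@@'  # typos: disable-line

    # typos: off
    infor = {'informations': 1}  # whole block exempt from spell checking
    # typos: on

    print(LEGACY_KEY, infor)

The line marker suppresses one finding; the off/on pair fences a region where historical spellings must be preserved for compatibility.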
@@ -117,7 +117,7 @@ OpLoweringGroupPtr BuildOpLoweringGroup(pir::Operation* fusion_op_ptr) {
if (FLAGS_cinn_enable_map_expr) {
cinn::adt::TryGenerateMapExprFromGroup(group);
}
// Rebuild other informations
// Rebuild other information
// TODO(zhangyuqin1998): Do we need group.master_ops?
return group;
}
2 changes: 1 addition & 1 deletion paddle/cinn/hlir/framework/pir/utils.cc
@@ -95,7 +95,7 @@ std::string GetDebugInfo(const std::unordered_set<std::string>& names) {
return debug_info;
}

// OpTransInfo contains informations used to detect subgraphs
// OpTransInfo contains information used to detect subgraphs
// supported by the CINN compiler.
class OpTransInfo {
using DeParamCondT =
2 changes: 1 addition & 1 deletion paddle/cinn/ir/ir.h
@@ -848,7 +848,7 @@ struct ForBase {
BindInfo bind_info_;
};

/// LLVM loop unroll metadata infomation
/// LLVM loop unroll metadata information
struct LLVMForLoopMeta {
enum UnrollMode { DefaultUnroll, FullyUnroll, NoUnroll };

4 changes: 2 additions & 2 deletions paddle/cinn/poly/stage.h
@@ -57,7 +57,7 @@ struct StageForloopInfo {
ir::DeviceAPI device;
};

//! Store the informations about some other tensor `compute_at` this tensor.
//! Store the information about some other tensor `compute_at` this tensor.
struct ComputeAtInfo {
ComputeAtInfo(const std::string& consumer_tensor_name,
const std::string& producer_tensor_name,
@@ -84,7 +84,7 @@ struct ComputeAtInfo {
};

/**
* Meta infomation for tensor.
* Meta information for tensor.
*/
struct TensorScheduleMeta {
//! Store the information of all the other producer tensors `compute_at` this
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/ps/service/brpc_utils.cc
@@ -321,7 +321,7 @@ std::string GetIntTypeEndpoint(const std::string& ip, const uint32_t& port) {

if (nullptr == hp) {
LOG(ERROR) << "Brpc Start failed, ip_port= " << ip_port
<< " , Error infomation: " << hstrerror(h_errno);
<< " , Error information: " << hstrerror(h_errno);
}

int i = 0;
@@ -18,7 +18,7 @@
#include "paddle/fluid/eager/tensor_wrapper.h"

/*
Each Operation has a specific GradNode inheritted from GradNodeBase
Each Operation has a specific GradNode inherited from GradNodeBase
A specific GradNode defines
1. Input Tensors
2. overrides operator() to perform actual backward computations
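A conceptual sketch of that pattern in Python (Paddle's real GradNode hierarchy is C++; the classes below are illustrative only):

    # Each op's grad node records the state its backward pass needs, then
    # overrides __call__ (operator() in C++) to compute the gradients.
    class GradNodeBase:
        def __call__(self, grads):
            raise NotImplementedError

    class ScaleGradNode(GradNodeBase):
        def __init__(self, scale):
            self.scale = scale  # saved for backward, like a TensorWrapper
        def __call__(self, grads):
            return [g * self.scale for g in grads]

    print(ScaleGradNode(2.0)([1.0, 3.0]))  # [2.0, 6.0]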
@@ -859,7 +859,7 @@ void BuildOpFuncList(const phi::Place& place,
op->Attr<bool>(kAllKernelsMustComputeRuntimeShape))) {
RuntimeInferShapeContext infer_shape_ctx(*op, runtime_context);
// TODO(Aurelius84): In case of control flow ops, they are NOT
// inheritted from OperatorWithKernel.
// inherited from OperatorWithKernel.
op_with_kernel->Info().infer_shape_(&infer_shape_ctx);
}
}
@@ -92,7 +92,7 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
output_fp16,
1,
common::errors::InvalidArgument(
"Only Precision::KHalf(fp16) is supported when infering "
"Only Precision::KHalf(fp16) is supported when inferring "
"ernie(bert) model with config.EnableVarseqlen(). "
"But Precision::KFloat32 is setted."));

@@ -145,7 +145,7 @@ class PrelnEmbEltwiseLayerNormOpConverter : public OpConverter {
output_fp16,
1,
common::errors::InvalidArgument(
"Only Precision::KHalf(fp16) is supported when infering "
"Only Precision::KHalf(fp16) is supported when inferring "
"ernie(bert) model with config.EnableVarseqlen(). "
"But Precision::KFloat32 is setted."));

@@ -91,7 +91,7 @@ class PromptTuningEmbEltwiseLayerNormOpConverter : public OpConverter {
output_fp16,
1,
common::errors::InvalidArgument(
"Only Precision::KHalf(fp16) is supported when infering "
"Only Precision::KHalf(fp16) is supported when inferring "
"ernie(bert) model with config.EnableVarseqlen(). "
"But Precision::KFloat32 is setted."));

2 changes: 1 addition & 1 deletion paddle/fluid/operators/generator/generate_op.py
@@ -309,7 +309,7 @@ def add_grad_op_compat_name(grad_op_item, args_name_map):
if new_op_name != op_name:
forward_op_item['op_name'] = op_name

# add complex promote infomation
# add complex promote information
if "complex_promote" in op_args:
forward_op_item["complex_promote"] = op_args["complex_promote"]
if has_backward:
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/tensor.cc
@@ -878,7 +878,7 @@ void BindTensor(pybind11::module &m) { // NOLINT

Returns:
tuple: contains ipc name, data size, data type,
tensor dims and lod imformation.
tensor dims and lod information.

Examples:
.. code-block:: python
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/unary.cc
@@ -1242,7 +1242,7 @@ void EigvalshInferMeta(const MetaTensor& x,
void EinsumInferMeta(const std::vector<const MetaTensor*>& inputs,
const std::string& equation,
MetaTensor* out) {
// collect the following informations to prepare einsum.
// collect the following information to prepare einsum.
LabelMap labelshape(0);
LabelMap labeltype(LabelType::Reduction);
std::vector<LabelMap> label2perms(inputs.size(), LabelMap(-1));
4 changes: 2 additions & 2 deletions paddle/phi/kernels/funcs/seq2col.h
@@ -35,7 +35,7 @@ struct Seq2ColFunctor {
/*
Convert sequences to frames.

1. Dimension infomation:
1. Dimension information:

Sequences Frames
(N, seq_length) -> (N, frame_length, n_frames)
@@ -105,7 +105,7 @@ struct Col2SeqFunctor {
/*
Accumulate output gradient d_out to d_x.

1. Dimension infomation:
1. Dimension information:

d_out d_x
(N, frame_length, n_frames) -> (N, seq_length)
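The dimension comments above translate to a small NumPy sketch (`hop_length` and the function name are assumptions for illustration, not Paddle's kernel API):

    import numpy as np

    def seq2col(x, frame_length, hop_length):
        # (N, seq_length) -> (N, frame_length, n_frames)
        n, seq_length = x.shape
        n_frames = 1 + (seq_length - frame_length) // hop_length
        out = np.empty((n, frame_length, n_frames), dtype=x.dtype)
        for f in range(n_frames):
            start = f * hop_length
            out[:, :, f] = x[:, start:start + frame_length]
        return out

    x = np.arange(20, dtype=np.float32).reshape(2, 10)
    print(seq2col(x, frame_length=4, hop_length=2).shape)  # (2, 4, 4)

Col2SeqFunctor is the adjoint direction: each frame of d_out is accumulated back into the overlapping positions of d_x.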
2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/einsum_impl.h
@@ -603,7 +603,7 @@ void EinsumKernelImpl(const Context& dev_ctx,
VLOG(5) << " inputs [ " << i << " ].shape=" << i->dims();
}
ValidationCheck(equation);
// collect the following informations to prepare einsum.
// collect the following information to prepare einsum.
LabelMap labelshape(0);
LabelMap labeltype(LabelType::Reduction);
std::vector<LabelMap> label2perms(inputs.size(), LabelMap(-1));
6 changes: 3 additions & 3 deletions paddle/phi/kernels/sparse/cpu/elementwise_kernel.cc
@@ -240,11 +240,11 @@ void ElementWiseCooKernelImpl(const Context& dev_ctx,
common::make_ddim(
{static_cast<int64_t>(sparse_dim), static_cast<int64_t>(nnz)}),
DataLayout::NCHW);
auto indeces_dim = common::vectorize(
auto indices_dim = common::vectorize(
slice_ddim(x.values().dims(), 1, x.values().dims().size()));
indeces_dim.insert(indeces_dim.begin(), nnz);
indices_dim.insert(indices_dim.begin(), nnz);
DenseTensorMeta values_meta(
x.dtype(), common::make_ddim(indeces_dim), DataLayout::NCHW);
x.dtype(), common::make_ddim(indices_dim), DataLayout::NCHW);
phi::DenseTensor out_indices = phi::Empty(dev_ctx, std::move(indices_meta));
phi::DenseTensor out_values = phi::Empty(dev_ctx, std::move(values_meta));

2 changes: 1 addition & 1 deletion paddle/phi/kernels/sparse/gpu/convolution.cu.h
@@ -109,7 +109,7 @@ inline IntT* SortedAndUniqueIndex(const Context& dev_ctx,
/**
* @brief: update the out index and indices
* unique_keys: save the index of the output feature list
* unique_values: indiates the index of key before deduplication
* unique_values: indicates the index of key before deduplication
* out_indexs: indicates the position of the output index in the rulebook
* rulebook_len: indicates the length of rulebook
* out_dims: indicates the output dims
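A NumPy analogue of that deduplication bookkeeping (illustrative only, not the CUDA kernel):

    import numpy as np

    # keys: raw output indices produced by the rulebook, with duplicates
    keys = np.array([7, 3, 7, 1, 3])
    unique_keys, inverse = np.unique(keys, return_inverse=True)
    print(unique_keys)  # [1 3 7] -> deduplicated output feature list
    print(inverse)      # [2 1 2 0 1] -> where each key landed after dedup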
6 changes: 3 additions & 3 deletions paddle/pir/include/core/ir_context.h
@@ -105,7 +105,7 @@ class IR_API IrContext {
AbstractAttribute *GetRegisteredAbstractAttribute(TypeId id);

///
/// \brief Register an op infomation to IrContext
/// \brief Register an op information to IrContext
///
void RegisterOpInfo(Dialect *dialect,
TypeId op_id,
@@ -118,12 +118,12 @@
void (*verify_region)(Operation *));

///
/// \brief Get registered operation infomation.
/// \brief Get registered operation information.
///
OpInfo GetRegisteredOpInfo(const std::string &name);

///
/// \brief Get registered operation infomation map.
/// \brief Get registered operation information map.
///
const OpInfoMap &registered_op_info_map();

@@ -402,7 +402,7 @@ def find_diff_vars(fixed_vars_map, query_vars_map):
return diff_var_name_list

@staticmethod
def diff_informations(right_dir, wrong_dir):
def diff_information(right_dir, wrong_dir):
"""
Find the corresponding operator according to the variable name.
"""
@@ -448,7 +448,7 @@ def diff_informations(right_dir, wrong_dir):
return diff_ops_varname_dict

@staticmethod
def diff_informations_from_dirs(right_dirs, wrong_dirs):
def diff_information_from_dirs(right_dirs, wrong_dirs):
right_vars_list = []
right_program_list = []
right_dist_attr_map = {}
4 changes: 2 additions & 2 deletions python/paddle/distributed/auto_parallel/static/engine.py
@@ -1338,12 +1338,12 @@ def _initialize(self, mode, init_parameters=True):
)

if self._in_pir_mode:
# FIXME(ljz) avoid shared same tensro more than once in different mode
# FIXME(ljz) avoid shared same tensor more than once in different mode
if mode != "train":
return
# TODO(2024-Q2)
# 1. unify random control
# 2. initilization of non-parameter buffer
# 2. initialization of non-parameter buffer
# 3. run startup program for pir
# 4. lazy init adaption
# 5. amp init adaption
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/utils/log_util.py
@@ -157,7 +157,7 @@ def check_memory_usage(msg=""):
mem_msg = f"checking pinned memory usage {msg}:"
for key in mem_dict:
mem_msg += f"\n{key}: {mem_dict[key]}GB"
logger.infor(mem_msg)
logger.info(mem_msg)

if hasattr(paddle.device, 'cpu') and hasattr(
paddle.device.cpu, 'max_memory_allocated'
2 changes: 1 addition & 1 deletion python/paddle/distributed/rpc/rpc.py
@@ -365,7 +365,7 @@ class `WorkerInfo` with attribute `name`, `rank`, `ip` and `port`.

def get_all_worker_infos() -> list[WorkerInfo]:
"""
Get all worker informations.
Get all worker information.

Returns:
List[WorkerInfo].
18 changes: 9 additions & 9 deletions python/paddle/framework/io_utils.py
@@ -215,7 +215,7 @@ def _pickle_loads_mac(path, f):

def _pack_loaded_dict(load_obj):
if isinstance(load_obj, dict):
unpack_info = 'UnpackBigParamInfor@@'
unpack_info = 'UnpackBigParamInfor@@' # typos: disable-line
if unpack_info in load_obj:
removes = []
for key, value in load_obj[unpack_info].items():
Expand All @@ -233,7 +233,7 @@ def _pack_loaded_dict(load_obj):

def _unpack_saved_dict(saved_obj, protocol):
temp_saved_obj = {}
unpack_infor = {}
unpack_info = {}
# When pickle protocol=2 or protocol=3 the serialized object cannot be larger than 4G.
if 1 < protocol < 4:
if isinstance(saved_obj, dict):
Expand All @@ -244,9 +244,9 @@ def _unpack_saved_dict(saved_obj, protocol):
)
num_element = np.prod(value.shape)
if num_element > MAX_NUMBER_OF_ELEMENT:
unpack_infor[key] = {}
unpack_infor[key]["OriginShape"] = value.shape
unpack_infor[key]["slices"] = []
unpack_info[key] = {}
unpack_info[key]["OriginShape"] = value.shape
unpack_info[key]["slices"] = []
value = value.flatten()
for i in range(
int(
Expand All @@ -256,20 +256,20 @@ def _unpack_saved_dict(saved_obj, protocol):
)
):
part_name = key + "@@." + str(i)
unpack_infor[key]["slices"].append(part_name)
unpack_info[key]["slices"].append(part_name)
temp_saved_obj[part_name] = value[
i
* MAX_NUMBER_OF_ELEMENT : MAX_NUMBER_OF_ELEMENT
* (i + 1)
]

if unpack_infor:
for key, value in unpack_infor.items():
if unpack_info:
for key, value in unpack_info.items():
if key in saved_obj:
saved_obj.pop(key)
for part in value['slices']:
saved_obj[part] = temp_saved_obj[part]
saved_obj['UnpackBigParamInfor@@'] = unpack_infor
saved_obj['UnpackBigParamInfor@@'] = unpack_info # typos: disable-line
return saved_obj
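The slicing above works around pickle's 4 GB object limit under protocols 2 and 3, as the comment in the hunk notes. A toy version of the round trip (`MAX_ELEM` is scaled down here for illustration; it stands in for `MAX_NUMBER_OF_ELEMENT`):

    import numpy as np

    MAX_ELEM = 4
    value = np.arange(10)
    n_slices = int(np.ceil(value.size / MAX_ELEM))
    slices = {f'w@@.{i}': value[i * MAX_ELEM:(i + 1) * MAX_ELEM]
              for i in range(n_slices)}  # mirrors key + "@@." + str(i)
    restored = np.concatenate(list(slices.values()))
    assert (restored == value).all()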


6 changes: 2 additions & 4 deletions test/deprecated/auto_parallel/test_align_tool_deprecated.py
@@ -97,10 +97,8 @@ def test_align_tool(self):
os.mkdir("./serial")
align_tool.save("./serial", vars, fetch_list)
break
AutoAlignTool.diff_informations("./serial", "./serial")
AutoAlignTool.diff_informations_from_dirs(
["./serial"], ["./serial"]
)
AutoAlignTool.diff_information("./serial", "./serial")
AutoAlignTool.diff_information_from_dirs(["./serial"], ["./serial"])
break

print("test auto parallel align tool successfully!")
2 changes: 0 additions & 2 deletions tools/parallel_UT_rule.py
@@ -89,7 +89,6 @@
'test_sampling_id_op',
'test_nce',
'graph_helper_test',
'test_static_shape_inferrence_for_shape_tensor',
'test_layer_norm_mkldnn_op',
'test_fleet_launch_async',
'test_multi_gru_fuse_pass',
@@ -1570,7 +1569,6 @@
'test_sysconfig',
'test_sync_batch_norm_pass',
'test_switch',
'test_static_shape_inferrence_for_shape_tensor',
'test_static_analysis',
'test_squared_mat_sub_fuse_pass',
'test_spawn_and_init_parallel_env',