
Commit c535160

Fix opitimize optimize (#69145)
1 parent 2ad0057 commit c535160

7 files changed, 20 additions and 20 deletions


paddle/fluid/pybind/compiled_program.cc

Lines changed: 1 addition & 1 deletion
@@ -770,7 +770,7 @@ void BindCompiledProgram(pybind11::module &m) { // NOLINT
                 "or True"));
           }
         },
-        R"DOC((bool, optional): memory opitimize aims to save total memory
+        R"DOC((bool, optional): memory optimize aims to save total memory
 consumption, set to True to enable it.
 
 Default None. None means framework would choose to use or not use
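For context, this docstring backs the memory-optimize switch users reach from the static-graph API. A minimal sketch of how the option is set (assuming the usual BuildStrategy/CompiledProgram entry points; not part of this diff):

import paddle

paddle.enable_static()

# memory_optimize defaults to None, which lets the framework decide
# whether to enable the pass, per the docstring fixed above.
build_strategy = paddle.static.BuildStrategy()
build_strategy.memory_optimize = True

compiled = paddle.static.CompiledProgram(
    paddle.static.default_main_program(), build_strategy=build_strategy
)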

paddle/fluid/pybind/eager.cc

Lines changed: 1 addition & 1 deletion
@@ -764,7 +764,7 @@ PyDoc_STRVAR( // NOLINT
 
 Tensor is the basic data structure in PaddlePaddle. There are some ways to create a Tensor:
 
-- Use the exsiting ``data`` to create a Tensor, please refer to :ref:`api_paddle_to_tensor`.
+- Use the existing ``data`` to create a Tensor, please refer to :ref:`api_paddle_to_tensor`.
 - Create a Tensor with a specified ``shape``, please refer to :ref:`api_paddle_ones`,
   :ref:`api_paddle_zeros`, :ref:`api_paddle_full`.
 - Create a Tensor with the same ``shape`` and ``dtype`` as other Tensor, please refer to
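The three creation routes the docstring lists, as a quick sketch:

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])  # from existing data
y = paddle.ones([2, 3])                # from a specified shape
z = paddle.zeros_like(x)               # same shape and dtype as another Tensor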

paddle/fluid/pybind/eager_generator.cc

Lines changed: 2 additions & 2 deletions
@@ -671,7 +671,7 @@ static void PurifyForwardOpProto(const proto::OpProto& op_proto,
     }
   }
 
-  /* ------ Maping forward slot name to fwd position ------ */
+  /* ------ Mapping forward slot name to fwd position ------ */
   size_t in_pos = 0;
   for (const auto& var : *in_vars) {
     VLOG(6) << "Mapping input tensor: " << var.name()
@@ -1877,7 +1877,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
     trace_op_body_str += trace_op_str;
     trace_op_body_str += "\n";
 
-    // [Generation] Log memory infomation
+    // [Generation] Log memory information
     const char* LOG_MEMORY_INFO_TEMPLATE =
         "  // Log memory information\n"
         "  "

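The comment fixed in the first hunk describes the generator assigning each forward slot name an ordinal position. A rough Python sketch of the idea (names here are illustrative, not Paddle's):

# Illustrative only: mirror the in_pos counter the generator increments
# while walking the op proto's input slots.
def map_forward_slots(in_vars):
    return {name: pos for pos, name in enumerate(in_vars)}

assert map_forward_slots(["X", "Y"]) == {"X": 0, "Y": 1}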
paddle/fluid/pybind/eager_method.cc

Lines changed: 2 additions & 2 deletions
@@ -719,7 +719,7 @@ PyDoc_STRVAR(tensor_method_clone__doc__, // NOLINT
 
 Returns a new Tensor, which is clone of origin Tensor, and it remains in the current graph.
 It will always have a Tensor copy.
-Tn addition, the cloned Tensor provides gradient propagation.
+In addition, the cloned Tensor provides gradient propagation.
 
 Returns:
     Tensor, The cloned Tensor.
@@ -841,7 +841,7 @@ PyDoc_STRVAR(tensor_clear_gradient__doc__, // NOLINT
 --
 
 Only for Tensor that has gradient, normally we use this for Parameters since
-other temporary Tensor doesen't has gradient.
+other temporary Tensor doesn't has gradient.
 
 The Gradient of current Tensor will be set to ``0`` elementwise or ``None``.
 
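Both docstrings touched here are user-visible; a short sketch of the behavior they describe:

import paddle

x = paddle.to_tensor([1.0, 2.0], stop_gradient=False)
y = x.clone()              # a copy that stays in the autograd graph
(y * 2).sum().backward()   # gradients propagate through the clone
print(x.grad)              # Tensor([2., 2.])

x.clear_gradient()         # reset the accumulated gradient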

paddle/fluid/pybind/pybind.cc

Lines changed: 6 additions & 6 deletions
@@ -936,7 +936,7 @@ void BindVjp(pybind11::module *m) {
         PADDLE_ENFORCE_EQ(inputs[idx].size(),
                           vjp_res[grad_index].size(),
                           common::errors::InvalidArgument(
-                              "The size of inouts[%d] should be the "
+                              "The size of inputs[%d] should be the "
                               "same as vjp_res[%d] size.",
                               idx,
                               grad_index));
@@ -1385,7 +1385,7 @@ PYBIND11_MODULE(libpaddle, m) {
        Return the registered kernels in paddle.
 
        Args:
-           lib[string]: the libarary, could be 'phi', 'fluid' and 'all'.
+           lib[string]: the library, could be 'phi', 'fluid' and 'all'.
        )DOC");
 
   m.def(
@@ -1437,7 +1437,7 @@ PYBIND11_MODULE(libpaddle, m) {
        Return the registered kernels in phi.
 
        Args:
-           kernel_registered_type[string]: the libarary, could be 'function', 'structure', and 'all'.
+           kernel_registered_type[string]: the library, could be 'function', 'structure', and 'all'.
        )DOC");
 
   // NOTE(Aganlengzi): KernelFactory static instance is initialized BEFORE
@@ -1825,7 +1825,7 @@ All parameter, weight, gradient are variables in Paddle.
           VLOG(3) << "need skip: " << need_skip << std::endl;
           if (paddle::prim::PrimCommonUtils::IsBwdPrimEnabled()) {
             if ((grad_comp_op_maker != nullptr) && (!need_skip)) {
-              VLOG(3) << "Prim Flag Open: Runing composite grad fun for "
+              VLOG(3) << "Prim Flag Open: Running composite grad fun for "
                       << op_desc.Type();
               grad_op_descs = grad_comp_op_maker(op_desc,
                                                  no_grad_set,
@@ -1838,12 +1838,12 @@ All parameter, weight, gradient are variables in Paddle.
             }
           } else {
             if (grad_op_maker != nullptr) {
-              VLOG(6) << "Prim Flag Close: Runing origin grad fun for "
+              VLOG(6) << "Prim Flag Close: Running origin grad fun for "
                       << op_desc.Type();
               grad_op_descs = grad_op_maker(
                   op_desc, no_grad_set, &grad_to_var, grad_sub_block);
             } else {
-              VLOG(6) << "Prim Flag Close: Runing composite grad fun for "
+              VLOG(6) << "Prim Flag Close: Running composite grad fun for "
                       << op_desc.Type();
               grad_op_descs = grad_comp_op_maker(op_desc,
                                                  no_grad_set,
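For reference, the two docstrings above belong to kernel-introspection bindings, and the VLOG lines fire when the composite (prim) backward path is toggled. A sketch of how these surface in Python; the binding names are not visible in these hunks, so treat them as assumptions:

import paddle

core = paddle.base.core

# Assumed binding names for the docstrings patched above.
op_kernels = core._get_all_register_op_kernels('all')    # 'phi', 'fluid', or 'all'
phi_kernels = core._get_registered_phi_kernels('all')    # 'function', 'structure', or 'all'

# Toggling this flag selects the "Prim Flag Open/Close" branches logged above.
core._set_prim_backward_enabled(True)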

paddle/fluid/pybind/tensor.cc

Lines changed: 5 additions & 5 deletions
@@ -700,7 +700,7 @@ void BindTensor(pybind11::module &m) { // NOLINT
         return dst;
       })
       .def("_copy", [](const phi::DenseTensor &self, const phi::Place &place) {
-        // follow fetch_op's inplementation
+        // follow fetch_op's implementation
         phi::DenseTensor dst;
         if (self.IsInitialized() && self.numel() > 0) {
           TensorCopySync(self, place, &dst);
@@ -759,7 +759,7 @@ void BindTensor(pybind11::module &m) { // NOLINT
 
            Params:
                tensor: Shared Cuda IPC tensor.
-               tuple: contrains data size, data type,
+               tuple: contains data size, data type,
                       tensor dims, lod information, device index.
 
        )DOC")
@@ -812,7 +812,7 @@ void BindTensor(pybind11::module &m) { // NOLINT
            Serialize GPU Tensor by cudaIpcMemHandle.
 
            Returns:
-               tuple: contrains handle, data size, data type,
+               tuple: contains handle, data size, data type,
                       tensor dims, lod information, device index.
 
            Examples:
@@ -858,7 +858,7 @@ void BindTensor(pybind11::module &m) { // NOLINT
            Deserialize GPU lod tensor from cudaIpcMemHandle.
 
            Params:
-               tuple: contrains handle, data size, data type,
+               tuple: contains handle, data size, data type,
                       tensor dims, lod information, device index.
 
            Examples:
@@ -942,7 +942,7 @@ void BindTensor(pybind11::module &m) { // NOLINT
            If the tensor is not in shared memory, we will copy it first.
 
            Returns:
-               tuple: contrains ipc name, data size, data type,
+               tuple: contains ipc name, data size, data type,
                       tensor dims and lod imformation.
 
            Examples:
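A minimal sketch exercising the _copy binding whose comment is fixed above. Treating get_tensor() as the accessor for the underlying DenseTensor is an assumption, and the IPC helpers in the other hunks require a CUDA build:

import paddle

t = paddle.ones([2, 3])
dense = t.get_tensor()                     # underlying phi::DenseTensor (assumed accessor)
cpu_copy = dense._copy(paddle.CPUPlace())  # "follow fetch_op's implementation"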

python/paddle/tensor/linalg.py

Lines changed: 3 additions & 3 deletions
@@ -374,7 +374,7 @@ def __check_input(x, y):
                 dtype='bfloat16'
             )
         else:
-            raise ValueError("The output_dtype must be float16 or bfloa16")
+            raise ValueError("The output_dtype must be float16 or bfloat16")
 
     helper.append_op(
         type='fp8_fp8_half_gemm_fused',
@@ -408,7 +408,7 @@ def __check_input(x, y):
             bias, 'bias', ['bfloat16'], 'fp8_fp8_half_gemm_fused'
         )
     else:
-        raise ValueError("The output_dtype must be float16 or bfloa16")
+        raise ValueError("The output_dtype must be float16 or bfloat16")
 
     helper = LayerHelper('fp8_fp8_half_gemm_fused', **locals())
 
@@ -419,7 +419,7 @@ def __check_input(x, y):
                 dtype='bfloat16'
             )
         else:
-            raise ValueError("The output_dtype must be float16 or bfloa16")
+            raise ValueError("The output_dtype must be float16 or bfloat16")
 
     helper.append_op(
         type='fp8_fp8_half_gemm_fused',
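All three hunks correct the same error message inside fp8_fp8_half_gemm_fused's input checks; the guard reduces to this shape (a simplified sketch, not the full LayerHelper plumbing):

def _check_output_dtype(output_dtype):
    # Only the two half-precision output types are supported.
    if output_dtype not in ('float16', 'bfloat16'):
        raise ValueError("The output_dtype must be float16 or bfloat16")

_check_output_dtype('bfloat16')   # passes
# _check_output_dtype('float32')  # would raise ValueError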
