
Commit b914a8c

enkilee and SigureMo authored
[CodeStyle][Typos][G-[6-8]] Fix typos(Greate,groupped,GARD,gard) (#70540)
--------- Co-authored-by: Nyakku Shigure <[email protected]>
1 parent 47c92e4 commit b914a8c

File tree

11 files changed: +26, -30 lines


_typos.toml

Lines changed: 0 additions & 4 deletions
@@ -124,10 +124,6 @@ funcion = 'funcion'
 Funcion = 'Funcion'
 futher = 'futher'
 furture = 'furture'
-Greate = 'Greate'
-groupped = 'groupped'
-gard = 'gard'
-GARD = 'GARD'
 identiy = 'identiy'
 indentify = 'indentify'
 implemention = 'implemention'

paddle/fluid/pir/dialect/op_generator/op_gen.py

Lines changed: 7 additions & 7 deletions
@@ -85,7 +85,7 @@
 # =====================================
 # String Template for h file code gen
 # =====================================
-NAMESPACE_GARD_TEMPLATE = """namespace {namespace} {{
+NAMESPACE_GUARD_TEMPLATE = """namespace {namespace} {{
 {input}
 }} // namespace {namespace}"""

@@ -2343,7 +2343,7 @@ def OpGenerator(
 if dialect_name == "pd_op":
 other_info = OP_TO_MULTI_KERNELS_MAP_H
 for name in reversed(namespaces):
-other_info = NAMESPACE_GARD_TEMPLATE.format(
+other_info = NAMESPACE_GUARD_TEMPLATE.format(
 namespace=name, input=other_info
 ) # Add namespaces
 only_pd_op_header_files_str = """

@@ -2353,7 +2353,7 @@ def OpGenerator(
 elif dialect_name == "onednn_op":
 other_info = ONEDNN_ONLY_OP_SET_H
 for name in reversed(namespaces):
-other_info = NAMESPACE_GARD_TEMPLATE.format(
+other_info = NAMESPACE_GUARD_TEMPLATE.format(
 namespace=name, input=other_info
 ) # Add namespaces
 else:

@@ -2362,7 +2362,7 @@ def OpGenerator(
 head_file_str = "\n".join(head_file_strs)
 declare_type_id_str = "\n".join(declare_type_id_strs)
 for name in reversed(namespaces):
-head_file_str = NAMESPACE_GARD_TEMPLATE.format(
+head_file_str = NAMESPACE_GUARD_TEMPLATE.format(
 namespace=name, input=head_file_str
 ) # Add namespaces
 head_file_str = H_FILE_TEMPLATE.format(

@@ -2384,15 +2384,15 @@ def OpGenerator(
 )
 other_info_str += sp_other_info_str
 for name in reversed(namespaces):
-other_info_str = NAMESPACE_GARD_TEMPLATE.format(
+other_info_str = NAMESPACE_GUARD_TEMPLATE.format(
 namespace=name, input=other_info_str
 ) # Add namespaces
 elif dialect_name == "onednn_op":
 other_info_str = ONEDNN_ONLY_OP_SET.format(
 maps=", \r".join(onednn_only_op_list)
 )
 for name in reversed(namespaces):
-other_info_str = NAMESPACE_GARD_TEMPLATE.format(
+other_info_str = NAMESPACE_GUARD_TEMPLATE.format(
 namespace=name, input=other_info_str
 ) # Add namespaces
 else:

@@ -2442,7 +2442,7 @@ def OpGenerator(
 for id in range(len(new_op_def_cc_file)):
 source_file_str = source_file_strs[id]
 for name in reversed(namespaces):
-source_file_str = NAMESPACE_GARD_TEMPLATE.format(
+source_file_str = NAMESPACE_GUARD_TEMPLATE.format(
 namespace=name, input=source_file_str
 ) # Add namespaces
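
For readers unfamiliar with the generator, the loops above wrap the generated declarations in nested C++ namespaces by repeatedly applying the renamed template. A minimal standalone sketch of that behavior (the namespaces list and the class FooOp; body are hypothetical inputs, not taken from the commit):

NAMESPACE_GUARD_TEMPLATE = """namespace {namespace} {{
{input}
}} // namespace {namespace}"""

namespaces = ["paddle", "dialect"]  # hypothetical example input
code = "class FooOp;"               # hypothetical generated declaration
for name in reversed(namespaces):
    code = NAMESPACE_GUARD_TEMPLATE.format(namespace=name, input=code)
print(code)
# namespace paddle {
# namespace dialect {
# class FooOp;
# } // namespace dialect
# } // namespace paddle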

paddle/fluid/pir/transforms/gpu/fused_linear_param_grad_add_pass.cc

Lines changed: 7 additions & 7 deletions
@@ -23,7 +23,7 @@

 namespace {

-// add_grad + matmul_grad + add_ -> matmul + fused_liner_param_gard_add
+// add_grad + matmul_grad + add_ -> matmul + fused_linear_param_grad_add
 class FusedMatmulAddGradAddPattern : public paddle::drr::DrrPatternBase {
  public:
  std::string name() const override { return "FusedMatmulAddGradAddPattern"; }

@@ -91,7 +91,7 @@ class FusedMatmulAddGradAddPattern : public paddle::drr::DrrPatternBase {
 }
 };

-// matmul_grad + add_ -> matmul + fused_liner_param_gard_add
+// matmul_grad + add_ -> matmul + fused_linear_param_grad_add
 class FusedMatmulGradAddPattern : public paddle::drr::DrrPatternBase {
  public:
  std::string name() const override { return "FusedMatmulGradAddPattern"; }

@@ -148,7 +148,7 @@ class FusedMatmulGradAddPattern : public paddle::drr::DrrPatternBase {
 };

 // matmul + reshape + reshape + matmul + reshape + add_ -> matmul +
-// fused_liner_param_gard_add
+// fused_linear_param_grad_add
 class FusedMatmulReshapeMatmulAddPattern : public paddle::drr::DrrPatternBase {
  public:
  std::string name() const override {

@@ -214,7 +214,7 @@ class FusedMatmulReshapeMatmulAddPattern : public paddle::drr::DrrPatternBase {
 }
 };

-// matmul + 0 = add_(0,1) -> fused_liner_param_gard_add
+// matmul + 0 = add_(0,1) -> fused_linear_param_grad_add
 class FusedMatmulAddaPattern : public paddle::drr::DrrPatternBase {
  public:
  std::string name() const override { return "FusedMatmulAddaPattern"; }

@@ -258,7 +258,7 @@ class FusedMatmulAddaPattern : public paddle::drr::DrrPatternBase {
 }
 };

-// matmul + 1 = add_(1,0) -> fused_liner_param_gard_add
+// matmul + 1 = add_(1,0) -> fused_linear_param_grad_add
 class FusedMatmulAddbPattern : public paddle::drr::DrrPatternBase {
  public:
  std::string name() const override { return "FusedMatmulAddbPattern"; }

@@ -302,7 +302,7 @@ class FusedMatmulAddbPattern : public paddle::drr::DrrPatternBase {
 }
 };

-// add_grad + matmul + 0 = add_(0,1) -> fused_liner_param_gard_add
+// add_grad + matmul + 0 = add_(0,1) -> fused_linear_param_grad_add
 class FusedMatmulAddGradAddaPattern : public paddle::drr::DrrPatternBase {
  public:
  std::string name() const override { return "FusedMatmulAddGradAddaPattern"; }

@@ -360,7 +360,7 @@ class FusedMatmulAddGradAddaPattern : public paddle::drr::DrrPatternBase {
 }
 };

-// add_grad + matmul + 1 = add_(1,0) -> fused_liner_param_gard_add
+// add_grad + matmul + 1 = add_(1,0) -> fused_linear_param_grad_add
 class FusedMatmulAddGradAddbPattern : public paddle::drr::DrrPatternBase {
  public:
  std::string name() const override { return "FusedMatmulAddGradAddbPattern"; }
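
The corrected comments describe DRR rewrites that collapse a weight-gradient matmul followed by an in-place add_ accumulation into a single fused_linear_param_grad_add op. A minimal sketch, using only public paddle tensor ops and illustrative shapes (not taken from the pass), of what the unfused subgraph computes:

import paddle

x = paddle.randn([4, 8])           # layer input
dy = paddle.randn([4, 3])          # gradient w.r.t. the layer output
main_grad = paddle.zeros([8, 3])   # accumulator that add_ writes into

dw = paddle.matmul(x, dy, transpose_x=True)  # matmul_grad's weight gradient: dW = x^T @ dy
main_grad = main_grad + dw                   # add_ accumulates dW into main_grad in place
print(main_grad.shape)                       # [8, 3]

The fused op produces the same accumulation in one kernel, which is why each pattern can replace the matmul_grad + add_ pair.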

paddle/fluid/pir/transforms/tensorrt/trt_op_marker_pass.cc

Lines changed: 1 addition & 1 deletion
@@ -1108,7 +1108,7 @@ class GreaterEqualOpPattern
 auto x_dtype = pir::GetDataTypeFromValue(x);
 auto y_dtype = pir::GetDataTypeFromValue(y);
 if (x_dtype.isa<pir::BoolType>() || y_dtype.isa<pir::BoolType>()) {
-VLOG(3) << "Greate_equal op do not support bool datatype";
+VLOG(3) << "Greater_equal op do not support bool datatype";
 return false;
 }
 #endif

paddle/phi/kernels/cpu/i0e_grad_kernel.cc

Lines changed: 2 additions & 2 deletions
@@ -31,10 +31,10 @@ void I0eGradKernel(const Context& ctx,
 auto* x_data = x.data<T>();
 auto* out_data = out.data<T>();
 auto* out_grad_data = out_grad.data<T>();
-auto* x_gard_data = ctx.template Alloc<T>(x_grad);
+auto* x_grad_data = ctx.template Alloc<T>(x_grad);

 phi::funcs::ForRange<Context> for_range(ctx, size);
-I0eGradFunctor<T> functor(x_data, out_data, out_grad_data, x_gard_data, size);
+I0eGradFunctor<T> functor(x_data, out_data, out_grad_data, x_grad_data, size);
 for_range(functor);
 }
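
The renamed x_grad_data buffer is where this CPU backward kernel writes the input gradient. A hedged usage sketch that exercises the path through autograd (assumes paddle.i0e is available, as in recent Paddle releases):

import paddle

x = paddle.to_tensor([0.5, 1.0, 2.0], dtype="float32", stop_gradient=False)
y = paddle.i0e(x)                  # exponentially scaled modified Bessel function of order 0
(dx,) = paddle.grad(y.sum(), [x])  # backward on CPU runs I0eGradKernel, filling x_grad_data
print(dx)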

paddle/phi/kernels/cpu/polygamma_grad_kernel.cc

Lines changed: 2 additions & 2 deletions
@@ -30,11 +30,11 @@ void PolygammaGradKernel(const Context& ctx,
 auto size = x.numel();
 auto* x_data = x.data<T>();
 auto* out_grad_data = out_grad.data<T>();
-auto* x_gard_data = ctx.template Alloc<T>(x_grad);
+auto* x_grad_data = ctx.template Alloc<T>(x_grad);

 phi::funcs::ForRange<Context> for_range(ctx, size);
 PolygammaGradFunctor<T> functor(
-    x_data, n + 1, out_grad_data, x_gard_data, size);
+    x_data, n + 1, out_grad_data, x_grad_data, size);
 for_range(functor);
 }
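
Note that the functor receives n + 1: the derivative of the n-th polygamma function is the (n + 1)-th. A hedged sketch that triggers this kernel through autograd (assumes paddle.polygamma is available):

import paddle

x = paddle.to_tensor([1.5, 2.0, 3.0], dtype="float32", stop_gradient=False)
y = paddle.polygamma(x, 1)         # trigamma
(dx,) = paddle.grad(y.sum(), [x])  # backward fills x_grad_data with polygamma(x, 2) values
print(dx)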

paddle/phi/kernels/funcs/sparse/softmax.cu.h

Lines changed: 1 addition & 1 deletion
@@ -111,7 +111,7 @@ std::tuple<DenseTensor, DenseTensor, DenseTensor, DenseTensor> ComputePoolMax(

 DenseTensor pool_sizes = phi::Empty<IntT>(dev_ctx, {nnz});

-/* reduce the elements which are groupped by pool index,
+/* reduce the elements which are grouped by pool index,
    returns all the pool indexes with unique offset value for each. */
 auto new_end =
     thrust::reduce_by_key(policy,
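
The corrected comment describes a reduce-by-key over sorted pool indices. A small Python analogue of that semantics (the pool_indices values are hypothetical; the real kernel uses thrust::reduce_by_key on the GPU):

from itertools import groupby

pool_indices = [0, 0, 1, 1, 1, 3]  # hypothetical, already sorted by pool index
pool_sizes = [(k, sum(1 for _ in grp)) for k, grp in groupby(pool_indices)]
print(pool_sizes)  # [(0, 2), (1, 3), (3, 1)] -- one entry per unique pool index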

python/paddle/distributed/ps/utils/public.py

Lines changed: 1 addition & 1 deletion
@@ -1303,7 +1303,7 @@ def block_append_op(program, origin_program, block, op):
 new_op_desc.copy_from(op_desc)
 new_op_desc._set_attr(RPC_OP_ROLE_ATTR_NAME, backward)

-# set device gard
+# set device grad
 if op.desc.has_attr(device_attr_name):
 op_device = op_desc.attr(device_attr_name)
 new_op_desc._set_attr(device_attr_name, op_device)

python/paddle/incubate/distributed/fleet/parameter_server/ir/trainer_pass.py

Lines changed: 1 addition & 1 deletion
@@ -2016,7 +2016,7 @@ def block_append_op(program, origin_program, block, op):
 new_op_desc.copy_from(op_desc)
 new_op_desc._set_attr(op_role_attr_name, backward)

-# set device gard
+# set device grad
 if op.desc.has_attr(device_attr_name):
 op_device = op_desc.attr(device_attr_name)
 new_op_desc._set_attr(device_attr_name, op_device)

test/legacy_test/test_adaptive_log_softmax_with_loss.py

Lines changed: 1 addition & 1 deletion
@@ -452,7 +452,7 @@ def test_dim_error(self):
 y = paddle.randint(low=0, high=20, shape=[128, 1])
 _ = model(x, y)

-def test_gard(self):
+def test_grad(self):
 n_classes = 4
 in_features = 8
 cutoffs = [2]
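
The renamed test_grad checks that gradients flow through the adaptive softmax layer. A heavily hedged sketch of that kind of check; the constructor argument order and the (output, loss) return value are assumptions about paddle.nn.AdaptiveLogSoftmaxWithLoss, not taken from the test itself:

import paddle

n_classes, in_features, cutoffs = 4, 8, [2]
model = paddle.nn.AdaptiveLogSoftmaxWithLoss(in_features, n_classes, cutoffs)  # assumed signature
x = paddle.randn([16, in_features])
x.stop_gradient = False
y = paddle.randint(low=0, high=n_classes, shape=[16])
_, loss = model(x, y)   # assumption: forward returns (output, loss)
loss.backward()
print(x.grad.shape)     # gradient reached the input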
