Skip to content

Commit bdeaa8b

Browse files
authored
[CodeStyle][Typos][L-[1-7]] Fix typos(lable,lamda,labmda,lastest,Lanuch,lanuch,leyer,learing,Leafs,leafs) (#70536)
1 parent fc4ff25 commit bdeaa8b

File tree

14 files changed

+21
-30
lines changed

14 files changed

+21
-30
lines changed

_typos.toml

Lines changed: 1 addition & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ extend-exclude = [
44
"third_party",
55
"patches",
66
"build",
7-
"test/dataset/imikolov_test.py"
7+
"test/dataset/imikolov_test.py",
88
]
99

1010
[default.extend-words]
@@ -210,16 +210,6 @@ interated = 'interated'
210210
Iteraion = 'Iteraion'
211211
IIT = 'IIT'
212212
iy = 'iy'
213-
lable = 'lable'
214-
lamda = 'lamda'
215-
labmda = 'labmda'
216-
lastest = 'lastest'
217-
Lanuch = 'Lanuch'
218-
lanuch = 'lanuch'
219-
leyer = 'leyer'
220-
learing = 'learing'
221-
Leafs = 'Leafs'
222-
leafs = 'leafs'
223213
occured = 'occured'
224214
Ocurred = 'Ocurred'
225215
occures = 'occures'

paddle/fluid/distributed/index_dataset/index_wrapper.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -187,7 +187,7 @@ std::vector<uint64_t> TreeIndex::GetTravelCodes(uint64_t id, int start_level) {
187187
return res;
188188
}
189189

190-
std::vector<IndexNode> TreeIndex::GetAllLeafs() {
190+
std::vector<IndexNode> TreeIndex::GetAllLeaves() {
191191
std::vector<IndexNode> res;
192192
res.reserve(id_codes_map_.size());
193193
for (auto& ite : id_codes_map_) {

paddle/fluid/distributed/index_dataset/index_wrapper.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ class TreeIndex : public Index {
5555
int level);
5656
std::vector<uint64_t> GetChildrenCodes(uint64_t ancestor, int level);
5757
std::vector<uint64_t> GetTravelCodes(uint64_t id, int start_level);
58-
std::vector<IndexNode> GetAllLeafs();
58+
std::vector<IndexNode> GetAllLeaves();
5959

6060
std::unordered_map<uint64_t, IndexNode> data_;
6161
std::unordered_map<uint64_t, uint64_t> id_codes_map_;

paddle/fluid/framework/ir/graph_pattern_detector.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -280,7 +280,7 @@ class PDPattern {
280280
* detector.mutable_pattern().AddEdge(node0, node1);
281281
* // Create an handler, to define the behavior of treating the filtered
282282
* // subgraphs that comply with the patterns.
283-
* GraphPatternDetector::handle_t handler = some labmda
283+
* GraphPatternDetector::handle_t handler = some lambda
284284
* // Execute the detector.
285285
* detector(&graph, handler);
286286
*/

paddle/fluid/pir/drr/README_cn.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ DRR PASS 包含以下三个部分:
9292
<tr>
9393
<td> <pre>void AddConstraint(
9494
const std::function&lt;bool(const MatchContext&)&gt;& constraint_fn)</pre></td>
95-
<td> 在 SourcePattern 中定义一个约束,可以利用此接口和 lamda 表达式实现对 SourcePattern 的自定义约束</td>
95+
<td> 在 SourcePattern 中定义一个约束,可以利用此接口和 lambda 表达式实现对 SourcePattern 的自定义约束</td>
9696
<td> constraint_fn: 自定义的约束函数</td>
9797
</tr>
9898
<tr>

paddle/fluid/pybind/fleet_py.cc

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -303,7 +303,8 @@ void BindTreeIndex(py::module* m) {
303303
.def("total_node_nums",
304304
[](TreeIndex& self) { return self.TotalNodeNums(); })
305305
.def("emb_size", [](TreeIndex& self) { return self.EmbSize(); })
306-
.def("get_all_leafs", [](TreeIndex& self) { return self.GetAllLeafs(); })
306+
.def("get_all_leaves",
307+
[](TreeIndex& self) { return self.GetAllLeaves(); })
307308
.def("get_nodes",
308309
[](TreeIndex& self, const std::vector<uint64_t>& codes) {
309310
return self.GetNodes(codes);

paddle/phi/kernels/fusion/gpu/fused_layernorm_residual_dropout_bias.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ using LayerNormScaleBiasT =
4040
typename std::conditional<ScaleBiasWithSameTypeX, T, U>::type;
4141

4242
/**
43-
* @brief fused add_bias, dropout, add residual and leyer_norm into one
43+
* @brief fused add_bias, dropout, add residual and layer_norm into one
4444
* operators. Currently only support forward
4545
*/
4646

paddle/phi/kernels/gpu/lars_momentum_kernel.cu

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -578,7 +578,7 @@ void LarsMomentumKernel(
578578
reinterpret_cast<void*>(&epsilon_),
579579
reinterpret_cast<void*>(&rescale_grad_),
580580
reinterpret_cast<void*>(&multi_precision)};
581-
// Lanuch all sm theads, and thead of each block synchronizedly cooperate.
581+
// Launch all sm threads, and threads of each block cooperate synchronously.
582582
cudaLaunchCooperativeKernel(
583583
reinterpret_cast<void*>(MergedMomentumLarsKernel<T, MT>),
584584
lars_thread_config.grid_for_lars,
@@ -630,7 +630,7 @@ void LarsMomentumKernel(
630630
reinterpret_cast<void*>(&thresh), // Just a placeholder
631631
reinterpret_cast<void*>(&numel),
632632
reinterpret_cast<void*>(&multi_precision)};
633-
// Lanuch all sm theads.
633+
// Launch all sm threads.
634634
cudaLaunchCooperativeKernel(
635635
reinterpret_cast<void*>(MomentumLarsKernel<T, MT>),
636636
lars_thread_config.grid_for_lars,

paddle/phi/kernels/gpu/top_k_grad_kernel.cu

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@ void TopkGradKernel(const Context& dev_ctx,
7373
const int max_blocks = std::max(((max_threads - 1) / block_size + 1), 1);
7474
int grid_size = std::min(max_blocks, pre);
7575

76-
// lanuch the cuda kernel to assign the grad
76+
// launch the cuda kernel to assign the grad
7777
phi::funcs::AssignGradWithAxis<T>
7878
<<<grid_size, block_size, 64 * 4, dev_ctx.stream()>>>(
7979
out_grad_data, indices_data, x_grad_data, pre, post, n, k);

python/paddle/distributed/fleet/dataset/index_dataset.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -50,8 +50,8 @@ def total_node_nums(self) -> int:
5050
def emb_size(self) -> int:
5151
return self._emb_size
5252

53-
def get_all_leafs(self) -> list[Any]:
54-
return self._tree.get_all_leafs()
53+
def get_all_leaves(self) -> list[Any]:
54+
return self._tree.get_all_leaves()
5555

5656
def get_nodes(self, codes: list[int]) -> list[Any]:
5757
return self._tree.get_nodes(codes)

0 commit comments

Comments
 (0)