
Commit 3d26e74

remove useless code
1 parent d3aaaf8 commit 3d26e74

17 files changed (+0, -701 lines)

paddle/fluid/framework/details/CMakeLists.txt

Lines changed: 0 additions & 2 deletions
@@ -22,7 +22,6 @@ set(op_handle_srcs
     share_tensor_buffer_functor.cc
     computation_op_handle.cc
     share_tensor_buffer_op_handle.cc
-    multi_devices_helper.cc
     variable_visitor.cc
     eager_deletion_op_handle.cc)

@@ -78,7 +77,6 @@ set(IR_PASS_DEPS
     multi_batch_merge_pass
     fuse_relu_depthwise_conv_pass
     lock_free_optimize_pass
-    sequential_execution_pass
     add_reader_dependency_pass
     modify_op_lock_and_record_event_pass
     coalesce_grad_tensor_pass

paddle/fluid/framework/details/build_strategy.cc

Lines changed: 0 additions & 20 deletions
@@ -30,16 +30,6 @@ PD_DECLARE_bool(use_cinn);
 
 namespace paddle::framework::details {
 
-static inline bool SeqOnlyAllReduceOps(const BuildStrategy &strategy) {
-  // Should fix the allreduce op order if scheduling
-  // them in multiple threads or processes to avoid hang.
-  // NOTE: ParallelGraph would execute this pass on each graph, so
-  // don't need to append it here.
-  return (!strategy.enable_sequential_execution_ &&
-          strategy.num_trainers_ > 1) &&
-         !strategy.enable_parallel_graph_;
-}
-
 static inline void ConvertDefaultValue(paddle::optional<bool> *default_value) {
   if (*default_value == paddle::none) {
     *default_value = true;

@@ -62,8 +52,6 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {
     }
 #endif
 
-    AppendPassWithCheck(strategy_.enable_sequential_execution_,
-                        "sequential_execution_pass");
     AppendPassWithCheck(strategy_.sync_batch_norm_, "sync_batch_norm_pass");
 
     AppendOpFusePasses();
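The AppendPassWithCheck call deleted in the hunk above follows a simple guard pattern: a pass joins the pipeline only when its controlling strategy flag is set. Below is a minimal self-contained sketch of that pattern; Pass and PassBuilder here are illustrative stand-ins, not Paddle's real ir::Pass machinery.

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    // Illustrative stand-in for a graph-transformation pass.
    struct Pass {
      explicit Pass(std::string type) : type_(std::move(type)) {}
      std::string type_;
    };

    class PassBuilder {
     public:
      // Unconditionally appends a pass and returns a handle to it.
      std::shared_ptr<Pass> AppendPass(const std::string &type) {
        passes_.push_back(std::make_shared<Pass>(type));
        return passes_.back();
      }
      // Appends the pass only when the controlling flag is enabled,
      // mirroring the AppendPassWithCheck usage in the diff above.
      void AppendPassWithCheck(bool append_pass, const std::string &type) {
        if (append_pass) AppendPass(type);
      }
      const std::vector<std::shared_ptr<Pass>> &AllPasses() const {
        return passes_;
      }

     private:
      std::vector<std::shared_ptr<Pass>> passes_;
    };

    int main() {
      PassBuilder builder;
      builder.AppendPassWithCheck(false, "sequential_execution_pass");  // skipped
      builder.AppendPassWithCheck(true, "sync_batch_norm_pass");        // appended
      for (const auto &pass : builder.AllPasses()) std::cout << pass->type_ << "\n";
      return 0;  // prints only sync_batch_norm_pass
    }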
@@ -225,9 +213,6 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {
       case BuildStrategy::ReduceStrategy::kReduce:
         multi_devices_pass = AppendPass("reduce_mode_multi_devices_pass").get();
         break;
-      case BuildStrategy::ReduceStrategy::kNoReduce:
-        multi_devices_pass = AppendPass("no_reduce_multi_devices_pass").get();
-        break;
       default:
         PADDLE_THROW(
             platform::errors::Unimplemented("Unknown reduce strategy."));
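With the kNoReduce case gone, any value outside the remaining enumerators falls through to the throwing default, so an unknown reduce strategy fails fast instead of silently selecting a pass. A small compilable sketch of that dispatch shape, with an illustrative enum and std::runtime_error standing in for PADDLE_THROW (kAllReduce is assumed from surrounding code; it is not shown in this hunk):

    #include <iostream>
    #include <stdexcept>
    #include <string>

    enum class ReduceStrategy { kAllReduce, kReduce };

    std::string SelectMultiDevicesPass(ReduceStrategy reduce) {
      switch (reduce) {
        case ReduceStrategy::kAllReduce:
          return "all_reduce_mode_multi_devices_pass";
        case ReduceStrategy::kReduce:
          return "reduce_mode_multi_devices_pass";
        default:
          // Mirrors the PADDLE_THROW default above: guards against
          // future enumerators or out-of-range casts.
          throw std::runtime_error("Unknown reduce strategy.");
      }
    }

    int main() {
      std::cout << SelectMultiDevicesPass(ReduceStrategy::kReduce) << "\n";
      return 0;  // prints reduce_mode_multi_devices_pass
    }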
@@ -350,9 +335,6 @@ ir::Graph *BuildStrategy::Apply(ir::Graph *graph,
     } else if (pass->Type() == "coalesce_grad_tensor_pass") {
       pass->Erase(kNRanks);
       pass->Set<size_t>(kNRanks, new size_t(nranks));
-    } else if (pass->Type() == "sequential_execution_pass") {
-      LOG(INFO) << "set enable_sequential_execution:"
-                << enable_sequential_execution_;
     } else if (pass->Type() == "fuse_relu_depthwise_conv_pass") {
       if (use_device != p::kCUDA) {
         VLOG(1) << "fuse_relu_depthwise_conv_pass is only supported on "

@@ -405,10 +387,8 @@ USE_PASS(fuse_bn_act_pass);
 USE_PASS(fuse_bn_add_act_pass);
 USE_PASS(graph_viz_pass);
 USE_PASS(multi_batch_merge_pass);
-USE_PASS(no_reduce_multi_devices_pass);
 USE_PASS(reduce_mode_multi_devices_pass);
 USE_PASS(all_reduce_mode_multi_devices_pass);
-USE_PASS(sequential_execution_pass);
 USE_PASS(modify_op_lock_and_record_event_pass);
 USE_PASS(lock_free_optimize_pass);
 USE_PASS(coalesce_grad_tensor_pass);
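The deleted USE_PASS lines are touch macros: each one references a symbol defined alongside the pass's registration, so the linker cannot discard the translation unit holding the static registrar. A rough self-contained sketch of that idiom follows; it is deliberately simplified and is not Paddle's actual REGISTER_PASS/USE_PASS implementation.

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    // A process-wide registry mapping pass names to factories.
    struct PassRegistry {
      static std::map<std::string, std::function<void()>> &Map() {
        static std::map<std::string, std::function<void()>> m;
        return m;
      }
    };

    // REGISTER_PASS installs a static registrar and defines a "touch"
    // function whose only job is to anchor that registrar.
    #define REGISTER_PASS(name, fn)                                   \
      static int pass_registrar_##name = [] {                         \
        PassRegistry::Map()[#name] = (fn);                            \
        return 0;                                                     \
      }();                                                            \
      int TouchPassRegistrar_##name() { return pass_registrar_##name; }

    // USE_PASS references the touch symbol from another translation
    // unit, forcing the linker to keep that unit's registrar alive.
    #define USE_PASS(name)                                            \
      extern int TouchPassRegistrar_##name();                         \
      static int use_pass_##name = TouchPassRegistrar_##name();

    REGISTER_PASS(demo_pass, [] { std::cout << "demo_pass ran\n"; })
    USE_PASS(demo_pass)

    int main() {
      PassRegistry::Map()["demo_pass"]();  // looks up and runs the pass
      return 0;
    }

Removing a pass from the build therefore requires removing its USE_PASS touch point too, which is exactly what this hunk does.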

paddle/fluid/framework/details/build_strategy.h

Lines changed: 0 additions & 10 deletions
@@ -91,18 +91,11 @@ struct BuildStrategy {
   // all the backward ops are finished before running the optimization
   // ops. It might make the training speed of data parallelism faster.
   bool enable_backward_optimizer_op_deps_{true};
-  // TODO(dev-paddle): enable_sequential_execution depends on
-  // kStaleProgramOpDescs, it is not appropriate, because kStaleProgramOpDescs
-  // will be removed in the near future.
-  bool enable_sequential_execution_{false};
   bool remove_unnecessary_lock_{true};
   // TODO(dev-paddle): cache_runtime_context may cause some models to hang up
   // while running.
   bool cache_runtime_context_{false};
 
-  // Fix the op run order.
-  bool fix_op_run_order_{false};
-
   // Lowering sub-graph into cinn ops.
   bool build_cinn_pass_{false};
 

@@ -251,13 +244,10 @@ inline std::ostream &operator<<(std::ostream &os,
   os << "debug_graphviz_path_: " << strategy.debug_graphviz_path_ << std::endl;
   os << "enable_backward_optimizer_op_deps_: "
      << strategy.enable_backward_optimizer_op_deps_ << std::endl;
-  os << "enable_sequential_execution_: "
-     << strategy.enable_sequential_execution_ << std::endl;
   os << "remove_unnecessary_lock_: " << strategy.remove_unnecessary_lock_
      << std::endl;
   os << "cache_runtime_context_: " << strategy.cache_runtime_context_
      << std::endl;
-  os << "fix_op_run_order_: " << strategy.fix_op_run_order_ << std::endl;
   os << "fuse_bn_act_ops_: " << strategy.fuse_bn_act_ops_ << std::endl;
   os << "fuse_bn_add_act_ops_: " << strategy.fuse_bn_add_act_ops_ << std::endl;
   os << "fuse_elewise_add_act_ops_: " << strategy.fuse_elewise_add_act_ops_
