
Commit 230ef01

ccsuzzh authored and BeingGod committed
[clang-tidy] NO.33,64 enable bugprone-signed-char-misuse,clang-analyzer-optin.portability.UnixAPI check (PaddlePaddle#56744)
* enable bugprone-signed-char-misuse&clang-analyzer-optin.portability.UnixAPI check
* fix bugprone-signed-char-misuse
* fix bug
1 parent 8baad29 commit 230ef01

19 files changed, +28 -28 lines changed

.clang-tidy

Lines changed: 2 additions & 2 deletions
@@ -24,7 +24,7 @@ bugprone-misplaced-widening-cast,
 -bugprone-not-null-terminated-result,
 -bugprone-parent-virtual-call,
 -bugprone-posix-return,
--bugprone-signed-char-misuse,
+bugprone-signed-char-misuse,
 -bugprone-sizeof-container,
 -bugprone-sizeof-expression,
 -bugprone-string-constructor,
@@ -91,7 +91,7 @@ clang-analyzer-optin.cplusplus.UninitializedObject,
 -clang-analyzer-optin.osx.cocoa.localizability.NonLocalizedStringChecker,
 -clang-analyzer-optin.performance.GCDAntipattern,
 -clang-analyzer-optin.performance.Padding,
--clang-analyzer-optin.portability.UnixAPI,
+clang-analyzer-optin.portability.UnixAPI,
 -clang-analyzer-osx.API,
 -clang-analyzer-osx.MIG,
 -clang-analyzer-osx.NSOrCFErrorDerefChecker,
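Context for the first enabled check: bugprone-signed-char-misuse reports implicit conversions from signed char (and, depending on the check's configuration, typedefs of it such as int8_t) to wider integer types, since sign extension of negative values is an easy source of bugs. The device-id assignments touched in the files below appear to trip the check because the place's device field is a narrow signed type, and the commit silences each reviewed site with a trailing NOLINT comment. The following is a minimal, hypothetical sketch, not Paddle code; the Place struct and its device field are stand-ins for illustration only.

// Hypothetical illustration only; Place and device are stand-ins, not Paddle's types.
#include <cstdint>
#include <iostream>

struct Place {
  int8_t device;  // narrow signed integer, as a device index might be stored
};

int main() {
  Place p{3};
  // bugprone-signed-char-misuse warns on implicit signed-char-to-int
  // conversions like this one; the trailing NOLINT comment suppresses
  // the warning on a line that has been reviewed as safe.
  int dev_id = p.device;  // NOLINT
  std::cout << "dev_id = " << dev_id << '\n';
  return 0;
}

Whether the check actually fires on a given typedef depends on its configuration; the per-line NOLINT approach used here keeps the existing code unchanged while documenting each suppression at the site where it applies.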

paddle/fluid/framework/details/all_reduce_op_handle.cc

Lines changed: 1 addition & 1 deletion
@@ -321,7 +321,7 @@ void AllReduceOpHandle::NCCLAllReduceFunc(
 void AllReduceOpHandle::SyncNCCLAllReduce() {
   if (FLAGS_sync_nccl_allreduce) {
     for (auto &p : places_) {
-      int dev_id = p.device;
+      int dev_id = p.device;  // NOLINT
       auto *nccl_ctxs =
           nccl_ctxs_->GetRunEnvNCCLCtx(run_order_, use_hierarchical_allreduce_);
       auto &nccl_ctx = nccl_ctxs->at(dev_id);

paddle/fluid/framework/details/broadcast_op_handle.cc

Lines changed: 2 additions & 2 deletions
@@ -90,7 +90,7 @@ void BroadcastOpHandle::BroadcastOneVar(
   } else if (platform::is_gpu_place(in_tensor.place())) {
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
     VarHandle *out_handle = nullptr;
-    int root_id = in_tensor.place().device;
+    int root_id = in_tensor.place().device;  // NOLINT
     std::vector<std::function<void()>> broadcast_calls;
 
     int type = platform::ToNCCLDataType(
@@ -101,7 +101,7 @@ void BroadcastOpHandle::BroadcastOneVar(
       Variable *out_var = var_scopes.at(out_var_handle->scope_idx())
                               ->FindVar(out_var_handle->name());
 
-      int dst_id = out_var_handle->place().device;
+      int dst_id = out_var_handle->place().device;  // NOLINT
 
       auto &nccl_ctx = nccl_ctxs_->at(dst_id);

paddle/fluid/framework/details/op_handle_base.cc

Lines changed: 3 additions & 3 deletions
@@ -47,7 +47,7 @@ OpHandleBase::~OpHandleBase() PADDLE_MAY_THROW {  // NOLINT
 void OpHandleBase::InitCUDA() {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   for (auto &p : dev_ctxes_) {
-    int dev_id = p.first.device;
+    int dev_id = p.first.device;  // NOLINT
     platform::SetDeviceId(dev_id);
 #ifdef PADDLE_WITH_HIP
     PADDLE_ENFORCE_GPU_SUCCESS(
@@ -61,7 +61,7 @@ void OpHandleBase::InitCUDA() {
     for (auto &out_var : outputs_) {
       auto *out_var_handle = dynamic_cast<VarHandle *>(out_var);
       if (out_var_handle) {
-        int dev_id = out_var_handle->place().device;
+        int dev_id = out_var_handle->place().device;  // NOLINT
         out_var_handle->SetGenerateEvent(events_.at(dev_id));
       }
     }
@@ -74,7 +74,7 @@ void OpHandleBase::InitCUDA() {
             Name(),
             dev_ctxes_.size()));
     auto &place = dev_ctxes_.begin()->first;
-    int dev_id = place.device;
+    int dev_id = place.device;  // NOLINT
     for (auto &out_var : outputs_) {
       auto *out_var_handle = dynamic_cast<VarHandle *>(out_var);
       if (out_var_handle) {

paddle/fluid/framework/details/parallel_ssa_graph_executor.cc

Lines changed: 1 addition & 1 deletion
@@ -45,7 +45,7 @@ static std::vector<std::unique_ptr<ir::Graph>> SeparateMultiDevicesGraph(
   for (auto &op : op_handles) {
     auto &dev_ctx = op->DeviceContext();
     auto &p = dev_ctx.begin()->first;
-    int dev_id = p.device;
+    int dev_id = p.device;  // NOLINT
     auto &dev_dummys = graphs[dev_id]->Get<GraphDepVars>(kGraphDepVars);
     graphs[dev_id]->AddNode(graph->RemoveNode(op->Node()).release());

paddle/fluid/framework/details/reduce_op_handle.cc

Lines changed: 2 additions & 2 deletions
@@ -189,13 +189,13 @@ void ReduceOpHandle::RunImpl() {
         out_var_handle->place(), pre_in.dtype());
 
     auto out_p = out_var_handle->place();
-    int root_id = out_p.device;
+    int root_id = out_p.device;  // NOLINT
     std::vector<std::function<void()>> all_reduce_calls;
     for (size_t i = 0; i < var_scopes.size(); ++i) {
       auto &p = in_places[i];
       auto &lod_tensor = *lod_tensors[i];
 
-      int dev_id = p.device;
+      int dev_id = p.device;  // NOLINT
       auto &nccl_ctx = nccl_ctxs_->at(dev_id);
 
       void *buffer = const_cast<void *>(lod_tensor.data());

paddle/fluid/framework/dlpack_tensor.cc

Lines changed: 1 addition & 1 deletion
@@ -99,7 +99,7 @@ struct DLDeviceVisitor {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
     ::DLDevice device;
     device.device_type = kDLGPU;
-    device.device_id = place.device;
+    device.device_id = place.device;  // NOLINT
     return device;
 #else
     PADDLE_THROW(platform::errors::Unavailable(

paddle/fluid/imperative/nccl_context.cc

Lines changed: 2 additions & 2 deletions
@@ -80,7 +80,7 @@ void NCCLParallelContext::Init() {
   }
   BcastNCCLId(nccl_ids, 0, server_fd);
 
-  int gpu_id = place_.device;
+  int gpu_id = place_.device;  // NOLINT
   for (int ring_id = 0; ring_id < strategy_.nrings_; ring_id++) {
     VLOG(0) << "init nccl context nranks: " << strategy_.nranks_
             << " local rank: " << strategy_.local_rank_ << " gpu id: " << gpu_id
@@ -115,7 +115,7 @@ void NCCLParallelContext::InitWithRingID(int ring_id) {
   }
   BcastNCCLId(nccl_ids, 0, server_fd);
 
-  int gpu_id = place_.device;
+  int gpu_id = place_.device;  // NOLINT
   VLOG(0) << "init nccl context nranks: " << strategy_.nranks_
           << " local rank: " << strategy_.local_rank_ << " gpu id: " << gpu_id
           << " ring id: " << ring_id;

paddle/fluid/imperative/xccl_context.cc

Lines changed: 2 additions & 2 deletions
@@ -99,7 +99,7 @@ void XCCLParallelContext::Init() {
   }
   BcastXCCLId(xccl_ids, 0, server_fd);
 
-  int dev_id = place_.device;
+  int dev_id = place_.device;  // NOLINT
   for (int ring_id = 0; ring_id < strategy_.nrings_; ring_id++) {
     VLOG(0) << "init nccl context nranks: " << strategy_.nranks_
             << " local rank: " << strategy_.local_rank_ << " dev id: " << dev_id
@@ -136,7 +136,7 @@ void XCCLParallelContext::InitWithRingID(int ring_id) {
   }
   BcastXCCLId(xccl_ids, 0, server_fd);
 
-  int dev_id = place_.device;
+  int dev_id = place_.device;  // NOLINT
   VLOG(0) << "init xccl context nranks: " << strategy_.nranks_
           << " local rank: " << strategy_.local_rank_ << " dev id: " << dev_id
           << " ring id: " << ring_id;

paddle/fluid/memory/allocation/cuda_managed_allocator.cc

Lines changed: 1 addition & 1 deletion
@@ -48,7 +48,7 @@ void CUDAManagedAllocator::FreeImpl(phi::Allocation* allocation) {
 phi::Allocation* CUDAManagedAllocator::AllocateImpl(size_t size) {
   std::call_once(once_flag_, [this] { platform::SetDeviceId(place_.device); });
 
-  int dev_id = place_.device;
+  int dev_id = place_.device;  // NOLINT
   void* ptr;
   auto result = platform::RecordedGpuMalloc(&ptr,
                                             size,
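The second check enabled in .clang-tidy, clang-analyzer-optin.portability.UnixAPI, is a static-analyzer checker that flags implementation-defined uses of common Unix allocation APIs, for example zero-size calls to malloc, calloc, realloc, alloca, or valloc. None of the diffs above needed a suppression for it; as a rough, hypothetical illustration (not taken from the Paddle sources) of the kind of call it reports and one way to avoid it:

// Hypothetical illustration only; not Paddle code.
#include <cstdlib>

int main() {
  std::size_t n = 0;  // imagine this value arrives from elsewhere and can be zero
  // The UnixAPI portability checker warns about allocations with size 0,
  // whose result is implementation-defined; guarding the call avoids the report.
  void *buf = (n > 0) ? std::malloc(n) : nullptr;
  std::free(buf);  // freeing a null pointer is well defined
  return 0;
}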
