
Commit 9756b09

Author: sandyhouse
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into develop
2 parents: 7d68080 + 9aa6bfc

1,101 files changed (+51,249 / -24,340 lines)


.gitignore

Lines changed: 7 additions & 0 deletions
@@ -6,10 +6,14 @@ paddle/fluid/eager/api/generated/*
 paddle/fluid/op_use_default_grad_maker_DEV.spec
 paddle/fluid/op_use_default_grad_maker_PR.spec
 paddle/phi/api/backward/backward_api.h
+paddle/phi/api/backward/sparse_bw_api.h
 paddle/phi/api/include/api.h
+paddle/phi/api/include/sparse_api.h
 paddle/phi/api/lib/api.cc
 paddle/phi/api/lib/dygraph_api.*
 paddle/phi/api/lib/backward_api.cc
+paddle/phi/api/lib/sparse_api.cc
+paddle/phi/api/lib/sparse_bw_api.cc
 paddle/phi/extension.h
 paddle/phi/include/*
 paddle/phi/infermeta/generated.*

@@ -49,6 +53,9 @@ tools/__pycache__
 # This file is automatically generated.
 # TODO(zhiqiang) Move this file to build directory.
 paddle/infrt/dialect/pd_ops.td
+paddle/infrt/dialect/phi/ir/phi_cpu_kernels.td
+paddle/infrt/dialect/phi/ir/phi_gpu_kernels.td
+tools/infrt/kernels.json
 paddle/infrt/dialect/pd_ops_info.h
 .lit_test_times.txt
 paddle/infrt/tests/dialect/Output

CMakeLists.txt

Lines changed: 2 additions & 1 deletion
@@ -238,7 +238,8 @@ option(WITH_MIPS "Compile PaddlePaddle with mips support" OFF)
 option(WITH_MUSL "Compile with musl libc instead of gblic" OFF)
 option(WITH_UNITY_BUILD "Compile with UnityBuild mode" OFF)
 option(WITH_STRIP "Strip so files of Whl packages" OFF)
-option(NEW_RELEASE_CUBIN "PaddlePaddle next-level release strategy for pypi cubin package" OFF)
+option(NEW_RELEASE_PYPI "PaddlePaddle next-level release strategy for pypi cubin package" OFF)
+option(NEW_RELEASE_ALL "PaddlePaddle next-level release strategy for all arches cubin package" OFF)
 option(NEW_RELEASE_JIT "PaddlePaddle next-level release strategy for backup jit package" OFF)
 option(WITH_ASCEND_INT64 "Compile with int64 kernel for ascend NPU" OFF)
 option(WITH_POCKETFFT "Compile with pocketfft support" ON)

README.md

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ English | [简体中文](./README_cn.md)
 Welcome to the PaddlePaddle GitHub.
 
 PaddlePaddle, as the only independent R&D deep learning platform in China, has been officially open-sourced to professional communities since 2016. It is an industrial platform with advanced technologies and rich features that cover core deep learning frameworks, basic model libraries, end-to-end development kits, tools & components as well as service platforms.
-PaddlePaddle is originated from industrial practices with dedication and commitments to industrialization. It has been widely adopted by a wide range of sectors including manufacturing, agriculture, enterprise service, and so on while serving more than 2.3 million developers. With such advantages, PaddlePaddle has helped an increasing number of partners commercialize AI.
+PaddlePaddle is originated from industrial practices with dedication and commitments to industrialization. It has been widely adopted by a wide range of sectors including manufacturing, agriculture, enterprise service, and so on while serving more than 4 million developers. With such advantages, PaddlePaddle has helped an increasing number of partners commercialize AI.

README_cn.md

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@
 
 欢迎来到 PaddlePaddle GitHub
 
-飞桨(PaddlePaddle)以百度多年的深度学习技术研究和业务应用为基础,是中国首个自主研发、功能完备、 开源开放的产业级深度学习平台,集深度学习核心训练和推理框架、基础模型库、端到端开发套件和丰富的工具组件于一体。目前,飞桨累计开发者265万,服务企业10万家,基于飞桨开源深度学习平台产生了34万个模型。飞桨助力开发者快速实现AI想法,快速上线AI业务。帮助越来越多的行业完成AI赋能,实现产业智能化升级。
+飞桨(PaddlePaddle)以百度多年的深度学习技术研究和业务应用为基础,是中国首个自主研发、功能完备、 开源开放的产业级深度学习平台,集深度学习核心训练和推理框架、基础模型库、端到端开发套件和丰富的工具组件于一体。目前,飞桨累计开发者406万,服务企业15.7万家,基于飞桨开源深度学习平台产生了47.6万个模型。飞桨助力开发者快速实现AI想法,快速上线AI业务。帮助越来越多的行业完成AI赋能,实现产业智能化升级。
 
 ## 安装

(The changed sentence in the Chinese README updates the cumulative figures to 4.06 million developers, 157,000 enterprises served, and 476,000 models built on the platform, mirroring the English README change above.)

cmake/cuda.cmake

Lines changed: 13 additions & 7 deletions
@@ -6,16 +6,22 @@ if(WITH_NV_JETSON)
   add_definitions(-DWITH_NV_JETSON)
   set(paddle_known_gpu_archs "53 62 72")
   set(paddle_known_gpu_archs10 "53 62 72")
-elseif(NEW_RELEASE_CUBIN)
+elseif(NEW_RELEASE_ALL)
+  message("Using New Release Strategy - All Arches Packge")
+  add_definitions(-DNEW_RELEASE_ALL)
+  set(paddle_known_gpu_archs "35 50 52 60 61 70 75 80 86")
+  set(paddle_known_gpu_archs10 "35 50 52 60 61 70 75")
+  set(paddle_known_gpu_archs11 "35 50 52 60 61 70 75 80")
+elseif(NEW_RELEASE_PYPI)
   message("Using New Release Strategy - Cubin Packge")
-  add_definitions(-DNEW_RELEASE_CUBIN)
-  set(paddle_known_gpu_archs "35 37 50 52 60 61 70 75 80 86")
-  set(paddle_known_gpu_archs10 "50 60 70 75")
-  set(paddle_known_gpu_archs11 "60 70 75 80")
+  add_definitions(-DNEW_RELEASE_PYPI)
+  set(paddle_known_gpu_archs "35 50 52 60 61 70 75 80 86")
+  set(paddle_known_gpu_archs10 "")
+  set(paddle_known_gpu_archs11 "60 61 70 75 80")
 elseif(NEW_RELEASE_JIT)
   message("Using New Release Strategy - JIT Packge")
   add_definitions(-DNEW_RELEASE_JIT)
-  set(paddle_known_gpu_archs "35 37 50 52 60 61 70 75 80 86")
+  set(paddle_known_gpu_archs "35 50 52 60 61 70 75 80 86")
   set(paddle_known_gpu_archs10 "35 50 60 70 75")
   set(paddle_known_gpu_archs11 "35 50 60 70 75 80")
 else()

@@ -148,7 +154,7 @@ function(select_nvcc_arch_flags out_variable)
 
   # remove dots and convert to lists
   string(REGEX REPLACE "\\." "" cuda_arch_bin "${cuda_arch_bin}")
-  string(REGEX REPLACE "\\." "" cuda_arch_ptx "${CUDA_ARCH_PTX}")
+  string(REGEX REPLACE "\\." "" cuda_arch_ptx "${cuda_arch_ptx}")
   string(REGEX MATCHALL "[0-9()]+" cuda_arch_bin "${cuda_arch_bin}")
   string(REGEX MATCHALL "[0-9]+" cuda_arch_ptx "${cuda_arch_ptx}")

cmake/external/llvm.cmake

Lines changed: 2 additions & 2 deletions
@@ -100,8 +100,8 @@ endfunction()
 function(mlir_add_rewriter td_base)
   set(LLVM_TARGET_DEFINITIONS ${td_base}.td)
   mlir_tablegen(${td_base}.cpp.inc -gen-rewriters "-I${CMAKE_SOURCE_DIR}/infrt/dialect/pass")
-  add_public_tablegen_target(${td_base}_IncGen)
-  add_custom_target(${td_base}_inc DEPENDS ${td_base}_IncGen)
+  add_public_tablegen_target(MLIR${td_base}IncGen)
+  add_dependencies(mlir-headers MLIR${td_base}IncGen)
 endfunction()
 
 # Execute the mlir script with infrt-exec program.

cmake/generic.cmake

Lines changed: 1 addition & 0 deletions
@@ -651,6 +651,7 @@ function(hip_test TARGET_NAME)
     set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_cpu_deterministic=true)
     set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_init_allocated_mem=true)
     set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_cudnn_deterministic=true)
+    set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT "LD_LIBRARY_PATH=${CMAKE_BINARY_DIR}/python/paddle/libs:$LD_LIBRARY_PATH")
   endif()
 endfunction(hip_test)

cmake/operators.cmake

Lines changed: 6 additions & 6 deletions
@@ -293,11 +293,11 @@ function(op_library TARGET)
(the removed and re-added lines below differ only in whitespace, which this view does not preserve)
   # Define operators that don't need pybind here.
   foreach(manual_pybind_op "compare_all_op" "compare_op" "logical_op" "bitwise_op" "nccl_op"
     "tensor_array_read_write_op" "tensorrt_engine_op" "conv_fusion_op")
-
-    if ("${TARGET}" STREQUAL "${manual_pybind_op}")
-      set(pybind_flag 1)
-    endif()
-  endforeach()
+
+    if ("${TARGET}" STREQUAL "${manual_pybind_op}")
+      set(pybind_flag 1)
+    endif()
+  endforeach()
 
   # The registration of USE_OP, please refer to paddle/fluid/framework/op_registry.h.
   # Note that it's enough to just adding one operator to pybind in a *_op.cc file.

@@ -478,7 +478,7 @@ function(op_library TARGET)
   if (${pybind_flag} EQUAL 0)
     # NOTE(*): activation use macro to regist the kernels, set use_op manually.
     if(${TARGET} STREQUAL "activation")
-      file(APPEND ${pybind_file} "USE_OP(relu);\n")
+      file(APPEND ${pybind_file} "USE_OP_ITSELF(relu);\n")
     elseif(${TARGET} STREQUAL "fake_dequantize")
       file(APPEND ${pybind_file} "USE_OP(fake_dequantize_max_abs);\n")
     elseif(${TARGET} STREQUAL "fake_quantize")
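For context, the pybind_file that op_library appends to ends up as a generated C++ stub of op-registration macros; as I understand the Paddle macros, USE_OP_ITSELF references only the operator definition, while USE_OP additionally references its CPU kernel, so this hunk weakens the relu registration. A minimal, hypothetical sketch of what the generated stub might contain after this change (the include and comments are illustrative, not taken from this commit):

// Hypothetical excerpt of the generated pybind stub.
#include "paddle/fluid/framework/op_registry.h"

USE_OP_ITSELF(relu);              // appended when TARGET is "activation"
USE_OP(fake_dequantize_max_abs);  // appended when TARGET is "fake_dequantize"
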
Lines changed: 6 additions & 0 deletions
@@ -1,6 +1,12 @@
 cc_library(processgroup SRCS ProcessGroup.cc DEPS phi phi_api eager_api)
+if (WITH_DISTRIBUTE)
+  cc_library(processgroup_gloo SRCS ProcessGroupGloo.cc DEPS phi phi_api eager_api gloo_wrapper)
+endif()
 cc_library(eager_reducer SRCS reducer.cc DEPS eager_api processgroup)
 
 if(WITH_NCCL)
   cc_library(processgroup_nccl SRCS ProcessGroupNCCL.cc DEPS place cuda_stream enforce collective_helper device_context phi phi_api eager_api)
 endif()
+if(WITH_ASCEND_CL)
+  cc_library(processgroup_hccl SRCS ProcessGroupHCCL.cc DEPS place npu_stream enforce collective_helper device_context phi phi_api eager_api)
+endif()
Lines changed: 174 additions & 0 deletions
@@ -0,0 +1,174 @@ (new file — every line below is an addition)

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <error.h>
#include <string>

#include "boost/variant.hpp"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/npu/enforce_npu.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace distributed {

class NPUEventManager {
 public:
  NPUEventManager() = default;

  ~NPUEventManager() {
    if (is_created_) {
      platform::NPUDeviceGuard guard(device_index_);
      platform::NPUEventDestroy(event_);
    }
  }

  NPUEventManager(const NPUEventManager&) = delete;
  NPUEventManager& operator=(const NPUEventManager&) = delete;

  NPUEventManager(NPUEventManager&& other) {
    std::swap(is_created_, other.is_created_);
    std::swap(device_index_, other.device_index_);
    std::swap(event_, other.event_);
  }

  NPUEventManager& operator=(NPUEventManager&& other) {
    std::swap(is_created_, other.is_created_);
    std::swap(device_index_, other.device_index_);
    std::swap(event_, other.event_);
    return *this;
  }

  bool IsCreated() const { return is_created_; }
  bool DeviceId() const { return device_index_; }
  aclrtEvent GetRawNPUEvent() const { return event_; }

  void Record(const paddle::platform::NPUDeviceContext& ctx) {
    auto device_index = ctx.GetPlace().device;
    if (!is_created_) {
      CreateEvent(device_index);
    }
    PADDLE_ENFORCE_EQ(device_index, device_index_,
                      platform::errors::PreconditionNotMet(
                          "NPUDeviceContext's device %d does not match"
                          "Event's device %d",
                          device_index, device_index_));

    platform::NPUDeviceGuard guard(device_index_);
    platform::NPUEventRecord(event_, ctx.stream());
  }

  bool Query() const {
    aclrtEventStatus status = ACL_EVENT_STATUS_COMPLETE;
    platform::NPUEventQuery(event_, &status);
    if (status == ACL_EVENT_STATUS_COMPLETE) {
      return true;
    }
    return false;
  }

  void Block(const paddle::platform::NPUDeviceContext& ctx) const {
    if (is_created_) {
      auto device_index = ctx.GetPlace().device;
      PADDLE_ENFORCE_EQ(device_index, device_index_,
                        platform::errors::PreconditionNotMet(
                            "CUDADeviceContext's device %d does not match"
                            "Event's device %d",
                            device_index, device_index_));
      platform::NPUDeviceGuard guard(device_index_);
      platform::NPUStreamWaitEvent(ctx.stream(), event_);
    }
  }

 private:
  bool is_created_{false};
  aclrtEvent event_{};
  int8_t device_index_{0};

 private:
  void CreateEvent(int device_index) {
    device_index_ = device_index;
    platform::NPUDeviceGuard guard(device_index);
    platform::NPUEventCreate(&event_);
    is_created_ = true;
  }
};

class HCCLCommManager {
 public:
  explicit HCCLCommManager(HcclComm hcclComm) : hccl_comm_(hcclComm) {}

  HCCLCommManager() : HCCLCommManager(nullptr) {}

  ~HCCLCommManager() noexcept {
    std::unique_lock<std::mutex> lock(mutex_);
    if (hccl_comm_) {
      platform::dynload::HcclCommDestroy(hccl_comm_);
    }
  }

  static std::shared_ptr<HCCLCommManager> Create(int num_ranks, int rank,
                                                 HcclRootInfo* comm_id,
                                                 HcclComm hccl_comm) {
    auto hccl_manager = std::make_shared<HCCLCommManager>();
    auto ret = platform::dynload::HcclCommInitRootInfo(num_ranks, comm_id, rank,
                                                       &hccl_comm);
    using __NPU_STATUS_TYPE__ = decltype(ret);
    constexpr auto __success_type__ =
        platform::details::NPUStatusType<__NPU_STATUS_TYPE__>::kSuccess;
    if (UNLIKELY(ret != __success_type__)) {
      VLOG(0) << "Error: create hccl_id error.";
      exit(-1);
    }

    hccl_manager->hccl_id_ = comm_id;
    hccl_manager->rank_ = rank;
    hccl_manager->hccl_comm_ = hccl_comm;
    return hccl_manager;
  }

  HcclRootInfo* GetHcclId() const {
    std::unique_lock<std::mutex> lock(mutex_);
    return hccl_id_;
  }

  HcclComm GetHcclComm() const {
    std::unique_lock<std::mutex> lock(mutex_);
    return hccl_comm_;
  }

  HCCLCommManager(const HCCLCommManager&) = delete;
  HCCLCommManager& operator=(const HCCLCommManager&) = delete;
  HCCLCommManager& operator=(HCCLCommManager&& other) = delete;

  HCCLCommManager(HCCLCommManager&& other) {
    std::unique_lock<std::mutex> lock(other.mutex_);
    std::swap(hccl_comm_, other.hccl_comm_);
  }

 protected:
  HcclComm hccl_comm_;
  HcclRootInfo* hccl_id_;
  int rank_;
  mutable std::mutex mutex_;
};

}  // namespace distributed
}  // namespace paddle
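
To illustrate how the two helpers defined above are intended to be used, here is a minimal, hypothetical sketch. The function names, their arguments, and the way the device contexts and root info are obtained are assumptions for illustration; only the NPUEventManager and HCCLCommManager calls come from the header added in this commit, which is assumed to be included.

#include <memory>

// Order work across two NPU streams on the same device: record an event on
// the compute stream, then make the communication stream wait for it.
void DemoEventOrdering(const paddle::platform::NPUDeviceContext& compute_ctx,
                       const paddle::platform::NPUDeviceContext& comm_ctx) {
  paddle::distributed::NPUEventManager event;
  event.Record(compute_ctx);  // capture the compute stream's current position
  event.Block(comm_ctx);      // comm stream waits until that point completes
}

// Wrap an HCCL communicator. root_info is assumed to have been generated on
// rank 0 and broadcast to every rank before this call.
std::shared_ptr<paddle::distributed::HCCLCommManager> DemoCreateComm(
    int world_size, int rank, HcclRootInfo* root_info) {
  HcclComm comm = nullptr;  // the manager's copy is initialized inside Create
  return paddle::distributed::HCCLCommManager::Create(world_size, rank,
                                                      root_info, comm);
}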
