
Commit 1b50638

Merge branch 'cinn_2' of https://github.com/Luohongzhige/Paddle into cinn_2
2 parents c4dd4b1 + cc500fc commit 1b50638


191 files changed (+11748, -3491 lines)


CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -65,6 +65,7 @@ option(WITH_SHARED_PHI "Compile PaddlePaddle with SHARED LIB of PHI" ON)
 option(CINN_WITH_CUDNN "Compile CINN with CUDNN support" ON)
 option(WITH_PIP_CUDA_LIBRARIES
        "Paddle uses the CUDA library provided by NVIDIA" OFF)
+option(WITH_PIP_TENSORRT "Paddle uses the tensorrt provided by NVIDIA" OFF)
 option(WITH_NIGHTLY_BUILD
        "Compile nightly paddle whl package of the develop branch" OFF)
 option(WITH_CPP_TEST "Compile PaddlePaddle skip cpp test" ON)
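As a usage note, option() declares a cached BOOL, so later build scripts can branch on the new flag. The snippet below is a hypothetical sketch and is not taken from this commit; the message text and branch body are placeholders.

# Hypothetical consumer of the new flag (not part of this commit).
if(WITH_PIP_TENSORRT)
  message(STATUS "Using the TensorRT libraries provided via pip")
endif()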

cmake/external/xpu.cmake

Lines changed: 3 additions & 3 deletions
@@ -30,9 +30,9 @@ if(NOT DEFINED XPU_XRE_BASE_VERSION)
   set(XPU_XRE_BASE_VERSION "4.32.0.1")
 endif()
 if(NOT DEFINED XPU_XHPC_BASE_DATE)
-  set(XPU_XHPC_BASE_DATE "20240809")
+  set(XPU_XHPC_BASE_DATE "20240818")
 endif()
-set(XPU_XCCL_BASE_VERSION "1.2.5")
+set(XPU_XCCL_BASE_VERSION "1.2.9")
 if(NOT DEFINED XPU_XFT_BASE_VERSION)
   set(XPU_XFT_BASE_VERSION "20230602")
 endif()
@@ -60,7 +60,7 @@ if(WITH_XPTI)
 endif()
 
 if(WITH_XPU_XRE5)
-  set(XPU_XRE_BASE_VERSION "5.0.11.1")
+  set(XPU_XRE_BASE_VERSION "5.0.15.1")
   set(XPU_XRE_BASE_URL
       "https://klx-sdk-release-public.su.bcebos.com/xre/kl3-release/${XPU_XRE_BASE_VERSION}"
   )

paddle/cinn/adt/naive_op_equation_context.h

Lines changed: 31 additions & 6 deletions
@@ -27,6 +27,7 @@
 #include "paddle/cinn/adt/map_expr.h"
 #include "paddle/cinn/adt/op_arg_pos.h"
 #include "paddle/cinn/adt/op_equation_context.h"
+#include "paddle/common/enforce.h"
 
 namespace cinn::adt::config {
 
@@ -85,7 +86,13 @@ class NaiveOpEquationContext final : public OpEquationContext {
   }
 
   void Equal(const IteratorTuple& lhs, const IteratorTuple& rhs) override {
-    CHECK(lhs->size() == rhs->size());
+    PADDLE_ENFORCE_EQ(
+        lhs->size(),
+        rhs->size(),
+        phi::errors::InvalidArgument("The sizes of lhs and rhs must be equal. "
+                                     "lhs size: %d, rhs size: %d",
+                                     lhs->size(),
+                                     rhs->size()));
     for (std::size_t i = 0; i < lhs->size(); ++i) {
       this->Equal(lhs->at(i), rhs->at(i));
     }
@@ -250,7 +257,10 @@ class NaiveOpEquationContext final : public OpEquationContext {
       vec->push_back(DimTuple{});
       for (std::size_t j = 0; j < tensors_ranks.at(i); ++j) {
         const auto& opt_expr = GetSymbolicInDim_(i, j);
-        CHECK(opt_expr.has_value());
+        PADDLE_ENFORCE_EQ(opt_expr.has_value(),
+                          true,
+                          phi::errors::InvalidArgument(
+                              "The optional expression must have a value."));
         vec->at(i)->emplace_back(opt_expr.value());
       }
     }
@@ -262,15 +272,27 @@ class NaiveOpEquationContext final : public OpEquationContext {
       vec->push_back(DimTuple{});
       for (std::size_t j = 0; j < tensors_ranks.at(i); ++j) {
         const auto& opt_expr = GetSymbolicOutDim_(i, j);
-        CHECK(opt_expr.has_value());
+        PADDLE_ENFORCE_EQ(opt_expr.has_value(),
+                          true,
+                          phi::errors::InvalidArgument(
+                              "The optional expression must have a value at "
+                              "tensor index %d and dimension index %d.",
+                              i,
+                              j));
         vec->at(i)->emplace_back(opt_expr.value());
       }
     }
   }
 
   Index IndexDot(const IteratorTuple& iterator_tuple,
                  const DimTuple& dim_tuple) {
-    CHECK(iterator_tuple->size() == dim_tuple->size());
+    PADDLE_ENFORCE_EQ(iterator_tuple->size(),
+                      dim_tuple->size(),
+                      phi::errors::InvalidArgument(
+                          "The sizes of iterator_tuple and dim_tuple must be "
+                          "equal. iterator_tuple size: %d, dim_tuple size: %d",
+                          iterator_tuple->size(),
+                          dim_tuple->size()));
     Index index{UniqueId::New()};
     equations_->emplace_back(
         adt::IndexDot<List<DimExpr>, tOut<Index>, tIn<List<Iterator>>>{
@@ -318,8 +340,11 @@ class NaiveOpEquationContext final : public OpEquationContext {
 
   const utils::Attribute& GetAttribute(const std::string& name) const {
     const auto& iter = attr_map_type_.find(name);
-    CHECK(iter != attr_map_type_.end())
-        << "Can't find Attribute with this name";
+    PADDLE_ENFORCE_EQ(
+        iter != attr_map_type_.end(),
+        true,
+        phi::errors::InvalidArgument("Can't find Attribute with this name: %s",
+                                     name.c_str()));
     return iter->second;
   }
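The pattern behind these hunks is mechanical: a bare CHECK(condition) becomes PADDLE_ENFORCE_EQ(actual, expected, phi::errors::InvalidArgument(...)), which raises a typed error carrying a formatted message instead of aborting on an unformatted boolean check. Below is a minimal standalone sketch of the same migration; it assumes only the macro and header visible in this diff, and the helper function and variable names are illustrative rather than Paddle code.

#include <cstdint>
#include <vector>

#include "paddle/common/enforce.h"  // PADDLE_ENFORCE_EQ, phi::errors

// Illustrative helper (not part of this commit): require two shape vectors
// to describe the same rank before they are zipped together.
void CheckSameRank(const std::vector<int64_t>& lhs,
                   const std::vector<int64_t>& rhs) {
  // Before: CHECK(lhs.size() == rhs.size());
  // After: the enforce macro reports both sizes in the error message.
  PADDLE_ENFORCE_EQ(
      lhs.size(),
      rhs.size(),
      phi::errors::InvalidArgument("The sizes of lhs and rhs must be equal. "
                                   "lhs size: %d, rhs size: %d",
                                   lhs.size(),
                                   rhs.size()));
}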

paddle/cinn/auto_schedule/search_space/auto_gen_rule/auto_inline.cc

Lines changed: 29 additions & 11 deletions
@@ -31,6 +31,7 @@
 #include "paddle/cinn/ir/schedule/ir_schedule_util.h"
 #include "paddle/cinn/ir/utils/ir_copy.h"
 #include "paddle/cinn/ir/utils/ir_nodes_collector.h"
+#include "paddle/common/enforce.h"
 
 namespace cinn {
 namespace auto_schedule {
@@ -196,15 +197,26 @@ RuleApplyType AutoInline::Init(ir::IRSchedule* ir_schedule) {
 }
 
 void AutoInline::Apply(int index) {
-  CHECK(ir_schedule_ != nullptr) << "Run AutoInline::Apply without Init";
-  CHECK(num_applicable_ > 0 &&
-        apply_indices_and_type_.size() == num_applicable_)
-      << "AutoInline::Apply pre-condition doesn't meet";
-  CHECK(index >= 0 && num_applicable_ > index)
-      << "Invalid index for AutoInline::Apply, the index needs 0 <= index && "
-         "index < NumberApplicable(), "
-      << "Currently index = " << index
-      << ", NumberApplicable() = " << num_applicable_;
+  PADDLE_ENFORCE_EQ(
+      ir_schedule_ != nullptr,
+      true,
+      phi::errors::InvalidArgument("Run AutoInline::Apply without Init"));
+
+  PADDLE_ENFORCE_EQ(
+      num_applicable_ > 0 && apply_indices_and_type_.size() == num_applicable_,
+      true,
+      phi::errors::InvalidArgument(
+          "AutoInline::Apply pre-condition doesn't meet"));
+
+  PADDLE_ENFORCE_EQ(
+      index >= 0 && num_applicable_ > index,
+      true,
+      phi::errors::InvalidArgument(
+          "Invalid index for AutoInline::Apply, the index needs 0 <= index && "
+          "index < NumberApplicable(), "
+          "Currently index = %d, NumberApplicable() = %d",
+          index,
+          num_applicable_));
 
   int apply_index = apply_indices_and_type_[index].first;
   Apply(ir_schedule_, all_block_realizes_[apply_index]);
@@ -217,7 +229,10 @@ RuleApplyType AutoInline::AnalyseApplyType(
     SearchState state, const std::string& block_name) const {
   Expr block_expr = state->ir_schedule.GetBlock(block_name);
   auto* block_realize = block_expr.As<ir::ScheduleBlockRealize>();
-  CHECK(block_realize) << "stmt is not a ScheduleBlockRealize:" << block_expr;
+  PADDLE_ENFORCE_NOT_NULL(
+      block_realize,
+      phi::errors::InvalidArgument("stmt is not a ScheduleBlockRealize: %s",
+                                   block_expr));
 
   AnalyzeScheduleBlockReadWriteBuffer(
       block_realize->schedule_block.As<ir::ScheduleBlock>());
@@ -239,7 +254,10 @@ std::vector<SearchState> AutoInline::ApplyOnBlock(
 
 void AutoInline::Apply(ir::IRSchedule* ir_schedule, ir::Expr& block_expr) {
   auto* block_realize = block_expr.As<ir::ScheduleBlockRealize>();
-  CHECK(block_realize) << "stmt is not a ScheduleBlockRealize:" << block_expr;
+  PADDLE_ENFORCE_NOT_NULL(
+      block_realize,
+      phi::errors::InvalidArgument("stmt is not a ScheduleBlockRealize: %s",
+                                   block_expr));
 
   AnalyzeScheduleBlockReadWriteBuffer(
       block_realize->schedule_block.As<ir::ScheduleBlock>());
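
Pointer checks follow the same idea but use PADDLE_ENFORCE_NOT_NULL rather than PADDLE_ENFORCE_EQ. The sketch below makes the same assumptions as the previous one; the struct and function are illustrative placeholders, not CINN types.

#include "paddle/common/enforce.h"  // PADDLE_ENFORCE_NOT_NULL, phi::errors

// Illustrative placeholder (not a CINN IR node).
struct BlockLike {
  const char* name;
};

// Before: CHECK(block) << "stmt is not a ScheduleBlockRealize:" << ...;
// After: a null pointer raises a typed InvalidArgument error with a
// formatted message, mirroring the replacements in auto_inline.cc.
void UseBlock(const BlockLike* block, const char* stmt_text) {
  PADDLE_ENFORCE_NOT_NULL(
      block,
      phi::errors::InvalidArgument("stmt is not a ScheduleBlockRealize: %s",
                                   stmt_text));
  // Safe to dereference `block` from here on.
}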

paddle/cinn/hlir/framework/op_lowering_impl_base.h

Lines changed: 0 additions & 5 deletions
@@ -41,11 +41,6 @@ class OpLowererImplBase {
   OpLowererImplBase() = default;
   ~OpLowererImplBase() = default;
 
-  virtual std::vector<ir::LoweredFunc> Lower(const T& group,
-                                             bool apply_op_schedule = true,
-                                             bool apply_group_schedule = true,
-                                             bool apply_pass = true) = 0;
-
   virtual BucketLoweredFuncsWrapper BucketLower(
       const T& group,
       bool apply_op_schedule = false,
