11 changes: 6 additions & 5 deletions paddle/fluid/distributed/ps/table/common_graph_table.h
@@ -511,7 +511,7 @@ class GraphTable : public Table {
}
virtual ~GraphTable();

- virtual void *GetShard(size_t shard_idx) { return 0; }
+ virtual void *GetShard(size_t shard_idx UNUSED) { return 0; }

static int32_t sparse_local_shard_num(uint32_t shard_num,
uint32_t server_num) {
@@ -624,15 +624,16 @@ class GraphTable : public Table {
Node *find_node(GraphTableType table_type, int idx, uint64_t id);
Node *find_node(GraphTableType table_type, uint64_t id);

- virtual int32_t Pull(TableContext &context) { return 0; } // NOLINT
- virtual int32_t Push(TableContext &context) { return 0; } // NOLINT
+ virtual int32_t Pull(TableContext &context UNUSED) { return 0; } // NOLINT
+ virtual int32_t Push(TableContext &context UNUSED) { return 0; } // NOLINT

virtual int32_t clear_nodes(GraphTableType table_type, int idx);
virtual void Clear() {}
virtual int32_t Flush() { return 0; }
- virtual int32_t Shrink(const std::string &param) { return 0; }
+ virtual int32_t Shrink(const std::string &param UNUSED) { return 0; }
// Specify the save path
- virtual int32_t Save(const std::string &path, const std::string &converter) {
+ virtual int32_t Save(const std::string &path UNUSED,
+ const std::string &converter UNUSED) {
return 0;
}
virtual int32_t InitializeShard() { return 0; }
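Note on the pattern used throughout this PR: each change marks a deliberately ignored parameter with Paddle's `UNUSED` macro so `-Wunused-parameter` stays quiet on default or no-op implementations while the signature remains self-documenting. A minimal sketch of how such a macro is commonly defined and applied, assuming a GCC/Clang-style attribute; this is an illustration, not Paddle's actual header, where the real definition lives:

```cpp
#include <cstddef>

// Hypothetical stand-in for Paddle's UNUSED macro. On GCC/Clang the
// usual form is the `unused` attribute; elsewhere it expands to nothing.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

class Table {
 public:
  virtual ~Table() = default;
  // The base class returns a null shard and ignores shard_idx on purpose;
  // the annotation silences the warning without deleting the parameter
  // name, so overrides still see a readable signature.
  virtual void *GetShard(std::size_t shard_idx UNUSED) { return nullptr; }
};
```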
4 changes: 1 addition & 3 deletions paddle/fluid/framework/CMakeLists.txt
@@ -928,9 +928,7 @@ if(WITH_DISTRIBUTE)
fleet_executor)
endif()
elseif(WITH_PSLIB)
- set(DISTRIBUTE_COMPILE_FLAGS
-     "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor"
- )
+ set(DISTRIBUTE_COMPILE_FLAGS "")
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
set(DISTRIBUTE_COMPILE_FLAGS "${DISTRIBUTE_COMPILE_FLAGS} -faligned-new")
endif()
2 changes: 1 addition & 1 deletion paddle/phi/backends/onednn/onednn_reuse.h
@@ -1178,7 +1178,7 @@ class ReductionOneDNNHandler
const dnnl::engine engine,
Place cpu_place,
const DenseTensor* x,
- const DenseTensor* out,
+ const DenseTensor* out UNUSED,
std::vector<int64_t> out_tz,
const dnnl::primitive_attr& attrs = NULL)
: OneDNNHandlerNoCachingT<T, dnnl::reduction>(engine, cpu_place) {
5 changes: 3 additions & 2 deletions paddle/phi/core/utils/unroll_array_ops.h
@@ -87,7 +87,8 @@ struct UnrollCompare {
template <size_t kStart, size_t kEnd>
struct UnrollCompare<kStart, kEnd, true> {
template <typename T>
- HOSTDEVICE inline constexpr static bool Run(const T *d1, const T *d2) {
+ HOSTDEVICE inline constexpr static bool Run(const T *d1 UNUSED,
+ const T *d2 UNUSED) {
return true;
}
};
@@ -104,7 +105,7 @@ struct UnrollProduct {
template <size_t kStart, size_t kEnd>
struct UnrollProduct<kStart, kEnd, true> {
template <typename T>
- HOSTDEVICE inline constexpr static T Run(const T *d) {
+ HOSTDEVICE inline constexpr static T Run(const T *d UNUSED) {
return 1;
}
};
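The `UnrollCompare`/`UnrollProduct` specializations touched above are the terminal cases of compile-time loop unrolling: once `kStart == kEnd` the range is empty, so the pointer arguments are never read and would otherwise trigger `-Wunused-parameter`. A self-contained sketch of the same pattern, simplified (no `HOSTDEVICE`, and the unused parameters are shown commented out rather than annotated with the macro):

```cpp
#include <cstddef>

// Recursive case: compare element kStart, then recurse on [kStart+1, kEnd).
template <std::size_t kStart, std::size_t kEnd,
          bool kStop = (kStart >= kEnd)>
struct UnrollCompare {
  template <typename T>
  static constexpr bool Run(const T *d1, const T *d2) {
    return d1[kStart] == d2[kStart] &&
           UnrollCompare<kStart + 1, kEnd>::Run(d1, d2);
  }
};

// Terminal case (kStart == kEnd): the range is empty, so the pointers
// are never read -- exactly the situation UNUSED covers in the real header.
template <std::size_t kStart, std::size_t kEnd>
struct UnrollCompare<kStart, kEnd, true> {
  template <typename T>
  static constexpr bool Run(const T * /*d1*/, const T * /*d2*/) {
    return true;
  }
};

// Usage: compile-time equality check over elements 0..2.
constexpr int a[] = {1, 2, 3};
constexpr int b[] = {1, 2, 3};
static_assert(UnrollCompare<0, 3>::Run(a, b), "a and b compare equal");
```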
2 changes: 1 addition & 1 deletion paddle/phi/kernels/complex_kernel.h
@@ -58,7 +58,7 @@ template <
std::enable_if_t<!std::is_same<T, phi::dtype::complex<float>>::value &&
!std::is_same<T, phi::dtype::complex<double>>::value,
bool> = true>
- DenseTensor Conj(const Context& dev_ctx, const DenseTensor& x) {
+ DenseTensor Conj(const Context& dev_ctx UNUSED, const DenseTensor& x) {
return x;
}

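Here `Conj` is the pass-through overload selected by `enable_if` for non-complex element types: conjugation is the identity on real values, so `dev_ctx` is never touched. A minimal sketch of the same dispatch with standard-library stand-ins (`std::complex` replaces `phi::dtype::complex`, and `Context` is a placeholder type, not phi's device context):

```cpp
#include <complex>
#include <type_traits>

struct Context {};  // placeholder for the real device context

// Pass-through overload: for real-valued T, conj(x) == x, so the
// context parameter is deliberately ignored.
template <typename T,
          std::enable_if_t<!std::is_same<T, std::complex<float>>::value &&
                               !std::is_same<T, std::complex<double>>::value,
                           bool> = true>
T Conj(const Context& /*dev_ctx*/, const T& x) {
  return x;
}

// Usage: Conj(Context{}, 2.5) returns 2.5 without any device work.
```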
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/cross_entropy_grad_kernel.cc
@@ -30,7 +30,7 @@ void CrossEntropyWithSoftmaxGradCPUKernel(const CPUContext& dev_ctx,
const DenseTensor& loss_grad,
bool soft_label,
bool use_softmax,
- bool numeric_stable_mode,
+ bool numeric_stable_mode UNUSED,
int ignore_index,
int axis,
DenseTensor* logits_grad) {
6 changes: 3 additions & 3 deletions paddle/phi/kernels/cpu/full_kernel.cc
@@ -32,17 +32,17 @@ template <typename T, typename Context>
void FullKernel(const Context& dev_ctx,
const IntArray& shape,
const Scalar& val,
- DataType dtype,
+ DataType dtype UNUSED,
DenseTensor* out) {
out->Resize(phi::make_ddim(shape.GetData()));
FullValue<T>(dev_ctx, out, val.to<T>());
}

template <typename T, typename Context>
void FullLikeKernel(const Context& dev_ctx,
- const DenseTensor& x,
+ const DenseTensor& x UNUSED,
const Scalar& val,
- DataType dtype,
+ DataType dtype UNUSED,
DenseTensor* out) {
auto value = val.to<double>();
using CommonType = typename std::common_type<
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/graph_send_recv_funcs.h
@@ -25,7 +25,7 @@ namespace phi {

template <typename T>
struct GraphSendRecvSumFunctor {
- void operator()(const bool& first_flag,
+ void operator()(const bool& first_flag UNUSED,
const DenseTensor& src_slice,
DenseTensor* dst_slice) {
auto eigen_src = phi::EigenVector<T>::Flatten(src_slice);
2 changes: 1 addition & 1 deletion paddle/phi/kernels/full_kernel.cc
@@ -21,7 +21,7 @@ namespace phi {
template <typename T, typename Context>
void FullBatchSizeLikeKernel(const Context& dev_ctx,
const DenseTensor& x,
- const std::vector<int>& shape,
+ const std::vector<int>& shape UNUSED,
const Scalar& val,
DataType dtype,
int x_batch_size_dim,
4 changes: 2 additions & 2 deletions paddle/phi/kernels/funcs/gather_scatter_functor.cc
@@ -181,11 +181,11 @@ void cpu_scatter_mul_kernel(phi::DenseTensor self,
}

template <typename tensor_t, typename index_t>
- void cpu_scatter_input_grad_kernel(phi::DenseTensor self,
+ void cpu_scatter_input_grad_kernel(phi::DenseTensor self UNUSED,
int dim,
const phi::DenseTensor& index,
phi::DenseTensor output,
- const phi::DeviceContext& ctx) {
+ const phi::DeviceContext& ctx UNUSED) {
auto* index_data = index.data<index_t>();
auto* output_data = output.data<tensor_t>();

2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/math_function.cc
@@ -92,7 +92,7 @@ DEFINE_CPU_TRANS(6);

template <typename DeviceContext, typename T>
void TransposeNormal<DeviceContext, T>::operator()(
- const DeviceContext& context,
+ const DeviceContext& context UNUSED,
const phi::DenseTensor& in,
phi::DenseTensor* out,
const std::vector<int>& axis) {
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/reduce_functor.h
@@ -41,7 +41,7 @@ struct FrobeniusNormGradFunctor {
DX* dx,
DY* dy,
const Dim& dim,
- int size) {
+ int size UNUSED) {
dx->device(place) = y->broadcast(dim);
dx->device(place) = *dx + dx->constant(1e-12f);
dx->device(place) = (*x / *dx) * (dy->broadcast(dim));
4 changes: 2 additions & 2 deletions paddle/phi/kernels/funcs/segment_pooling.cc
@@ -31,7 +31,7 @@ class SegmentPoolFunctor<phi::CPUContext, T, IndexT> {
const DenseTensor& input,
const DenseTensor& segments,
DenseTensor* output,
- DenseTensor* index,
+ DenseTensor* index UNUSED,
const std::string pooltype = "SUM") {
const IndexT* segment_ids = segments.data<IndexT>();
auto curent_id = segment_ids[0];
@@ -90,7 +90,7 @@ class SegmentPoolGradFunctor<phi::CPUContext, T, IndexT> {
const DenseTensor& out_grad,
const DenseTensor& segments,
DenseTensor* in_grad,
- const paddle::optional<DenseTensor>& index,
+ const paddle::optional<DenseTensor>& index UNUSED,
const std::string pooltype = "SUM") {
const IndexT* segment_ids = segments.data<IndexT>();
auto& place = *dev_ctx.eigen_device();
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/unique_functor.h
@@ -190,7 +190,7 @@ static void UniqueFlattendTensor(const Context& context,
}

template <typename Context, typename ForwardIt, typename InT, typename IndexT>
- static ForwardIt UniqueDimImpl(const Context& context,
+ static ForwardIt UniqueDimImpl(const Context& context UNUSED,
ForwardIt first,
ForwardIt last,
const std::vector<IndexT>& sorted_indices_vec,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/gumbel_softmax_kernel_impl.h
@@ -27,7 +27,7 @@ namespace phi {

template <typename Context, typename T, int64_t Rank>
struct ArgMaxFunctor {
- void operator()(const Context& ctx,
+ void operator()(const Context& ctx UNUSED,
const DenseTensor& in,
DenseTensor* index_tensor,
const int64_t& axis) {
4 changes: 2 additions & 2 deletions paddle/phi/kernels/impl/lerp_grad_kernel_impl.h
@@ -21,8 +21,8 @@ namespace phi {

template <typename Context, typename T, size_t D>
static void LerpGradFunction(const Context& ctx,
- const DenseTensor& x,
- const DenseTensor& y,
+ const DenseTensor& x UNUSED,
+ const DenseTensor& y UNUSED,
const DenseTensor& weight,
const DenseTensor& out,
const DenseTensor& out_grad,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/reverse_kernel.cc
@@ -23,7 +23,7 @@ namespace phi {
template <typename T, typename Context>
void ReverseArrayKernel(const Context& dev_ctx,
const TensorArray& x,
- const IntArray& axis,
+ const IntArray& axis UNUSED,
TensorArray* out) {
PADDLE_ENFORCE_EQ(
x.size(),
2 changes: 1 addition & 1 deletion paddle/phi/kernels/sparse/cpu/full_kernel.cc
@@ -34,7 +34,7 @@ template <typename T, typename Context>
void FullLikeCooKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const Scalar& val,
- DataType dtype,
+ DataType dtype UNUSED,
SparseCooTensor* out) {
phi::Copy<Context>(dev_ctx,
x.non_zero_indices(),
4 changes: 2 additions & 2 deletions paddle/phi/kernels/sparse/sparse_utils_kernel.h
@@ -144,14 +144,14 @@ DenseTensor CsrToDense(const Context& dev_ctx, const SparseCsrTensor& x) {
}

template <typename T, typename Context>
- void ValuesCooKernel(const Context& dev_ctx,
+ void ValuesCooKernel(const Context& dev_ctx UNUSED,
const SparseCooTensor& x,
DenseTensor* out) {
*out = x.non_zero_elements();
}

template <typename T, typename Context>
- void ValuesCsrKernel(const Context& dev_ctx,
+ void ValuesCsrKernel(const Context& dev_ctx UNUSED,
const SparseCsrTensor& x,
DenseTensor* out) {
*out = x.non_zero_elements();