// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <memory>

#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/core/kernel_registry.h"

namespace phi {
template <typename T, typename Context>
void LogLossGradXPUKernel(const Context& dev_ctx,
                          const DenseTensor& input,
                          const DenseTensor& label,
                          const DenseTensor& out_grad,
                          float epsilon_in,
                          DenseTensor* in_grad) {
  auto* predict = &input;
  auto* labels = &label;
  auto* dloss = &out_grad;
  auto* dpred = in_grad;
  // Nothing to do if the gradient w.r.t. the prediction is not requested.
  if (dpred == nullptr) {
    return;
  }
  auto epsilon = static_cast<T>(epsilon_in);
  // Allocate the output gradient buffer on the XPU device.
  dev_ctx.template Alloc<T>(dpred);
  int n = predict->numel();
  // Launch the XDNN elementwise log-loss gradient kernel and check its status.
  int r = xpu::log_loss_grad(dev_ctx.x_context(),
                             predict->data<T>(),
                             labels->data<T>(),
                             dloss->data<T>(),
                             dpred->data<T>(),
                             n,
                             epsilon);
  PADDLE_ENFORCE_XDNN_SUCCESS(r, "log_loss_grad");
}
}  // namespace phi

PD_REGISTER_KERNEL(
    log_loss_grad, XPU, ALL_LAYOUT, phi::LogLossGradXPUKernel, float) {}
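For context, the `xpu::log_loss_grad` call above is assumed to evaluate the standard log-loss gradient elementwise, `dpred = dloss * (-(label / (pred + eps)) + (1 - label) / (1 - pred + eps))`, i.e. the derivative of `-label * log(pred + eps) - (1 - label) * log(1 - pred + eps)` scaled by the upstream gradient. A minimal host-side sketch of that formula under this assumption (the helper name is hypothetical and not part of the file above):

#include <cstddef>

// Illustrative reference only: computes the same elementwise log-loss
// gradient on the host that the XDNN kernel is assumed to compute on-device.
template <typename T>
void LogLossGradReference(const T* pred,
                          const T* label,
                          const T* dloss,
                          T* dpred,
                          std::size_t n,
                          T eps) {
  for (std::size_t i = 0; i < n; ++i) {
    // d/dpred of [-label*log(pred+eps) - (1-label)*log(1-pred+eps)],
    // chained with the incoming gradient dloss.
    dpred[i] = dloss[i] * (-(label[i] / (pred[i] + eps)) +
                           (T(1) - label[i]) / (T(1) - pred[i] + eps));
  }
}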