
Commit

【Hackathon 5th No.11】add gammaincc and gammainc API (PaddlePaddle#59357)

* 【Hackathon 5th No.11】add igamma and igammac API

* fix bug

* Merge branch 'develop' into add_igamma_igammac

* fix codestyle

* fix bug

* update ut

* fix bug

* fix bug

* add test inplace

* fix bug

* fix bug

* remove unused comment

* remove some c++ impl

* update code

* fix bug

* fix bug

* update

* remove some paddle.enable_static()

* remove eigen impl

* fix test_inplace

* rename op

* igamma(a, x) -> gammaincc(x, y)
GreatV authored and eee4017 committed Jan 30, 2024
1 parent 1ec5b78 commit bad3c29
Showing 16 changed files with 752 additions and 0 deletions.
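Background, for readers of the diff (this note is not part of the commit): the new names follow the convention used by scipy.special, where gammainc denotes the regularized lower incomplete gamma function P and gammaincc the regularized upper function Q. With the argument order fixed by the last commit-message entry, gammaincc(x, y) evaluates Q at shape parameter a = x and bound y:

P(a, x) = \frac{1}{\Gamma(a)} \int_0^{x} t^{a-1} e^{-t} \, dt,
\qquad
Q(a, x) = \frac{1}{\Gamma(a)} \int_{x}^{\infty} t^{a-1} e^{-t} \, dt = 1 - P(a, x).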
10 changes: 10 additions & 0 deletions paddle/phi/api/yaml/backward.yaml
@@ -953,6 +953,16 @@
  kernel :
    func : frame_grad

- backward_op : gammaincc_grad
  forward : gammaincc(Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad)
  output : Tensor(y_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [y]
  kernel :
    func : gammaincc_grad

- backward_op : gammaln_grad
  forward : gammaln(Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
11 changes: 11 additions & 0 deletions paddle/phi/api/yaml/ops.yaml
@@ -1105,6 +1105,17 @@
    backend : place
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : gammaincc
  args : (Tensor x, Tensor y)
  output : Tensor(out)
  infer_meta :
    func : ElementwiseInferMeta
    param : [x, y]
  kernel :
    func : gammaincc
  inplace : (x -> out)
  backward : gammaincc_grad

- op : gammaln
  args : (Tensor x)
  output : Tensor(out)
21 changes: 21 additions & 0 deletions paddle/phi/kernels/cpu/gammaincc_grad_kernel.cc
@@ -0,0 +1,21 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/gammaincc_grad_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/gammaincc_grad_kernel_impl.h"

PD_REGISTER_KERNEL(
    gammaincc_grad, CPU, ALL_LAYOUT, phi::GammainccGradKernel, float, double) {}
21 changes: 21 additions & 0 deletions paddle/phi/kernels/cpu/gammaincc_kernel.cc
@@ -0,0 +1,21 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/gammaincc_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/gammaincc_kernel_impl.h"

PD_REGISTER_KERNEL(
    gammaincc, CPU, ALL_LAYOUT, phi::GammainccKernel, float, double) {}
28 changes: 28 additions & 0 deletions paddle/phi/kernels/gammaincc_grad_kernel.h
@@ -0,0 +1,28 @@

// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void GammainccGradKernel(const Context& dev_ctx,
                         const DenseTensor& x,
                         const DenseTensor& y,
                         const DenseTensor& d_out,
                         DenseTensor* d_y);
} // namespace phi
27 changes: 27 additions & 0 deletions paddle/phi/kernels/gammaincc_kernel.h
@@ -0,0 +1,27 @@

// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void GammainccKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     const DenseTensor& y,
                     DenseTensor* out);
} // namespace phi
21 changes: 21 additions & 0 deletions paddle/phi/kernels/gpu/gammaincc_grad_kernel.cu
@@ -0,0 +1,21 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/gammaincc_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/gammaincc_grad_kernel_impl.h"

PD_REGISTER_KERNEL(
    gammaincc_grad, GPU, ALL_LAYOUT, phi::GammainccGradKernel, float, double) {}
21 changes: 21 additions & 0 deletions paddle/phi/kernels/gpu/gammaincc_kernel.cu
@@ -0,0 +1,21 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/gammaincc_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/gammaincc_kernel_impl.h"

PD_REGISTER_KERNEL(
    gammaincc, GPU, ALL_LAYOUT, phi::GammainccKernel, float, double) {}
62 changes: 62 additions & 0 deletions paddle/phi/kernels/impl/gammaincc_grad_kernel_impl.h
@@ -0,0 +1,62 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/for_range.h"

namespace phi {
template <typename T>
struct IgammaGradFunctor {
  IgammaGradFunctor(
      const T* dout, const T* x, const T* a, T* output, int64_t numel)
      : dout_(dout), x_(x), a_(a), output_(output), numel_(numel) {}

  HOSTDEVICE void operator()(int64_t idx) const {
    using MT = typename phi::dtype::MPTypeTrait<T>::Type;
    const MT mp_dout = static_cast<MT>(dout_[idx]);
    const MT mp_x = static_cast<MT>(x_[idx]);
    const MT mp_a = static_cast<MT>(a_[idx]);
    const MT mp_a_1 = static_cast<MT>(a_[idx] - 1);
    output_[idx] = static_cast<T>(mp_dout * -std::exp(-mp_x) *
                                  std::pow(mp_x, mp_a_1) / std::tgamma(mp_a));
  }

 private:
  const T* dout_;
  const T* x_;
  const T* a_;
  T* output_;
  int64_t numel_;
};

template <typename T, typename Context>
void GammainccGradKernel(const Context& dev_ctx,
                         const DenseTensor& x,
                         const DenseTensor& y,
                         const DenseTensor& d_out,
                         DenseTensor* d_y) {
  auto numel = d_out.numel();
  auto* dout_data = d_out.data<T>();
  auto* x_data = x.data<T>();
  auto* y_data = y.data<T>();
  auto* dy_data =
      dev_ctx.template Alloc<T>(d_y, static_cast<size_t>(numel * sizeof(T)));
  phi::funcs::ForRange<Context> for_range(dev_ctx, numel);
  // The functor binds x_ to y_data and a_ to x_data, so each element
  // computes d/dy gammaincc(x, y) = -y^(x-1) * exp(-y) / Gamma(x).
  IgammaGradFunctor<T> functor(dout_data, y_data, x_data, dy_data, numel);
  for_range(functor);
}
} // namespace phi
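For reference (not part of the commit), the factor applied to the incoming gradient is the closed-form derivative of the regularized upper incomplete gamma with respect to its integration bound. Under the bindings above (x_ = y, a_ = x), each element evaluates

\frac{\partial Q(x, y)}{\partial y}
  = \frac{\partial}{\partial y} \, \frac{1}{\Gamma(x)} \int_{y}^{\infty} t^{x-1} e^{-t} \, dt
  = -\frac{y^{x-1} e^{-y}}{\Gamma(x)},

which matches mp_dout * -std::exp(-mp_x) * std::pow(mp_x, mp_a_1) / std::tgamma(mp_a). No gradient with respect to the shape parameter x is produced; backward.yaml declares only Tensor(y_grad).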
143 changes: 143 additions & 0 deletions paddle/phi/kernels/impl/gammaincc_kernel_impl.h
@@ -0,0 +1,143 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/for_range.h"

#define MAXLOG 7.09782712893383996732E2
#define MACHEP 1.11022302462515654042E-16

namespace phi {
template <typename T>
HOSTDEVICE T igam(const T a, const T x);
template <typename T>
HOSTDEVICE T igamc(const T a, const T x);

template <typename T>
HOSTDEVICE T igam(const T a, const T x) {
  if ((x <= T{0}) || (a <= T{0})) return (T{0.0});

  if ((x > T{1.0}) && (x > a)) return (T{1.0} - igamc(a, x));

  /* Compute x**a * exp(-x) / gamma(a) */
  T ax = a * log(x) - x - std::lgamma(a);
  if (ax < -MAXLOG) {
    return (T{0.0});
  }
  ax = exp(ax);

  /* power series */
  T r = a;
  T c = T{1.0};
  T ans = T{1.0};

  do {
    r += T{1.0};
    c *= x / r;
    ans += c;
  } while (c / ans > MACHEP);

  return (ans * ax / a);
}
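In closed form, the loop accumulates the standard Cephes-style lower series (a note added for this write-up, not present in the source):

P(a, x) = \frac{x^{a} e^{-x}}{\Gamma(a+1)} \sum_{n=0}^{\infty} \frac{x^{n}}{(a+1)(a+2)\cdots(a+n)},

where ax holds the prefactor x^a e^{-x} / \Gamma(a), ans accumulates the sum, the trailing division by a converts \Gamma(a) into \Gamma(a+1), and iteration stops once a term is below MACHEP relative to the running sum.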

template <typename T>
HOSTDEVICE T igamc(const T a, const T x) {
  static T big = 4.503599627370496e15;
  static T biginv = 2.22044604925031308085e-16;

  if ((x <= T{0}) || (a <= T{0})) return (T{1.0});

  if ((x < T{1.0}) || (x < a)) return (T{1.0} - igam(a, x));

  T ax = a * log(x) - x - std::lgamma(a);
  if (ax < -MAXLOG) {
    return (T{0.0});
  }
  ax = exp(ax);

  /* continued fraction */
  T y = T{1.0} - a;
  T z = x + y + T{1.0};
  T c = T{0.0};
  T pkm2 = T{1.0};
  T qkm2 = x;
  T pkm1 = x + T{1.0};
  T qkm1 = z * x;
  T ans = pkm1 / qkm1;
  T t;
  do {
    c += T{1.0};
    y += T{1.0};
    z += T{2.0};
    T yc = y * c;
    T pk = pkm1 * z - pkm2 * yc;
    T qk = qkm1 * z - qkm2 * yc;
    if (qk != T{0}) {
      T r = pk / qk;
      t = fabs((ans - r) / r);
      ans = r;
    } else {
      t = T{1.0};
    }
    pkm2 = pkm1;
    pkm1 = pk;
    qkm2 = qkm1;
    qkm1 = qk;
    if (fabs(pk) > big) {
      pkm2 *= biginv;
      pkm1 *= biginv;
      qkm2 *= biginv;
      qkm1 *= biginv;
    }
  } while (t > MACHEP);

  return (ans * ax);
}
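The do-loop computes successive convergents of Legendre's continued fraction for the upper function, the same form used in Cephes and Numerical Recipes, with big/biginv rescaling the recurrence to avoid overflow (again, an explanatory note, not part of the source):

Q(a, x) = \frac{x^{a} e^{-x}}{\Gamma(a)} \;
\cfrac{1}{x + 1 - a - \cfrac{1 \cdot (1 - a)}{x + 3 - a - \cfrac{2 \cdot (2 - a)}{x + 5 - a - \cdots}}},

iterating until consecutive convergents agree to within MACHEP.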

template <typename T>
struct IgammaFunctor {
  IgammaFunctor(const T* x, const T* a, T* output, int64_t numel)
      : x_(x), a_(a), output_(output), numel_(numel) {}

  HOSTDEVICE void operator()(int64_t idx) const {
    using MT = typename phi::dtype::MPTypeTrait<T>::Type;
    const MT mp_x = static_cast<MT>(x_[idx]);
    const MT mp_a = static_cast<MT>(a_[idx]);
    output_[idx] = static_cast<T>(igamc<MT>(mp_a, mp_x));
  }

 private:
  const T* x_;
  const T* a_;
  T* output_;
  int64_t numel_;
};

template <typename T, typename Context>
void GammainccKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     const DenseTensor& y,
                     DenseTensor* out) {
  auto numel = x.numel();
  auto* x_data = x.data<T>();
  auto* y_data = y.data<T>();
  auto* out_data = dev_ctx.template Alloc<T>(out);
  phi::funcs::ForRange<Context> for_range(dev_ctx, numel);
  // The functor binds x_ to y_data and a_ to x_data, so each element
  // evaluates igamc(x, y), the regularized upper incomplete gamma Q(x, y).
  IgammaFunctor<T> functor(y_data, x_data, out_data, numel);
  for_range(functor);
}
} // namespace phi
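As a quick host-side plausibility check (not part of this commit), the two expansions can be exercised in a standalone program: P from the power series and Q from the continued fraction are computed independently, so P + Q ≈ 1 is a meaningful cross-check. A minimal sketch follows; names and tolerances are illustrative, and the big/biginv overflow rescaling is omitted since only a few iterations run here.

// sanity_check.cc -- standalone re-derivation of the two Cephes recurrences.
#include <cassert>
#include <cmath>
#include <cstdio>

// Lower regularized incomplete gamma P(a, x) via the power series.
double igam_series(double a, double x) {
  double ax = std::exp(a * std::log(x) - x - std::lgamma(a));
  double r = a, c = 1.0, ans = 1.0;
  do {  // accumulate x^n / ((a+1)...(a+n)) until terms are negligible
    r += 1.0;
    c *= x / r;
    ans += c;
  } while (c / ans > 1e-16);
  return ans * ax / a;
}

// Upper regularized incomplete gamma Q(a, x) via the continued fraction.
double igamc_cf(double a, double x) {
  double ax = std::exp(a * std::log(x) - x - std::lgamma(a));
  double y = 1.0 - a, z = x + y + 1.0, c = 0.0;
  double pkm2 = 1.0, qkm2 = x, pkm1 = x + 1.0, qkm1 = z * x;
  double ans = pkm1 / qkm1, t;
  do {  // advance the convergents p_k / q_k of Legendre's fraction
    c += 1.0;
    y += 1.0;
    z += 2.0;
    double yc = y * c;
    double pk = pkm1 * z - pkm2 * yc;
    double qk = qkm1 * z - qkm2 * yc;
    if (qk != 0.0) {
      double r = pk / qk;
      t = std::fabs((ans - r) / r);
      ans = r;
    } else {
      t = 1.0;
    }
    pkm2 = pkm1;
    pkm1 = pk;
    qkm2 = qkm1;
    qkm1 = qk;
  } while (t > 1e-16);
  return ans * ax;
}

int main() {
  // x > a, so the continued fraction converges quickly; the series
  // converges for any x > 0 (just more slowly when x is large).
  double a = 2.5, x = 3.0;
  double p = igam_series(a, x), q = igamc_cf(a, x);
  std::printf("P=%.12f Q=%.12f P+Q=%.12f\n", p, q, p + q);
  assert(std::fabs(p + q - 1.0) < 1e-10);
  return 0;
}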