Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[phi] move isnan_v2, isfinite_v2, isinf_v2 to phi #40076

Merged
merged 16 commits into from
Mar 8, 2022
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
66 changes: 19 additions & 47 deletions paddle/fluid/operators/isfinite_v2_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -12,11 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/isfinite_v2_op.h"

#include <string>

#include "paddle/fluid/operators/common_infer_shape_functions.h"
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"

namespace paddle {
namespace framework {
Expand Down Expand Up @@ -49,12 +51,6 @@ class OverflowV2Op : public framework::OperatorWithKernel {
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorWithKernel(type, inputs, outputs, attrs) {}
void InferShape(framework::InferShapeContext *ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "isfinitev2");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "isfinitev2");
UnaryOpUnchangedInferShape(ctx);
}

protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
Expand Down Expand Up @@ -104,6 +100,14 @@ element of X as a tensor.
} // namespace paddle

namespace ops = paddle::operators;
// Bind phi::UnchangedInferMeta as the shape-inference routine for the three
// overflow-check ops, replacing the removed OverflowV2Op::InferShape: the
// output keeps the input's dims unchanged.
// NOTE(review): "DELCARE" looks like a typo for "DECLARE", but it must match
// the macro's actual name in the framework headers — confirm before renaming.
DELCARE_INFER_SHAPE_FUNCTOR(isinf_v2, IsinfInferShapeFunctor,
PT_INFER_META(phi::UnchangedInferMeta));

DELCARE_INFER_SHAPE_FUNCTOR(isnan_v2, IsnanInferShapeFunctor,
PT_INFER_META(phi::UnchangedInferMeta));

DELCARE_INFER_SHAPE_FUNCTOR(isfinite_v2, IsfiniteInferShapeFunctor,
PT_INFER_META(phi::UnchangedInferMeta));

#define REGISTER_V2OP_MAKER(op_type, comment) \
namespace paddle { \
Expand All @@ -124,50 +128,18 @@ REGISTER_V2OP_MAKER(isfinite_v2, "isfinitev2(X)");
REGISTER_OPERATOR(
isinf_v2, ops::OverflowV2Op, ops::_isinf_v2OverflowV2OpMaker,
paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
IsinfInferShapeFunctor);

REGISTER_OPERATOR(
isnan_v2, ops::OverflowV2Op, ops::_isnan_v2OverflowV2OpMaker,
paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
IsnanInferShapeFunctor);

REGISTER_OPERATOR(
isfinite_v2, ops::OverflowV2Op, ops::_isfinite_v2OverflowV2OpMaker,
paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);

REGISTER_OP_CPU_KERNEL(isnan_v2,
ops::OverflowKernel<paddle::platform::CPUDeviceContext,
int, ops::NANV2Functor>,
ops::OverflowKernel<paddle::platform::CPUDeviceContext,
int64_t, ops::NANV2Functor>,
ops::OverflowKernel<paddle::platform::CPUDeviceContext,
float, ops::NANV2Functor>,
ops::OverflowKernel<paddle::platform::CPUDeviceContext,
double, ops::NANV2Functor>,
ops::OverflowKernel<paddle::platform::CPUDeviceContext,
plat::float16, ops::NANV2Functor>);

REGISTER_OP_CPU_KERNEL(
isinf_v2, ops::OverflowKernel<paddle::platform::CPUDeviceContext, int,
ops::InfinityV2Functor>,
ops::OverflowKernel<paddle::platform::CPUDeviceContext, int64_t,
ops::InfinityV2Functor>,
ops::OverflowKernel<paddle::platform::CPUDeviceContext, float,
ops::InfinityV2Functor>,
ops::OverflowKernel<paddle::platform::CPUDeviceContext, double,
ops::InfinityV2Functor>,
ops::OverflowKernel<paddle::platform::CPUDeviceContext, plat::float16,
ops::InfinityV2Functor>);

REGISTER_OP_CPU_KERNEL(
isfinite_v2, ops::OverflowKernel<paddle::platform::CPUDeviceContext, int,
ops::IsfiniteV2Functor>,
ops::OverflowKernel<paddle::platform::CPUDeviceContext, int64_t,
ops::IsfiniteV2Functor>,
ops::OverflowKernel<paddle::platform::CPUDeviceContext, float,
ops::IsfiniteV2Functor>,
ops::OverflowKernel<paddle::platform::CPUDeviceContext, double,
ops::IsfiniteV2Functor>,
ops::OverflowKernel<paddle::platform::CPUDeviceContext, plat::float16,
ops::IsfiniteV2Functor>);
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
IsfiniteInferShapeFunctor);

55 changes: 0 additions & 55 deletions paddle/fluid/operators/isfinite_v2_op.cu

This file was deleted.

3 changes: 3 additions & 0 deletions paddle/phi/core/compat/op_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,9 @@ const std::unordered_set<std::string> standard_kernel_suffixs({
const std::unordered_set<std::string> deprecated_op_names({"diag",
"flatten",
"flatten_grad",
"isinf",
"isnan",
"isfinite",
"matmul",
"matmul_grad",
"matmul_grad_grad",
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
Expand All @@ -14,39 +14,32 @@

#pragma once

#include <vector>

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

op_registry这个头文件还需要吗

#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/isfinite_op.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/transform.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/infermeta/unary.h"

namespace phi {
class DenseTensor;
} // namespace phi

namespace paddle {
namespace operators {
namespace funcs {

struct InfinityV2Functor {
void operator()(const framework::Tensor& tensor, framework::Tensor* out) {
framework::TensorContainsInfV2(tensor, out);
void operator()(const DenseTensor& tensor, DenseTensor* out) {
paddle::framework::TensorContainsInfV2(tensor, out);
}
};

struct NANV2Functor {
void operator()(const framework::Tensor& tensor, framework::Tensor* out) {
framework::TensorContainsNANV2(tensor, out);
void operator()(const DenseTensor& tensor, DenseTensor* out) {
paddle::framework::TensorContainsNANV2(tensor, out);
}
};

struct IsfiniteV2Functor {
void operator()(const framework::Tensor& tensor, framework::Tensor* out) {
framework::TensorIsfiniteV2(tensor, out);
void operator()(const DenseTensor& tensor, DenseTensor* out) {
paddle::framework::TensorIsfiniteV2(tensor, out);
}
};

} // namespace operators
} // namespace paddle
} // namespace funcs
} // namespace phi
56 changes: 56 additions & 0 deletions paddle/phi/kernels/impl/isfinite_kernel_impl.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/kernels/funcs/isfinite_functor.h"
#include "paddle/phi/kernels/isfinite_kernel.h"

namespace phi {

// Forward declarations of the shared kernel bodies. The per-backend
// translation units define these; the DEFINE_* macros below generate the
// public per-op kernels that forward to them.

// Dense-tensor path: runs Functor on `x` and writes the result into `out`.
template <typename T, typename Context, typename Functor>
inline void IsfiniteKernelImpl(const Context& ctx,
const DenseTensor& x,
DenseTensor* out);

// SelectedRows path: same check for sparse-row inputs.
// NOTE(review): per review feedback, SelectedRows kernels are expected to
// live under the selected_rows directory — confirm final location.
template <typename T, typename Context, typename Functor>
inline void IsfiniteSRImpl(const Context& ctx,
const SelectedRows& x,
SelectedRows* out);

// DEFINE_ISFINITE_KERNEL(Name, Functor) expands to the public dense-tensor
// kernel template `Name<T, Context>`, a thin wrapper that forwards to
// IsfiniteKernelImpl with the op-specific functor.
#define DEFINE_ISFINITE_KERNEL(isfinite_kernel, functor) \
template <typename T, typename Context> \
void isfinite_kernel( \
const Context& ctx, const DenseTensor& x, DenseTensor* out) { \
IsfiniteKernelImpl<T, Context, functor>(ctx, x, out); \
}

// Instantiate the three overflow-check kernels, then drop the helper macro.
DEFINE_ISFINITE_KERNEL(IsinfKernel, funcs::InfinityV2Functor)
DEFINE_ISFINITE_KERNEL(IsnanKernel, funcs::NANV2Functor)
DEFINE_ISFINITE_KERNEL(IsfiniteKernel, funcs::IsfiniteV2Functor)
#undef DEFINE_ISFINITE_KERNEL

// DEFINE_ISFINITE_SR(Name, Functor) expands to the SelectedRows variant
// `Name<T, Context>`, forwarding to IsfiniteSRImpl with the op functor.
#define DEFINE_ISFINITE_SR(isfinite_sr, functor) \
template <typename T, typename Context> \
void isfinite_sr( \
const Context& ctx, const SelectedRows& x, SelectedRows* out) { \
IsfiniteSRImpl<T, Context, functor>(ctx, x, out); \
}

// Instantiate the SelectedRows kernels, then drop the helper macro.
DEFINE_ISFINITE_SR(IsinfSR, funcs::InfinityV2Functor)
DEFINE_ISFINITE_SR(IsnanSR, funcs::NANV2Functor)
DEFINE_ISFINITE_SR(IsfiniteSR, funcs::IsfiniteV2Functor)
#undef DEFINE_ISFINITE_SR
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

SelectedRows相关的kernel逻辑需要放在selected_rows目录下

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done.


} // namespace phi
95 changes: 95 additions & 0 deletions paddle/phi/kernels/isfinite_kernel.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

我看了下底层的TensorContainsInfV2这几个实现,还是属于cpu和gpu公用的实现,这样的话,这个还是属于应该在cpu和gpu子目录下分别创建cc和cu文件的情况,kernels根目录下是放置纯设备无关的kernel实现,比如说这个也需要在xpu和将来的其他设备上能用才行,目前的实现是不行的

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@chenwhql Done.已在cpu和gpu子目录下分别创建cc和cu文件

//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/isfinite_kernel.h"

#include "paddle/phi/backends/all_context.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/isfinite_kernel_impl.h"

namespace phi {

// Shared body for the dense-tensor overflow-check kernels: allocate the
// output on the kernel's device, then run the op-specific check functor.
template <typename T, typename Context, typename Functor>
inline void IsfiniteKernelImpl(const Context& dev_ctx,
                               const DenseTensor& x,
                               DenseTensor* out) {
  // Ensure `out` has device storage before the functor writes into it.
  dev_ctx.template Alloc<T>(out);
  // Apply the check with a temporary functor instance.
  Functor{}(x, out);
}

} // namespace phi

// Kernel registrations for the overflow-check ops (isinf / isnan / isfinite).
//
// NOTE(review): the deleted fluid registrations (isfinite_v2_op.cc / .cu)
// covered plat::float16 on both CPU and GPU; registering only bfloat16 here
// would be a silent dtype regression for existing float16 models, so
// phi::dtype::float16 is kept alongside bfloat16.
//
// These ops yield boolean results regardless of the input dtype (the
// underlying TensorContains*V2 helpers produce bool tensors — confirm), so
// each registration pins the declared output dtype to BOOL rather than
// letting it default to the input type T.
PD_REGISTER_KERNEL(isinf,
                   CPU,
                   ALL_LAYOUT,
                   phi::IsinfKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   int,
                   int64_t) {
  kernel->OutputAt(0)->SetDataType(phi::DataType::BOOL);
}

PD_REGISTER_KERNEL(isnan,
                   CPU,
                   ALL_LAYOUT,
                   phi::IsnanKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   int,
                   int64_t) {
  kernel->OutputAt(0)->SetDataType(phi::DataType::BOOL);
}

PD_REGISTER_KERNEL(isfinite,
                   CPU,
                   ALL_LAYOUT,
                   phi::IsfiniteKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   int,
                   int64_t) {
  kernel->OutputAt(0)->SetDataType(phi::DataType::BOOL);
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_REGISTER_KERNEL(isinf,
                   GPU,
                   ALL_LAYOUT,
                   phi::IsinfKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   int,
                   int64_t) {
  kernel->OutputAt(0)->SetDataType(phi::DataType::BOOL);
}

PD_REGISTER_KERNEL(isnan,
                   GPU,
                   ALL_LAYOUT,
                   phi::IsnanKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   int,
                   int64_t) {
  kernel->OutputAt(0)->SetDataType(phi::DataType::BOOL);
}

PD_REGISTER_KERNEL(isfinite,
                   GPU,
                   ALL_LAYOUT,
                   phi::IsfiniteKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   int,
                   int64_t) {
  kernel->OutputAt(0)->SetDataType(phi::DataType::BOOL);
}
#endif
Loading