
Commit

Resolve CI issues
jim19930609 committed Nov 27, 2021
1 parent f6981d7 commit f5542ee
Showing 5 changed files with 30 additions and 106 deletions.
12 changes: 9 additions & 3 deletions paddle/fluid/eager/api/utils/hook_utils.cc
@@ -49,6 +49,7 @@ void RetainGradForTensor(const egr::EagerTensor& tensor) {
[grad_tensor](const egr::EagerTensor& t) {
if (!grad_tensor) {
PADDLE_THROW(paddle::platform::errors::Fatal(
"Detected null grad_tensor."
"Grad tensor in AutogradMeta of should not be nullptr"));
}
if (t.defined()) {
@@ -58,8 +59,12 @@ void RetainGradForTensor(const egr::EagerTensor& tensor) {
} else {
PADDLE_ENFORCE_EQ(
t.Var().IsInitialized(), true,
"Variable %s has to be initialized while we need to set it.",
t.name());
paddle::platform::errors::Fatal(
"Detected uninitialized variable, causing segmentation fault "
"inside the hook."
"Variable %s has to be initialized while we need to set it."
"please check tensor initialization status.",
t.name()));
grad_tensor->MutableVar()
->GetMutable<paddle::framework::LoDTensor>()
->ShareDataWith(t.Var().Get<paddle::framework::LoDTensor>());
@@ -72,7 +77,8 @@ void RetainGradForTensor(const egr::EagerTensor& tensor) {
std::shared_ptr<GradNodeBase> grad_node = EagerUtils::grad_node(tensor);
PADDLE_ENFORCE(
grad_node.get() != nullptr,
paddle::platform::errors::Fatal("Leaf tensor should have had grad_node "
paddle::platform::errors::Fatal("Detected null grad_node. "
"Leaf tensor should have a grad_node "
"with type: GradNodeAccumulation"));
auto accumulation_grad_node =
std::dynamic_pointer_cast<GradNodeAccumulation>(grad_node);
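A note on the multi-line messages introduced above: C++ merges adjacent string literals with no separator, so each fragment needs its own trailing space or punctuation, otherwise the runtime message reads "grad_tensor.Grad tensor ...". A minimal standalone sketch of the language rule (not Paddle code):

```cpp
#include <cstdio>

int main() {
  // The compiler merges adjacent string literals into one, with no separator.
  const char* no_space = "Detected null grad_tensor."
                         "Grad tensor should not be nullptr";
  const char* with_space = "Detected null grad_tensor. "
                           "Grad tensor should not be nullptr";
  std::printf("%s\n", no_space);    // ...grad_tensor.Grad tensor...
  std::printf("%s\n", with_space);  // ...grad_tensor. Grad tensor...
  return 0;
}
```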
12 changes: 9 additions & 3 deletions paddle/fluid/eager/backward.cc
@@ -93,7 +93,9 @@ void RunBackward(const std::vector<egr::EagerTensor>& tensors,

PADDLE_ENFORCE(grad_node,
paddle::platform::errors::Fatal(
"Grad Node is nullptr for grad input tensor %d", i));
"Detected null grad_node."
"Grad Node is nullptr for grad input tensor %d",
i));
// Prepare GradTensorHolder
if (!node_input_buffers_dict.count(grad_node)) {
VLOG(6) << "Create Value for grad input tensor " << i;
@@ -104,8 +106,10 @@ void RunBackward(const std::vector<egr::EagerTensor>& tensors,
if (grad_tensors.size() > 0) {
PADDLE_ENFORCE(
grad_tensors.size() == tensors.size(),
paddle::platform::errors::Fatal("grad_tensors should either have "
"size = 0 or same size as tensors"));
paddle::platform::errors::Fatal(
"Detected size mismatch between tensors and grad_tensors"
"grad_tensors should either have "
"size = 0 or same size as tensors"));
// Feed given tensor if it's provided
VLOG(6) << "Fill grad input tensor " << i << " with given grad tensor";
node_input_buffers_dict[grad_node]->add(
@@ -146,6 +150,7 @@ void RunBackward(const std::vector<egr::EagerTensor>& tensors,
PADDLE_ENFORCE(
node_input_buffers_dict.count(node),
paddle::platform::errors::Fatal(
"Unable to find next node in the InputBuufer"
"Trying to run Node without configuring its GradTensorHolder"));

std::unique_ptr<GradTensorHolder> node_input_buffer =
@@ -194,6 +199,7 @@ void RunBackward(const std::vector<egr::EagerTensor>& tensors,
node_in_degree_map[next_node]--;
PADDLE_ENFORCE(node_in_degree_map[next_node] >= 0,
paddle::platform::errors::Fatal(
"Detected in-degree value smaller than zero."
"Node's in-degree cannot be negative"));
if (node_in_degree_map[next_node] == 0) {
queue.emplace(std::move(next_node));
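The hunks above all migrate from the bare-string enforce overload to one that wraps the message in paddle::platform::errors::Fatal with its own format arguments. For readers unfamiliar with the call shape, here is a toy analogue written for this page only; the stand-in error type and macro are assumptions for illustration, not Paddle's real PADDLE_ENFORCE / errors::Fatal implementation:

```cpp
#include <cstdio>
#include <cstdlib>
#include <string>

// Toy stand-in for paddle::platform::errors::Fatal: formats the message
// eagerly and stores the result. Not the real Paddle type.
struct FatalError {
  std::string msg;
  template <typename... Args>
  explicit FatalError(const char* fmt, Args... args) {
    char buf[512];
    std::snprintf(buf, sizeof(buf), fmt, args...);
    msg = buf;
  }
};

// Toy stand-in for PADDLE_ENFORCE(cond, error): prints the formatted
// message and aborts when the condition is false.
#define TOY_ENFORCE(cond, error)                                         \
  do {                                                                   \
    if (!(cond)) {                                                       \
      std::fprintf(stderr, "Enforce failed: %s\n", (error).msg.c_str()); \
      std::abort();                                                      \
    }                                                                    \
  } while (0)

int main() {
  std::size_t tensors = 3, grad_tensors = 2;
  // Sizes differ and grad_tensors is non-empty, so this aborts and prints
  // the fully formatted message.
  TOY_ENFORCE(
      grad_tensors == 0 || grad_tensors == tensors,
      FatalError("Detected size mismatch between tensors and grad_tensors. "
                 "grad_tensors should either have size = 0 or the same size "
                 "as tensors (%zu vs %zu).",
                 grad_tensors, tensors));
  return 0;
}
```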
20 changes: 4 additions & 16 deletions paddle/fluid/eager/tests/task_tests/backward_test.cc
@@ -30,7 +30,6 @@
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/tensor_meta.h"

// TODO(jiabin): remove nolint here!!!
using namespace egr; // NOLINT

namespace eager_test {
@@ -55,7 +54,6 @@ TEST(Backward, SingleNodeEmptyGrad) {

// Set grad in/out meta
node0_ptr->SetDefaultGradInOutMeta();
// Connect Tensor and Node via AutoGradMeta
AutogradMeta* auto_grad_meta = EagerUtils::autograd_meta(&target_tensor);
auto_grad_meta->SetGradNode(
std::dynamic_pointer_cast<GradNodeBase>(node0_ptr));
@@ -82,9 +80,7 @@ TEST(Backward, SingleNodeEmptyGrad) {
RunBackward(outs, {});

// Check Output Value
PADDLE_ENFORCE(
CompareGradTensorWithValue<float>(leaf_tensor, 5.0) == true,
paddle::platform::errors::Fatal("Numerical Error, Expected %f", 5.0));
CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
}

TEST(Backward, SingleNodeCustomGrad) {
@@ -145,9 +141,7 @@ TEST(Backward, SingleNodeCustomGrad) {
RunBackward(target_tensors, grad_tensors);

// Check Output Value
PADDLE_ENFORCE(
CompareGradTensorWithValue<float>(leaf_tensor, 50.0) == true,
paddle::platform::errors::Fatal("Numerical Error, Expected %f", 50.0));
CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
}

/*
@@ -221,9 +215,7 @@ TEST(Backward, LinearNodes) {
RunBackward(target_tensors, {});

// Check Output Value
PADDLE_ENFORCE(
CompareGradTensorWithValue<float>(leaf_tensor, 50.0) == true,
paddle::platform::errors::Fatal("Numerical Error, Expected %f", 50.0));
CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
}

/*
@@ -320,13 +312,9 @@ TEST(Backward, WithAccumulation) {
node2_ptr->AddEdges({&meta2}, 0);
}

// Use Empty Grad Tensor
RunBackward(target_tensors, grad_tensors);

// Check Output Value
PADDLE_ENFORCE(
CompareGradTensorWithValue<float>(leaf_tensor, 2500.0) == true,
paddle::platform::errors::Fatal("Numerical Error, Expected %f", 2500.0));
CompareGradTensorWithValue<float>(leaf_tensor, 2500.0);
}

} // namespace eager_test
@@ -33,24 +33,14 @@

using namespace egr; // NOLINT

/*
AccumulationNode
|
ScaleNode
|
inp0
*/
namespace eager_test {

TEST(CrossBatchAccumulation, SingleScaleNode) {
// Prepare Device Contexts
InitEnv(paddle::platform::CPUPlace());

// Prepare Inputs
std::vector<egr::EagerTensor> target_tensors;
paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});

// Create Target Tensor
egr::EagerTensor tensor = CreateTensorWithValue(
ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
@@ -59,25 +49,19 @@ TEST(CrossBatchAccumulation, SingleScaleNode) {

egr::EagerTensor leaf_tensor = egr::EagerTensor();
{
// Create ScaleNode
auto scale_node_ptr = std::make_shared<GradNodeScale>(1, 1);
scale_node_ptr->SetAttributes_scale(5.0 /*scale*/);

// Set grad in/out meta for node0
scale_node_ptr->SetDefaultGradInOutMeta();

// Create AccumulationNode
auto acc_node_ptr = std::make_shared<GradNodeAccumulation>();

// Connect Input Tensor and ScaleNode via AutoGradMeta
// Apply RetainGrad
AutogradMeta* auto_grad_meta = EagerUtils::autograd_meta(&target_tensor);
auto_grad_meta->SetGradNode(
std::dynamic_pointer_cast<GradNodeBase>(scale_node_ptr));
auto_grad_meta->SetSingleOutRankWithSlot(0, 0);
RetainGradForTensor(target_tensor); // result: 1.0

// Connect ScaleNode -> AccumulationNode via Edge
auto meta = AutogradMeta();
meta.SetSingleOutRankWithSlot(0, 0);
meta.SetGradNode(acc_node_ptr);
@@ -90,31 +74,15 @@ TEST(CrossBatchAccumulation, SingleScaleNode) {
RetainGradForTensor(leaf_tensor);
}

// Use Empty Grad Tensor
RunBackward(target_tensors, {});

// target tensor's grad should remain the same
PADDLE_ENFORCE(
CompareGradTensorWithValue<float>(target_tensor, 1.0) == true,
paddle::platform::errors::Fatal("Numerical Error, Expected %f", 1.0));
CompareGradTensorWithValue<float>(target_tensor, 1.0);
CompareGradTensorWithValue<float>(leaf_tensor, 5.0);

// Leaf tensor should keep accumulated grad
PADDLE_ENFORCE(
CompareGradTensorWithValue<float>(leaf_tensor, 5.0) == true,
paddle::platform::errors::Fatal("Numerical Error, Expected %f", 5.0));

// Cross-Batch Accumulation
RunBackward(target_tensors, {});

// target tensor's grad should remain the same
PADDLE_ENFORCE(
CompareGradTensorWithValue<float>(target_tensor, 1.0) == true,
paddle::platform::errors::Fatal("Numerical Error, Expected %f", 1.0));

// Leaf tensor should keep accumulated grad
PADDLE_ENFORCE(
CompareGradTensorWithValue<float>(leaf_tensor, 10.0) == true,
paddle::platform::errors::Fatal("Numerical Error, Expected %f", 10.0));
CompareGradTensorWithValue<float>(target_tensor, 1.0);
CompareGradTensorWithValue<float>(leaf_tensor, 10.0);
}

} // namespace eager_test
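Across the test files above, the commit drops the PADDLE_ENFORCE wrappers and calls CompareGradTensorWithValue directly, which only makes sense if the helper asserts on its own rather than merely returning a bool. That reading is an inference from this diff, not verified against paddle/fluid/eager/tests/test_utils.h. A generic, deliberately renamed sketch of such an internally asserting helper, using a plain std::vector instead of egr::EagerTensor:

```cpp
#include <cstddef>
#include <vector>
#include "gtest/gtest.h"

// Hypothetical helper: checks every element and reports mismatches through
// gtest, so a bare call inside a TEST body is already a meaningful assertion.
void CompareVectorWithValue(const std::vector<float>& data, float expected) {
  for (std::size_t i = 0; i < data.size(); ++i) {
    EXPECT_FLOAT_EQ(data[i], expected) << "Mismatch at index " << i;
  }
}

TEST(Sketch, BareCallStillAsserts) {
  std::vector<float> grad(8, 5.0f);
  CompareVectorWithValue(grad, 5.0f);  // fails the test by itself on mismatch
}
```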
52 changes: 4 additions & 48 deletions paddle/fluid/eager/tests/task_tests/hook_test.cc
@@ -30,7 +30,6 @@

#include "paddle/fluid/eager/tests/test_utils.h"

// TODO(jiabin): remove nolint here!!!
using namespace egr; // NOLINT

namespace eager_test {
Expand Down Expand Up @@ -61,19 +60,6 @@ egr::EagerTensor hook_function(const egr::EagerTensor& t) {
return ret;
}

/*
AccumulationNode
|
|retain_grad
|hook
|
ScaleNode
|
|retain_grad
|hook
|
inp0
*/
TEST(RetainGrad, HookBeforeRetainGrad) {
InitEnv(paddle::platform::CPUPlace());

@@ -144,33 +130,12 @@ TEST(RetainGrad, HookBeforeRetainGrad) {
RetainGradForTensor(leaf_tensor); // result: 4.0*5.0 + 3.0 = 23.0