Skip to content

Commit

Permalink
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
Browse files Browse the repository at this point in the history
… dygraph_second_pr_utils
  • Loading branch information
jim19930609 committed Nov 24, 2021
2 parents 2ba9580 + 1799c03 commit 51fb833
Show file tree
Hide file tree
Showing 65 changed files with 1,021 additions and 136 deletions.
1 change: 0 additions & 1 deletion paddle/fluid/eager/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,4 @@ add_subdirectory(api)
# Build targets for the eager-mode (dynamic graph) autograd core.
add_subdirectory(tests)
# grad_node_info / autograd_meta: core autograd bookkeeping, depend only on pten.
cc_library(grad_node_info SRCS grad_node_info.cc DEPS pten pten_api)
cc_library(autograd_meta SRCS autograd_meta.cc DEPS pten pten_api)

# utils pulls in framework-level deps (operator registry, variable helpers)
# on top of the pten/autograd layers above.
cc_library(utils SRCS utils.cc DEPS pten pten_api global_utils layer proto_desc operator op_registry variable_helper memcpy scale_op autograd_meta)
18 changes: 18 additions & 0 deletions paddle/fluid/eager/api/all.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "paddle/fluid/eager/api/all.h"

namespace egr {} // namespace egr
17 changes: 17 additions & 0 deletions paddle/fluid/eager/api/all.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#pragma once

#include "paddle/fluid/eager/api/utils/global_utils.h"
1 change: 0 additions & 1 deletion paddle/fluid/eager/eager_tensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@

#pragma once
// framework deps
#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/variable.h"
Expand Down
91 changes: 91 additions & 0 deletions paddle/fluid/eager/tensor_wrapper.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/**
 * TensorWrapper is designed to copy a tensor for use in autograd mode.
 *
 * In autograd usage we need to pass autograd_meta to the backward
 * computation; however, adding too many autograd-related methods to the
 * tensor interface is not a good choice.
 *
 * TensorWrapper therefore keeps the autograd info needed for backward:
 * fully for input vars, while for output vars it copies the tensor
 * without grad info.
 **/

#pragma once
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/fluid/eager/grad_node_info.h"
#include "paddle/fluid/eager/utils.h"

namespace egr {
class TensorWrapper {
public:
TensorWrapper() = default;
explicit TensorWrapper(const egr::EagerTensor& tensor,
bool full_reserved = false) {
/**
* Normally, we should fully reserved all non-output or non-leaf fwd tensor
* here. And for fwd output tensor, we should not reserve its autogradmeta,
* to avoid recursive depends on GradNodeBase
* **/
full_reserved_ = full_reserved;
if (full_reserved_) {
VLOG(6) << "Fully reserved tensor: " << tensor.name();
intermidiate_tensor_ = tensor;
return;
}

// shallow copy tensor_impl here
intermidiate_tensor_.set_impl(tensor.impl());
intermidiate_tensor_.ResetVar(tensor.Var());
intermidiate_tensor_.set_name(tensor.name() + "@Saved");
PADDLE_ENFORCE_NOT_NULL(
EagerUtils::unsafe_autograd_meta(tensor),
paddle::platform::errors::Fatal(
"Full reserved Tensor should not have null autograd meta, since "
"tensor_wrapper is used to build backward info. There is no way "
"for us to build it with null autograd_meta."));
// copy output_rank
out_rank_info_ = EagerUtils::OutRankInfo(tensor);
}

egr::EagerTensor recover(const std::shared_ptr<GradNodeBase>& grad_node) {
VLOG(6) << "Recover tensor for wrapper";
if ((!intermidiate_tensor_.defined()) &&
(!intermidiate_tensor_.Var().IsInitialized())) {
VLOG(6) << "Return NULL tensor Here. ";
return egr::EagerTensor();
}

// if it's full_reserved just return the full copy of tensor
if (full_reserved_) {
return intermidiate_tensor_;
} else {
std::shared_ptr<GradNodeBase> new_grad_node = grad_node;
auto p_ab_autograd_meta =
std::make_shared<AutogradMeta>(Edge(new_grad_node, out_rank_info_));
intermidiate_tensor_.set_autograd_meta(
std::static_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
p_ab_autograd_meta));
return intermidiate_tensor_;
}
}

private:
bool full_reserved_ = false;
std::pair<size_t, size_t> out_rank_info_;
egr::EagerTensor intermidiate_tensor_;
};
} // namespace egr
2 changes: 1 addition & 1 deletion paddle/fluid/eager/tests/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
# eager_deps: common link dependencies shared by all eager-mode test targets.
set(eager_deps pten pten_api pten_tensor utils)
# NOTE(review): the set() above looks like diff residue (the pre-change value);
# the set() below overrides it, so the longer list is what takes effect.
set(eager_deps pten pten_api pten_tensor utils global_utils autograd_meta grad_node_info)
add_subdirectory(data_structure_tests)
add_subdirectory(task_tests)
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
# Unit tests for the eager-mode autograd data structures.
# ${eager_deps} is defined by the parent tests/CMakeLists.txt.
cc_test(test_egr_ds_eager_tensor SRCS eager_tensor_test.cc DEPS ${eager_deps} )
cc_test(test_egr_ds_auotgrad_meta SRCS autograd_meta_test.cc DEPS ${eager_deps} grad_node_info)
cc_test(test_egr_ds_grad_node_info SRCS grad_node_info_test.cc DEPS ${eager_deps} grad_node_info)
cc_test(test_egr_ds_tensor_wrapper SRCS tensor_wrapper_test.cc DEPS ${eager_deps} grad_node_info utils)
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,9 @@
#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/eager/grad_node_info.h"
#include "paddle/pten/api/lib/utils/allocator.h"
// Forward declaration only — presumably to avoid pulling tensor_wrapper.h
// into this shared test header (no such include is visible here).
namespace egr {
class TensorWrapper;
}

namespace eager_test {
class GradTestNode : public egr::GradNodeBase {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "glog/logging.h"
#include "gtest/gtest.h"

#include "paddle/fluid/eager/tensor_wrapper.h"
#include "paddle/fluid/eager/tests/data_structure_tests/grad_node_test.h"
#include "paddle/fluid/eager/utils.h"

// Covers the three TensorWrapper paths: full reserve, non-full-reserve
// (reconstruct autograd meta on recover), and recovering an empty wrapper.
TEST(TensorWrapper, Basic) {
  VLOG(6) << "Test Full reserved";
  egr::EagerTensor et1;
  pten::DenseTensorMeta meta = pten::DenseTensorMeta(
      pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
  std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
      std::make_shared<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace()),
      meta);
  auto* dt_ptr = dt->mutable_data<float>();
  dt_ptr[0] = 5.0f;
  dt_ptr[1] = 10.0f;
  et1.set_impl(dt);
  // Create grad node;
  auto grad_test_node0 = std::make_shared<eager_test::GradTestNode>(
      /* val */ 5.0, /* in_num */ 2, /* out_num */ 2);
  egr::Edge edge0(grad_test_node0, 1, 2);
  auto auto_grad0 = std::make_shared<egr::AutogradMeta>(edge0);
  et1.set_autograd_meta(auto_grad0);
  et1.set_name("et1");
  auto tw0 = egr::TensorWrapper(et1, true);
  auto recover_et1 = tw0.recover(std::make_shared<eager_test::GradTestNode>());
  // Full reserve keeps the original name and autograd (rank) info intact.
  CHECK_EQ(recover_et1.name(), std::string("et1"));
  CHECK_EQ(egr::EagerUtils::OutRankInfo(recover_et1).first,
           egr::EagerUtils::OutRankInfo(et1).first);
  CHECK_EQ(egr::EagerUtils::OutRankInfo(recover_et1).second,
           egr::EagerUtils::OutRankInfo(et1).second);
  VLOG(6) << "Test reconstruct";
  egr::EagerTensor et2;
  pten::DenseTensorMeta meta2 = pten::DenseTensorMeta(
      pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
  std::shared_ptr<pten::DenseTensor> dt2 = std::make_shared<pten::DenseTensor>(
      std::make_shared<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace()),
      meta2);
  // BUGFIX: was `dt->mutable_data<float>()`, which wrote into et1's buffer
  // and left et2's tensor data uninitialized. Must write into dt2.
  auto* dt_ptr2 = dt2->mutable_data<float>();
  dt_ptr2[0] = 6.0f;
  dt_ptr2[1] = 11.0f;
  et2.set_impl(dt2);
  et2.set_name("et2");
  auto grad_test_node1 =
      std::make_shared<eager_test::GradTestNode>(/* val */ 5.0, 2, 2);
  egr::Edge edge1(grad_test_node1, 1, 2);
  auto auto_grad1 = std::make_shared<egr::AutogradMeta>(edge1);
  et2.set_autograd_meta(auto_grad1);
  auto tw1 = egr::TensorWrapper(et2, false);
  auto recover_et2 = tw1.recover(grad_test_node1);
  // Non-full-reserve renames the saved tensor with the "@Saved" suffix.
  CHECK_EQ(recover_et2.name(), std::string("et2@Saved"));
  CHECK_EQ(egr::EagerUtils::OutRankInfo(recover_et2).first,
           egr::EagerUtils::OutRankInfo(et2).first);
  CHECK_EQ(egr::EagerUtils::OutRankInfo(recover_et2).second,
           egr::EagerUtils::OutRankInfo(et2).second);
  // Test Raw recover: an empty wrapper must return an uninitialized tensor.
  egr::EagerTensor et3;
  auto tw2 = egr::TensorWrapper(et3, true);
  CHECK(
      tw2.recover(std::make_shared<eager_test::GradTestNode>()).initialized() ==
      false);
}
Loading

0 comments on commit 51fb833

Please sign in to comment.