From 31bdb3f3cc98a38a3ec4951f26dd06260cdc4695 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 2 Oct 2017 14:03:38 -0700 Subject: [PATCH 01/11] tmp --- paddle/framework/operator.h | 98 +++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 73e53a4176db3..f807909650f00 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -317,6 +317,104 @@ class ExecutionContext : public InferShapeContext { const platform::DeviceContext& device_context_; }; +class CompileTimeInferShapeContext : public InferShapeContextBase { + public: + CompileTimeInferShapeContext(const OperatorBase& op, const Scope& scope) + : op_(op), scope_(scope) {} + + bool HasInput(const std::string& name) const { + auto ipt = op_.Input(name); + auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); + return var != nullptr; + } + + bool HasOutput(const std::string& name) const { + auto ipt = op_.Output(name); + auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); + return var != nullptr; + } + + bool HasInputs(const std::string& name) const { + auto inputs = op_.Inputs(name); + if (inputs.size() == 0UL) { + return false; + } + for (auto& input : inputs) { + if (scope_.FindVar(input) == nullptr) { + return false; + } + } + return true; + } + + bool HasOutputs(const std::string& name) const { + auto outputs = op_.Outputs(name); + if (outputs.size() == 0UL) { + return false; + } + for (auto& output : outputs) { + if (scope_.FindVar(output) == nullptr) { + return false; + } + } + return true; + } + + DDim GetInputDim(const std::string& name) const { + return GetDim(op_.Input(name)); + } + + void SetInputDim(const std::string& name, const DDim& dim) { + SetDim(op_.Input(name), dim); + } + + DDim GetOutputDim(const std::string& name) const { + return GetDim(op_.Output(name)); + } + + void SetOutputDim(const std::string& name, const DDim& dim) { + SetDim(op_.Output(name), dim); + } + + AttrReader Attrs() const { return AttrReader(op_.Attrs()); } + + const std::vector<std::string>& Inputs(const std::string& name) const { + return op_.Inputs(name); + } + + const std::vector<std::string>& Outputs(const std::string& name) const { + return op_.Outputs(name); + } + + private: + template <bool Allocate> + Tensor* GetTensor(const std::string& name) const { + Tensor* t = nullptr; + auto* var = scope_.FindVar(name); + if (!var->IsType<LoDTensor>() && !var->IsType<Tensor>()) { + if (Allocate) { + t = var->GetMutable<LoDTensor>(); + } else { + PADDLE_THROW("Variable(%s) should be tensor", name); + } + } else { + t = GetTensorFromVar(scope_.FindVar(name)); + } + return t; + } + + DDim GetDim(const std::string& name) const { + return GetTensor<false>(name)->dims(); + } + + void SetDim(const std::string& name, const DDim& dim) { + GetTensor<true>(name)->Resize(dim); + } + + const OperatorBase& op_; + const Scope& scope_; +}; + class RuntimeInferShapeContext : public InferShapeContextBase { public: RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope) From d550380ed92ed9c762ff0248780a3c28dbf27416 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 2 Oct 2017 16:52:11 -0700 Subject: [PATCH 02/11] add CompileTimeInferShapeContext --- paddle/framework/CMakeLists.txt | 2 +- paddle/framework/block_desc.cc | 5 ++ paddle/framework/block_desc.h | 2 + paddle/framework/operator.h | 84 +++++++++++++-------------------- 4 files changed, 41 insertions(+), 52 deletions(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 5d394132b7f3d..a2efcdb55cfc7 100644 
--- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -23,7 +23,7 @@ cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute) cc_test(op_proto_maker_test SRCS op_proto_maker_test.cc DEPS op_proto_maker) cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto proto_desc) -cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope) +cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope proto_desc) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator proto_desc) diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc index 9570aedfdda33..670533a3fe35d 100644 --- a/paddle/framework/block_desc.cc +++ b/paddle/framework/block_desc.cc @@ -34,6 +34,11 @@ VarDescBind *BlockDescBind::Var(const std::string &name) const { return it->second.get(); } +bool BlockDescBind::HasVar(const std::string &name) const { + auto it = vars_.find(name); + return it != vars_.end(); +} + std::vector BlockDescBind::AllVars() const { std::vector res; for (const auto &p : vars_) { diff --git a/paddle/framework/block_desc.h b/paddle/framework/block_desc.h index 1a1135bab44cd..41cf1dc385aa8 100644 --- a/paddle/framework/block_desc.h +++ b/paddle/framework/block_desc.h @@ -45,6 +45,8 @@ class BlockDescBind { VarDescBind *Var(const std::string &name_bytes) const; + bool HasVar(const std::string &var_name) const; + std::vector AllVars() const; BlockDescBind *ParentBlock() const; diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index f807909650f00..2874237b9c3b4 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -319,100 +319,82 @@ class ExecutionContext : public InferShapeContext { class CompileTimeInferShapeContext : public InferShapeContextBase { public: - CompileTimeInferShapeContext(const OperatorBase& op, const Scope& scope) - : op_(op), scope_(scope) {} + CompileTimeInferShapeContext(const OpDescBind& op, const BlockDescBind& block) + : op_(op), block_(block) {} bool HasInput(const std::string& name) const { - auto ipt = op_.Input(name); - auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); - return var != nullptr; + const std::vector& input_names = op_.Input(name); + PADDLE_ENFORCE_EQ(input_names.size(), 1UL, "Inputs(%s) length is not 1", + name); + return block_.HasVar(input_names[0]); } bool HasOutput(const std::string& name) const { - auto ipt = op_.Output(name); - auto* var = ipt == kEmptyVarName ? 
nullptr : scope_.FindVar(ipt); - return var != nullptr; + const std::vector& output_names = op_.Output(name); + PADDLE_ENFORCE_EQ(output_names.size(), 1UL, "Outputs(%s) length is not 1", + name); + return block_.HasVar(output_names[0]); } bool HasInputs(const std::string& name) const { - auto inputs = op_.Inputs(name); - if (inputs.size() == 0UL) { - return false; - } - for (auto& input : inputs) { - if (scope_.FindVar(input) == nullptr) { - return false; - } + const std::vector& input_names = op_.Input(name); + PADDLE_ENFORCE_GT(input_names.size(), 0UL, "Inputs(%s) length is 0", name); + for (auto& input : input_names) { + if (!block_.HasVar(input)) return false; } return true; } bool HasOutputs(const std::string& name) const { - auto outputs = op_.Outputs(name); - if (outputs.size() == 0UL) { - return false; - } - for (auto& output : outputs) { - if (scope_.FindVar(output) == nullptr) { - return false; - } + const std::vector& output_names = op_.Output(name); + PADDLE_ENFORCE_GT(output_names.size(), 0UL, "Inputs(%s) length is 0", name); + for (auto& output : output_names) { + if (!block_.HasVar(name)) return false; } return true; } DDim GetInputDim(const std::string& name) const { - return GetDim(op_.Input(name)); + std::vector ddims = GetInputsDim(name); + PADDLE_ENFORCE_EQ(ddims.size(), 1UL, "Inputs(%s) length is not 1", name); + return ddims[0]; } void SetInputDim(const std::string& name, const DDim& dim) { - SetDim(op_.Input(name), dim); + SetInputsDim(name, {dim}); } DDim GetOutputDim(const std::string& name) const { - return GetDim(op_.Output(name)); + std::vector ddims = GetOutputsDim(name); + PADDLE_ENFORCE_EQ(ddims.size(), 1UL, "Outputs(%s) length is not 1", name); + return ddims[0]; } void SetOutputDim(const std::string& name, const DDim& dim) { - SetDim(op_.Output(name), dim); + SetOutputsDim(name, {dim}); } - AttrReader Attrs() const { return AttrReader(op_.Attrs()); } + AttrReader Attrs() const { return AttrReader(op_.GetAttrMap()); } const std::vector& Inputs(const std::string& name) const { - return op_.Inputs(name); + return op_.Input(name); } const std::vector& Outputs(const std::string& name) const { - return op_.Outputs(name); + return op_.Output(name); } private: - template - Tensor* GetTensor(const std::string& name) const { - Tensor* t = nullptr; - auto* var = scope_.FindVar(name); - if (!var->IsType() && !var->IsType()) { - if (Allocate) { - t = var->GetMutable(); - } else { - PADDLE_THROW("Variable(%s) should be tensor", name); - } - } else { - t = GetTensorFromVar(scope_.FindVar(name)); - } - return t; - } - DDim GetDim(const std::string& name) const { - return GetTensor(name)->dims(); + return framework::make_ddim(block_.Var(name)->Shape()); } void SetDim(const std::string& name, const DDim& dim) { - GetTensor(name)->Resize(dim); + block_.Var(name)->SetShape(framework::vectorize(dim)); } - const OperatorBase& op_; - const Scope& scope_; + const OpDescBind& op_; + const BlockDescBind& block_; }; class RuntimeInferShapeContext : public InferShapeContextBase { From 455436e5148ad0a84cae89e46931e4785c57870d Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Mon, 2 Oct 2017 18:04:00 -0700 Subject: [PATCH 03/11] fix compile problem --- paddle/framework/operator.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 2874237b9c3b4..5bb5c8e2f9f1e 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -22,6 +22,7 @@ limitations under the License. 
*/ #include "op_info.h" #include "paddle/framework/attribute.h" +#include "paddle/framework/block_desc.h" #include "paddle/framework/data_type.h" #include "paddle/framework/framework.pb.h" #include "paddle/framework/lod_tensor.h" @@ -349,7 +350,7 @@ class CompileTimeInferShapeContext : public InferShapeContextBase { const std::vector& output_names = op_.Output(name); PADDLE_ENFORCE_GT(output_names.size(), 0UL, "Inputs(%s) length is 0", name); for (auto& output : output_names) { - if (!block_.HasVar(name)) return false; + if (!block_.HasVar(output)) return false; } return true; } From 81fc7774ec02e23163014e66eeddeea43e5dd703 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Tue, 3 Oct 2017 16:59:32 -0700 Subject: [PATCH 04/11] optimize infershape context --- paddle/framework/operator.h | 64 ++++++++++++++++++++----------------- 1 file changed, 34 insertions(+), 30 deletions(-) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 5bb5c8e2f9f1e..99f721cc671e5 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -323,74 +323,76 @@ class CompileTimeInferShapeContext : public InferShapeContextBase { CompileTimeInferShapeContext(const OpDescBind& op, const BlockDescBind& block) : op_(op), block_(block) {} - bool HasInput(const std::string& name) const { + bool HasInput(const std::string& name) const override { const std::vector& input_names = op_.Input(name); PADDLE_ENFORCE_EQ(input_names.size(), 1UL, "Inputs(%s) length is not 1", name); return block_.HasVar(input_names[0]); } - bool HasOutput(const std::string& name) const { + bool HasOutput(const std::string& name) const override { const std::vector& output_names = op_.Output(name); PADDLE_ENFORCE_EQ(output_names.size(), 1UL, "Outputs(%s) length is not 1", name); return block_.HasVar(output_names[0]); } - bool HasInputs(const std::string& name) const { + bool HasInputs(const std::string& name) const override { const std::vector& input_names = op_.Input(name); - PADDLE_ENFORCE_GT(input_names.size(), 0UL, "Inputs(%s) length is 0", name); + PADDLE_ENFORCE(!input_names.empty(), "Inputs(%s) length is 0", name); for (auto& input : input_names) { if (!block_.HasVar(input)) return false; } return true; } - bool HasOutputs(const std::string& name) const { + bool HasOutputs(const std::string& name) const override { const std::vector& output_names = op_.Output(name); - PADDLE_ENFORCE_GT(output_names.size(), 0UL, "Inputs(%s) length is 0", name); + PADDLE_ENFORCE(!output_names.empty(), "Inputs(%s) length is 0", name); for (auto& output : output_names) { if (!block_.HasVar(output)) return false; } return true; } - DDim GetInputDim(const std::string& name) const { + DDim GetInputDim(const std::string& name) const override { std::vector ddims = GetInputsDim(name); PADDLE_ENFORCE_EQ(ddims.size(), 1UL, "Inputs(%s) length is not 1", name); return ddims[0]; } - void SetInputDim(const std::string& name, const DDim& dim) { + void SetInputDim(const std::string& name, const DDim& dim) override { SetInputsDim(name, {dim}); } - DDim GetOutputDim(const std::string& name) const { + DDim GetOutputDim(const std::string& name) const override { std::vector ddims = GetOutputsDim(name); PADDLE_ENFORCE_EQ(ddims.size(), 1UL, "Outputs(%s) length is not 1", name); return ddims[0]; } - void SetOutputDim(const std::string& name, const DDim& dim) { + void SetOutputDim(const std::string& name, const DDim& dim) override { SetOutputsDim(name, {dim}); } - AttrReader Attrs() const { return AttrReader(op_.GetAttrMap()); } + AttrReader Attrs() 
const override { return AttrReader(op_.GetAttrMap()); } - const std::vector<std::string>& Inputs(const std::string& name) const { + const std::vector<std::string>& Inputs( + const std::string& name) const override { return op_.Input(name); } - const std::vector<std::string>& Outputs(const std::string& name) const { + const std::vector<std::string>& Outputs( + const std::string& name) const override { return op_.Output(name); } private: - DDim GetDim(const std::string& name) const { + DDim GetDim(const std::string& name) const override { return framework::make_ddim(block_.Var(name)->Shape()); } - void SetDim(const std::string& name, const DDim& dim) { + void SetDim(const std::string& name, const DDim& dim) override { block_.Var(name)->SetShape(framework::vectorize(dim)); } @@ -403,21 +405,21 @@ class RuntimeInferShapeContext : public InferShapeContextBase { RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope) : op_(op), scope_(scope) {} - bool HasInput(const std::string& name) const { + bool HasInput(const std::string& name) const override { auto ipt = op_.Input(name); auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); return var != nullptr; } - bool HasOutput(const std::string& name) const { + bool HasOutput(const std::string& name) const override { auto ipt = op_.Output(name); auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); return var != nullptr; } - bool HasInputs(const std::string& name) const { + bool HasInputs(const std::string& name) const override { auto inputs = op_.Inputs(name); - if (inputs.size() == 0UL) { + if (inputs.empty()) { return false; } for (auto& input : inputs) { @@ -428,9 +430,9 @@ class RuntimeInferShapeContext : public InferShapeContextBase { return true; } - bool HasOutputs(const std::string& name) const { + bool HasOutputs(const std::string& name) const override { auto outputs = op_.Outputs(name); - if (outputs.size() == 0UL) { + if (outputs.empty()) { return false; } for (auto& output : outputs) { @@ -441,29 +443,31 @@ class RuntimeInferShapeContext : public InferShapeContextBase { return true; } - DDim GetInputDim(const std::string& name) const { + DDim GetInputDim(const std::string& name) const override { return GetDim(op_.Input(name)); } - void SetInputDim(const std::string& name, const DDim& dim) { + void SetInputDim(const std::string& name, const DDim& dim) override { SetDim(op_.Input(name), dim); } - DDim GetOutputDim(const std::string& name) const { + DDim GetOutputDim(const std::string& name) const override { return GetDim(op_.Output(name)); } - void SetOutputDim(const std::string& name, const DDim& dim) { + void SetOutputDim(const std::string& name, const DDim& dim) override { SetDim(op_.Output(name), dim); } - AttrReader Attrs() const { return AttrReader(op_.Attrs()); } + AttrReader Attrs() const override { return AttrReader(op_.Attrs()); } - const std::vector<std::string>& Inputs(const std::string& name) const { + const std::vector<std::string>& Inputs( + const std::string& name) const override { return op_.Inputs(name); } - const std::vector<std::string>& Outputs(const std::string& name) const { + const std::vector<std::string>& Outputs( + const std::string& name) const override { return op_.Outputs(name); } @@ -484,11 +488,11 @@ class RuntimeInferShapeContext : public InferShapeContextBase { return t; } - DDim GetDim(const std::string& name) const { + DDim GetDim(const std::string& name) const override { return GetTensor<false>(name)->dims(); } - void SetDim(const std::string& name, const DDim& dim) { + void SetDim(const std::string& name, const DDim& dim) override { GetTensor<true>(name)->Resize(dim); } From 
ab9545aa95fb482e7b51b58e0abe2191c9ef3bea Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 4 Oct 2017 00:44:07 -0700 Subject: [PATCH 05/11] add shape_inference_map --- paddle/framework/CMakeLists.txt | 4 +- paddle/framework/op_registry.h | 4 ++ paddle/framework/shape_inference.h | 1 + paddle/framework/shape_inference_map.cc | 57 +++++++++++++++++++++++++ paddle/framework/shape_inference_map.h | 56 ++++++++++++++++++++++++ 5 files changed, 121 insertions(+), 1 deletion(-) create mode 100644 paddle/framework/shape_inference_map.cc create mode 100644 paddle/framework/shape_inference_map.h diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index a2efcdb55cfc7..986b45451fe71 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -26,8 +26,10 @@ cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto proto_desc) cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope proto_desc) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) +cc_library(shape_inference_map SRCS shape_inference_map.cc DEPS op_info operator) + cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator proto_desc) -cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info) +cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info shape_inference_map) cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry) cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry sum_op) diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 4ee2c7d27561c..f04b6c503a95e 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -26,6 +26,7 @@ limitations under the License. */ #include "paddle/framework/grad_op_builder.h" #include "paddle/framework/operator.h" #include "paddle/framework/scope.h" +#include "paddle/framework/shape_inference_map.h" namespace paddle { namespace framework { @@ -54,9 +55,12 @@ class OpRegistry { const std::string& grad_op_type) { OperatorRegistrar reg(op_type.c_str()); reg.info.grad_op_type_ = grad_op_type; + ShapeInferenceMap::Instance().CreateOpWithKernel(reg.info, op_type); // register gradient op if (!grad_op_type.empty()) { OperatorRegistrar grad_reg(grad_op_type.c_str()); + ShapeInferenceMap::Instance().CreateOpWithKernel(grad_reg.info, + grad_op_type); } } diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index bc8af0eb3ec7e..ac6f238638cfd 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include "paddle/framework/attribute.h" #include "paddle/framework/ddim.h" namespace paddle { diff --git a/paddle/framework/shape_inference_map.cc b/paddle/framework/shape_inference_map.cc new file mode 100644 index 0000000000000..1a27037221a9e --- /dev/null +++ b/paddle/framework/shape_inference_map.cc @@ -0,0 +1,57 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/framework/shape_inference_map.h" + +namespace paddle { +namespace framework { + +static VariableNameMap ConvertOpProtoVarsToVarNameMap( + const google::protobuf::RepeatedPtrField<OpProto::Var>& op_proto_vars) { + VariableNameMap ret_val; + for (auto& var : op_proto_vars) { + ret_val[var.name()] = {}; + } + return ret_val; +} + +static ShapeInferenceMap* g_shape_inference_map = nullptr; + +ShapeInferenceMap& ShapeInferenceMap::Instance() { + if (g_shape_inference_map == nullptr) { + g_shape_inference_map = new ShapeInferenceMap(); + } + return *g_shape_inference_map; +} + +void ShapeInferenceMap::CreateOpWithKernel(const OpInfo& op_info, + const std::string& op_type) { + const VariableNameMap inputs = + ConvertOpProtoVarsToVarNameMap(op_info.Proto().inputs()); + const VariableNameMap outputs = + ConvertOpProtoVarsToVarNameMap(op_info.Proto().outputs()); + auto* op = op_info.Creator()(op_type, inputs, outputs, {}); + auto* op_with_kernel = dynamic_cast<OperatorWithKernel*>(op); + auto it = op_shape_inference_map_.find(op_type); + if (it != op_shape_inference_map_.end()) { + PADDLE_THROW("OpWithKernel(%s) is already registered for infer_shape", + op_type); + } + if (op_with_kernel != nullptr) { + op_shape_inference_map_[op_type] = op_with_kernel; + } +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/shape_inference_map.h b/paddle/framework/shape_inference_map.h new file mode 100644 index 0000000000000..fb126690268b1 --- /dev/null +++ b/paddle/framework/shape_inference_map.h @@ -0,0 +1,56 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include <unordered_map> + +#include "paddle/framework/op_info.h" +#include "paddle/framework/operator.h" +#include "paddle/framework/shape_inference.h" + +namespace paddle { +namespace framework { + +class ShapeInferenceMap { + public: + static ShapeInferenceMap& Instance(); + + const OperatorBase* GetOperator(const std::string& op_type) { + auto it = op_shape_inference_map_.find(op_type); + if (it == op_shape_inference_map_.end()) { + PADDLE_THROW("op with kernel for Op(%s) is not registered", op_type); + } + return it->second; + } + + void CreateOpWithKernel(const OpInfo& op_info, const std::string& op_type); + + OperatorWithKernel* GetOpWithKernel(const std::string& op_type) { + auto it = op_shape_inference_map_.find(op_type); + if (it == op_shape_inference_map_.end()) { + return nullptr; + } + return it->second; + } + + private: + ShapeInferenceMap() = default; + DISABLE_COPY_AND_ASSIGN(ShapeInferenceMap); + + std::unordered_map<std::string, OperatorWithKernel*> op_shape_inference_map_; +}; + +} // namespace framework +} // namespace paddle From 5917e09cde86401005261914964eca4ef54de193 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Wed, 4 Oct 2017 15:26:19 -0700 Subject: [PATCH 06/11] tmp work --- paddle/framework/op_registry.h | 4 +++ paddle/framework/operator.h | 2 +- paddle/framework/shape_inference_map.cc | 9 ++++-- paddle/framework/shape_inference_map.h | 8 ----- paddle/pybind/pybind.cc | 9 ++++++ .../v2/framework/tests/test_infer_shape.py | 29 +++++++++++++++++++ 6 files changed, 49 insertions(+), 12 deletions(-) create mode 100644 python/paddle/v2/framework/tests/test_infer_shape.py diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index f04b6c503a95e..8138ba117aac9 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -55,6 +55,10 @@ class OpRegistry { const std::string& grad_op_type) { OperatorRegistrar<OpType, ProtoMakerType> reg(op_type.c_str()); reg.info.grad_op_type_ = grad_op_type; + auto proto = reg.info.Proto(); + std::cout << "====== " << op_type << " =======" << std::endl; + std::cout << proto.SerializeAsString() << std::endl; + std::cout << "=============" << std::endl; ShapeInferenceMap::Instance().CreateOpWithKernel(reg.info, op_type); // register gradient op if (!grad_op_type.empty()) { OperatorRegistrar<GradOpType> grad_reg(grad_op_type.c_str()); diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 99f721cc671e5..458404af6d3c4 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -598,9 +598,9 @@ class OperatorWithKernel : public OperatorBase { }); } - protected: virtual void InferShape(InferShapeContextBase* ctx) const = 0; + protected: // indicate kernel DataType by input data. Defaultly all input data must be // same. 
virtual DataType IndicateDataType(const ExecutionContext& ctx) const { diff --git a/paddle/framework/shape_inference_map.cc b/paddle/framework/shape_inference_map.cc index 1a27037221a9e..bd2b8679841a9 100644 --- a/paddle/framework/shape_inference_map.cc +++ b/paddle/framework/shape_inference_map.cc @@ -37,10 +37,13 @@ ShapeInferenceMap& ShapeInferenceMap::Instance() { void ShapeInferenceMap::CreateOpWithKernel(const OpInfo& op_info, const std::string& op_type) { - const VariableNameMap inputs = - ConvertOpProtoVarsToVarNameMap(op_info.Proto().inputs()); + auto proto = op_info.Proto(); + std::cout << "========= " << op_type << " in======" << std::endl; + std::cout << proto.SerializeAsString() << std::endl; + std::cout << "========= " << op_type << " out======" << std::endl; + const VariableNameMap inputs = ConvertOpProtoVarsToVarNameMap(proto.inputs()); const VariableNameMap outputs = - ConvertOpProtoVarsToVarNameMap(op_info.Proto().outputs()); + ConvertOpProtoVarsToVarNameMap(proto.outputs()); auto* op = op_info.Creator()(op_type, inputs, outputs, {}); auto* op_with_kernel = dynamic_cast(op); auto it = op_shape_inference_map_.find(op_type); diff --git a/paddle/framework/shape_inference_map.h b/paddle/framework/shape_inference_map.h index fb126690268b1..6c7304f6c0ccf 100644 --- a/paddle/framework/shape_inference_map.h +++ b/paddle/framework/shape_inference_map.h @@ -27,14 +27,6 @@ class ShapeInferenceMap { public: static ShapeInferenceMap& Instance(); - const OperatorBase* GetOperator(const std::string& op_type) { - auto it = op_shape_inference_map_.find(op_type); - if (it == op_shape_inference_map_.end()) { - PADDLE_THROW("op with kernel for Op(%s) is not registered", op_type); - } - return it->second; - } - void CreateOpWithKernel(const OpInfo& op_info, const std::string& op_type); OperatorWithKernel* GetOpWithKernel(const std::string& op_type) { diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index f4121e9d71824..e11bcc0e0f055 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -223,6 +223,15 @@ All parameter, weight, gradient are variables in Paddle. 
desc.InitializationErrorString()); return OpRegistry::CreateOp(desc); }) + .def("infer_shape", + [](const OpDescBind &op_desc, BlockDescBind &block) { + auto &shape_inference_map = ShapeInferenceMap::Instance(); + auto *op = shape_inference_map.GetOpWithKernel(op_desc.Type()); + if (op != nullptr) { + auto ctx = CompileTimeInferShapeContext(op_desc, block); + op->InferShape(&ctx); + } + }) .def("backward", [](const OperatorBase &forwardOp, const std::unordered_set &no_grad_vars) { diff --git a/python/paddle/v2/framework/tests/test_infer_shape.py b/python/paddle/v2/framework/tests/test_infer_shape.py new file mode 100644 index 0000000000000..56d3a90123fa1 --- /dev/null +++ b/python/paddle/v2/framework/tests/test_infer_shape.py @@ -0,0 +1,29 @@ +import unittest +import paddle.v2.framework.core as core +from paddle.v2.framework.op import Operator + + +class TestInferShape(unittest.TestCase): + def test_sum_op(self): + prog = core.ProgramDesc.__create_program_desc__() + self.assertIsNotNone(prog) + block = prog.block(0) + self.assertIsNotNone(block) + + # prepare input/output + x1 = block.new_var("x1") + x1.set_shape([10, 20]) + x2 = block.new_var("x2") + x2.set_shape([10, 20]) + + out = block.new_var("out") + + # prepare the operator + sum_op_desc = block.append_op() + sum_op_desc.set_type("sum") + sum_op_desc.set_input("X", ["x1", "x2"]) + sum_op_desc.set_output("Out", ["out"]) + + sum_op = Operator("sum", X=["x1", "x2"], Out="out") + sum_op.infer_shape(sum_op_desc, block) + print(out.shape()) From 352af966d7a62a1ed4cedaf9562d05db8026fe23 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 5 Oct 2017 15:30:03 -0700 Subject: [PATCH 07/11] add python unit test --- paddle/framework/CMakeLists.txt | 4 +- paddle/framework/op_registry.h | 9 +-- paddle/framework/shape_inference.h | 4 ++ paddle/framework/shape_inference_map.cc | 60 ------------------- paddle/framework/shape_inference_map.h | 48 --------------- paddle/pybind/pybind.cc | 24 +++++--- .../v2/framework/tests/test_infer_shape.py | 46 ++++++++++++-- 7 files changed, 62 insertions(+), 133 deletions(-) delete mode 100644 paddle/framework/shape_inference_map.cc delete mode 100644 paddle/framework/shape_inference_map.h diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 986b45451fe71..a2efcdb55cfc7 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -26,10 +26,8 @@ cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto proto_desc) cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope proto_desc) cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry) -cc_library(shape_inference_map SRCS shape_inference_map.cc DEPS op_info operator) - cc_library(grad_op_builder SRCS grad_op_builder.cc DEPS operator proto_desc) -cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info shape_inference_map) +cc_library(op_registry SRCS op_registry.cc DEPS grad_op_builder op_proto_maker op_info) cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry) cc_test(grad_op_builder_test SRCS grad_op_builder_test.cc DEPS grad_op_builder op_registry sum_op) diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 8138ba117aac9..ee02da7b4dcb8 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -26,7 +26,6 @@ limitations under the License. 
*/ #include "paddle/framework/grad_op_builder.h" #include "paddle/framework/operator.h" #include "paddle/framework/scope.h" -#include "paddle/framework/shape_inference_map.h" namespace paddle { namespace framework { @@ -55,16 +54,10 @@ class OpRegistry { const std::string& grad_op_type) { OperatorRegistrar reg(op_type.c_str()); reg.info.grad_op_type_ = grad_op_type; - auto proto = reg.info.Proto(); - std::cout << "====== " << op_type << " =======" << std::endl; - std::cout << proto.SerializeAsString() << std::endl; - std::cout << "=============" << std::endl; - ShapeInferenceMap::Instance().CreateOpWithKernel(reg.info, op_type); + // register gradient op if (!grad_op_type.empty()) { OperatorRegistrar grad_reg(grad_op_type.c_str()); - ShapeInferenceMap::Instance().CreateOpWithKernel(grad_reg.info, - grad_op_type); } } diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index ac6f238638cfd..8189823c1943e 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -20,6 +20,10 @@ limitations under the License. */ namespace paddle { namespace framework { +class InferShapeContextBase; + +typedef std::function InferShapeFn; + class InferShapeContextBase { public: virtual ~InferShapeContextBase() {} diff --git a/paddle/framework/shape_inference_map.cc b/paddle/framework/shape_inference_map.cc deleted file mode 100644 index bd2b8679841a9..0000000000000 --- a/paddle/framework/shape_inference_map.cc +++ /dev/null @@ -1,60 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include "paddle/framework/shape_inference_map.h" - -namespace paddle { -namespace framework { - -static VariableNameMap ConvertOpProtoVarsToVarNameMap( - const google::protobuf::RepeatedPtrField& op_proto_vars) { - VariableNameMap ret_val; - for (auto& var : op_proto_vars) { - ret_val[var.name()] = {}; - } - return ret_val; -} - -static ShapeInferenceMap* g_shape_inference_map = nullptr; - -ShapeInferenceMap& ShapeInferenceMap::Instance() { - if (g_shape_inference_map == nullptr) { - g_shape_inference_map = new ShapeInferenceMap(); - } - return *g_shape_inference_map; -} - -void ShapeInferenceMap::CreateOpWithKernel(const OpInfo& op_info, - const std::string& op_type) { - auto proto = op_info.Proto(); - std::cout << "========= " << op_type << " in======" << std::endl; - std::cout << proto.SerializeAsString() << std::endl; - std::cout << "========= " << op_type << " out======" << std::endl; - const VariableNameMap inputs = ConvertOpProtoVarsToVarNameMap(proto.inputs()); - const VariableNameMap outputs = - ConvertOpProtoVarsToVarNameMap(proto.outputs()); - auto* op = op_info.Creator()(op_type, inputs, outputs, {}); - auto* op_with_kernel = dynamic_cast(op); - auto it = op_shape_inference_map_.find(op_type); - if (it != op_shape_inference_map_.end()) { - PADDLE_THROW("OpWithKernel(%s) is already registered for infer_shape", - op_type); - } - if (op_with_kernel != nullptr) { - op_shape_inference_map_[op_type] = op_with_kernel; - } -} - -} // namespace framework -} // namespace paddle diff --git a/paddle/framework/shape_inference_map.h b/paddle/framework/shape_inference_map.h deleted file mode 100644 index 6c7304f6c0ccf..0000000000000 --- a/paddle/framework/shape_inference_map.h +++ /dev/null @@ -1,48 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include - -#include "paddle/framework/op_info.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/shape_inference.h" - -namespace paddle { -namespace framework { - -class ShapeInferenceMap { - public: - static ShapeInferenceMap& Instance(); - - void CreateOpWithKernel(const OpInfo& op_info, const std::string& op_type); - - OperatorWithKernel* GetOpWithKernel(const std::string& op_type) { - auto it = op_shape_inference_map_.find(op_type); - if (it == op_shape_inference_map_.end()) { - return nullptr; - } - return it->second; - } - - private: - ShapeInferenceMap() = default; - DISABLE_COPY_AND_ASSIGN(ShapeInferenceMap); - - std::unordered_map op_shape_inference_map_; -}; - -} // namespace framework -} // namespace paddle diff --git a/paddle/pybind/pybind.cc b/paddle/pybind/pybind.cc index e11bcc0e0f055..2ad0344c094a6 100644 --- a/paddle/pybind/pybind.cc +++ b/paddle/pybind/pybind.cc @@ -223,15 +223,21 @@ All parameter, weight, gradient are variables in Paddle. 
desc.InitializationErrorString()); return OpRegistry::CreateOp(desc); }) - .def("infer_shape", - [](const OpDescBind &op_desc, BlockDescBind &block) { - auto &shape_inference_map = ShapeInferenceMap::Instance(); - auto *op = shape_inference_map.GetOpWithKernel(op_desc.Type()); - if (op != nullptr) { - auto ctx = CompileTimeInferShapeContext(op_desc, block); - op->InferShape(&ctx); - } - }) + .def_static("infer_shape", + [](OpDescBind &op_desc, BlockDescBind &block) { + auto op = OpRegistry::CreateOp(*op_desc.Proto()); + auto *op_with_kernel = + dynamic_cast(op.get()); + if (op_with_kernel != nullptr) { + auto ctx = CompileTimeInferShapeContext(op_desc, block); + op_with_kernel->InferShape(&ctx); + } else { + PADDLE_THROW( + "OP(%s) is not type of OperatorWithKernel, " + "should not call this function", + op_desc.Type()); + } + }) .def("backward", [](const OperatorBase &forwardOp, const std::unordered_set &no_grad_vars) { diff --git a/python/paddle/v2/framework/tests/test_infer_shape.py b/python/paddle/v2/framework/tests/test_infer_shape.py index 56d3a90123fa1..ec93aaf84370d 100644 --- a/python/paddle/v2/framework/tests/test_infer_shape.py +++ b/python/paddle/v2/framework/tests/test_infer_shape.py @@ -10,11 +10,13 @@ def test_sum_op(self): block = prog.block(0) self.assertIsNotNone(block) + shape = [10, 20] + # prepare input/output x1 = block.new_var("x1") - x1.set_shape([10, 20]) + x1.set_shape(shape) x2 = block.new_var("x2") - x2.set_shape([10, 20]) + x2.set_shape(shape) out = block.new_var("out") @@ -24,6 +26,40 @@ def test_sum_op(self): sum_op_desc.set_input("X", ["x1", "x2"]) sum_op_desc.set_output("Out", ["out"]) - sum_op = Operator("sum", X=["x1", "x2"], Out="out") - sum_op.infer_shape(sum_op_desc, block) - print(out.shape()) + print(type(sum_op_desc)) + print(type(block)) + core.Operator.infer_shape(sum_op_desc, block) + self.assertEqual(out.shape(), shape) + + def test_mul_op(self): + prog = core.ProgramDesc.__create_program_desc__() + self.assertIsNotNone(prog) + block = prog.block(0) + self.assertIsNotNone(block) + + x_shape = [10, 20] + y_shape = [20, 30] + + # prepare input/output + x1 = block.new_var("x") + x1.set_shape(x_shape) + x2 = block.new_var("y") + x2.set_shape(y_shape) + + out = block.new_var("out") + + # prepare the operator + mul_op_desc = block.append_op() + mul_op_desc.set_type("mul") + mul_op_desc.set_input("X", ["x"]) + mul_op_desc.set_input("Y", ["y"]) + mul_op_desc.set_output("Out", ["out"]) + mul_op_desc.set_attr("x_num_col_dims", 1) + mul_op_desc.set_attr("y_num_col_dims", 1) + + core.Operator.infer_shape(mul_op_desc, block) + self.assertEqual(out.shape(), [x_shape[0], y_shape[1]]) + + +if __name__ == '__main__': + unittest.main() From 628715d602491732f78f314d355760ca8ba326e3 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 5 Oct 2017 15:43:51 -0700 Subject: [PATCH 08/11] clean code --- paddle/framework/shape_inference.h | 5 ----- python/paddle/v2/framework/tests/test_infer_shape.py | 2 -- 2 files changed, 7 deletions(-) diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index 8189823c1943e..bc8af0eb3ec7e 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -14,16 +14,11 @@ limitations under the License. 
*/ #pragma once -#include "paddle/framework/attribute.h" #include "paddle/framework/ddim.h" namespace paddle { namespace framework { -class InferShapeContextBase; - -typedef std::function InferShapeFn; - class InferShapeContextBase { public: virtual ~InferShapeContextBase() {} diff --git a/python/paddle/v2/framework/tests/test_infer_shape.py b/python/paddle/v2/framework/tests/test_infer_shape.py index ec93aaf84370d..b38ec9c03740a 100644 --- a/python/paddle/v2/framework/tests/test_infer_shape.py +++ b/python/paddle/v2/framework/tests/test_infer_shape.py @@ -26,8 +26,6 @@ def test_sum_op(self): sum_op_desc.set_input("X", ["x1", "x2"]) sum_op_desc.set_output("Out", ["out"]) - print(type(sum_op_desc)) - print(type(block)) core.Operator.infer_shape(sum_op_desc, block) self.assertEqual(out.shape(), shape) From e043b386606afc95ffd9135c14866d5e3b77b642 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Thu, 5 Oct 2017 15:47:42 -0700 Subject: [PATCH 09/11] clean code --- paddle/framework/op_registry.h | 1 - 1 file changed, 1 deletion(-) diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index ee02da7b4dcb8..4ee2c7d27561c 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -54,7 +54,6 @@ class OpRegistry { const std::string& grad_op_type) { OperatorRegistrar reg(op_type.c_str()); reg.info.grad_op_type_ = grad_op_type; - // register gradient op if (!grad_op_type.empty()) { OperatorRegistrar grad_reg(grad_op_type.c_str()); From 8c7ee7a5cdc6e959ad55a6a24d3ffe4bc3f37144 Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 6 Oct 2017 11:41:59 -0700 Subject: [PATCH 10/11] optimize code --- paddle/framework/block_desc.cc | 3 +-- paddle/framework/shape_inference.h | 3 +++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc index 670533a3fe35d..01f50e1393606 100644 --- a/paddle/framework/block_desc.cc +++ b/paddle/framework/block_desc.cc @@ -35,8 +35,7 @@ VarDescBind *BlockDescBind::Var(const std::string &name) const { } bool BlockDescBind::HasVar(const std::string &name) const { - auto it = vars_.find(name); - return it != vars_.end(); + return vars_.find(name) != vars_.end(); } std::vector BlockDescBind::AllVars() const { diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index bc8af0eb3ec7e..74e0371e32811 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -19,6 +19,9 @@ limitations under the License. */ namespace paddle { namespace framework { +// TODO(longfei): Once after both CompileTimeInferShapeContext and +// RuntimeInferShapeContext get merged, we can rename InferShapeContextBase into +// InferShapeContext so to replace the current InferShapeContext. 
class InferShapeContextBase { public: virtual ~InferShapeContextBase() {} From 4acd5abaaa6c18109e505bf2b2ef07a03df25ccc Mon Sep 17 00:00:00 2001 From: qiaolongfei Date: Fri, 6 Oct 2017 13:25:56 -0700 Subject: [PATCH 11/11] update comment for input/output length check --- paddle/framework/operator.h | 26 ++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 458404af6d3c4..d7bc9c9ffb9d5 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -325,15 +325,21 @@ class CompileTimeInferShapeContext : public InferShapeContextBase { bool HasInput(const std::string& name) const override { const std::vector<std::string>& input_names = op_.Input(name); - PADDLE_ENFORCE_EQ(input_names.size(), 1UL, "Inputs(%s) length is not 1", - name); + auto length = input_names.size(); + PADDLE_ENFORCE_EQ(length, 1UL, + "Input(%s) should have only one value, " + "but it has %d now", + name, length); return block_.HasVar(input_names[0]); } bool HasOutput(const std::string& name) const override { const std::vector<std::string>& output_names = op_.Output(name); - PADDLE_ENFORCE_EQ(output_names.size(), 1UL, "Outputs(%s) length is not 1", - name); + auto length = output_names.size(); + PADDLE_ENFORCE_EQ(length, 1UL, + "Output(%s) should have only one value, " + "but it has %d now", + name, length); return block_.HasVar(output_names[0]); } @@ -357,7 +363,11 @@ class CompileTimeInferShapeContext : public InferShapeContextBase { DDim GetInputDim(const std::string& name) const override { std::vector<DDim> ddims = GetInputsDim(name); - PADDLE_ENFORCE_EQ(ddims.size(), 1UL, "Inputs(%s) length is not 1", name); + auto length = ddims.size(); + PADDLE_ENFORCE_EQ(length, 1UL, + "Input(%s) should have 1 value, " + "but it has %d now", + name, length); return ddims[0]; } @@ -367,7 +377,11 @@ class CompileTimeInferShapeContext : public InferShapeContextBase { DDim GetOutputDim(const std::string& name) const override { std::vector<DDim> ddims = GetOutputsDim(name); - PADDLE_ENFORCE_EQ(ddims.size(), 1UL, "Outputs(%s) length is not 1", name); + auto length = ddims.size(); + PADDLE_ENFORCE_EQ(length, 1UL, + "Output(%s) should have 1 value, " + "but it has %d now", + name, length); return ddims[0]; }
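
For reference, the usage pattern this series exposes is sketched below in Python; it condenses the test_infer_shape.py test added in PATCH 06, and the variable names and shapes are illustrative only, not part of the patches themselves.

import paddle.v2.framework.core as core

# Minimal sketch of the compile-time shape-inference flow: describe vars and
# an op in a BlockDesc, then ask the op to infer shapes against that block.
prog = core.ProgramDesc.__create_program_desc__()
block = prog.block(0)

# Only the inputs carry shapes up front; the output VarDesc starts shape-less.
x1 = block.new_var("x1")
x1.set_shape([10, 20])
x2 = block.new_var("x2")
x2.set_shape([10, 20])
out = block.new_var("out")

# Describe a sum op that reads x1/x2 and writes out.
sum_op_desc = block.append_op()
sum_op_desc.set_type("sum")
sum_op_desc.set_input("X", ["x1", "x2"])
sum_op_desc.set_output("Out", ["out"])

# CompileTimeInferShapeContext reads and writes shapes through the BlockDesc,
# so after infer_shape the output VarDesc carries the inferred shape.
core.Operator.infer_shape(sum_op_desc, block)
assert out.shape() == [10, 20]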