rename to ndarray_size
yongwww committed Jul 19, 2019
1 parent 25de6b3 commit e017ed3
Showing 9 changed files with 32 additions and 33 deletions.
4 changes: 2 additions & 2 deletions docs/api/python/topi.rst
@@ -97,7 +97,7 @@ List of operators
topi.repeat
topi.tile
topi.shape
-topi.size
+topi.ndarray_size
topi.layout_transform
topi.image.resize
topi.argsort
@@ -166,7 +166,7 @@ topi
.. autofunction:: topi.repeat
.. autofunction:: topi.tile
.. autofunction:: topi.shape
-.. autofunction:: topi.size
+.. autofunction:: topi.ndarray_size
.. autofunction:: topi.layout_transform
.. autofunction:: topi.argsort
.. autofunction:: topi.topk
4 changes: 2 additions & 2 deletions include/tvm/relay/attrs/transform.h
@@ -288,10 +288,10 @@ struct SequenceMaskAttrs : public tvm::AttrsNode<SequenceMaskAttrs> {
}; // struct SequenceMaskAttrs.

/*! \brief Attributes for ndarray_size operator */
-struct SizeAttrs : public tvm::AttrsNode<SizeAttrs> {
+struct NdarraySizeAttrs : public tvm::AttrsNode<NdarraySizeAttrs> {
DataType dtype;

-  TVM_DECLARE_ATTRS(SizeAttrs, "relay.attrs.SizeAttrs") {
+  TVM_DECLARE_ATTRS(NdarraySizeAttrs, "relay.attrs.NdarraySizeAttrs") {
TVM_ATTR_FIELD(dtype)
.describe("Target data type")
.set_default(NullValue<DataType>());
2 changes: 1 addition & 1 deletion python/tvm/relay/op/contrib/contrib.py
@@ -128,4 +128,4 @@ def ndarray_size(data, dtype="int32"):
result : tvm.relay.Expr
The number of elements of input tensor.
"""
-    return _make.size(data, dtype)
+    return _make.ndarray_size(data, dtype)
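After the rename, the Python entry point relay.op.contrib.ndarray_size is unchanged; only the packed function it calls moved from _make.size to _make.ndarray_size. A minimal usage sketch, assuming an LLVM-enabled build and the interpreter API of this era (not part of the commit):

import numpy as np
import tvm
from tvm import relay

x = relay.var("x", shape=(5, 11, 7), dtype="float32")
func = relay.Function([x], relay.op.contrib.ndarray_size(x, dtype="int32"))

x_data = np.random.uniform(size=(5, 11, 7)).astype("float32")
intrp = relay.create_executor("debug", ctx=tvm.cpu(), target="llvm")
res = intrp.evaluate(func)(x_data)
# res is a one-element tensor holding np.size(x_data), i.e. 5 * 11 * 7 = 385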
22 changes: 11 additions & 11 deletions src/relay/op/tensor/unary.cc
@@ -280,34 +280,34 @@ RELAY_REGISTER_OP("shape_of")
.set_attr<FTVMCompute>("FTVMCompute", ShapeOfCompute);


-TVM_REGISTER_NODE_TYPE(NumElementsAttrs);
+TVM_REGISTER_NODE_TYPE(NdarraySizeAttrs);

-bool NumElementsRel(const Array<Type>& types,
+bool NdarraySizeRel(const Array<Type>& types,
int num_inputs,
const Attrs& attrs,
const TypeReporter& reporter) {
CHECK_EQ(num_inputs, 1);
auto tt = types[0].as<TensorTypeNode>();
CHECK(tt != nullptr);
-  const auto* param = attrs.as<NumElementsAttrs>();
+  const auto* param = attrs.as<NdarraySizeAttrs>();
CHECK(param != nullptr);
reporter->Assign(types[1], TensorTypeNode::make({1}, param->dtype));
return true;
}

-Array<Tensor> NumElementsCompute(const Attrs& attrs,
+Array<Tensor> NdarraySizeCompute(const Attrs& attrs,
const Array<Tensor>& inputs,
const Type& out_type,
const Target& target) {
CHECK_EQ(inputs.size(), 1);
-  const auto* param = attrs.as<NumElementsAttrs>();
+  const auto* param = attrs.as<NdarraySizeAttrs>();
CHECK(param != nullptr);
-  return Array<Tensor>{topi::size(inputs[0], param->dtype)};
+  return Array<Tensor>{topi::ndarray_size(inputs[0], param->dtype)};
}

TVM_REGISTER_API("relay.op.contrib._make.size")
TVM_REGISTER_API("relay.op.contrib._make.ndarray_size")
.set_body_typed<Expr(Expr, DataType)>([](Expr data, DataType dtype) {
-  auto attrs = make_node<NumElementsAttrs>();
+  auto attrs = make_node<NdarraySizeAttrs>();
attrs->dtype = dtype;
static const Op& op = Op::Get("contrib.ndarray_size");
return CallNode::make(op, {data}, Attrs(attrs), {});
@@ -318,15 +318,15 @@ RELAY_REGISTER_OP("contrib.ndarray_size")
)code" TVM_ADD_FILELINE)
.set_num_inputs(1)
.set_attrs_type_key("relay.attrs.NumElementsAttrs")
.set_attrs_type_key("relay.attrs.NdarraySizeAttrs")
.add_argument("data", "Tensor", "The input tensor.")
.add_type_rel("NumElements", NumElementsRel)
.add_type_rel("NdarraySize", NdarraySizeRel)
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<TOpPattern>("TOpPattern", kInjective)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_support_level(10)
.set_attr<FTVMCompute>("FTVMCompute", NumElementsCompute);
.set_attr<FTVMCompute>("FTVMCompute", NdarraySizeCompute);

} // namespace relay
} // namespace tvm
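Note that NdarraySizeRel always assigns TensorType({1}, dtype): the result is a one-element tensor of the requested dtype regardless of the input's rank. A hedged sketch of how that surfaces in Python, assuming the Module/InferType pass API available around this commit (not part of the diff):

from tvm import relay

x = relay.var("x", shape=(5, 11, 7), dtype="float32")
f = relay.Function([x], relay.op.contrib.ndarray_size(x, dtype="int64"))
mod = relay.Module.from_expr(f)
mod = relay.transform.InferType()(mod)
# NdarraySizeRel assigned TensorType({1}, int64), independent of input rank:
print(mod["main"].ret_type)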
4 changes: 2 additions & 2 deletions tests/python/relay/test_op_level10.py
@@ -218,8 +218,8 @@ def test_shape_of():
def test_ndarray_size():
def verify_ndarray_size(shape):
x = relay.var("x", shape=shape)
-        func = relay.Function([x], relay.op.contrib.num_elements(x))
-        func = relay.ir_pass.infer_type(func)
+        func = relay.Function([x], relay.op.contrib.ndarray_size(x))
+        func = run_infer_type(func)

x_data = np.random.uniform(size=shape).astype("float32")
ref_res = np.size(x_data)
12 changes: 6 additions & 6 deletions topi/include/topi/transform.h
@@ -1231,13 +1231,13 @@ inline Tensor shape(const Tensor& src,
* \param tag output tensor tag.
* \return Tensor of input shape.
*/
-inline Tensor size(const Tensor& src,
-                   const Type& dtype,
-                   const std::string& name = "size",
-                   const std::string& tag = kInjective) {
+inline Tensor ndarray_size(const Tensor& src,
+                           const Type& dtype,
+                           const std::string& name = "ndarray_size",
+                           const std::string& tag = kInjective) {
int ndim = static_cast<int>(src->shape.size());
-  Array<Expr> out_size = {1};
-  return compute(out_size, [&](const Array<Var>& indices) {
+  Array<Expr> out_ndarray_size = {1};
+  return compute(out_ndarray_size, [&](const Array<Var>& indices) {
Expr ret = 1;
for (int i = 0; i < ndim; ++i) {
ret *= src->shape[i];
5 changes: 2 additions & 3 deletions topi/python/topi/transform.py
@@ -479,7 +479,7 @@ def sequence_mask(data, valid_length, mask_value=0, axis=0):
return cpp.sequence_mask(data, valid_length, mask_value, axis)


-def size(array, dtype="int32"):
+def ndarray_size(array, dtype="int32"):
"""Get the number of elements of input array
Parameters
@@ -495,5 +495,4 @@ def size(array, dtype="int32"):
result : tvm.Tensor
The resulting tensor.
"""
-    return cpp.size(array, dtype)
-
+    return cpp.ndarray_size(array, dtype)
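At the TOPI level the rename is equally mechanical: topi.ndarray_size now forwards to the renamed cpp.ndarray_size global. A sketch mirroring the updated test below, assuming the llvm target is enabled (not part of the commit):

import numpy as np
import tvm
import topi

A = tvm.placeholder((5, 11, 7), dtype="float32", name="A")
B = topi.ndarray_size(A, dtype="int32")  # one-element tensor of the element count

with tvm.target.create("llvm"):
    s = topi.generic.schedule_injective(B)
f = tvm.build(s, [A, B], "llvm", name="ndarray_size")

a = tvm.nd.array(np.random.uniform(size=(5, 11, 7)).astype("float32"))
b = tvm.nd.array(np.zeros((1,), dtype="int32"))
f(a, b)
assert b.asnumpy()[0] == 5 * 11 * 7  # 385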
4 changes: 2 additions & 2 deletions topi/src/topi.cc
@@ -311,9 +311,9 @@ TVM_REGISTER_GLOBAL("topi.shape")
*rv = shape(args[0], args[1]);
});

TVM_REGISTER_GLOBAL("topi.size")
TVM_REGISTER_GLOBAL("topi.ndarray_size")
.set_body([](TVMArgs args, TVMRetValue *rv) {
-  *rv = size(args[0], args[1]);
+  *rv = ndarray_size(args[0], args[1]);
});

TVM_REGISTER_GLOBAL("topi.split")
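The TVM_REGISTER_GLOBAL shim above is what the Python wrapper's cpp.ndarray_size call resolves to. For illustration only, the renamed global can also be fetched directly from the FFI registry; this sketch assumes the usual string-to-dtype argument conversion that the wrapper relies on implicitly:

import tvm
import topi  # loads the TOPI library, registering "topi.ndarray_size"

A = tvm.placeholder((2, 3), dtype="float32", name="A")
f = tvm.get_global_func("topi.ndarray_size")
B = f(A, "int64")  # equivalent to topi.ndarray_size(A, dtype="int64")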
8 changes: 4 additions & 4 deletions topi/tests/python/test_topi_transform.py
@@ -649,11 +649,11 @@ def check_device(device):
for backend in get_all_backend():
check_device(backend)

-def test_size():
+def test_ndarray_size():
in_shape = (5, 11, 7)
dtype = "int32"
A = tvm.placeholder(shape=in_shape, dtype="float32", name="A")
-    B = topi.size(A, dtype)
+    B = topi.ndarray_size(A, dtype)

input = np.random.uniform(size=in_shape).astype(A.dtype)
output = np.asarray(np.size(input)).astype(dtype)
@@ -668,7 +668,7 @@ def check_device(device):
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_injective(B)
f = tvm.build(s, [A, B], device, name="size")
f = tvm.build(s, [A, B], device, name="ndarray_size")
f(tvm_input, tvm_output)
tvm.testing.assert_allclose(tvm_output.asnumpy(), output)

@@ -695,4 +695,4 @@ def check_device(device):
test_tile()
test_shape()
test_sequence_mask()
-    test_size()
+    test_ndarray_size()
