Commit

upgraded tests file as reviewer suggested
jakpiase committed May 25, 2021
1 parent be9e734 commit ada7c31
Showing 1 changed file with 19 additions and 42 deletions.
61 changes: 19 additions & 42 deletions in python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py
@@ -27,15 +27,18 @@
 @unittest.skipIf(not core.supports_bfloat16(),
                  "place does not support BF16 evaluation")
 class TestCastBF16ToFP32MKLDNNOp(OpTest):
-    def setUp(self):
-        self.x_fp32 = np.random.random(size=[10, 10]).astype("float32")
-        self.x_bf16 = convert_float_to_uint16(self.x_fp32)
+    def init_data(self):
+        self.out = np.random.random(size=[10, 10]).astype("float32")
+        self.x = convert_float_to_uint16(self.out)
 
-        self.inputs = {'X': self.x_bf16}
-        self.outputs = {'Out': self.x_fp32}
+    def setUp(self):
+        self.init_data()
+        self.inputs = {'X': self.x}
+        self.outputs = {'Out': self.out}
+        prepare_dtype = lambda x: int(core.VarDesc.VarType.BF16 if x.dtype != np.float32 else core.VarDesc.VarType.FP32)
         self.attrs = {
-            'in_dtype': int(core.VarDesc.VarType.BF16),
-            'out_dtype': int(core.VarDesc.VarType.FP32),
+            'in_dtype': prepare_dtype(self.x),
+            'out_dtype': prepare_dtype(self.out),
             'use_mkldnn': True
         }
         self.op_type = 'cast'
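Note: the new prepare_dtype lambda derives the 'in_dtype'/'out_dtype' attributes from the numpy data itself instead of hard-coding a VarType per test class. A minimal standalone sketch of that behaviour (assuming Paddle is installed; fp32_data and bf16_data are illustrative names, not part of the commit):

import numpy as np
from paddle.fluid import core

# bfloat16 tensors are carried as uint16 ndarrays in these tests, so any
# array whose dtype is not float32 is mapped to the BF16 enum value.
prepare_dtype = lambda x: int(core.VarDesc.VarType.BF16
                              if x.dtype != np.float32 else core.VarDesc.VarType.FP32)

fp32_data = np.ones([2, 2], dtype=np.float32)
bf16_data = np.ones([2, 2], dtype=np.uint16)  # stand-in for convert_float_to_uint16 output

assert prepare_dtype(fp32_data) == int(core.VarDesc.VarType.FP32)
assert prepare_dtype(bf16_data) == int(core.VarDesc.VarType.BF16)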
@@ -53,47 +56,21 @@ def test_check_grad(self):
 
 
 class TestCastFP32ToBF16MKLDNNOp(TestCastBF16ToFP32MKLDNNOp):
-    def setUp(self):
-        self.x_fp32 = np.random.random(size=[2, 6]).astype("float32")
-        self.x_bf16 = convert_float_to_uint16(self.x_fp32)
-
-        self.inputs = {'X': self.x_fp32}
-        self.outputs = {'Out': self.x_bf16}
-        self.attrs = {
-            'in_dtype': int(core.VarDesc.VarType.FP32),
-            'out_dtype': int(core.VarDesc.VarType.BF16),
-            'use_mkldnn': True
-        }
-        self.op_type = 'cast'
+    def init_data(self):
+        self.x = np.random.random(size=[2, 6]).astype("float32")
+        self.out = convert_float_to_uint16(self.x)
 
 
 class TestCastBF16ToBF16MKLDNNOp(TestCastBF16ToFP32MKLDNNOp):
-    def setUp(self):
-        self.x_fp32 = np.random.random(size=[6, 13]).astype("float32")
-        self.x_bf16 = convert_float_to_uint16(self.x_fp32)
-
-        self.inputs = {'X': self.x_bf16}
-        self.outputs = {'Out': self.x_bf16}
-        self.attrs = {
-            'in_dtype': int(core.VarDesc.VarType.BF16),
-            'out_dtype': int(core.VarDesc.VarType.BF16),
-            'use_mkldnn': True
-        }
-        self.op_type = 'cast'
+    def init_data(self):
+        self.x = np.random.random(size=[6, 13]).astype("uint16")
+        self.out = self.x
 
 
 class TestCastFP32ToFP32MKLDNNOp(TestCastBF16ToFP32MKLDNNOp):
-    def setUp(self):
-        self.x_fp32 = np.random.random(size=[7, 15]).astype("float32")
-
-        self.inputs = {'X': self.x_fp32}
-        self.outputs = {'Out': self.x_fp32}
-        self.attrs = {
-            'in_dtype': int(core.VarDesc.VarType.FP32),
-            'out_dtype': int(core.VarDesc.VarType.FP32),
-            'use_mkldnn': True
-        }
-        self.op_type = 'cast'
+    def init_data(self):
+        self.x = np.random.random(size=[7, 15]).astype("float32")
+        self.out = self.x
 
 
 if __name__ == '__main__':
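After this refactor the dtype-specific subclasses only override init_data; the base class setUp() builds inputs, outputs and the attrs dict from self.x and self.out. As an illustration, a hypothetical extra cast-direction test (the class name and shape below are made up, not part of the commit) would reduce to:

class TestCastFP32ToBF16LargeShapeMKLDNNOp(TestCastBF16ToFP32MKLDNNOp):
    def init_data(self):
        # hypothetical case: float32 input cast to bfloat16 (stored as uint16)
        self.x = np.random.random(size=[3, 4, 5]).astype("float32")
        self.out = convert_float_to_uint16(self.x)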
