From ada7c313f6bab66abb546d01949c2c14c00fdbfe Mon Sep 17 00:00:00 2001
From: Jakub Piasecki
Date: Tue, 25 May 2021 12:42:58 +0200
Subject: [PATCH] upgraded tests file as reviewer suggested

---
 .../unittests/mkldnn/test_cast_mkldnn_op.py | 61 ++++++-------------
 1 file changed, 19 insertions(+), 42 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py
index 27c9852b8d02d..8986f1f84e36e 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py
@@ -27,15 +27,18 @@
 @unittest.skipIf(not core.supports_bfloat16(),
                  "place does not support BF16 evaluation")
 class TestCastBF16ToFP32MKLDNNOp(OpTest):
-    def setUp(self):
-        self.x_fp32 = np.random.random(size=[10, 10]).astype("float32")
-        self.x_bf16 = convert_float_to_uint16(self.x_fp32)
+    def init_data(self):
+        self.out = np.random.random(size=[10, 10]).astype("float32")
+        self.x = convert_float_to_uint16(self.out)
 
-        self.inputs = {'X': self.x_bf16}
-        self.outputs = {'Out': self.x_fp32}
+    def setUp(self):
+        self.init_data()
+        self.inputs = {'X': self.x}
+        self.outputs = {'Out': self.out}
+        prepare_dtype = lambda x: int(core.VarDesc.VarType.BF16 if x.dtype != np.float32 else core.VarDesc.VarType.FP32)
         self.attrs = {
-            'in_dtype': int(core.VarDesc.VarType.BF16),
-            'out_dtype': int(core.VarDesc.VarType.FP32),
+            'in_dtype': prepare_dtype(self.x),
+            'out_dtype': prepare_dtype(self.out),
             'use_mkldnn': True
         }
         self.op_type = 'cast'
@@ -53,47 +56,21 @@ def test_check_grad(self):
 
 
 class TestCastFP32ToBF16MKLDNNOp(TestCastBF16ToFP32MKLDNNOp):
-    def setUp(self):
-        self.x_fp32 = np.random.random(size=[2, 6]).astype("float32")
-        self.x_bf16 = convert_float_to_uint16(self.x_fp32)
-
-        self.inputs = {'X': self.x_fp32}
-        self.outputs = {'Out': self.x_bf16}
-        self.attrs = {
-            'in_dtype': int(core.VarDesc.VarType.FP32),
-            'out_dtype': int(core.VarDesc.VarType.BF16),
-            'use_mkldnn': True
-        }
-        self.op_type = 'cast'
+    def init_data(self):
+        self.x = np.random.random(size=[2, 6]).astype("float32")
+        self.out = convert_float_to_uint16(self.x)
 
 
 class TestCastBF16ToBF16MKLDNNOp(TestCastBF16ToFP32MKLDNNOp):
-    def setUp(self):
-        self.x_fp32 = np.random.random(size=[6, 13]).astype("float32")
-        self.x_bf16 = convert_float_to_uint16(self.x_fp32)
-
-        self.inputs = {'X': self.x_bf16}
-        self.outputs = {'Out': self.x_bf16}
-        self.attrs = {
-            'in_dtype': int(core.VarDesc.VarType.BF16),
-            'out_dtype': int(core.VarDesc.VarType.BF16),
-            'use_mkldnn': True
-        }
-        self.op_type = 'cast'
+    def init_data(self):
+        self.x = np.random.random(size=[6, 13]).astype("uint16")
+        self.out = self.x
 
 
 class TestCastFP32ToFP32MKLDNNOp(TestCastBF16ToFP32MKLDNNOp):
-    def setUp(self):
-        self.x_fp32 = np.random.random(size=[7, 15]).astype("float32")
-
-        self.inputs = {'X': self.x_fp32}
-        self.outputs = {'Out': self.x_fp32}
-        self.attrs = {
-            'in_dtype': int(core.VarDesc.VarType.FP32),
-            'out_dtype': int(core.VarDesc.VarType.FP32),
-            'use_mkldnn': True
-        }
-        self.op_type = 'cast'
+    def init_data(self):
+        self.x = np.random.random(size=[7, 15]).astype("float32")
+        self.out = self.x
 
 
 if __name__ == '__main__':
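
Note (not part of the patch): after this refactor, each cast test case only overrides init_data(), while the inherited setUp() builds inputs/outputs and derives in_dtype/out_dtype from the numpy dtypes of self.x and self.out. A minimal sketch of how an additional case could be added on top of the patched file; the class name and tensor shape below are made up for illustration:

    # Hypothetical extra case: FP32 input cast to BF16 output on a larger shape.
    # Reuses the patched base class and convert_float_to_uint16 helper.
    class TestCastFP32ToBF16LargeMKLDNNOp(TestCastBF16ToFP32MKLDNNOp):
        def init_data(self):
            self.x = np.random.random(size=[32, 64]).astype("float32")
            self.out = convert_float_to_uint16(self.x)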