From 2dbef0f15905169edf13f5c7ecfc29f8c95f8685 Mon Sep 17 00:00:00 2001
From: Reese Wang
Date: Thu, 7 Mar 2024 10:50:28 +0800
Subject: [PATCH] Enhance several unit tests (#62477)

* Manually release predictor_tuned

Signed-off-by: rewang

* Add indices to no_cast_list to keep them as fp32

Signed-off-by: rewang

* Set both atol and rtol for the fp16 test_trt_convert_solve

Signed-off-by: rewang

* Merge branch 'rewang/fix_test_sparse_fused_attention_seed' into 'nv-2.6.0'

Fix test_sparse_fused_attention random seed

See merge request dl/paddle/paddle!312

---------

Signed-off-by: rewang
Co-authored-by: Ryan Jeng
---
 test/cpp/inference/api/trt_dynamic_shape_test.cc   | 1 +
 test/ir/inference/test_trt_convert_lookup_table.py | 1 +
 test/ir/inference/test_trt_convert_solve.py        | 2 +-
 test/legacy_test/test_sparse_fused_attention_op.py | 5 +++++
 4 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/test/cpp/inference/api/trt_dynamic_shape_test.cc b/test/cpp/inference/api/trt_dynamic_shape_test.cc
index 80929f10447b8..52336e7e8a541 100644
--- a/test/cpp/inference/api/trt_dynamic_shape_test.cc
+++ b/test/cpp/inference/api/trt_dynamic_shape_test.cc
@@ -191,6 +191,7 @@ void TestTunedDynamic() {
     output_t->copy_to_cpu(out_data.data());
   };
   check_func(predictor_tuned.get());
+  predictor_tuned.reset(nullptr);
 
   // check tuned_dynamic_shape
   AnalysisConfig config;
diff --git a/test/ir/inference/test_trt_convert_lookup_table.py b/test/ir/inference/test_trt_convert_lookup_table.py
index e1fb64bcdf545..b7cf7d657d7a0 100644
--- a/test/ir/inference/test_trt_convert_lookup_table.py
+++ b/test/ir/inference/test_trt_convert_lookup_table.py
@@ -80,6 +80,7 @@ def generate_input2(dims, attrs: List[Dict[str, Any]]):
                 )
             },
             outputs=["out_data"],
+            no_cast_list=["indices"],
         )
 
         yield program_config
diff --git a/test/ir/inference/test_trt_convert_solve.py b/test/ir/inference/test_trt_convert_solve.py
index fa86a84e61f19..de70cfacc4e07 100644
--- a/test/ir/inference/test_trt_convert_solve.py
+++ b/test/ir/inference/test_trt_convert_solve.py
@@ -90,7 +90,7 @@ def clear_dynamic_shape():
         yield self.create_inference_config(), (1, 3), 1e-5
 
         self.trt_param.precision = paddle_infer.PrecisionType.Half
-        yield self.create_inference_config(), (1, 3), 1e-3
+        yield self.create_inference_config(), (1, 3), (1e-3, 1e-3)
 
     def test(self):
         self.run_test()
diff --git a/test/legacy_test/test_sparse_fused_attention_op.py b/test/legacy_test/test_sparse_fused_attention_op.py
index 68cdd16d4bd12..098f4815b85f3 100644
--- a/test/legacy_test/test_sparse_fused_attention_op.py
+++ b/test/legacy_test/test_sparse_fused_attention_op.py
@@ -42,6 +42,7 @@ def get_cuda_version():
 )
 class TestSparseAttentionAPI1(unittest.TestCase):
     def setUp(self):
+        paddle.seed(0)
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 128
@@ -134,6 +135,7 @@ def test_dygraph(self):
 
 class TestSparseAttentionAPI2(TestSparseAttentionAPI1):
     def setUp(self):
+        super().setUp()
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 128
@@ -144,6 +146,7 @@ def setUp(self):
 
 class TestSparseAttentionAPI3(TestSparseAttentionAPI1):
     def setUp(self):
+        super().setUp()
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 512
@@ -154,6 +157,7 @@ def setUp(self):
 
 class TestSparseAttentionAPI4(TestSparseAttentionAPI1):
     def setUp(self):
+        super().setUp()
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 512
@@ -164,6 +168,7 @@ def setUp(self):
 
 class TestSparseAttentionAPI5(TestSparseAttentionAPI1):
     def setUp(self):
+        super().setUp()
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 512
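
Note on the trt_dynamic_shape_test.cc change: predictor_tuned.reset(nullptr)
destroys the tuned predictor immediately, so its device memory and tuned
shape-range output are released before the follow-up AnalysisConfig predictor
is constructed, rather than at end of scope. A minimal Python sketch of that
ordering concern (the Predictor class below is a hypothetical stand-in, not
the Paddle API):

    class Predictor:  # stand-in for the real inference predictor
        def __init__(self, name):
            self.name = name
            print("acquire resources for", name)

        def close(self):
            print("release resources for", self.name)

    tuned = Predictor("tuned")
    # ... run warm-up passes that record tuned dynamic shapes ...
    tuned.close()                        # explicit release, like reset(nullptr)
    checked = Predictor("deserialized")  # only now build the second predictor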
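
Note on the no_cast_list addition: the half-precision pass of the TRT test
harness downcasts the listed program inputs, and lookup indices are positions
rather than values, so they must keep their original dtype. As a sketch of the
numeric hazard being avoided (an illustration, not the harness code): fp16
represents integers exactly only up to 2048, so a larger vocabulary index
would be rounded to a different row.

    import numpy as np

    idx = np.int64(2049)    # an index just past fp16's exact-integer range
    print(np.float16(idx))  # 2048.0 -- the index silently changes
    print(np.float32(idx))  # 2049.0 -- fp32 is still exact here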
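
Note on the (1e-3, 1e-3) tolerance in test_trt_convert_solve: passing a tuple
supplies atol and rtol separately instead of an absolute tolerance alone.
Assuming allclose-style comparison (|actual - expected| <= atol + rtol *
|expected|), the relative term is what absorbs fp16 rounding on
large-magnitude outputs:

    import numpy as np

    baseline = np.float32(1234.5)
    half = np.float32(np.float16(baseline))  # fp16 rounds this to 1234.0
    print(abs(half - baseline))              # 0.5, far above atol=1e-3 alone
    print(np.allclose(half, baseline, rtol=1e-3, atol=1e-3))  # True via rtol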
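
Note on the sparse fused attention fix: seeding in the base class's setUp only
takes effect if every subclass override chains up, which is why each derived
setUp now begins with super().setUp(). A minimal stdlib-only sketch of the
pattern (the real tests call paddle.seed(0) at the marked spot):

    import unittest

    class Base(unittest.TestCase):
        def setUp(self):
            # the patch calls paddle.seed(0) here for reproducible Q/K/V data
            self.seeded = True
            self.seq_len = 128

    class Derived(Base):
        def setUp(self):
            super().setUp()  # without this, the override skips the seeding
            self.seq_len = 512

        def test_seeded(self):
            self.assertTrue(self.seeded)

    if __name__ == "__main__":
        unittest.main()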