Commit

fix(tests): fix the tests
Sharad Sirsat committed Jul 5, 2023
1 parent 497a463 commit b68dba4
Showing 1 changed file with 8 additions and 8 deletions.
@@ -97,11 +97,11 @@ def test_composed_metrics(self):
                 size, batch_size, pred=1, label=1):
             evaluator.process(predictions, data_samples)
 
-        metrics = evaluator.evaluate(size=size)
+        metrics_results, averaged_results = evaluator.evaluate(size=size)
 
-        self.assertAlmostEqual(metrics['Fake/Toy/accuracy'], 1.0)
-        self.assertAlmostEqual(metrics['Fake/Toy/mAP'], 0.0)
-        self.assertEqual(metrics['Fake/Toy/size'], size)
+        self.assertAlmostEqual(metrics_results['Fake/Toy/accuracy'], 1.0)
+        self.assertAlmostEqual(metrics_results['Fake/Toy/mAP'], 0.0)
+        self.assertEqual(metrics_results['Fake/Toy/size'], size)
         with self.assertWarns(Warning):
             evaluator.evaluate(size=0)
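
For readers skimming the diff, the substantive change in this hunk is the return type: evaluate() now yields two values, the raw per-dataset metrics and their cross-dataset averages, instead of a single dict. A minimal sketch of the new call pattern, using a stand-in class (FakeEvaluator and its hard-coded values are hypothetical, not MMEngine's real API):

class FakeEvaluator:
    """Stand-in mimicking the two-value return the updated tests expect."""

    def evaluate(self, size):
        # Per-dataset metrics, keyed '<dataset prefix>/<metric prefix>/<name>'.
        metrics_results = {
            'Fake/Toy/accuracy': 1.0,
            'Fake/Toy/mAP': 0.0,
            'Fake/Toy/size': size,
        }
        # Averages across datasets; the contents here are illustrative only.
        averaged_results = {'Fake/accuracy': 1.0, 'Fake/mAP': 0.0}
        return metrics_results, averaged_results

metrics_results, averaged_results = FakeEvaluator().evaluate(size=10)
assert metrics_results['Fake/Toy/accuracy'] == 1.0
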
@@ -124,9 +124,9 @@ def test_composed_metrics(self):
         for data_samples, predictions in generate_test_results(
                 size, batch_size, pred=1, label=1):
             evaluator.process(predictions, data_samples)
-        metrics = evaluator.evaluate(size=size)
-        self.assertIn('Fake/Toy/accuracy', metrics)
-        self.assertIn('Fake/accuracy', metrics)
+        metrics_results, averaged_results = evaluator.evaluate(size=size)
+        self.assertIn('Fake/Toy/accuracy', metrics_results)
+        self.assertIn('Fake/accuracy', metrics_results)
 
         metrics_results = OrderedDict({
             'dataset1/metric1/accuracy': 0.9,
@@ -135,7 +135,7 @@ def test_composed_metrics(self):
             'dataset2/metric2/f1_score': 0.75
         })
 
-        evaluator = MultiDatasetsEvaluator([], [])
+        evaluator = MultiDatasetsEvaluator(cfg, dataset_prefixes=['Fake'])
         averaged_results = evaluator.average_results(metrics_results)
 
         expected_averaged_results = {
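
This last hunk constructs the evaluator from a real config with explicit dataset_prefixes instead of two empty lists, then calls average_results directly on hand-built metrics. The expected output is cut off above, so the exact semantics are not visible here; as a rough sketch under that caveat, average_results plausibly groups '<dataset>/<metric>/<name>' keys by the suffix after the dataset segment and averages the values across datasets (the helper below and its sample data are hypothetical, not MMEngine's code):

from collections import OrderedDict
from statistics import mean

def average_results(metrics_results):
    # Group each value under its '<metric>/<name>' suffix, dropping the
    # leading dataset segment, then average every group.
    grouped = OrderedDict()
    for key, value in metrics_results.items():
        _, suffix = key.split('/', 1)
        grouped.setdefault(suffix, []).append(value)
    return OrderedDict(
        (suffix, mean(values)) for suffix, values in grouped.items())

metrics_results = OrderedDict({
    'dataset1/metric1/accuracy': 0.9,
    'dataset2/metric1/accuracy': 0.8,  # illustrative extra entry
    'dataset2/metric2/f1_score': 0.75,
})
print(average_results(metrics_results))
# -> OrderedDict([('metric1/accuracy', 0.85), ('metric2/f1_score', 0.75)])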
