[Fix] fix evaluation bug in eval_hooks.py #176

Merged · 1 commit · Jun 22, 2021
mmtrack/core/evaluation/eval_hooks.py (4 changes: 2 additions & 2 deletions)
```diff
@@ -9,7 +9,7 @@ class EvalHook(_EvalHook):
     detailed docstring."""
 
     def after_train_epoch(self, runner):
-        if not self.evaluation_flag(runner):
+        if not self._should_evaluate(runner):
             return
         if self.dataloader.dataset.load_as_video:
             from mmtrack.apis import single_gpu_test
@@ -24,7 +24,7 @@ class DistEvalHook(_DistEvalHook):
     detailed docstring."""
 
     def after_train_epoch(self, runner):
-        if not self.evaluation_flag(runner):
+        if not self._should_evaluate(runner):
             return
         if self.dataloader.dataset.load_as_video:
             from mmtrack.apis import multi_gpu_test
```
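Why the one-line change: upstream mmcv's `EvalHook` replaced its `evaluation_flag` helper with `_should_evaluate`, so the overridden `after_train_epoch` above would fail with an `AttributeError` on newer mmcv; the fix switches the gate check to the new name. Below is a minimal sketch of the `start`/`interval` gating that `_should_evaluate` performs, written as a free function for illustration (the arithmetic follows mmcv's documented semantics, not its exact source):

```python
from typing import Optional


def should_evaluate(epoch: int, start: Optional[int], interval: int) -> bool:
    """Sketch of EvalHook._should_evaluate's gating (not mmcv's source).

    `epoch` is the 0-based epoch that just finished.
    """
    current = epoch + 1  # gate on 1-based epoch numbers
    if start is None:
        # No explicit start: evaluate every `interval` epochs.
        return current % interval == 0
    if current < start:
        # Evaluation has not started yet.
        return False
    # From `start` on, evaluate every `interval` epochs.
    return (current - start) % interval == 0


# With start=3 and interval=2, evaluation runs after epochs 3, 5, 7, ...
assert [e for e in range(1, 8)
        if should_evaluate(e - 1, start=3, interval=2)] == [3, 5, 7]
```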
tests/test_data/test_dataset.py (9 changes: 3 additions & 6 deletions)
```diff
@@ -457,13 +457,10 @@ def test_evaluation_hook(EvalHookParam):
     runner.run([dataloader], [('train', 1)], 2)
     assert evalhook.evaluate.call_count == 3  # before epoch1 and after e1 & e2
 
     # the evaluation start epoch cannot be less than 0
     runner = _build_demo_runner()
-    with pytest.warns(UserWarning):
-        evalhook = EvalHookParam(dataloader, start=-2)
-    evalhook.evaluate = MagicMock()
-    runner.register_hook(evalhook)
-    runner.run([dataloader], [('train', 1)], 2)
-    assert evalhook.evaluate.call_count == 3  # before epoch1 and after e1 & e2
+    with pytest.raises(ValueError):
+        EvalHookParam(dataloader, start=-2)
 
     # 6. resuming from epoch i, start = x (x<=i), interval =1: perform
     # evaluation after each epoch and before the first epoch.
```
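The test update mirrors the same mmcv behavior change: a negative `start` used to be tolerated with a `UserWarning` and evaluation then ran on the default schedule (hence the old call-count assertion), while newer mmcv rejects it when the hook is constructed. Here is a self-contained sketch of the validation the new assertion exercises; `EvalHookSketch` and its error message are illustrative stand-ins, not mmcv's actual class:

```python
import pytest


class EvalHookSketch:
    """Illustrative stand-in for the constructor validation in mmcv's EvalHook."""

    def __init__(self, dataloader, start=None, interval=1):
        if start is not None and start < 0:
            # Hypothetical message; mmcv's wording may differ.
            raise ValueError(f'start ({start}) must not be negative')
        self.dataloader = dataloader
        self.start = start
        self.interval = interval


def test_negative_start_rejected():
    # Mirrors the updated test: construction itself must raise.
    with pytest.raises(ValueError):
        EvalHookSketch(dataloader=None, start=-2)
```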