Commit

tests: add default_root_dir=tmpdir
Borda committed Jun 27, 2020
1 parent 51711c2 commit 5ff2b08
Showing 15 changed files with 76 additions and 66 deletions.
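Every hunk below applies the same pattern: the test accepts pytest's `tmpdir` fixture and passes it to the `Trainer` as `default_root_dir`, so checkpoints and logs land in a per-test temporary directory instead of the repository root. Several tests are also simplified along the way by inlining the old `trainer_options = dict(...)` indirection into a direct `Trainer(...)` call. A minimal sketch of the pattern, assuming the repo's `EvalModelTemplate` test model and its import path:

from pytorch_lightning import Trainer
from tests.base import EvalModelTemplate  # assumed import path for the test template


def test_example(tmpdir):
    # pytest injects `tmpdir`; routing Trainer output there keeps checkpoints
    # and lightning_logs out of the working directory and isolates test runs.
    model = EvalModelTemplate()
    trainer = Trainer(
        max_epochs=1,
        default_root_dir=tmpdir,
    )
    trainer.fit(model)

Most tests pass the `py.path.local` object directly, while test_horovod.py wraps it as `str(tmpdir)`.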
1 change: 1 addition & 0 deletions tests/callbacks/test_callbacks.py
@@ -165,6 +165,7 @@ def on_test_end(self, trainer, pl_module):
         limit_val_batches=0.1,
         limit_train_batches=0.2,
         progress_bar_refresh_rate=0,
+        default_root_dir=tmpdir,
     )

     assert not test_callback.setup_called
6 changes: 4 additions & 2 deletions tests/callbacks/test_progress_bar.py
@@ -54,7 +54,7 @@ def test_progress_bar_misconfiguration():
         Trainer(callbacks=callbacks)


-def test_progress_bar_totals():
+def test_progress_bar_totals(tmpdir):
     """Test that the progress finishes with the correct total steps processed."""

     model = EvalModelTemplate()
@@ -63,6 +63,7 @@ def test_progress_bar_totals():
         progress_bar_refresh_rate=1,
         limit_val_batches=1.0,
         max_epochs=1,
+        default_root_dir=tmpdir,
     )
     bar = trainer.progress_bar_callback
     assert 0 == bar.total_train_batches
@@ -136,7 +137,7 @@ def test_progress_bar_fast_dev_run():


 @pytest.mark.parametrize('refresh_rate', [0, 1, 50])
-def test_progress_bar_progress_refresh(refresh_rate):
+def test_progress_bar_progress_refresh(tmpdir, refresh_rate):
     """Test that the three progress bars get correctly updated when using different refresh rates."""

     model = EvalModelTemplate()
@@ -177,6 +178,7 @@ def on_test_batch_end(self, trainer, pl_module):
         limit_train_batches=1.0,
         num_sanity_val_steps=2,
         max_epochs=3,
+        default_root_dir=tmpdir,
     )
     assert trainer.progress_bar_callback.refresh_rate == refresh_rate
1 change: 1 addition & 0 deletions tests/loggers/test_all.py
@@ -54,6 +54,7 @@ def log_metrics(self, metrics, step):
         limit_train_batches=0.2,
         limit_val_batches=0.5,
         fast_dev_run=True,
+        default_root_dir=tmpdir,
     )
     trainer.fit(model)
2 changes: 1 addition & 1 deletion tests/loggers/test_base.py
@@ -102,7 +102,7 @@ def test_multiple_loggers(tmpdir):
     assert logger2.finalized_status == "success"


-def test_multiple_loggers_pickle(tmpdir):
+def test_multiple_loggers_pickle():
     """Verify that pickling trainer with multiple loggers works."""

     logger1 = CustomLogger()
5 changes: 4 additions & 1 deletion tests/loggers/test_tensorboard.py
@@ -17,7 +17,10 @@
 def test_tensorboard_hparams_reload(tmpdir):
     model = EvalModelTemplate()

-    trainer = Trainer(max_epochs=1, default_root_dir=tmpdir)
+    trainer = Trainer(
+        max_epochs=1,
+        default_root_dir=tmpdir,
+    )
     trainer.fit(model)

     folder_path = trainer.logger.log_dir
16 changes: 6 additions & 10 deletions tests/models/test_amp.py
@@ -21,7 +21,7 @@ def test_amp_single_gpu(tmpdir, backend):
         max_epochs=1,
         gpus=1,
         distributed_backend=backend,
-        precision=16
+        precision=16,
     )

     model = EvalModelTemplate()
@@ -39,18 +39,15 @@ def test_amp_multi_gpu(tmpdir, backend):
     tutils.set_random_master_port()

     model = EvalModelTemplate()
-
-    trainer_options = dict(
+    # tutils.run_model_test(trainer_options, model)
+    trainer = Trainer(
         default_root_dir=tmpdir,
         max_epochs=1,
         # gpus=2,
         gpus='0, 1',  # test init with gpu string
         distributed_backend=backend,
         precision=16,
     )
-
-    # tutils.run_model_test(trainer_options, model)
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model)
     assert result

@@ -66,17 +63,15 @@ def test_multi_gpu_wandb(tmpdir, backend):
     model = EvalModelTemplate()
     logger = WandbLogger(name='utest')
-
-    trainer_options = dict(
+    # tutils.run_model_test(trainer_options, model)
+    trainer = Trainer(
         default_root_dir=tmpdir,
         max_epochs=1,
         gpus=2,
         distributed_backend=backend,
         precision=16,
         logger=logger,

     )
-    # tutils.run_model_test(trainer_options, model)
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model)
     assert result
     trainer.test(model)
@@ -106,6 +101,7 @@ def test_amp_gpu_ddp_slurm_managed(tmpdir):
         precision=16,
         checkpoint_callback=checkpoint,
         logger=logger,
+        default_root_dir=tmpdir,
     )
     trainer.is_slurm_managing_tasks = True
     result = trainer.fit(model)
3 changes: 3 additions & 0 deletions tests/models/test_cpu.py
@@ -29,6 +29,7 @@ def test_cpu_slurm_save_load(tmpdir):
         limit_train_batches=0.2,
         limit_val_batches=0.2,
         checkpoint_callback=ModelCheckpoint(tmpdir),
+        default_root_dir=tmpdir,
     )
     result = trainer.fit(model)
     real_global_step = trainer.global_step
@@ -64,6 +65,7 @@ def test_cpu_slurm_save_load(tmpdir):
         max_epochs=1,
         logger=logger,
         checkpoint_callback=ModelCheckpoint(tmpdir),
+        default_root_dir=tmpdir,
     )
     model = EvalModelTemplate(**hparams)

@@ -220,6 +222,7 @@ def test_running_test_no_val(tmpdir):
         checkpoint_callback=checkpoint,
         logger=logger,
         early_stop_callback=False,
+        default_root_dir=tmpdir,
     )
     result = trainer.fit(model)
20 changes: 8 additions & 12 deletions tests/models/test_gpu.py
@@ -39,18 +39,16 @@ def test_multi_gpu_model(tmpdir, backend):
     """Make sure DDP works."""
     tutils.set_random_master_port()

-    trainer_options = dict(
+    model = EvalModelTemplate()
+    # tutils.run_model_test(trainer_options, model)
+    trainer = Trainer(
         default_root_dir=tmpdir,
         max_epochs=1,
         limit_train_batches=0.4,
         limit_val_batches=0.2,
         gpus=[0, 1],
         distributed_backend=backend,
     )
-
-    model = EvalModelTemplate()
-    # tutils.run_model_test(trainer_options, model)
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model)
     assert result

@@ -64,7 +62,11 @@ def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
     """Make sure DDP works with dataloaders passed to fit()"""
     tutils.set_random_master_port()

-    trainer_options = dict(
+    model = EvalModelTemplate()
+    fit_options = dict(train_dataloader=model.train_dataloader(),
+                       val_dataloaders=model.val_dataloader())
+
+    trainer = Trainer(
         default_root_dir=tmpdir,
         progress_bar_refresh_rate=0,
         max_epochs=1,
@@ -73,12 +75,6 @@ def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
         gpus=[0, 1],
         distributed_backend='ddp'
     )
-
-    model = EvalModelTemplate()
-    fit_options = dict(train_dataloader=model.train_dataloader(),
-                       val_dataloaders=model.val_dataloader())
-
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model, **fit_options)
     assert result == 1, "DDP doesn't work with dataloaders passed to fit()."
1 change: 1 addition & 0 deletions tests/models/test_grad_norm.py
@@ -88,6 +88,7 @@ def test_grad_tracking(tmpdir, norm_type, rtol=5e-3):
         logger=logger,
         track_grad_norm=norm_type,
         row_log_interval=1,  # request grad_norms every batch
+        default_root_dir=tmpdir,
     )
     result = trainer.fit(model)
3 changes: 2 additions & 1 deletion tests/models/test_hooks.py
@@ -8,7 +8,7 @@


 @pytest.mark.parametrize('max_steps', [1, 2, 3])
-def test_on_before_zero_grad_called(max_steps):
+def test_on_before_zero_grad_called(tmpdir, max_steps):

     class CurrentTestModel(EvalModelTemplate):
         on_before_zero_grad_called = 0
@@ -21,6 +21,7 @@ def on_before_zero_grad(self, optimizer):
     trainer = Trainer(
         max_steps=max_steps,
         num_sanity_val_steps=5,
+        default_root_dir=tmpdir,
     )
     assert 0 == model.on_before_zero_grad_called
     trainer.fit(model)
6 changes: 2 additions & 4 deletions tests/models/test_horovod.py
@@ -146,7 +146,8 @@ def validation_step(self, batch, *args, **kwargs):
 def test_horovod_multi_optimizer(tmpdir):
     model = TestGAN(**EvalModelTemplate.get_default_hparams())

-    trainer_options = dict(
+    # fit model
+    trainer = Trainer(
         default_root_dir=str(tmpdir),
         progress_bar_refresh_rate=0,
         max_epochs=1,
@@ -155,9 +156,6 @@ def test_horovod_multi_optimizer(tmpdir):
         deterministic=True,
         distributed_backend='horovod',
     )
-
-    # fit model
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model)
     assert result == 1, 'model failed to complete'
12 changes: 7 additions & 5 deletions tests/models/test_restore.py
@@ -38,6 +38,7 @@ def test_running_test_pretrained_model_distrib(tmpdir, backend):
         logger=logger,
         gpus=[0, 1],
         distributed_backend=backend,
+        default_root_dir=tmpdir,
     )

     # fit model
@@ -84,6 +85,7 @@ def test_running_test_pretrained_model_cpu(tmpdir):
         limit_val_batches=0.2,
         checkpoint_callback=checkpoint,
         logger=logger,
+        default_root_dir=tmpdir,
     )

     # fit model
@@ -154,6 +156,7 @@ def test_dp_resume(tmpdir):
         max_epochs=1,
         gpus=2,
         distributed_backend='dp',
+        default_root_dir=tmpdir,
     )

     # get logger
@@ -224,14 +227,13 @@ def test_model_saving_loading(tmpdir):
     # logger file to get meta
     logger = tutils.get_default_logger(tmpdir)

-    trainer_options = dict(
+    # fit model
+    trainer = Trainer(
         max_epochs=1,
         logger=logger,
-        checkpoint_callback=ModelCheckpoint(tmpdir)
+        checkpoint_callback=ModelCheckpoint(tmpdir),
+        default_root_dir=tmpdir,
     )
-
-    # fit model
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model)

     # traning complete
30 changes: 15 additions & 15 deletions tests/trainer/test_dataloaders.py
@@ -199,7 +199,7 @@ def test_all_dataloaders_passed_to_fit(tmpdir, ckpt_path):
         default_root_dir=tmpdir,
         max_epochs=1,
         limit_val_batches=0.1,
-        limit_train_batches=0.2
+        limit_train_batches=0.2,
     )
     fit_options = dict(train_dataloader=model.dataloader(train=True),
                        val_dataloaders=model.dataloader(train=False))
@@ -232,7 +232,7 @@ def test_multiple_dataloaders_passed_to_fit(tmpdir, ckpt_path):
         default_root_dir=tmpdir,
         max_epochs=1,
         limit_val_batches=0.1,
-        limit_train_batches=0.2
+        limit_train_batches=0.2,
     )
     fit_options = dict(train_dataloader=model.dataloader(train=True),
                        val_dataloaders=[model.dataloader(train=False),
@@ -336,7 +336,7 @@ def test_mixing_of_dataloader_options(tmpdir, ckpt_path):
         default_root_dir=tmpdir,
         max_epochs=1,
         limit_val_batches=0.1,
-        limit_train_batches=0.2
+        limit_train_batches=0.2,
     )

     # fit model
@@ -401,7 +401,7 @@ def test_inf_train_dataloader(tmpdir, check_interval):
     trainer = Trainer(
         default_root_dir=tmpdir,
         max_epochs=1,
-        val_check_interval=check_interval
+        val_check_interval=check_interval,
     )
     result = trainer.fit(model)
     # verify training completed
@@ -440,7 +440,7 @@ def test_error_on_zero_len_dataloader(tmpdir):
         max_epochs=1,
         limit_train_batches=0.1,
         limit_val_batches=0.1,
-        limit_test_batches=0.1
+        limit_test_batches=0.1,
     )
     trainer.fit(model)

@@ -453,13 +453,6 @@ def test_warning_with_few_workers(tmpdir, ckpt_path):
     model = EvalModelTemplate()

     # logger file to get meta
-    trainer_options = dict(
-        default_root_dir=tmpdir,
-        max_epochs=1,
-        limit_val_batches=0.1,
-        limit_train_batches=0.2
-    )
-
     train_dl = model.dataloader(train=True)
     train_dl.num_workers = 0

@@ -471,7 +464,12 @@ def test_warning_with_few_workers(tmpdir, ckpt_path):

     fit_options = dict(train_dataloader=train_dl,
                        val_dataloaders=val_dl)
-    trainer = Trainer(**trainer_options)
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        max_epochs=1,
+        limit_val_batches=0.1,
+        limit_train_batches=0.2,
+    )

     # fit model
     with pytest.warns(UserWarning, match='train'):
@@ -488,7 +486,7 @@


 @pytest.mark.skipif(torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')
-def test_dataloader_reinit_for_subclass():
+def test_dataloader_reinit_for_subclass(tmpdir):

     class CustomDataLoader(torch.utils.data.DataLoader):
         def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,
@@ -505,6 +503,7 @@ def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,
         gpus=[0, 1],
         num_nodes=1,
         distributed_backend='ddp',
+        default_root_dir=tmpdir,
     )

     class CustomDummyObj:
@@ -534,7 +533,7 @@ class CustomSampler(torch.utils.data.Sampler):


 @pytest.mark.skipif(torch.cuda.device_count() < 3, reason='Test requires multiple GPUs')
-def test_batch_size_smaller_than_num_gpus():
+def test_batch_size_smaller_than_num_gpus(tmpdir):
     # we need at least 3 gpus for this test
     num_gpus = 3
     batch_size = 3
@@ -576,6 +575,7 @@ def train_dataloader(self):
         limit_train_batches=0.1,
         limit_val_batches=0,
         gpus=num_gpus,
+        default_root_dir=tmpdir,
     )

     # we expect the reduction for the metrics also to happen on the last batch