Commit 590e7fb

tests: add default_root_dir=tmpdir (#2392)
* tests: add default_root_dir=tmpdir

* remove duplicate tmpdir args

* add missing fixture

* test requires multi gpu

* typo

* resize

Co-authored-by: Adrian Wälchli <aedu.waelchli@gmail.com>
Borda and awaelchli committed Jul 28, 2020
1 parent a3aebc1 commit 590e7fb
Showing 15 changed files with 41 additions and 38 deletions.
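The recurring fix across these files: every Trainer built inside a test now gets default_root_dir=tmpdir, so checkpoints and logs land in pytest's per-test temporary directory instead of the repository checkout. Below is a minimal sketch of the pattern; it assumes the suite's EvalModelTemplate helper from tests.base, and any LightningModule would work the same way.

    # Sketch of the pattern this commit applies suite-wide; EvalModelTemplate
    # is the repo's own test model, not part of the public API.
    from pytorch_lightning import Trainer
    from tests.base import EvalModelTemplate


    def test_artifacts_stay_under_tmpdir(tmpdir):
        model = EvalModelTemplate()
        trainer = Trainer(
            max_epochs=1,
            limit_train_batches=0.2,
            limit_val_batches=0.2,
            default_root_dir=tmpdir,  # logs and checkpoints go below tmpdir
        )
        result = trainer.fit(model)
        assert result == 1  # fit() returns 1 on success in this era of the API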
2 changes: 1 addition & 1 deletion README.md
@@ -337,7 +337,7 @@ Lightning has out-of-the-box integration with the popular logging/visualizing fr
 - [Wandb](https://www.wandb.com/)
 - ...
 
-![tensorboard-support](docs/source/_images/general/tf_loss.png)
+![tensorboard-support](docs/source/_images/general/tf_loss.jpg)
 
 
 ## Lightning automates 40+ parts of DL/ML research
Binary file added docs/source/_images/general/tf_loss.jpg
Binary file removed docs/source/_images/general/tf_loss.png
Binary file added docs/source/_images/general/tf_tags.jpg
Binary file removed docs/source/_images/general/tf_tags.png
Binary file modified docs/source/_images/lightning_module/pt_to_pl.png
Binary file modified docs/source/_images/lightning_module/pt_trainer.png
1 change: 1 addition & 0 deletions tests/loggers/test_all.py
@@ -73,6 +73,7 @@ def log_metrics(self, metrics, step):
         limit_train_batches=0.2,
         limit_val_batches=0.5,
         fast_dev_run=True,
+        default_root_dir=tmpdir,
     )
     trainer.fit(model)
     trainer.test()
5 changes: 4 additions & 1 deletion tests/loggers/test_tensorboard.py
@@ -17,7 +17,10 @@
 def test_tensorboard_hparams_reload(tmpdir):
     model = EvalModelTemplate()
 
-    trainer = Trainer(max_epochs=1, default_root_dir=tmpdir)
+    trainer = Trainer(
+        max_epochs=1,
+        default_root_dir=tmpdir,
+    )
     trainer.fit(model)
 
     folder_path = trainer.logger.log_dir
2 changes: 1 addition & 1 deletion tests/models/test_amp.py
@@ -101,7 +101,7 @@ def test_amp_multi_gpu_dp(tmpdir):
     assert result == 1
 
 
-@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
+@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
 def test_amp_multi_gpu_ddp_spawn(tmpdir):
     """Make sure DP/DDP + AMP work."""
     tutils.reset_seed()
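The guard above tightens from "any GPU present" to "at least two GPUs", since a ddp_spawn test launches one process per device and cannot be exercised on a single-GPU box. The same skip pattern as a standalone sketch:

    import pytest
    import torch

    # Skip unless the machine has two or more CUDA devices; on a single-GPU
    # box the multi-GPU ddp_spawn code path would silently go untested.
    @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
    def test_only_runs_on_multi_gpu():
        assert torch.cuda.device_count() >= 2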
12 changes: 5 additions & 7 deletions tests/models/test_gpu.py
@@ -136,7 +136,11 @@ def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
     """Make sure DDP works with dataloaders passed to fit()"""
     tutils.set_random_master_port()
 
-    trainer_options = dict(
+    model = EvalModelTemplate()
+    fit_options = dict(train_dataloader=model.train_dataloader(),
+                       val_dataloaders=model.val_dataloader())
+
+    trainer = Trainer(
         default_root_dir=tmpdir,
         progress_bar_refresh_rate=0,
         max_epochs=1,
@@ -145,12 +149,6 @@ def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
         gpus=[0, 1],
         distributed_backend='ddp_spawn'
     )
-
-    model = EvalModelTemplate()
-    fit_options = dict(train_dataloader=model.train_dataloader(),
-                       val_dataloaders=model.val_dataloader())
-
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model, **fit_options)
     assert result == 1, "DDP doesn't work with dataloaders passed to fit()."
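This hunk also replaces the intermediate trainer_options dict with a direct constructor call. The two spellings build an identical Trainer; a sketch of the equivalence the refactor relies on:

    from pytorch_lightning import Trainer

    # Old style: collect keyword arguments in a dict, then unpack.
    trainer_options = dict(max_epochs=1, progress_bar_refresh_rate=0)
    trainer_a = Trainer(**trainer_options)

    # New style after this commit: pass the arguments directly.
    trainer_b = Trainer(max_epochs=1, progress_bar_refresh_rate=0)

    assert trainer_a.max_epochs == trainer_b.max_epochs == 1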
6 changes: 2 additions & 4 deletions tests/models/test_horovod.py
@@ -149,7 +149,8 @@ def validation_step(self, batch, *args, **kwargs):
 def test_horovod_multi_optimizer(tmpdir):
     model = TestGAN(**EvalModelTemplate.get_default_hparams())
 
-    trainer_options = dict(
+    # fit model
+    trainer = Trainer(
         default_root_dir=str(tmpdir),
         progress_bar_refresh_rate=0,
         max_epochs=1,
@@ -158,9 +159,6 @@ def test_horovod_multi_optimizer(tmpdir):
         deterministic=True,
         distributed_backend='horovod',
     )
-
-    # fit model
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model)
     assert result == 1, 'model failed to complete'
12 changes: 7 additions & 5 deletions tests/models/test_restore.py
@@ -36,6 +36,7 @@ def test_running_test_pretrained_model_distrib_dp(tmpdir):
         logger=logger,
         gpus=[0, 1],
         distributed_backend='dp',
+        default_root_dir=tmpdir,
     )
 
     # fit model
@@ -85,6 +86,7 @@ def test_running_test_pretrained_model_distrib_ddp_spawn(tmpdir):
         logger=logger,
         gpus=[0, 1],
         distributed_backend='ddp_spawn',
+        default_root_dir=tmpdir,
     )
 
     # fit model
@@ -130,6 +132,7 @@ def test_running_test_pretrained_model_cpu(tmpdir):
         limit_val_batches=0.2,
         checkpoint_callback=checkpoint,
         logger=logger,
+        default_root_dir=tmpdir,
     )
 
     # fit model
@@ -269,14 +272,13 @@ def test_model_saving_loading(tmpdir):
     # logger file to get meta
     logger = tutils.get_default_logger(tmpdir)
 
-    trainer_options = dict(
+    # fit model
+    trainer = Trainer(
         max_epochs=1,
         logger=logger,
-        checkpoint_callback=ModelCheckpoint(tmpdir)
+        checkpoint_callback=ModelCheckpoint(tmpdir),
+        default_root_dir=tmpdir,
     )
-
-    # fit model
-    trainer = Trainer(**trainer_options)
     result = trainer.fit(model)
 
     # traning complete
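In test_model_saving_loading, both the checkpoint callback and default_root_dir now point at tmpdir, so the whole save/load round trip stays inside the fixture directory. A sketch of that round trip, assuming the 2020-era ModelCheckpoint(filepath) positional signature and .ckpt files written directly under the given path:

    import glob
    import os

    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks import ModelCheckpoint
    from tests.base import EvalModelTemplate


    def test_save_then_restore(tmpdir):
        model = EvalModelTemplate()
        trainer = Trainer(
            max_epochs=1,
            checkpoint_callback=ModelCheckpoint(tmpdir),  # positional filepath (2020-era API)
            default_root_dir=tmpdir,
        )
        assert trainer.fit(model) == 1

        # the checkpoint written during fit can be reloaded from tmpdir
        ckpt_path = glob.glob(os.path.join(str(tmpdir), '*.ckpt'))[0]
        restored = EvalModelTemplate.load_from_checkpoint(ckpt_path)
        assert restored is not None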
21 changes: 10 additions & 11 deletions tests/trainer/test_dataloaders.py
@@ -236,7 +236,7 @@ def test_multiple_dataloaders_passed_to_fit(tmpdir, ckpt_path):
         default_root_dir=tmpdir,
         max_epochs=1,
         limit_val_batches=0.1,
-        limit_train_batches=0.2
+        limit_train_batches=0.2,
     )
     fit_options = dict(train_dataloader=model.dataloader(train=True),
                        val_dataloaders=[model.dataloader(train=False),
@@ -406,7 +406,7 @@ def test_mixing_of_dataloader_options(tmpdir, ckpt_path):
         default_root_dir=tmpdir,
         max_epochs=1,
         limit_val_batches=0.1,
-        limit_train_batches=0.2
+        limit_train_batches=0.2,
     )
 
     # fit model
@@ -524,13 +524,6 @@ def test_warning_with_few_workers(mock, tmpdir, ckpt_path):
     model = EvalModelTemplate()
 
     # logger file to get meta
-    trainer_options = dict(
-        default_root_dir=tmpdir,
-        max_epochs=1,
-        limit_val_batches=0.1,
-        limit_train_batches=0.2
-    )
-
     train_dl = model.dataloader(train=True)
     train_dl.num_workers = 0
 
@@ -542,7 +535,12 @@ def test_warning_with_few_workers(mock, tmpdir, ckpt_path):
 
     fit_options = dict(train_dataloader=train_dl,
                        val_dataloaders=val_dl)
-    trainer = Trainer(**trainer_options)
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        max_epochs=1,
+        limit_val_batches=0.1,
+        limit_train_batches=0.2,
+    )
 
     # fit model
     with pytest.warns(
@@ -595,7 +593,7 @@ def __len__(self):
 
 
 @pytest.mark.skipif(torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')
-def test_dataloader_reinit_for_subclass():
+def test_dataloader_reinit_for_subclass(tmpdir):
 
     class CustomDataLoader(torch.utils.data.DataLoader):
         def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,
@@ -612,6 +610,7 @@ def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,
         gpus=[0, 1],
         num_nodes=1,
         distributed_backend='ddp_spawn',
+        default_root_dir=tmpdir,
     )
 
     class CustomDummyObj:
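Note the signature change in test_dataloader_reinit_for_subclass: it now declares tmpdir as a parameter, which pytest injects automatically (the "add missing fixture" item from the commit message). Any test that names the fixture receives a fresh per-test temporary directory; a minimal illustration:

    # pytest provides tmpdir (a py.path.local) to any test that declares it;
    # no setup or registration is required, and each test gets its own directory.
    def test_receives_fresh_tmpdir(tmpdir):
        scratch = tmpdir.join('scratch.txt')
        scratch.write('hello')
        assert scratch.read() == 'hello'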
18 changes: 10 additions & 8 deletions tests/trainer/test_trainer.py
@@ -134,11 +134,13 @@ def test_gradient_accumulation_scheduling(tmpdir, schedule, expected):
 
     model = EvalModelTemplate()
 
-    trainer = Trainer(accumulate_grad_batches=schedule,
-                      limit_train_batches=0.8,
-                      limit_val_batches=0.8,
-                      max_epochs=4,
-                      default_root_dir=tmpdir)
+    trainer = Trainer(
+        accumulate_grad_batches=schedule,
+        limit_train_batches=0.8,
+        limit_val_batches=0.8,
+        max_epochs=4,
+        default_root_dir=tmpdir,
+    )
 
     # test optimizer call freq matches scheduler
     def _optimizer_step(epoch, batch_idx, optimizer, optimizer_idx,
@@ -444,7 +446,7 @@ def test_trainer_max_steps_and_epochs(tmpdir):
     trainer_options.update(
         default_root_dir=tmpdir,
         max_epochs=3,
-        max_steps=num_train_samples + 10
+        max_steps=num_train_samples + 10,
     )
 
     # fit model
@@ -458,7 +460,7 @@ def test_trainer_max_steps_and_epochs(tmpdir):
     # define less train epochs than steps
     trainer_options.update(
         max_epochs=2,
-        max_steps=trainer_options['max_epochs'] * 2 * num_train_samples
+        max_steps=trainer_options['max_epochs'] * 2 * num_train_samples,
     )
 
     # fit model
@@ -481,7 +483,7 @@ def test_trainer_min_steps_and_epochs(tmpdir):
         early_stop_callback=EarlyStopping(monitor='val_loss', min_delta=1.0),
         val_check_interval=2,
         min_epochs=1,
-        max_epochs=7
+        max_epochs=7,
     )
 
     # define less min steps than 1 epoch
