diff --git a/pytorch_lightning/trainer/__init__.py b/pytorch_lightning/trainer/__init__.py
index c4425b036f1e5..2fb90f1f523ed 100644
--- a/pytorch_lightning/trainer/__init__.py
+++ b/pytorch_lightning/trainer/__init__.py
@@ -801,7 +801,9 @@ def on_train_end(self, trainer, pl_module):
 replace_sampler_ddp
 ^^^^^^^^^^^^^^^^^^^
 
-Enables auto adding of distributed sampler.
+Enables auto adding of a distributed sampler. By default it will add ``shuffle=True``
+for the train sampler and ``shuffle=False`` for the val/test samplers. If you want to
+customize this, set ``replace_sampler_ddp=False`` and add your own distributed sampler.
 
 .. testcode::
 
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index 59a33dad7e5dd..0be565c86990d 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -295,7 +295,7 @@ def __init__(
 
             distributed_backend: The distributed backend to use (dp, ddp, ddp2, ddp_spawn, ddp_cpu)
 
-            precision: Full precision (32), half precision (16).
+            precision: Full precision (32), half precision (16). Can be used on CPU, GPU or TPUs.
 
             weights_summary: Prints a summary of the weights when training begins.
 
@@ -309,12 +309,13 @@ def __init__(
             num_sanity_val_steps: Sanity check runs n validation batches before starting the training routine.
                 Set it to `-1` to run all batches in all validation dataloaders. Default: 2
 
-            truncated_bptt_steps: Truncated back prop breaks performs backprop every k steps of
+            truncated_bptt_steps: Truncated back prop performs backprop every k steps of a much
+                longer sequence.
 
             resume_from_checkpoint: To resume training from a specific checkpoint pass in the path here.
                 This can be a URL.
 
-            profiler: To profile individual steps during training and assist in
+            profiler: To profile individual steps during training and assist in identifying bottlenecks.
 
             reload_dataloaders_every_epoch: Set to True to reload dataloaders every epoch
 
@@ -323,12 +324,14 @@ def __init__(
                 rate in self.lr or self.learning_rate in the LightningModule.
                 To use a different key, set a string instead of True with the key name.
 
-            replace_sampler_ddp: Explicitly enables or disables sampler replacement.
-                If not specified this will toggled automatically ddp is used
+            replace_sampler_ddp: Explicitly enables or disables sampler replacement. If not specified,
+                this will be toggled automatically when DDP is used. By default it will add
+                ``shuffle=True`` for the train sampler and ``shuffle=False`` for the val/test samplers.
+                To customize this, set ``replace_sampler_ddp=False`` and add your own distributed sampler.
 
             benchmark: If true enables cudnn.benchmark.
 
-            deterministic: If true enables cudnn.deterministic
+            deterministic: If true enables cudnn.deterministic.
 
             terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the
                 end of each training batch, if any of the parameters or the loss are NaN or +/-inf.
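
A minimal sketch of the customization path these docs describe, assuming DDP on 2 GPUs and a
hypothetical ``MyDataset`` (neither is part of this diff): with ``replace_sampler_ddp=False``,
Lightning leaves the dataloaders untouched, so you attach a
``torch.utils.data.distributed.DistributedSampler`` yourself.

.. code-block:: python

    import pytorch_lightning as pl
    from torch.utils.data import DataLoader
    from torch.utils.data.distributed import DistributedSampler


    class LitModel(pl.LightningModule):
        # ... training_step, configure_optimizers, etc. omitted ...

        def train_dataloader(self):
            dataset = MyDataset(train=True)  # hypothetical dataset, for illustration only
            # In DDP, shuffle via the sampler rather than the DataLoader
            sampler = DistributedSampler(dataset, shuffle=True)
            return DataLoader(dataset, batch_size=32, sampler=sampler)

        def val_dataloader(self):
            dataset = MyDataset(train=False)
            sampler = DistributedSampler(dataset, shuffle=False)
            return DataLoader(dataset, batch_size=32, sampler=sampler)


    # Lightning will not wrap these loaders with its own DistributedSampler
    trainer = pl.Trainer(gpus=2, distributed_backend='ddp', replace_sampler_ddp=False)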
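
Separately, the ``precision`` docstring change above amounts to a one-line usage sketch
(the choice of one GPU here is illustrative, not part of the diff):

.. code-block:: python

    # 16-bit training; 32 (full precision) is the default
    trainer = pl.Trainer(gpus=1, precision=16)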