diff --git a/pytorch_lightning/trainer/distrib_parts.py b/pytorch_lightning/trainer/distrib_parts.py
index c25070a6999d3..eb28119c76ccd 100644
--- a/pytorch_lightning/trainer/distrib_parts.py
+++ b/pytorch_lightning/trainer/distrib_parts.py
@@ -441,7 +441,7 @@ def transfer_batch_to_tpu(self, batch: Any, tpu_id: Optional[int] = None):
             batch: A tensor or collection of tensors.
             tpu_id: The id of the TPU core. If omitted, the first available core is chosen.
 
-        Returns:
+        Return:
             the tensor on the TPU device.
 
         See Also:
@@ -463,7 +463,7 @@ def transfer_batch_to_gpu(self, batch: Any, gpu_id: Optional[int] = None):
             batch: A tensor or collection of tensors.
             gpu_id: The id of the GPU device. If omitted, the first available GPU is chosen.
 
-        Returns:
+        Return:
             the tensor on the GPU device.
 
         See Also:
diff --git a/pytorch_lightning/utilities/apply_func.py b/pytorch_lightning/utilities/apply_func.py
index 6034967a99d2a..2c1191ad7e05d 100644
--- a/pytorch_lightning/utilities/apply_func.py
+++ b/pytorch_lightning/utilities/apply_func.py
@@ -47,7 +47,7 @@ def transfer_batch_to_device(batch: Any, device: torch.device):
             for a list of supported collection types.
         device: The device to which tensors should be moved
 
-    Returns:
+    Return:
         the same collection but with all contained tensors residing on the new device.
 
     See Also: