From c05077fae3b0f22fe37dfac472251754bacbc8a8 Mon Sep 17 00:00:00 2001
From: Justus Schock <12886177+justusschock@users.noreply.github.com>
Date: Thu, 14 May 2020 23:56:40 +0200
Subject: [PATCH] Enable non-blocking for gpu device transfer (#1843)

* Update distrib_parts.py

* Update CHANGELOG.md
---
 CHANGELOG.md                               | 2 ++
 pytorch_lightning/trainer/distrib_parts.py | 8 ++++++--
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d6d5fe74a12d9..94ad71ea5842e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -28,6 +28,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Changed
 
+- Enable `non-blocking` for device transfers to GPU ([#1843](https://github.com/PyTorchLightning/pytorch-lightning/pull/1843))
+
 - Replace mata_tags.csv with hparams.yaml ([#1271](https://github.com/PyTorchLightning/pytorch-lightning/pull/1271))
 
 - Reduction when `batch_size < num_gpus` ([#1609](https://github.com/PyTorchLightning/pytorch-lightning/pull/1609))
diff --git a/pytorch_lightning/trainer/distrib_parts.py b/pytorch_lightning/trainer/distrib_parts.py
index e8b45199a8e29..2825027c83aa1 100644
--- a/pytorch_lightning/trainer/distrib_parts.py
+++ b/pytorch_lightning/trainer/distrib_parts.py
@@ -449,10 +449,14 @@ def __transfer_data_to_device(self, batch, device, gpu_id=None):
         if device == 'gpu':
             # base case: object can be directly moved using `cuda` or `to`
             if callable(getattr(batch, 'cuda', None)):
-                return batch.cuda(gpu_id)
+                # non_blocking will be ignored if tensor is not pinned.
+                # so we can always set it to True
+                return batch.cuda(gpu_id, non_blocking=True)
 
             if callable(getattr(batch, 'to', None)):
-                return batch.to(torch.device('cuda', gpu_id))
+                # non_blocking will be ignored if tensor is not pinned.
+                # so we can always set it to True
+                return batch.to(torch.device('cuda', gpu_id), non_blocking=True)
 
         # when list
         if isinstance(batch, list):