From 27b7d8d8785918e2651e04a67991795f70c38d8d Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 11:30:51 -0400 Subject: [PATCH 01/28] training batch clean up --- pytorch_lightning/trainer/distrib_data_parallel.py | 9 ++++++++- pytorch_lightning/trainer/trainer.py | 8 ++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py index 59bf81e7128c5..1f0ab30b2c410 100644 --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -394,7 +394,7 @@ def spawn_ddp_children(self, model): local_rank = 0 self.ddp_train(local_rank, model, is_master=True) - def ddp_train(self, process_idx, model, is_master=False): + def ddp_train(self, process_idx, model, is_master=False, proc_offset=0): """ Entry point into a DP thread :param gpu_idx: @@ -402,6 +402,9 @@ def ddp_train(self, process_idx, model, is_master=False): :param cluster_obj: :return: """ + # offset the process id if requested + process_idx = process_idx + proc_offset + # show progressbar only on progress_rank 0 if (self.node_rank != 0 or process_idx != 0) and self.progress_bar_callback is not None: self.progress_bar_callback.disable() @@ -467,6 +470,10 @@ def ddp_train(self, process_idx, model, is_master=False): # continue training routine self.run_pretrain_routine(model) + # spawn removed the memory from the model + if self.distributed_backend == 'ddp_spawn': + self.save_spawn_weights(model) + def save_spawn_weights(self, model): """ Dump a temporary checkpoint after ddp ends to get weights out of the process diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 1f5b73f9be364..bb8f994507c31 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -876,9 +876,17 @@ def fit( self.ddp_train(task, model) elif self.distributed_backend == 'cpu_ddp': + 
self.__set_random_port() self.model = model mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model,)) + elif self.distributed_backend == 'ddp_spawn': + # spin up peers + mp.spawn(self.ddp_train, nprocs=self.num_processes-1, args=(model, False, 1), join=False) + + # stay in context for main proc + self.ddp_train(process_idx=self.num_processes, is_master=True) + elif self.distributed_backend == 'ddp': self.spawn_ddp_children(model) From d210d1943dd99b4a30b31caf2ae24c725e5307bd Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 11:31:13 -0400 Subject: [PATCH 02/28] training batch clean up --- pytorch_lightning/trainer/distrib_data_parallel.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py index 1f0ab30b2c410..c64bd28120330 100644 --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -470,10 +470,6 @@ def ddp_train(self, process_idx, model, is_master=False, proc_offset=0): # continue training routine self.run_pretrain_routine(model) - # spawn removed the memory from the model - if self.distributed_backend == 'ddp_spawn': - self.save_spawn_weights(model) - def save_spawn_weights(self, model): """ Dump a temporary checkpoint after ddp ends to get weights out of the process From 3b57580d47456a22046533ccdc783bc60959322a Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 11:35:24 -0400 Subject: [PATCH 03/28] training batch clean up --- pytorch_lightning/trainer/distrib_data_parallel.py | 2 +- pytorch_lightning/trainer/trainer.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py index c64bd28120330..b1310eb74d17f 100644 --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -457,7 
+457,7 @@ def ddp_train(self, process_idx, model, is_master=False, proc_offset=0): self.reinit_scheduler_properties(self.optimizers, self.lr_schedulers) # DDP2 uses all GPUs on the machine - if self.distributed_backend == 'ddp': + if self.distributed_backend == 'ddp' or self.distributed_backend == 'ddp_spawn': device_ids = [self.root_gpu] elif self.use_ddp2: device_ids = self.data_parallel_device_ids diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index bb8f994507c31..c6bbae7842903 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -246,7 +246,7 @@ def __init__( Use `row_log_interval` instead. Will remove 0.9.0. - distributed_backend: The distributed backend to use. + distributed_backend: The distributed backend to use (dp, ddp, ddp2, ddp_spawn) use_amp: .. warning:: .. deprecated:: 0.7.0 From 9c69793c78425986461c710614989900618a4dc4 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 11:37:59 -0400 Subject: [PATCH 04/28] training batch clean up --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index c6bbae7842903..7f482e1bb1730 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -882,7 +882,7 @@ def fit( elif self.distributed_backend == 'ddp_spawn': # spin up peers - mp.spawn(self.ddp_train, nprocs=self.num_processes-1, args=(model, False, 1), join=False) + mp.spawn(self.ddp_train, nprocs=self.num_processes - 1, args=(model, False, 1), join=False) # stay in context for main proc self.ddp_train(process_idx=self.num_processes, is_master=True) From 32f7e26dfaf4db9d529f96a5875d093735522d5c Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 11:51:30 -0400 Subject: [PATCH 05/28] training batch clean up --- pytorch_lightning/trainer/distrib_data_parallel.py | 1 + 1 file changed, 1 
insertion(+) diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py index b1310eb74d17f..5445d6afee03d 100644 --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -392,6 +392,7 @@ def spawn_ddp_children(self, model): sleep(delay) local_rank = 0 + import pdb; pdb.set_trace() self.ddp_train(local_rank, model, is_master=True) def ddp_train(self, process_idx, model, is_master=False, proc_offset=0): From fca185f556ea0962d1f9710ea68dfa5590903b88 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 11:54:57 -0400 Subject: [PATCH 06/28] training batch clean up --- pytorch_lightning/trainer/distrib_data_parallel.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py index 5445d6afee03d..e0662efaa890e 100644 --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -392,7 +392,6 @@ def spawn_ddp_children(self, model): sleep(delay) local_rank = 0 - import pdb; pdb.set_trace() self.ddp_train(local_rank, model, is_master=True) def ddp_train(self, process_idx, model, is_master=False, proc_offset=0): @@ -466,9 +465,11 @@ def ddp_train(self, process_idx, model, is_master=False, proc_offset=0): device_ids = None # allow user to configure ddp + print('configuring ddp') model = model.configure_ddp(model, device_ids) # continue training routine + print('training') self.run_pretrain_routine(model) def save_spawn_weights(self, model): From a7fb17976a4b68497f42c8ef26eb7e6db85c59db Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 11:55:12 -0400 Subject: [PATCH 07/28] training batch clean up --- pytorch_lightning/trainer/distrib_data_parallel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py index e0662efaa890e..09b61ee56a062 100644 --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -465,7 +465,7 @@ def ddp_train(self, process_idx, model, is_master=False, proc_offset=0): device_ids = None # allow user to configure ddp - print('configuring ddp') + print('configuring ddp', device_ids, self.root_gpu) model = model.configure_ddp(model, device_ids) # continue training routine From 40f7d0ae8918563a2d88d9ad18a61fbd9addec27 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 12:06:21 -0400 Subject: [PATCH 08/28] training batch clean up --- pytorch_lightning/trainer/distrib_data_parallel.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py index 09b61ee56a062..dc878c9a28df5 100644 --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -402,6 +402,7 @@ def ddp_train(self, process_idx, model, is_master=False, proc_offset=0): :param cluster_obj: :return: """ + print('ddp_train', process_idx) # offset the process id if requested process_idx = process_idx + proc_offset From 24c905e4e839eaeb9df9970d0a4afc40f2ee9f9e Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 12:08:33 -0400 Subject: [PATCH 09/28] training batch clean up --- pytorch_lightning/trainer/distrib_data_parallel.py | 1 + pytorch_lightning/trainer/trainer.py | 1 + 2 files changed, 2 insertions(+) diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py index dc878c9a28df5..aeec35d758859 100644 --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -378,6 +378,7 @@ def spawn_ddp_children(self, model): 
self.interactive_ddp_procs = [] for local_rank in range(1, self.num_processes): + print('launching local_rank', local_rank) env_copy = os.environ.copy() env_copy['LOCAL_RANK'] = f'{local_rank}' diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 7f482e1bb1730..5eb306860cdb6 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -888,6 +888,7 @@ def fit( self.ddp_train(process_idx=self.num_processes, is_master=True) elif self.distributed_backend == 'ddp': + print('starting spawn children') self.spawn_ddp_children(model) # 1 gpu or dp option triggers training using DP module From 6962d2e7e2d9d5ee70f1b7bd801492ae7d90cb75 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 12:10:31 -0400 Subject: [PATCH 10/28] training batch clean up --- pytorch_lightning/trainer/distrib_data_parallel.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py index aeec35d758859..9b975b31edeaf 100644 --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -471,6 +471,7 @@ def ddp_train(self, process_idx, model, is_master=False, proc_offset=0): model = model.configure_ddp(model, device_ids) # continue training routine + print('-'*100) print('training') self.run_pretrain_routine(model) From 7e365049b3090ce7a199b65b988c414b2dc1f6ef Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 12:11:48 -0400 Subject: [PATCH 11/28] training batch clean up --- pytorch_lightning/trainer/distrib_data_parallel.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py index 9b975b31edeaf..47fe50052037c 100644 --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -467,7 
+467,9 @@ def ddp_train(self, process_idx, model, is_master=False, proc_offset=0): device_ids = None # allow user to configure ddp + print('-'*100) print('configuring ddp', device_ids, self.root_gpu) + print('-'*100) model = model.configure_ddp(model, device_ids) # continue training routine From 8d08b4dac15d613819a57ade5b6944de29dc7d3e Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 12:13:33 -0400 Subject: [PATCH 12/28] training batch clean up --- pytorch_lightning/trainer/distrib_data_parallel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py index 47fe50052037c..83bfcd29e8d3a 100644 --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -221,7 +221,7 @@ def set_distributed_mode(self, distributed_backend): elif self.num_gpus > 1: self.use_dp = True - elif distributed_backend == "ddp": + elif distributed_backend in ['ddp', 'ddp_spawn']: if self.num_gpus == 0: if self.num_nodes > 1 or self.num_processes > 1: self.use_ddp = True # ddp_cpu From 3f8163148b58e159d6813b8df9ab7ce479eeafe1 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 12:14:23 -0400 Subject: [PATCH 13/28] training batch clean up --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 5eb306860cdb6..259b74f1b98a4 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -882,7 +882,7 @@ def fit( elif self.distributed_backend == 'ddp_spawn': # spin up peers - mp.spawn(self.ddp_train, nprocs=self.num_processes - 1, args=(model, False, 1), join=False) + mp.spawn(self.ddp_train, nprocs=self.num_processes - 1, args=(model, False, 1, ), join=False) # stay in context for main proc 
self.ddp_train(process_idx=self.num_processes, is_master=True) From 661bfa3cfbae5ef63e130cc0e2ac6d9b5a73e9e5 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 12:22:18 -0400 Subject: [PATCH 14/28] training batch clean up --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 259b74f1b98a4..03b38cbe758a0 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -882,7 +882,7 @@ def fit( elif self.distributed_backend == 'ddp_spawn': # spin up peers - mp.spawn(self.ddp_train, nprocs=self.num_processes - 1, args=(model, False, 1, ), join=False) + mp.spawn(self.ddp_train, nprocs=self.num_processes - 1, args=(model, False), join=False) # stay in context for main proc self.ddp_train(process_idx=self.num_processes, is_master=True) From 4a7f4faf8e5ad1d214da4e22f10e6e15d168c327 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 12:23:24 -0400 Subject: [PATCH 15/28] training batch clean up --- pytorch_lightning/trainer/trainer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 03b38cbe758a0..5d9f23fe1a6c5 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -882,10 +882,10 @@ def fit( elif self.distributed_backend == 'ddp_spawn': # spin up peers - mp.spawn(self.ddp_train, nprocs=self.num_processes - 1, args=(model, False), join=False) + mp.spawn(self.ddp_train, nprocs=self.num_processes - 1, args=(model, False, 1), join=False) # stay in context for main proc - self.ddp_train(process_idx=self.num_processes, is_master=True) + self.ddp_train(process_idx=0, model=model, is_master=True) elif self.distributed_backend == 'ddp': print('starting spawn children') From ae7ffee301381205ed180cb00e87531c2ceba142 Mon Sep 17 00:00:00 2001 From: 
William Falcon Date: Mon, 8 Jun 2020 12:26:15 -0400 Subject: [PATCH 16/28] training batch clean up --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 5d9f23fe1a6c5..ced3e4a7314d1 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -882,7 +882,7 @@ def fit( elif self.distributed_backend == 'ddp_spawn': # spin up peers - mp.spawn(self.ddp_train, nprocs=self.num_processes - 1, args=(model, False, 1), join=False) + mp.spawn(self.ddp_train, nprocs=self.num_processes - 1, args=(model, False, 1)) # stay in context for main proc self.ddp_train(process_idx=0, model=model, is_master=True) From aa78f87625213352cd812d5834b5d57f8d5cfac9 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 12:30:05 -0400 Subject: [PATCH 17/28] training batch clean up --- pytorch_lightning/trainer/trainer.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index ced3e4a7314d1..11be94208ef94 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -885,6 +885,8 @@ def fit( mp.spawn(self.ddp_train, nprocs=self.num_processes - 1, args=(model, False, 1)) # stay in context for main proc + print('*'*100) + print('main proc start') self.ddp_train(process_idx=0, model=model, is_master=True) elif self.distributed_backend == 'ddp': From a352478d1ac982f6da121be1323ccfbe026e20af Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 12:36:25 -0400 Subject: [PATCH 18/28] training batch clean up --- pytorch_lightning/trainer/trainer.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 11be94208ef94..a4d197193276d 100644 --- a/pytorch_lightning/trainer/trainer.py +++ 
b/pytorch_lightning/trainer/trainer.py @@ -881,13 +881,10 @@ def fit( mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model,)) elif self.distributed_backend == 'ddp_spawn': - # spin up peers - mp.spawn(self.ddp_train, nprocs=self.num_processes - 1, args=(model, False, 1)) + model.share_memory() - # stay in context for main proc - print('*'*100) - print('main proc start') - self.ddp_train(process_idx=0, model=model, is_master=True) + # spin up peers + mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model, )) elif self.distributed_backend == 'ddp': print('starting spawn children') From 8368b03ebd0666a1d6b07c4cad1fd2a4301f01aa Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 12:37:50 -0400 Subject: [PATCH 19/28] training batch clean up --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index a4d197193276d..e269b0f2ea2ed 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -881,7 +881,7 @@ def fit( mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model,)) elif self.distributed_backend == 'ddp_spawn': - model.share_memory() + # model.share_memory() # spin up peers mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model, )) From 6fde56ac708829918a76b3092ffa504bd3597472 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 12:52:15 -0400 Subject: [PATCH 20/28] training batch clean up --- pytorch_lightning/trainer/distrib_data_parallel.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py index 83bfcd29e8d3a..3af1354a5963f 100644 --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -427,7 +427,13 @@ def ddp_train(self, process_idx, model, is_master=False, 
proc_offset=0): # try to init for 20 times at max in case ports are taken # where to store ip_table model.trainer = self + print('-'*100) + print('starting ddp') + print('-'*100) model.init_ddp_connection(self.proc_rank, self.world_size, self.is_slurm_managing_tasks) + print('-'*100) + print('ddp started') + print('-'*100) # CHOOSE OPTIMIZER # allow for lr schedulers as well From 77a442eedd495d3210f9f2be7f11b1eef03a6533 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 16:35:18 -0400 Subject: [PATCH 21/28] adding spawn --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index e269b0f2ea2ed..a4d197193276d 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -881,7 +881,7 @@ def fit( mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model,)) elif self.distributed_backend == 'ddp_spawn': - # model.share_memory() + model.share_memory() # spin up peers mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model, )) From 7256e2733a61239d1ebcf114dbe14d5d86e7a27c Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 16:36:36 -0400 Subject: [PATCH 22/28] adding spawn --- pytorch_lightning/trainer/data_loading.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py index 5361f47b5f3d0..d5ee54e1a1658 100644 --- a/pytorch_lightning/trainer/data_loading.py +++ b/pytorch_lightning/trainer/data_loading.py @@ -139,6 +139,7 @@ def _get_distributed_sampler(self, dataloader): else: world_size = { 'ddp': self.num_nodes * self.num_processes, + 'ddp_spawn': self.num_nodes * self.num_processes, 'ddp2': self.num_nodes, 'ddp_cpu': self.num_processes * self.num_nodes } From 26d7e9e671f160081cb3638b5140cb25ba842e91 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 16:37:36 -0400 
Subject: [PATCH 23/28] adding spawn --- pytorch_lightning/trainer/distrib_data_parallel.py | 12 ------------ pytorch_lightning/trainer/trainer.py | 1 - 2 files changed, 13 deletions(-) diff --git a/pytorch_lightning/trainer/distrib_data_parallel.py b/pytorch_lightning/trainer/distrib_data_parallel.py index 3af1354a5963f..a17e386ee168c 100644 --- a/pytorch_lightning/trainer/distrib_data_parallel.py +++ b/pytorch_lightning/trainer/distrib_data_parallel.py @@ -403,7 +403,6 @@ def ddp_train(self, process_idx, model, is_master=False, proc_offset=0): :param cluster_obj: :return: """ - print('ddp_train', process_idx) # offset the process id if requested process_idx = process_idx + proc_offset @@ -427,13 +426,7 @@ def ddp_train(self, process_idx, model, is_master=False, proc_offset=0): # try to init for 20 times at max in case ports are taken # where to store ip_table model.trainer = self - print('-'*100) - print('starting ddp') - print('-'*100) model.init_ddp_connection(self.proc_rank, self.world_size, self.is_slurm_managing_tasks) - print('-'*100) - print('ddp started') - print('-'*100) # CHOOSE OPTIMIZER # allow for lr schedulers as well @@ -473,14 +466,9 @@ def ddp_train(self, process_idx, model, is_master=False, proc_offset=0): device_ids = None # allow user to configure ddp - print('-'*100) - print('configuring ddp', device_ids, self.root_gpu) - print('-'*100) model = model.configure_ddp(model, device_ids) # continue training routine - print('-'*100) - print('training') self.run_pretrain_routine(model) def save_spawn_weights(self, model): diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index a4d197193276d..1cf216eac345d 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -887,7 +887,6 @@ def fit( mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model, )) elif self.distributed_backend == 'ddp': - print('starting spawn children') self.spawn_ddp_children(model) # 1 gpu or dp 
option triggers training using DP module From 34954e7f1d8427fd93f41eed596205075a5b0e05 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 17:04:44 -0400 Subject: [PATCH 24/28] adding spawn --- pytorch_lightning/trainer/trainer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 1cf216eac345d..5690bfc6e16ac 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -880,11 +880,11 @@ def fit( self.model = model mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model,)) - elif self.distributed_backend == 'ddp_spawn': + elif self.distributed_backend == 'ddp_fork': model.share_memory() # spin up peers - mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model, )) + mp.start_processes(self.ddp_train, nprocs=self.num_processes, args=(model, ), start_method='fork') elif self.distributed_backend == 'ddp': self.spawn_ddp_children(model) From 6d285eb630c388d182e4313a86737413b7028e78 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 17:05:34 -0400 Subject: [PATCH 25/28] adding spawn --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 5690bfc6e16ac..6bd1f98b212d4 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -880,7 +880,7 @@ def fit( self.model = model mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model,)) - elif self.distributed_backend == 'ddp_fork': + elif self.distributed_backend == 'ddp_spawn': model.share_memory() # spin up peers From c890a70432507667ac7fef655b19f30b1f05c275 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 17:06:47 -0400 Subject: [PATCH 26/28] adding spawn --- pytorch_lightning/trainer/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 6bd1f98b212d4..1cf216eac345d 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -884,7 +884,7 @@ def fit( model.share_memory() # spin up peers - mp.start_processes(self.ddp_train, nprocs=self.num_processes, args=(model, ), start_method='fork') + mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model, )) elif self.distributed_backend == 'ddp': self.spawn_ddp_children(model) From 0f97fe0e368418483d30675f92569685e1af22f9 Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 17:31:26 -0400 Subject: [PATCH 27/28] adding spawn --- docs/source/multi_gpu.rst | 96 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 95 insertions(+), 1 deletion(-) diff --git a/docs/source/multi_gpu.rst b/docs/source/multi_gpu.rst index 212aa6e512b8e..298086e6e3550 100644 --- a/docs/source/multi_gpu.rst +++ b/docs/source/multi_gpu.rst @@ -200,7 +200,8 @@ Distributed modes Lightning allows multiple ways of training - Data Parallel (`distributed_backend='dp'`) (multiple-gpus, 1 machine) -- DistributedDataParallel (`distributed_backend='ddp'`) (multiple-gpus across many machines). +- DistributedDataParallel (`distributed_backend='ddp'`) (multiple-gpus across many machines (python script based)). +- DistributedDataParallel (`distributed_backend='ddp_spawn'`) (multiple-gpus across many machines (spawn based)). - DistributedDataParallel 2 (`distributed_backend='ddp2'`) (dp in a machine, ddp across machines). - Horovod (`distributed_backend='horovod'`) (multi-machine, multi-gpu, configured at runtime) - TPUs (`tpu_cores=8|x`) (tpu or TPU pod) @@ -253,6 +254,25 @@ Distributed Data Parallel # train on 32 GPUs (4 nodes) trainer = Trainer(gpus=8, distributed_backend='ddp', num_nodes=4) +This Lightning implementation of ddp calls your script under the hood multiple times with the correct environment +variables. 
If your code does not support this (ie: jupyter notebook, colab, or a nested script without a root package), +use `dp` or `ddp_spawn` + +.. code-block:: bash + + # example for 3 GPUs ddp + MASTER_ADDR=localhost MASTER_PORT=random() WORLD_SIZE=3 NODE_RANK=0 LOCAL_RANK=0 python my_file.py --gpus 3 --etc + MASTER_ADDR=localhost MASTER_PORT=random() WORLD_SIZE=3 NODE_RANK=1 LOCAL_RANK=0 python my_file.py --gpus 3 --etc + MASTER_ADDR=localhost MASTER_PORT=random() WORLD_SIZE=3 NODE_RANK=2 LOCAL_RANK=0 python my_file.py --gpus 3 --etc + +The reason we use ddp this way is because `ddp_spawn` has a few limitations (because of Python and PyTorch): + +1. Since `.spawn()` trains the model in subprocesses, the model on the main process does not get updated. +2. Dataloader(num_workers=N) where N is large bottlenecks training with ddp... + ie: it will be VERY slow or not work at all. This is a PyTorch limitation. + +However, if you don't mind these limitations, please use `ddp_spawn`. + Distributed Data Parallel 2 ^^^^^^^^^^^^^^^^^^^^^^^^^^^ In certain cases, it's advantageous to use all batches on the same machine instead of a subset. @@ -275,6 +295,75 @@ In this case, we can use ddp2 which behaves like dp in a machine and ddp across # train on 32 GPUs (4 nodes) trainer = Trainer(gpus=8, distributed_backend='ddp2', num_nodes=4) +Distributed Data Parallel Spawn +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +`ddp_spawn` is exactly like `ddp` except that it uses .spawn to start the training processes. + +.. warning:: It is STRONGLY recommended to use `ddp` for speed and performance. + +.. code-block:: python + + mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model, )) + +Here's how to call this. + +.. code-block:: python + + # train on 8 GPUs (same machine (ie: node)) + trainer = Trainer(gpus=8, distributed_backend='ddp_spawn') + +Use this method if your script does not support being called from the command line (ie: it is nested without a root
However, we STRONGLY discourage this use because it has limitations (because of Python and PyTorch): + +1. The model you pass in will not update. Please save a checkpoint and restore from there. +2. Set Dataloader(num_workers=0) or it will bottleneck training. + +`ddp` is MUCH faster than `ddp_spawn`. We recommend you install a top-level module for your project using setup.py + +.. code-block:: python + + # setup.py + #!/usr/bin/env python + + from setuptools import setup, find_packages + + setup(name='src', + version='0.0.1', + description='Describe Your Cool Project', + author='', + author_email='', + url='https://github.com/YourSeed', # REPLACE WITH YOUR OWN GITHUB PROJECT LINK + install_requires=[ + 'pytorch-lightning' + ], + packages=find_packages() + ) + +Then setup your project like so: + +.. code-block:: bash + + /project + /src + some_file.py + /or_a_folder + setup.py + +Then install as a root-level package + +.. code-block:: bash + + cd /project + pip install -e . + +Now you can call your scripts anywhere + +.. code-block:: bash + + cd /project/src + python some_file.py --distributed_backend 'ddp' --gpus 8 + + Horovod ^^^^^^^ `Horovod `_ allows the same training script to be used for single-GPU, @@ -516,3 +605,8 @@ And then launch the elastic job with: See the official `PytorchElastic documentation `_ for details on installation and more use cases. + +Jupyter Notebooks +----------------- +Unfortunately any `ddp_` is not supported in jupyter notebooks. Please use `dp` for multiple GPUs. This is a known +Jupyter issue. If you feel like taking a stab at adding this support, feel free to submit a PR! 
From cf1de1891179f77c72b99d8d14e0c2de061e3dcf Mon Sep 17 00:00:00 2001 From: William Falcon Date: Mon, 8 Jun 2020 17:36:17 -0400 Subject: [PATCH 28/28] adding spawn --- docs/source/multi_gpu.rst | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/docs/source/multi_gpu.rst b/docs/source/multi_gpu.rst index 298086e6e3550..2596f6a73b478 100644 --- a/docs/source/multi_gpu.rst +++ b/docs/source/multi_gpu.rst @@ -270,6 +270,7 @@ The reason we use ddp this way is because `ddp_spawn` has a few limitations (bec 1. Since `.spawn()` trains the model in subprocesses, the model on the main process does not get updated. 2. Dataloader(num_workers=N) where N is large bottlenecks training with ddp... ie: it will be VERY slow or not work at all. This is a PyTorch limitation. +3. Forces everything to be picklable. However, if you don't mind these limitations, please use `ddp_spawn`. @@ -610,3 +611,18 @@ Jupyter Notebooks ----------------- Unfortunately any `ddp_` is not supported in jupyter notebooks. Please use `dp` for multiple GPUs. This is a known Jupyter issue. If you feel like taking a stab at adding this support, feel free to submit a PR! + +Pickle Errors +-------------- +Multi-GPU training sometimes requires your model to be pickled. If you run into an issue with pickling +try the following to figure out the issue + +.. code-block:: python + + import pickle + + model = YourModel() + pickle.dumps(model) + +However, if you use `ddp` the pickling requirement is not there and you should be fine. If you use `ddp_spawn` the +pickling requirement remains. This is a limitation of Python.