diff --git a/CHANGELOG.md b/CHANGELOG.md
index beb5f70b1da73..91c163c2616bf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -240,6 +240,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - Deprecated `.get_model()` with explicit `.lightning_module` property ([#6035](https://github.com/PyTorchLightning/pytorch-lightning/pull/6035))
 
+- Deprecated Trainer attribute `accelerator_backend` in favor of `accelerator` ([#6034](https://github.com/PyTorchLightning/pytorch-lightning/pull/6034))
+
+
+
 ### Removed
 
 - Removed deprecated checkpoint argument `filepath` ([#5321](https://github.com/PyTorchLightning/pytorch-lightning/pull/5321))
diff --git a/pl_examples/basic_examples/conv_sequential_example.py b/pl_examples/basic_examples/conv_sequential_example.py
index bba8b489a022f..b558020838cdb 100644
--- a/pl_examples/basic_examples/conv_sequential_example.py
+++ b/pl_examples/basic_examples/conv_sequential_example.py
@@ -222,6 +222,6 @@ def instantiate_datamodule(args):
     trainer.fit(model, cifar10_dm)
     trainer.test(model, datamodule=cifar10_dm)
 
-    if trainer.accelerator_backend.rpc_enabled:
+    if trainer.accelerator.rpc_enabled:
         # Called at the end of trainer to ensure all processes are killed
         trainer.training_type_plugin.exit_rpc_process()
diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py
index 91d2b3565d193..473a792d3ba44 100644
--- a/pytorch_lightning/core/lightning.py
+++ b/pytorch_lightning/core/lightning.py
@@ -41,7 +41,6 @@
 from pytorch_lightning.utilities import rank_zero_warn
 from pytorch_lightning.utilities.apply_func import apply_to_collection, convert_to_tensors
 from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin
-from pytorch_lightning.utilities.distributed import all_gather_ddp_if_available
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.parsing import AttributeDict, collect_init_args, get_init_args
 
@@ -448,11 +447,7 @@ def all_gather(
             the output will also be a collection with tensors of this shape.
         """
         group = group if group is not None else torch.distributed.group.WORLD
-        if self.trainer.accelerator_backend is not None:
-            all_gather = self.trainer.accelerator_backend.all_gather
-        else:
-            all_gather = all_gather_ddp_if_available
-
+        all_gather = self.trainer.accelerator.all_gather
         data = convert_to_tensors(data, device=self.device)
         all_gather = partial(all_gather, group=group, sync_grads=sync_grads)
         return apply_to_collection(data, torch.Tensor, all_gather)
diff --git a/pytorch_lightning/core/optimizer.py b/pytorch_lightning/core/optimizer.py
index 06c0323466cee..b6aca3b457a1f 100644
--- a/pytorch_lightning/core/optimizer.py
+++ b/pytorch_lightning/core/optimizer.py
@@ -132,7 +132,7 @@ def __optimizer_step(self, closure: Optional[Callable] = None, profiler_name: st
         model = trainer.lightning_module
 
         with trainer.profiler.profile(profiler_name):
-            trainer.accelerator_backend.optimizer_step(optimizer, self._optimizer_idx, lambda_closure=closure, **kwargs)
+            trainer.accelerator.optimizer_step(optimizer, self._optimizer_idx, lambda_closure=closure, **kwargs)
 
         if self._trainer.train_loop.automatic_optimization:
             trainer.train_loop.on_before_zero_grad(optimizer)
diff --git a/pytorch_lightning/plugins/training_type/deepspeed.py b/pytorch_lightning/plugins/training_type/deepspeed.py
index 69fdb4c19a4b6..d9949b97f6483 100644
--- a/pytorch_lightning/plugins/training_type/deepspeed.py
+++ b/pytorch_lightning/plugins/training_type/deepspeed.py
@@ -185,7 +185,7 @@ def init_deepspeed(self):
         self._format_config()
         self._config_initialized = True
 
-        precision = self.lightning_module.trainer.accelerator_backend.precision
+        precision = self.lightning_module.trainer.accelerator.precision
         model = LightningDeepSpeedModule(pl_module=self.model, precision=precision)
 
         if self.lightning_module.trainer.training:
diff --git a/pytorch_lightning/plugins/training_type/tpu_spawn.py b/pytorch_lightning/plugins/training_type/tpu_spawn.py
index 205d1c3bf6da9..cd8a132c07786 100644
--- a/pytorch_lightning/plugins/training_type/tpu_spawn.py
+++ b/pytorch_lightning/plugins/training_type/tpu_spawn.py
@@ -90,7 +90,7 @@ def new_process(self, process_idx: int, trainer, mp_queue) -> None:
             trainer.progress_bar_callback.disable()
 
         self.model_to_device()
-        trainer.accelerator_backend.setup_optimizers(trainer)
+        trainer.accelerator.setup_optimizers(trainer)
         trainer.precision_plugin.connect(self._model, None, None)
 
         # replace trainer save_checkpoint to use `xm.save`
diff --git a/pytorch_lightning/trainer/connectors/checkpoint_connector.py b/pytorch_lightning/trainer/connectors/checkpoint_connector.py
index 4f5238a570ede..3b75f406b1917 100644
--- a/pytorch_lightning/trainer/connectors/checkpoint_connector.py
+++ b/pytorch_lightning/trainer/connectors/checkpoint_connector.py
@@ -219,8 +219,7 @@ def hpc_save(self, folderpath: str, logger):
 
         model.on_hpc_save(checkpoint)
 
-        if self.trainer.accelerator_backend:
-            checkpoint = self.trainer.accelerator_backend.on_save(checkpoint)
+        checkpoint = self.trainer.accelerator.on_save(checkpoint)
 
         # do the actual save
         # TODO: fix for anything with multiprocess DP, DDP, DDP2
@@ -286,7 +285,7 @@ def dump_checkpoint(self, weights_only: bool = False) -> dict:
         optimizer_states = []
         for i, optimizer in enumerate(self.trainer.optimizers):
             # Rely on accelerator to dump optimizer state
-            optimizer_state = self.trainer.accelerator_backend.optimizer_state(optimizer)
+            optimizer_state = self.trainer.accelerator.optimizer_state(optimizer)
             optimizer_states.append(optimizer_state)
 
         checkpoint['optimizer_states'] = optimizer_states
diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py
index 6d8fa4ad9040f..06a3da750032c 100644
--- a/pytorch_lightning/trainer/data_loading.py
+++ b/pytorch_lightning/trainer/data_loading.py
@@ -51,7 +51,7 @@ class TrainerDataLoadingMixin(ABC):
     limit_val_batches: Union[int, float]
     limit_test_batches: Union[int, float]
     replace_sampler_ddp: bool
-    accelerator_backend: Accelerator
+    accelerator: Accelerator
     num_nodes: int
     num_processes: int
     distributed_backend: Optional[str]
@@ -398,8 +398,7 @@ def request_dataloader(self, dataloader_fx: Callable) -> DataLoader:
         dataloader = dataloader_fx()
         dataloader = self._flatten_dl_only(dataloader)
 
-        if self.accelerator_backend is not None:
-            self.accelerator_backend.barrier('get_dataloaders')
+        self.accelerator.barrier('get_dataloaders')
         return dataloader
 
     def _flatten_dl_only(self, dataloaders):
diff --git a/pytorch_lightning/trainer/deprecated_api.py b/pytorch_lightning/trainer/deprecated_api.py
index e1eecf26ed70e..46cfc545c889d 100644
--- a/pytorch_lightning/trainer/deprecated_api.py
+++ b/pytorch_lightning/trainer/deprecated_api.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from pytorch_lightning.accelerators import Accelerator
 from pytorch_lightning.core.lightning import LightningModule
 from pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector
 from pytorch_lightning.trainer.states import RunningStage
@@ -133,10 +134,19 @@ def use_single_gpu(self, val: bool) -> None:
         self.accelerator_connector._device_type = DeviceType.GPU
 
 
-class DeprecatedModelAttributes:
+class DeprecatedTrainerAttributes:
 
+    accelerator: Accelerator
     lightning_module = LightningModule
 
+    @property
+    def accelerator_backend(self) -> Accelerator:
+        rank_zero_warn(
+            "The `Trainer.accelerator_backend` attribute is deprecated in favor of `Trainer.accelerator`"
+            " since 1.2 and will be removed in v1.4.", DeprecationWarning
+        )
+        return self.accelerator
+
     def get_model(self) -> LightningModule:
         rank_zero_warn(
             "The use of `Trainer.get_model()` is deprecated in favor of `Trainer.lightning_module`"
diff --git a/pytorch_lightning/trainer/evaluation_loop.py b/pytorch_lightning/trainer/evaluation_loop.py
index 284baff3e2a63..087741aa69c2b 100644
--- a/pytorch_lightning/trainer/evaluation_loop.py
+++ b/pytorch_lightning/trainer/evaluation_loop.py
@@ -157,11 +157,11 @@ def evaluation_step(self, batch, batch_idx, dataloader_idx):
         if self.testing:
             model_ref._current_fx_name = "test_step"
             with self.trainer.profiler.profile("test_step"):
-                output = self.trainer.accelerator_backend.test_step(args)
+                output = self.trainer.accelerator.test_step(args)
         else:
             model_ref._current_fx_name = "validation_step"
             with self.trainer.profiler.profile("validation_step"):
-                output = self.trainer.accelerator_backend.validation_step(args)
+                output = self.trainer.accelerator.validation_step(args)
 
         # capture any logged information
         self.trainer.logger_connector.cache_logged_metrics()
diff --git a/pytorch_lightning/trainer/predict_loop.py b/pytorch_lightning/trainer/predict_loop.py
index 4fecbbaf05348..6b801cc7f5dea 100644
--- a/pytorch_lightning/trainer/predict_loop.py
+++ b/pytorch_lightning/trainer/predict_loop.py
@@ -74,7 +74,7 @@ def predict(self, batch, batch_idx, dataloader_idx):
         model_ref = self.trainer.lightning_module
 
         model_ref._current_fx_name = "predict"
-        predictions = self.trainer.accelerator_backend.predict(args)
+        predictions = self.trainer.accelerator.predict(args)
         self._predictions[dataloader_idx].append(predictions)
         self.trainer._progress_bar_callback.on_predict_batch_end(
             self.trainer, model_ref, predictions, batch, batch_idx, dataloader_idx
diff --git a/pytorch_lightning/trainer/properties.py b/pytorch_lightning/trainer/properties.py
index 47aad2710394d..c061c6ef28d4c 100644
--- a/pytorch_lightning/trainer/properties.py
+++ b/pytorch_lightning/trainer/properties.py
@@ -62,11 +62,6 @@ class TrainerProperties(ABC):
     def accelerator(self) -> Accelerator:
         return self.accelerator_connector.accelerator
 
-    @property
-    def accelerator_backend(self) -> Accelerator:
-        # for backward compatibility
-        return self.accelerator
-
     @property
     def distributed_backend(self) -> Optional[str]:
         # for backward compatibility
@@ -138,7 +133,7 @@ def log_dir(self) -> Optional[str]:
         else:
             dirpath = getattr(self.logger, 'log_dir' if isinstance(self.logger, TensorBoardLogger) else 'save_dir')
 
-        dirpath = self.accelerator_backend.broadcast(dirpath)
+        dirpath = self.accelerator.broadcast(dirpath)
         return dirpath
 
     @property
@@ -360,7 +355,7 @@ def lightning_optimizers(self) -> List[LightningOptimizer]:
 
     @property
     def lightning_module(self) -> LightningModule:
-        return self.accelerator_backend.lightning_module
+        return self.accelerator.lightning_module
 
     @property
     def optimizers(self) -> Optional[List[Optimizer]]:
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index e1c2bcbbbce71..10545a075cb32 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -45,7 +45,7 @@
 from pytorch_lightning.trainer.connectors.slurm_connector import SLURMConnector
 from pytorch_lightning.trainer.connectors.training_trick_connector import TrainingTricksConnector
 from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin
-from pytorch_lightning.trainer.deprecated_api import DeprecatedDistDeviceAttributes, DeprecatedModelAttributes
+from pytorch_lightning.trainer.deprecated_api import DeprecatedDistDeviceAttributes, DeprecatedTrainerAttributes
 from pytorch_lightning.trainer.evaluation_loop import EvaluationLoop
 from pytorch_lightning.trainer.logging import TrainerLoggingMixin
 from pytorch_lightning.trainer.model_hooks import TrainerModelHooksMixin
@@ -80,7 +80,7 @@ class Trainer(
     TrainerTrainingTricksMixin,
     TrainerDataLoadingMixin,
     DeprecatedDistDeviceAttributes,
-    DeprecatedModelAttributes,
+    DeprecatedTrainerAttributes,
 ):
 
     @overwrite_by_env_vars
@@ -470,7 +470,7 @@ def fit(
         # ----------------------------
         self.call_setup_hook(model)
         self.call_hook("on_before_accelerator_backend_setup", model)
-        self.accelerator_backend.setup(self, model)
+        self.accelerator.setup(self, model)
         self.setup_trainer(model)
 
         # ----------------------------
@@ -533,24 +533,24 @@ def fit(
 
         self._set_running_stage(None, model)
 
-        return self.accelerator_backend.results or 1
+        return self.accelerator.results or 1
 
     def pre_dispatch(self):
-        self.accelerator_backend.pre_dispatch()
+        self.accelerator.pre_dispatch()
 
     def post_dispatch(self):
-        self.accelerator_backend.post_dispatch()
-        self.accelerator_backend.teardown()
+        self.accelerator.post_dispatch()
+        self.accelerator.teardown()
 
     def dispatch(self):
         if self.testing:
-            self.accelerator_backend.start_testing(self)
+            self.accelerator.start_testing(self)
 
         elif self.predicting:
-            self.accelerator_backend.start_predicting(self)
+            self.accelerator.start_predicting(self)
 
         else:
-            self.accelerator_backend.start_training(self)
+            self.accelerator.start_training(self)
 
     def train_or_test_or_predict(self):
         if self.testing:
@@ -949,7 +949,7 @@ def __test_using_best_weights(self, ckpt_path, test_dataloaders):
             )
             return {}
         if not self._device_type == DeviceType.TPU:
-            self.accelerator_backend.barrier()
+            self.accelerator.barrier()
 
         ckpt = pl_load(ckpt_path, map_location=lambda storage, loc: storage)
         model.load_state_dict(ckpt['state_dict'])
@@ -1109,8 +1109,8 @@ def call_hook(self, hook_name, *args, **kwargs):
 
             # if the PL module doesn't have the hook then call the accelerator
             # used to auto-reduce things for the user with Results obj
-            elif hasattr(self.accelerator_backend, hook_name):
-                accelerator_hook = getattr(self.accelerator_backend, hook_name)
+            elif hasattr(self.accelerator, hook_name):
+                accelerator_hook = getattr(self.accelerator, hook_name)
                 output = accelerator_hook(*args, **kwargs)
 
         if not skip:
diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py
index 9a2a9da1636bf..9d10a1f67c5dc 100644
--- a/pytorch_lightning/trainer/training_loop.py
+++ b/pytorch_lightning/trainer/training_loop.py
@@ -290,8 +290,8 @@ def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
             model_ref._current_fx_name = 'training_step'
             model_ref._results = Result()
             with self.trainer.profiler.profile("training_step"):
-                training_step_output = self.trainer.accelerator_backend.training_step(args)
-                self.trainer.accelerator_backend.post_training_step()
+                training_step_output = self.trainer.accelerator.training_step(args)
+                self.trainer.accelerator.post_training_step()
 
             self.trainer.logger_connector.cache_logged_metrics()
 
@@ -438,14 +438,14 @@ def on_before_zero_grad(self, optimizer):
         self.trainer.call_hook('on_before_zero_grad', optimizer)
 
     def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
-        self.trainer.accelerator_backend.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
+        self.trainer.accelerator.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
 
     def track_and_norm_grad(self, optimizer):
         # track gradient norms
         grad_norm_dic = self._track_gradient_norm()
 
         # clip gradients
-        self.trainer.accelerator_backend.clip_gradients(optimizer, self.trainer.gradient_clip_val)
+        self.trainer.accelerator.clip_gradients(optimizer, self.trainer.gradient_clip_val)
         self._cur_grad_norm_dict = grad_norm_dic
 
     def _track_gradient_norm(self):
@@ -769,9 +769,9 @@ def backward(self, result, optimizer, opt_idx, *args, **kwargs):
 
         # backward can be called manually in the training loop
         if isinstance(result, torch.Tensor):
-            self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)
+            self.trainer.accelerator.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)
         else:
-            result.closure_loss = self.trainer.accelerator_backend.backward(
+            result.closure_loss = self.trainer.accelerator.backward(
                 result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
             )
 
diff --git a/tests/accelerators/test_accelerator_connector.py b/tests/accelerators/test_accelerator_connector.py
index c0f6c0c0a5b9b..76d4a597d8ecb 100644
--- a/tests/accelerators/test_accelerator_connector.py
+++ b/tests/accelerators/test_accelerator_connector.py
@@ -33,7 +33,7 @@ def test_accelerator_choice_cpu(tmpdir):
         default_root_dir=tmpdir,
         fast_dev_run=True,
     )
-    assert isinstance(trainer.accelerator_backend, CPUAccelerator)
+    assert isinstance(trainer.accelerator, CPUAccelerator)
    assert isinstance(trainer.training_type_plugin, SingleDevicePlugin)
 
 
@@ -42,7 +42,7 @@ def test_accelerator_choice_ddp_cpu(tmpdir):
         fast_dev_run=True,
         accelerator='ddp_cpu',
     )
-    assert isinstance(trainer.accelerator_backend, CPUAccelerator)
+    assert isinstance(trainer.accelerator, CPUAccelerator)
     assert isinstance(trainer.training_type_plugin, DDPSpawnPlugin)
     assert isinstance(trainer.training_type_plugin.cluster_environment, TorchElasticEnvironment)
 
@@ -56,7 +56,7 @@ def test_accelerator_choice_ddp(cuda_available_mock, device_count_mock):
         accelerator='ddp',
         gpus=1,
     )
-    assert isinstance(trainer.accelerator_backend, GPUAccelerator)
+    assert isinstance(trainer.accelerator, GPUAccelerator)
     assert isinstance(trainer.training_type_plugin, DDPPlugin)
     assert isinstance(trainer.training_type_plugin.cluster_environment, TorchElasticEnvironment)
 
@@ -70,7 +70,7 @@ def test_accelerator_choice_ddp_spawn(cuda_available_mock, device_count_mock):
         accelerator='ddp_spawn',
         gpus=1,
     )
-    assert isinstance(trainer.accelerator_backend, GPUAccelerator)
+    assert isinstance(trainer.accelerator, GPUAccelerator)
     assert isinstance(trainer.training_type_plugin, DDPSpawnPlugin)
     assert isinstance(trainer.training_type_plugin.cluster_environment, TorchElasticEnvironment)
 
@@ -92,7 +92,7 @@ class CB(Callback):
         def on_fit_start(self, trainer, pl_module):
             assert trainer.use_ddp
             assert trainer.accelerator_connector.is_slurm_managing_tasks
-            assert isinstance(trainer.accelerator_backend, GPUAccelerator)
+            assert isinstance(trainer.accelerator, GPUAccelerator)
             assert isinstance(trainer.training_type_plugin, DDPPlugin)
             assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)
             assert trainer.training_type_plugin.cluster_environment.local_rank() == 10
@@ -130,7 +130,7 @@ class CB(Callback):
         def on_fit_start(self, trainer, pl_module):
             assert trainer.use_ddp2
             assert trainer.accelerator_connector.is_slurm_managing_tasks
-            assert isinstance(trainer.accelerator_backend, GPUAccelerator)
+            assert isinstance(trainer.accelerator, GPUAccelerator)
             assert isinstance(trainer.training_type_plugin, DDP2Plugin)
             assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)
             assert trainer.training_type_plugin.cluster_environment.local_rank() == 10
@@ -158,7 +158,7 @@ class CB(Callback):
 
         def on_fit_start(self, trainer, pl_module):
             assert trainer.use_ddp
-            assert isinstance(trainer.accelerator_backend, GPUAccelerator)
+            assert isinstance(trainer.accelerator, GPUAccelerator)
             assert isinstance(trainer.training_type_plugin, DDPPlugin)
             assert isinstance(trainer.training_type_plugin.cluster_environment, TorchElasticEnvironment)
             assert trainer.training_type_plugin.cluster_environment.local_rank() == 10
@@ -186,7 +186,7 @@ class CB(Callback):
 
         def on_fit_start(self, trainer, pl_module):
             assert trainer.use_ddp2
-            assert isinstance(trainer.accelerator_backend, GPUAccelerator)
+            assert isinstance(trainer.accelerator, GPUAccelerator)
             assert isinstance(trainer.training_type_plugin, DDP2Plugin)
             assert isinstance(trainer.training_type_plugin.cluster_environment, TorchElasticEnvironment)
             assert trainer.training_type_plugin.cluster_environment.local_rank() == 10
@@ -217,7 +217,7 @@ class CB(Callback):
 
         def on_fit_start(self, trainer, pl_module):
             assert trainer.use_ddp
-            assert isinstance(trainer.accelerator_backend, CPUAccelerator)
+            assert isinstance(trainer.accelerator, CPUAccelerator)
             assert isinstance(trainer.training_type_plugin, DDPPlugin)
             assert isinstance(trainer.training_type_plugin.cluster_environment, TorchElasticEnvironment)
             assert trainer.training_type_plugin.cluster_environment.local_rank() == 10
@@ -253,7 +253,7 @@ class CB(Callback):
         def on_fit_start(self, trainer, pl_module):
             assert trainer.use_ddp
             assert trainer.accelerator_connector.is_slurm_managing_tasks
-            assert isinstance(trainer.accelerator_backend, CPUAccelerator)
+            assert isinstance(trainer.accelerator, CPUAccelerator)
             assert isinstance(trainer.training_type_plugin, DDPPlugin)
             assert isinstance(trainer.training_type_plugin.cluster_environment, SLURMEnvironment)
             assert trainer.training_type_plugin.task_idx == 0
@@ -295,7 +295,7 @@ class CB(Callback):
 
         def on_fit_start(self, trainer, pl_module):
             assert trainer.use_ddp
-            assert isinstance(trainer.accelerator_backend, CPUAccelerator)
+            assert isinstance(trainer.accelerator, CPUAccelerator)
             assert isinstance(trainer.training_type_plugin, DDPPlugin)
             assert isinstance(trainer.training_type_plugin.cluster_environment, CustomCluster)
             raise SystemExit()
@@ -343,7 +343,7 @@ class TrainTypePlugin(SingleDevicePlugin):
         fast_dev_run=True,
         num_processes=2,
     )
-    assert isinstance(trainer.accelerator_backend, Accel)
+    assert isinstance(trainer.accelerator, Accel)
     assert isinstance(trainer.training_type_plugin, TrainTypePlugin)
     assert isinstance(trainer.precision_plugin, Prec)
 
@@ -363,7 +363,7 @@ def test_dist_backend_accelerator_mapping(device_count_mock):
     class CB(Callback):
 
         def on_fit_start(self, trainer, pl_module):
-            assert isinstance(trainer.accelerator_backend, CPUAccelerator)
+            assert isinstance(trainer.accelerator, CPUAccelerator)
             assert isinstance(trainer.training_type_plugin, DDPPlugin)
             assert trainer.training_type_plugin.task_idx == 0
             raise SystemExit()
diff --git a/tests/core/test_datamodules.py b/tests/core/test_datamodules.py
index e2f3b559073d2..299d196604be0 100644
--- a/tests/core/test_datamodules.py
+++ b/tests/core/test_datamodules.py
@@ -473,7 +473,7 @@ def transfer_batch_to_device(self, batch, device):
     model.transfer_batch_to_device = dm.transfer_batch_to_device
     model.on_after_batch_transfer = dm.on_after_batch_transfer
 
-    batch_gpu = trainer.accelerator_backend.batch_to_device(batch, expected_device)
+    batch_gpu = trainer.accelerator.batch_to_device(batch, expected_device)
 
     assert dm.on_before_batch_transfer_hook_rank == 0
     assert dm.transfer_batch_to_device_hook_rank == 1
diff --git a/tests/deprecated_api/test_remove_1-4.py b/tests/deprecated_api/test_remove_1-4.py
index c13e5b9dfadfd..7ccd1dafe02a3 100644
--- a/tests/deprecated_api/test_remove_1-4.py
+++ b/tests/deprecated_api/test_remove_1-4.py
@@ -30,6 +30,13 @@
 from tests.helpers import BoringModel
 
 
+def test_v1_4_0_deprecated_trainer_attributes():
+    with pytest.deprecated_call(match="will be removed in v1.4."):
+        trainer = Trainer()
+        _ = trainer.accelerator_backend
+    assert trainer.accelerator == trainer.accelerator_backend
+
+
 def test_v1_4_0_deprecated_trainer_methods():
     with pytest.deprecated_call(match='will be removed in v1.4'):
         trainer = Trainer()
diff --git a/tests/models/test_gpu.py b/tests/models/test_gpu.py
index 1c3e4b284b2e2..f30f12009450e 100644
--- a/tests/models/test_gpu.py
+++ b/tests/models/test_gpu.py
@@ -219,35 +219,35 @@ def test_single_gpu_batch_parse():
 
     # non-transferrable types
     primitive_objects = [None, {}, [], 1.0, "x", [None, 2], {"x": (1, 2), "y": None}]
     for batch in primitive_objects:
-        data = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
+        data = trainer.accelerator.batch_to_device(batch, torch.device('cuda:0'))
         assert data == batch
 
     # batch is just a tensor
     batch = torch.rand(2, 3)
-    batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
+    batch = trainer.accelerator.batch_to_device(batch, torch.device('cuda:0'))
     assert batch.device.index == 0 and batch.type() == 'torch.cuda.FloatTensor'
 
     # tensor list
     batch = [torch.rand(2, 3), torch.rand(2, 3)]
-    batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
+    batch = trainer.accelerator.batch_to_device(batch, torch.device('cuda:0'))
     assert batch[0].device.index == 0 and batch[0].type() == 'torch.cuda.FloatTensor'
     assert batch[1].device.index == 0 and batch[1].type() == 'torch.cuda.FloatTensor'
 
     # tensor list of lists
     batch = [[torch.rand(2, 3), torch.rand(2, 3)]]
-    batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
+    batch = trainer.accelerator.batch_to_device(batch, torch.device('cuda:0'))
     assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
     assert batch[0][1].device.index == 0 and batch[0][1].type() == 'torch.cuda.FloatTensor'
 
     # tensor dict
     batch = [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)}]
-    batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
+    batch = trainer.accelerator.batch_to_device(batch, torch.device('cuda:0'))
     assert batch[0]['a'].device.index == 0 and batch[0]['a'].type() == 'torch.cuda.FloatTensor'
     assert batch[0]['b'].device.index == 0 and batch[0]['b'].type() == 'torch.cuda.FloatTensor'
 
     # tuple of tensor list and list of tensor dict
     batch = ([torch.rand(2, 3) for _ in range(2)], [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)} for _ in range(2)])
-    batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
+    batch = trainer.accelerator.batch_to_device(batch, torch.device('cuda:0'))
     assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
     assert batch[1][0]['a'].device.index == 0
@@ -259,7 +259,7 @@ def test_single_gpu_batch_parse():
     # namedtuple of tensor
     BatchType = namedtuple('BatchType', ['a', 'b'])
     batch = [BatchType(a=torch.rand(2, 3), b=torch.rand(2, 3)) for _ in range(2)]
-    batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
+    batch = trainer.accelerator.batch_to_device(batch, torch.device('cuda:0'))
     assert batch[0].a.device.index == 0
     assert batch[0].a.type() == 'torch.cuda.FloatTensor'
 
@@ -273,7 +273,7 @@ def to(self, *args, **kwargs):
             self.a = self.a.to(*args, **kwargs)
             return self
 
-    batch = trainer.accelerator_backend.batch_to_device(CustomBatchType(), torch.device('cuda:0'))
+    batch = trainer.accelerator.batch_to_device(CustomBatchType(), torch.device('cuda:0'))
     assert batch.a.type() == 'torch.cuda.FloatTensor'
 
     # torchtext.data.Batch
@@ -297,7 +297,7 @@ def to(self, *args, **kwargs):
     label_field.build_vocab(dataset)
 
     batch = Batch(data=examples, dataset=dataset)
-    batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
+    batch = trainer.accelerator.batch_to_device(batch, torch.device('cuda:0'))
 
     assert batch.text.type() == 'torch.cuda.LongTensor'
     assert batch.label.type() == 'torch.cuda.LongTensor'
@@ -310,7 +310,7 @@ def test_non_blocking():
 
     batch = torch.zeros(2, 3)
     with patch.object(batch, 'to', wraps=batch.to) as mocked:
-        batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
+        batch = trainer.accelerator.batch_to_device(batch, torch.device('cuda:0'))
         mocked.assert_called_with(torch.device('cuda', 0), non_blocking=True)
 
     class BatchObject(object):
@@ -320,5 +320,5 @@ def to(self, *args, **kwargs):
 
     batch = BatchObject()
     with patch.object(batch, 'to', wraps=batch.to) as mocked:
-        batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
+        batch = trainer.accelerator.batch_to_device(batch, torch.device('cuda:0'))
         mocked.assert_called_with(torch.device('cuda', 0))
diff --git a/tests/models/test_hooks.py b/tests/models/test_hooks.py
index 7323996bcab3e..d714062fb7915 100644
--- a/tests/models/test_hooks.py
+++ b/tests/models/test_hooks.py
@@ -187,7 +187,7 @@ def transfer_batch_to_device(self, batch, device):
 
     # running .fit() would require us to implement custom data loaders, we mock the model reference instead
     model_getter_mock.return_value = model
-    batch_gpu = trainer.accelerator_backend.batch_to_device(batch, expected_device)
+    batch_gpu = trainer.accelerator.batch_to_device(batch, expected_device)
 
     assert model.on_before_batch_transfer_hook_rank == 0
     assert model.transfer_batch_to_device_hook_rank == 1
diff --git a/tests/models/test_horovod.py b/tests/models/test_horovod.py
index 060b78a712e10..0b89c3b06c041 100644
--- a/tests/models/test_horovod.py
+++ b/tests/models/test_horovod.py
@@ -303,7 +303,7 @@ def _compute_batch():
         accelerator='horovod',
     )
 
-    assert isinstance(trainer.accelerator_backend, CPUAccelerator)
+    assert isinstance(trainer.accelerator, CPUAccelerator)
     # TODO: test that we selected the correct training_type_plugin based on horovod flags
 
     metric = Accuracy(
diff --git a/tests/models/test_tpu.py b/tests/models/test_tpu.py
index 4c6620b07b74a..bfa8f2432e3a2 100644
--- a/tests/models/test_tpu.py
+++ b/tests/models/test_tpu.py
@@ -271,7 +271,7 @@ def test_broadcast_on_tpu():
 
     def test_broadcast(rank):
         trainer = Trainer(tpu_cores=8)
-        assert isinstance(trainer.accelerator_backend, TPUAccelerator)
+        assert isinstance(trainer.accelerator, TPUAccelerator)
         assert isinstance(trainer.training_type_plugin, TPUSpawnPlugin)
         obj = ("ver_0.5", "logger_name", rank)
         result = trainer.training_type_plugin.broadcast(obj)
diff --git a/tests/plugins/test_deepspeed_plugin.py b/tests/plugins/test_deepspeed_plugin.py
index 1d25c529dd963..9c9c5c097b4c5 100644
--- a/tests/plugins/test_deepspeed_plugin.py
+++ b/tests/plugins/test_deepspeed_plugin.py
@@ -46,8 +46,8 @@ def test_deepspeed_plugin_string(tmpdir):
         plugins='deepspeed',
     )
 
-    assert isinstance(trainer.accelerator_backend.training_type_plugin, DeepSpeedPlugin)
-    assert trainer.accelerator_backend.training_type_plugin.parallel_devices == [torch.device('cpu')]
+    assert isinstance(trainer.accelerator.training_type_plugin, DeepSpeedPlugin)
+    assert trainer.accelerator.training_type_plugin.parallel_devices == [torch.device('cpu')]
 
 
 @pytest.mark.skipif(not _DEEPSPEED_AVAILABLE, reason="DeepSpeed not available.")
@@ -62,8 +62,8 @@ def test_deepspeed_plugin(tmpdir):
         plugins=[DeepSpeedPlugin()],
     )
 
-    assert isinstance(trainer.accelerator_backend.training_type_plugin, DeepSpeedPlugin)
-    assert trainer.accelerator_backend.training_type_plugin.parallel_devices == [torch.device('cpu')]
+    assert isinstance(trainer.accelerator.training_type_plugin, DeepSpeedPlugin)
+    assert trainer.accelerator.training_type_plugin.parallel_devices == [torch.device('cpu')]
 
 
 @pytest.mark.skipif(not _DEEPSPEED_AVAILABLE, reason="DeepSpeed not available.")
@@ -82,7 +82,7 @@ def test_deepspeed_plugin_env(tmpdir, monkeypatch, deepspeed_config):
         plugins='deepspeed',
     )
 
-    plugin = trainer.accelerator_backend.training_type_plugin
+    plugin = trainer.accelerator.training_type_plugin
     assert isinstance(plugin, DeepSpeedPlugin)
     assert plugin.parallel_devices == [torch.device('cpu')]
     assert plugin.config == deepspeed_config
@@ -106,9 +106,9 @@ def test_deepspeed_precision_choice(amp_backend, tmpdir):
         fast_dev_run=True, default_root_dir=tmpdir, plugins='deepspeed', amp_backend=amp_backend, precision=16
     )
 
-    assert isinstance(trainer.accelerator_backend.training_type_plugin, DeepSpeedPlugin)
-    assert isinstance(trainer.accelerator_backend.precision_plugin, DeepSpeedPrecisionPlugin)
-    assert trainer.accelerator_backend.precision_plugin.precision == 16
+    assert isinstance(trainer.accelerator.training_type_plugin, DeepSpeedPlugin)
+    assert isinstance(trainer.accelerator.precision_plugin, DeepSpeedPrecisionPlugin)
+    assert trainer.accelerator.precision_plugin.precision == 16
 
 
 @pytest.mark.skipif(not _DEEPSPEED_AVAILABLE, reason="DeepSpeed not available.")
diff --git a/tests/plugins/test_rpc_sequential_plugin.py b/tests/plugins/test_rpc_sequential_plugin.py
index f1a4743080289..8be0190566df6 100644
--- a/tests/plugins/test_rpc_sequential_plugin.py
+++ b/tests/plugins/test_rpc_sequential_plugin.py
@@ -50,9 +50,9 @@ def test_rpc_sequential_plugin_manual(tmpdir, args=None):
     if torch_distrib.is_initialized() and torch_distrib.get_rank() == 0:
         assert len(trainer.dev_debugger.pbar_added_metrics) > 0
 
-    if trainer.accelerator_backend.rpc_enabled:
+    if trainer.accelerator.rpc_enabled:
         # Called at the end of trainer to ensure all processes are killed
-        trainer.accelerator_backend.training_type_plugin.exit_rpc_process()
+        trainer.accelerator.training_type_plugin.exit_rpc_process()
 
 
 @pytest.mark.skipif(not _FAIRSCALE_PIPE_AVAILABLE, reason="test requires FairScale to be installed")
@@ -104,9 +104,9 @@ def test_rpc_sequential_plugin_automatic(tmpdir, args=None):
     if torch_distrib.is_initialized() and torch_distrib.get_rank() == 0:
         assert len(trainer.dev_debugger.pbar_added_metrics) > 0
 
-    if trainer.accelerator_backend.rpc_enabled:
+    if trainer.accelerator.rpc_enabled:
         # Called at the end of trainer to ensure all processes are killed
-        trainer.accelerator_backend.training_type_plugin.exit_rpc_process()
+        trainer.accelerator.training_type_plugin.exit_rpc_process()
 
 
 @pytest.mark.skipif(not _FAIRSCALE_PIPE_AVAILABLE, reason="test requires FairScale to be installed")
@@ -132,9 +132,9 @@ def test_rpc_sequential_plugin_with_wrong_balance(tmpdir, args=None):
     ):
         trainer.fit(model)
 
-    if trainer.accelerator_backend.rpc_enabled:
+    if trainer.accelerator.rpc_enabled:
         # Called at the end of trainer to ensure all processes are killed
-        trainer.accelerator_backend.training_type_plugin.exit_rpc_process()
+        trainer.accelerator.training_type_plugin.exit_rpc_process()
 
 
 class SequentialModelRPCManual(LightningModule):
diff --git a/tests/plugins/test_sharded_plugin.py b/tests/plugins/test_sharded_plugin.py
index 85158adf5d59c..f3683ffcba252 100644
--- a/tests/plugins/test_sharded_plugin.py
+++ b/tests/plugins/test_sharded_plugin.py
@@ -23,9 +23,9 @@ class CB(Callback):
 
         def on_fit_start(self, trainer, pl_module):
             if accelerator == 'ddp_sharded':
-                assert isinstance(trainer.accelerator_backend.training_type_plugin, DDPShardedPlugin)
+                assert isinstance(trainer.accelerator.training_type_plugin, DDPShardedPlugin)
             elif accelerator == 'ddp_sharded_spawn':
-                assert isinstance(trainer.accelerator_backend.training_type_plugin, DDPSpawnShardedPlugin)
+                assert isinstance(trainer.accelerator.training_type_plugin, DDPSpawnShardedPlugin)
             raise SystemExit()
 
     model = BoringModel()
@@ -71,9 +71,9 @@ class CB(Callback):
 
         def on_fit_start(self, trainer, pl_module):
             if accelerator == 'ddp_sharded':
-                assert isinstance(trainer.accelerator_backend.training_type_plugin, DDPShardedPlugin)
+                assert isinstance(trainer.accelerator.training_type_plugin, DDPShardedPlugin)
             elif accelerator == 'ddp_sharded_spawn':
-                assert isinstance(trainer.accelerator_backend.training_type_plugin, DDPSpawnShardedPlugin)
+                assert isinstance(trainer.accelerator.training_type_plugin, DDPSpawnShardedPlugin)
             raise SystemExit()
 
     model = BoringModel()
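
Migration note (not part of the patch above): a minimal sketch of how user code moves off the deprecated attribute, mirroring the new `test_v1_4_0_deprecated_trainer_attributes` test; the bare `Trainer()` construction and the warnings-capture scaffolding are illustrative assumptions.

    # Hypothetical usage sketch, assuming Lightning 1.2 with this patch applied.
    import warnings

    from pytorch_lightning import Trainer

    trainer = Trainer()

    # Old spelling: still returns the accelerator, but emits a DeprecationWarning
    # and is scheduled for removal in v1.4.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        backend = trainer.accelerator_backend
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)

    # New spelling: same object, no deprecation warning.
    assert backend == trainer.accelerator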