From f45913a52ec5a7a7aca0e1541055e70b08b49455 Mon Sep 17 00:00:00 2001
From: Brendan Fahy
Date: Wed, 2 Sep 2020 16:02:41 -0400
Subject: [PATCH] make the fs variable private

---
 .../callbacks/model_checkpoint.py | 22 +++++++++----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/pytorch_lightning/callbacks/model_checkpoint.py b/pytorch_lightning/callbacks/model_checkpoint.py
index 1257ac36c5b4b7..71a502fe1e1c0a 100644
--- a/pytorch_lightning/callbacks/model_checkpoint.py
+++ b/pytorch_lightning/callbacks/model_checkpoint.py
@@ -120,10 +120,10 @@ def __init__(self, filepath: Optional[str] = None, monitor: str = 'val_loss', ve
                  mode: str = 'auto', period: int = 1, prefix: str = ''):
         super().__init__()
         if filepath:
-            self.fs = get_filesystem(filepath)
+            self._fs = get_filesystem(filepath)
         else:
-            self.fs = get_filesystem("")  # will give local fileystem
-        if save_top_k > 0 and filepath is not None and self.fs.isdir(filepath) and len(self.fs.ls(filepath)) > 0:
+            self._fs = get_filesystem("")  # will give local fileystem
+        if save_top_k > 0 and filepath is not None and self._fs.isdir(filepath) and len(self._fs.ls(filepath)) > 0:
             rank_zero_warn(
                 f"Checkpoint directory {filepath} exists and is not empty with save_top_k != 0."
                 "All files in this directory will be deleted when a checkpoint is saved!"
@@ -135,13 +135,13 @@ def __init__(self, filepath: Optional[str] = None, monitor: str = 'val_loss', ve
         if filepath is None:  # will be determined by trainer at runtime
             self.dirpath, self.filename = None, None
         else:
-            if self.fs.isdir(filepath):
+            if self._fs.isdir(filepath):
                 self.dirpath, self.filename = filepath, "{epoch}"
             else:
-                if self.fs.protocol == "file":  # dont normalize remote paths
+                if self._fs.protocol == "file":  # dont normalize remote paths
                     filepath = os.path.realpath(filepath)
                 self.dirpath, self.filename = os.path.split(filepath)
-            self.fs.makedirs(self.dirpath, exist_ok=True)
+            self._fs.makedirs(self.dirpath, exist_ok=True)
         self.save_last = save_last
         self.save_top_k = save_top_k
         self.save_weights_only = save_weights_only
@@ -184,8 +184,8 @@ def kth_best_model(self):
         return self.kth_best_model_path
 
     def _del_model(self, filepath):
-        if self.fs.exists(filepath):
-            self.fs.rm(filepath)
+        if self._fs.exists(filepath):
+            self._fs.rm(filepath)
 
     def _save_model(self, filepath, trainer, pl_module):
 
@@ -193,7 +193,7 @@ def _save_model(self, filepath, trainer, pl_module):
         trainer.dev_debugger.track_checkpointing_history(filepath)
 
         # make paths
-        self.fs.makedirs(os.path.dirname(filepath), exist_ok=True)
+        self._fs.makedirs(os.path.dirname(filepath), exist_ok=True)
 
         # delegate the saving to the model
         if self.save_function is not None:
@@ -299,7 +299,7 @@ def on_pretrain_routine_start(self, trainer, pl_module):
             self.dirpath = ckpt_path
 
         assert trainer.global_rank == 0, "tried to make a checkpoint from non global_rank=0"
-        self.fs.makedirs(self.dirpath, exist_ok=True)
+        self._fs.makedirs(self.dirpath, exist_ok=True)
 
     def __warn_deprecated_monitor_key(self):
         using_result_obj = os.environ.get('PL_USING_RESULT_OBJ', None)
@@ -348,7 +348,7 @@ def on_validation_end(self, trainer, pl_module):
         ckpt_name_metrics = trainer.logged_metrics
         filepath = self.format_checkpoint_name(epoch, ckpt_name_metrics)
         version_cnt = 0
-        while self.fs.exists(filepath):
+        while self._fs.exists(filepath):
             filepath = self.format_checkpoint_name(epoch, ckpt_name_metrics, ver=version_cnt)  # this epoch called before
             version_cnt += 1
 
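For context on the renamed attribute: `get_filesystem` resolves a path to an fsspec
filesystem object, which is why `get_filesystem("")` falls back to the local filesystem
in the `else` branch above. Below is a minimal sketch of that behavior, assuming only
the `fsspec` package; the helper name `get_filesystem_sketch` and the example paths are
illustrative, not the library's exact implementation.

    import fsspec
    from fsspec import AbstractFileSystem

    def get_filesystem_sketch(path: str) -> AbstractFileSystem:
        # A path with an explicit protocol ("s3://bucket/ckpts", "memory://run1")
        # resolves to the matching fsspec filesystem; anything else, including
        # the empty string, falls back to the local filesystem.
        if "://" in path:
            return fsspec.filesystem(path.split("://", 1)[0])
        return fsspec.filesystem("file")

    fs = get_filesystem_sketch("")  # local filesystem
    fs.makedirs("/tmp/ckpts", exist_ok=True)  # same call pattern as self._fs.makedirs above
    print(get_filesystem_sketch("memory://run1").protocol)  # "memory"

The underscore prefix signals that the filesystem handle is an implementation detail of
the ModelCheckpoint callback rather than part of its public API, so code outside the
callback should not rely on it.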