From d52f9d52273ddbc2c688c90963b26871c128a9be Mon Sep 17 00:00:00 2001
From: Mike Clark
Date: Sun, 26 Jan 2020 00:19:23 +0000
Subject: [PATCH 1/3] use tqdm.auto in trainer

This will import the ipywidgets version of tqdm if available, which
works nicely in notebooks by not filling up the log; in the terminal it
will use the same old tqdm.

We might also want to consider passing in the tqdm class we want as an
argument, since there may be edge cases where ipywidgets is available
but the frontend doesn't support it (e.g. vscode?) or isn't working. In
those cases people will get a warning message but may want to configure
the progress bar themselves.
---
 pytorch_lightning/trainer/trainer.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index c2add79416021..5466bc9d12c45 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -7,7 +7,7 @@
 import torch
 import torch.distributed as dist
 import torch.multiprocessing as mp
-import tqdm
+from tqdm.auto import tqdm
 from torch.optim.optimizer import Optimizer
 
 from pytorch_lightning.trainer.auto_mix_precision import TrainerAMPMixin
@@ -808,13 +808,13 @@ def run_pretrain_routine(self, model):
         ref_model.on_train_start()
         if not self.disable_validation and self.num_sanity_val_steps > 0:
             # init progress bars for validation sanity check
-            pbar = tqdm.tqdm(desc='Validation sanity check',
+            pbar = tqdm(desc='Validation sanity check',
                              total=self.num_sanity_val_steps * len(self.get_val_dataloaders()),
                              leave=False, position=2 * self.process_position,
                              disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch')
             self.main_progress_bar = pbar
 
             # dummy validation progress bar
-            self.val_progress_bar = tqdm.tqdm(disable=True)
+            self.val_progress_bar = tqdm(disable=True)
             eval_results = self.evaluate(model, self.get_val_dataloaders(),
                                          self.num_sanity_val_steps, False)
@@ -828,7 +828,7 @@ def run_pretrain_routine(self, model):
         self.early_stop_callback.check_metrics(callback_metrics)
 
         # init progress bar
-        pbar = tqdm.tqdm(leave=True, position=2 * self.process_position,
+        pbar = tqdm(leave=True, position=2 * self.process_position,
                          disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch',
                          file=sys.stdout)
         self.main_progress_bar = pbar

From b41f76c7c502e92ba9bc586ad16e0a847a3e065d Mon Sep 17 00:00:00 2001
From: wassname
Date: Sun, 26 Jan 2020 11:37:17 +0800
Subject: [PATCH 2/3] use `from tqdm.auto` in eval loop

---
 pytorch_lightning/trainer/evaluation_loop.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pytorch_lightning/trainer/evaluation_loop.py b/pytorch_lightning/trainer/evaluation_loop.py
index e0148d60e6cbd..6df04952e0481 100644
--- a/pytorch_lightning/trainer/evaluation_loop.py
+++ b/pytorch_lightning/trainer/evaluation_loop.py
@@ -127,7 +127,7 @@
 from abc import ABC, abstractmethod
 
 import torch
-import tqdm
+from tqdm.auto import tqdm
 
 from pytorch_lightning.utilities.debugging import MisconfigurationException
 
@@ -293,7 +293,7 @@ def run_evaluation(self, test=False):
         # main progress bar will already be closed when testing so initial position is free
         position = 2 * self.process_position + (not test)
         desc = 'Testing' if test else 'Validating'
-        pbar = tqdm.tqdm(desc=desc, total=max_batches, leave=test, position=position,
+        pbar = tqdm(desc=desc, total=max_batches, leave=test, position=position,
                          disable=not self.show_progress_bar, dynamic_ncols=True,
                          unit='batch', file=sys.stdout)
         setattr(self, f'{"test" if test else "val"}_progress_bar', pbar)

From 7ca9f111f508ff8fe47f9ac8d50fc4957e409eed Mon Sep 17 00:00:00 2001
From: wassname
Date: Sun, 26 Jan 2020 11:59:39 +0800
Subject: [PATCH 3/3] indents

---
 pytorch_lightning/trainer/evaluation_loop.py | 4 ++--
 pytorch_lightning/trainer/trainer.py         | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/pytorch_lightning/trainer/evaluation_loop.py b/pytorch_lightning/trainer/evaluation_loop.py
index 6df04952e0481..b5e2fe9554b73 100644
--- a/pytorch_lightning/trainer/evaluation_loop.py
+++ b/pytorch_lightning/trainer/evaluation_loop.py
@@ -294,8 +294,8 @@ def run_evaluation(self, test=False):
         position = 2 * self.process_position + (not test)
         desc = 'Testing' if test else 'Validating'
         pbar = tqdm(desc=desc, total=max_batches, leave=test, position=position,
-                         disable=not self.show_progress_bar, dynamic_ncols=True,
-                         unit='batch', file=sys.stdout)
+                    disable=not self.show_progress_bar, dynamic_ncols=True,
+                    unit='batch', file=sys.stdout)
         setattr(self, f'{"test" if test else "val"}_progress_bar', pbar)
 
         # run evaluation
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index 5466bc9d12c45..9441f63e8d5a2 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -829,8 +829,8 @@ def run_pretrain_routine(self, model):
 
         # init progress bar
         pbar = tqdm(leave=True, position=2 * self.process_position,
-                         disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch',
-                         file=sys.stdout)
+                    disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch',
+                    file=sys.stdout)
         self.main_progress_bar = pbar
 
         # clear cache before training
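
Side note on the first commit message: here is a minimal, self-contained
sketch of the two ideas it raises, tqdm.auto's notebook/terminal dispatch
and passing the desired tqdm class in as an argument. The SketchTrainer
class and its progress_bar_cls parameter are hypothetical names for
illustration, not part of the patch or of Lightning's actual API.

    # Sketch only: SketchTrainer and progress_bar_cls are made-up names,
    # not pytorch_lightning API.
    from tqdm.auto import tqdm  # widget bar if ipywidgets works, else plain tqdm
    from tqdm.std import tqdm as term_tqdm  # the plain terminal bar, always available

    class SketchTrainer:
        def __init__(self, progress_bar_cls=tqdm):
            # Callers can override the auto-detected bar, e.g. when ipywidgets
            # is installed but the frontend can't render widgets (vscode?).
            self.progress_bar_cls = progress_bar_cls

        def run(self, num_batches):
            pbar = self.progress_bar_cls(total=num_batches, desc='Training', unit='batch')
            for _ in range(num_batches):
                pbar.update(1)
            pbar.close()

    SketchTrainer().run(10)                            # auto-detected bar
    SketchTrainer(progress_bar_cls=term_tqdm).run(10)  # forced terminal bar

An argument like this would let users who hit the warning case pick a
working bar themselves instead of relying on the auto-detection.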