Change nb to num in ABCs, comments, and tqdm logging #613
@@ -151,6 +151,7 @@ def training_step(self, batch, batch_idx):
 import inspect
 from abc import ABC, abstractmethod
+import warnings

 import numpy as np
@@ -169,22 +170,22 @@ class TrainerTrainLoopMixin(ABC):
     def __init__(self):
         # this is just a summary on variables used in this abstract class,
         # the proper values/initialisation should be done in child class
-        self.max_nb_epochs = None
+        self.max_epochs = None
+        self.min_epochs = None

Review comment (on `self.min_epochs`): is the min needed here in this class?
Reply: yes, it is used in the training loop.

         self.use_ddp = None
         self.use_dp = None
         self.use_ddp2 = None
         self.single_gpu = None
         self.data_parallel_device_ids = None
         self.check_val_every_n_epoch = None
-        self.nb_training_batches = None
+        self.num_training_batches = None
         self.val_check_batch = None
-        self.nb_val_batches = None
+        self.num_val_batches = None
         self.fast_dev_run = None
         self.is_iterable_train_dataloader = None
         self.main_progress_bar = None
         self.accumulation_scheduler = None
         self.lr_schedulers = None
-        self.min_nb_epochs = None
         self.enable_early_stop = None
         self.early_stop_callback = None
         self.callback_metrics = None
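For readers unfamiliar with this layout: the mixin above only *declares* the attributes it relies on, and a concrete trainer class is expected to initialise them. Here is a minimal, hypothetical sketch of that pattern (names like `TrainLoopMixin` and `MyTrainer` are illustrative, not part of the PR):

```python
from abc import ABC, abstractmethod


class TrainLoopMixin(ABC):
    """Hypothetical stand-in for TrainerTrainLoopMixin."""

    def __init__(self):
        # summary of the attributes this mixin uses; the concrete
        # trainer is responsible for giving them real values
        self.max_epochs = None
        self.min_epochs = None
        self.num_training_batches = None

    @abstractmethod
    def get_model(self):
        """Empty shell; implemented by the concrete trainer."""


class MyTrainer(TrainLoopMixin):
    def __init__(self, max_epochs=1000, min_epochs=1):
        super().__init__()
        self.max_epochs = max_epochs
        self.min_epochs = min_epochs
        self.num_training_batches = 0

    def get_model(self):
        return None  # a real trainer would return its model here
```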
@@ -194,7 +195,7 @@ def __init__(self):
         self.log_save_interval = None
         self.proc_rank = None
         self.row_log_interval = None
-        self.total_batch_nb = None
+        self.total_batches = None
         self.truncated_bptt_steps = None
         self.optimizers = None
         self.accumulate_grad_batches = None
@@ -207,6 +208,26 @@ def __init__(self):
         self.get_train_dataloader = None
         self.reduce_lr_on_plateau_scheduler = None

+    @property
+    def max_nb_epochs(self):
+        """
+        .. warning:: `max_nb_epochs` is deprecated and will be removed in
+            v0.8.0, use `max_epochs` instead.
+        """
+        warnings.warn("`max_nb_epochs` is deprecated and will be removed in "
+                      "v0.8.0, use `max_epochs` instead.", DeprecationWarning)
+        return self.max_epochs

Review comment (on the docstring warning): pls. just one line
Reply: Got it.

+
+    @property
+    def min_nb_epochs(self):
+        """
+        .. warning:: `min_nb_epochs` is deprecated and will be removed in
+            v0.8.0, use `min_epochs` instead.
+        """
+        warnings.warn("`min_nb_epochs` is deprecated and will be removed in "
+                      "v0.8.0, use `min_epochs` instead.", DeprecationWarning)
+        return self.min_epochs
+
     @abstractmethod
     def get_model(self):
         # this is just empty shell for code from other class
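The two properties above implement a common backward-compatibility pattern: the old attribute name becomes a read-only alias that emits a `DeprecationWarning` and forwards to the new name. A minimal, self-contained sketch of the same pattern (this `Trainer` class is illustrative, not the actual pytorch-lightning one):

```python
import warnings


class Trainer:
    """Illustrative class showing the deprecated-alias pattern."""

    def __init__(self, max_epochs=1000):
        self.max_epochs = max_epochs

    @property
    def max_nb_epochs(self):
        """.. warning:: `max_nb_epochs` is deprecated, use `max_epochs`."""
        warnings.warn("`max_nb_epochs` is deprecated and will be removed in "
                      "v0.8.0, use `max_epochs` instead.", DeprecationWarning)
        return self.max_epochs


trainer = Trainer(max_epochs=5)
print(trainer.max_nb_epochs)  # prints 5 and emits a DeprecationWarning
```

Note that the alias covers reads only: since the property defines no setter, assigning to `trainer.max_nb_epochs` raises `AttributeError`. Also, Python hides `DeprecationWarning` by default in many contexts, so run with `python -W default` if the warning does not appear.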
@@ -391,7 +412,7 @@ def run_training_epoch(self):
             if early_stop_epoch or self.fast_dev_run:
                 break

-            # stop epoch if we limited nb batches
+            # stop epoch if we limited num training batches
             met_batch_limit = batch_idx >= self.num_training_batches
             if met_batch_limit:
                 break

Review comment (on the updated comment): you can say "full" number
Reply: Agreed.
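The renamed comment documents the epoch-level batch cap: the loop body runs until `batch_idx` reaches `num_training_batches`, then ends the epoch early. A minimal sketch of that control flow, with a plain `range` standing in for a real dataloader:

```python
num_training_batches = 3  # hypothetical cap on batches per epoch
dataloader = range(10)    # stand-in for a real training dataloader

for batch_idx, batch in enumerate(dataloader):
    # ... run one training step on `batch` here ...

    # stop the epoch once the configured number of batches is consumed
    met_batch_limit = batch_idx >= num_training_batches
    if met_batch_limit:
        break
```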