From 8a1c443b51ed82e8edb695784bec6af749a4ffba Mon Sep 17 00:00:00 2001
From: fsx950223
Date: Wed, 13 Apr 2022 13:01:27 +0800
Subject: [PATCH] fix apis

---
 efficientdet/tf2/infer_lib_test.py |  2 +-
 efficientnetv2/main.py             |  5 +++++
 efficientnetv2/utils.py            | 15 +++++----------
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/efficientdet/tf2/infer_lib_test.py b/efficientdet/tf2/infer_lib_test.py
index f5ad14c25..c87e55fee 100644
--- a/efficientdet/tf2/infer_lib_test.py
+++ b/efficientdet/tf2/infer_lib_test.py
@@ -131,7 +131,7 @@ def test_infer_lib_mixed_precision(self):
         model_params={'mixed_precision': True})
     images = tf.ones((1, 512, 512, 3))
     boxes, scores, classes, valid_lens = driver.serve(images)
-    policy = tf.keras.mixed_precision.experimental.global_policy()
+    policy = tf.keras.mixed_precision.global_policy()
     if policy.name == 'float32':
       self.assertEqual(tf.reduce_mean(boxes), 163.09)
       self.assertEqual(tf.reduce_mean(scores), 0.01000005)
diff --git a/efficientnetv2/main.py b/efficientnetv2/main.py
index 4865202b9..29834a78e 100644
--- a/efficientnetv2/main.py
+++ b/efficientnetv2/main.py
@@ -168,6 +168,11 @@ def build_model(in_images):
     utils.scalar('train/lr', learning_rate)
     optimizer = utils.build_optimizer(
         learning_rate, optimizer_name=config.train.optimizer)
+
+    if config.runtime.mixed_precision and precision == 'mixed_float16':
+      # Wrap the optimizer with loss scaling when training in mixed_float16.
+      optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer)
+
     if FLAGS.use_tpu:
       # When using TPU, wrap the optimizer with CrossShardOptimizer which
       # handles synchronization details between different TPU cores. To the
diff --git a/efficientnetv2/utils.py b/efficientnetv2/utils.py
index 3a7a6a8a7..3727ba1ca 100644
--- a/efficientnetv2/utils.py
+++ b/efficientnetv2/utils.py
@@ -396,7 +396,7 @@ def _custom_getter(getter, *args, **kwargs):
     yield varscope
 
 
-def set_precision_policy(policy_name=None, loss_scale=False):
+def set_precision_policy(policy_name=None):
   """Set precision policy according to the name.
 
   Args:
@@ -410,14 +410,8 @@ def set_precision_policy(policy_name=None, loss_scale=False):
   assert policy_name in ('mixed_float16', 'mixed_bfloat16', 'float32')
   logging.info('use mixed precision policy name %s', policy_name)
   tf.compat.v1.keras.layers.enable_v2_dtype_behavior()
-  # mixed_float16 training is not supported for now, so disable loss_scale.
-  # float32 and mixed_bfloat16 do not need loss scale for training.
-  if loss_scale:
-    policy = tf.keras.mixed_precision.experimental.Policy(policy_name)
-  else:
-    policy = tf.keras.mixed_precision.experimental.Policy(
-        policy_name, loss_scale=None)
-  tf.keras.mixed_precision.experimental.set_policy(policy)
+  policy = tf.keras.mixed_precision.Policy(policy_name)
+  tf.keras.mixed_precision.set_global_policy(policy)
 
 
 def build_model_with_precision(pp, mm, ii, tt, *args, **kwargs):
@@ -438,6 +432,7 @@ def build_model_with_precision(pp, mm, ii, tt, *args, **kwargs):
   Returns:
     the output of mm model.
   """
+  del tt  # Unused: loss scaling is now handled by the optimizer wrapper.
   if pp == 'mixed_bfloat16':
     set_precision_policy(pp)
     inputs = tf.cast(ii, tf.bfloat16)
@@ -445,7 +440,7 @@ def build_model_with_precision(pp, mm, ii, tt, *args, **kwargs):
       outputs = mm(inputs, *args, **kwargs)
     set_precision_policy('float32')
   elif pp == 'mixed_float16':
-    set_precision_policy(pp, loss_scale=tt)
+    set_precision_policy(pp)
    inputs = tf.cast(ii, tf.float16)
     with float16_scope():
       outputs = mm(inputs, *args, **kwargs)
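
---

Note on the API migration: the tf.keras.mixed_precision.experimental
namespace was deprecated in TF 2.4 in favor of stable endpoints and later
removed, which is what "fix apis" addresses. The stable Policy no longer
takes a loss_scale argument (hence set_precision_policy losing that
parameter), and the stable setter is set_global_policy. A minimal sketch of
the stable API as the patch uses it (standalone illustration, not code from
this repo):

    import tensorflow as tf

    # A global dtype policy makes layers built afterwards compute in
    # float16/bfloat16 while keeping their variables in float32.
    tf.keras.mixed_precision.set_global_policy('mixed_float16')

    policy = tf.keras.mixed_precision.global_policy()
    assert policy.name == 'mixed_float16'
    assert policy.compute_dtype == 'float16'
    assert policy.variable_dtype == 'float32'

    # Restore the default, as build_model_with_precision does after
    # constructing the model.
    tf.keras.mixed_precision.set_global_policy('float32')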
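Note on the main.py hunk: mixed_float16 training needs loss scaling so that
small float16 gradients do not underflow to zero, and with the loss scale
gone from Policy, the optimizer is wrapped in
tf.keras.mixed_precision.LossScaleOptimizer instead. A sketch of the
dynamic loss-scaling workflow in a custom training loop (toy model and
optimizer chosen for illustration; this repo's TPU/Estimator path differs):

    import tensorflow as tf

    tf.keras.mixed_precision.set_global_policy('mixed_float16')
    model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
    # Dynamic scaling: the scale grows while gradients stay finite and is
    # cut back when they overflow.
    optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer)

    @tf.function
    def train_step(images, labels):
      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        loss = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels, logits, from_logits=True))
        # Scale the loss up before differentiation...
        scaled_loss = optimizer.get_scaled_loss(loss)
      scaled_grads = tape.gradient(scaled_loss, model.trainable_variables)
      # ...and unscale the gradients before applying them;
      # apply_gradients also updates the dynamic loss scale.
      grads = optimizer.get_unscaled_gradients(scaled_grads)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))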
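Note on del tt in build_model_with_precision: the tt (loss scale) flag
becomes dead once scaling moves to the optimizer wrapper, so it is
explicitly discarded. The function now only toggles the global dtype policy
around model construction; layers snapshot the policy when they are
created, which is why float32 is restored afterwards. A small illustration
of that construction-time snapshot (toy layers, not repo code):

    import tensorflow as tf

    tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')
    inside = tf.keras.layers.Dense(4)   # snapshots mixed_bfloat16
    tf.keras.mixed_precision.set_global_policy('float32')
    outside = tf.keras.layers.Dense(4)  # built under the default policy

    assert inside.dtype_policy.name == 'mixed_bfloat16'
    assert outside.dtype_policy.name == 'float32'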