From 717b56fe2fc9ea593d6a1762ed9cb4922cba84e7 Mon Sep 17 00:00:00 2001
From: Chang Sun
Date: Mon, 17 Jun 2024 15:01:25 -0700
Subject: [PATCH] "type: ignore"

---
 src/HGQ/layers/batchnorm_base.py      | 4 ++--
 src/HGQ/proxy/plugins/qkeras.py       | 2 +-
 src/HGQ/proxy/precision_derivation.py | 8 ++++----
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/HGQ/layers/batchnorm_base.py b/src/HGQ/layers/batchnorm_base.py
index 7af269d..f959f5d 100644
--- a/src/HGQ/layers/batchnorm_base.py
+++ b/src/HGQ/layers/batchnorm_base.py
@@ -93,7 +93,7 @@ def _post_build(self, input_shape):
 
     @property
     @tf.function(jit_compile=True)
-    def fused_kernel(self):
+    def fused_kernel(self):  # type: ignore
         if not self.scale:
             return self.kernel
         scale = self.bn_gamma * tf.math.rsqrt(self.moving_variance + self.epsilon)
@@ -101,7 +101,7 @@ def fused_kernel(self):
 
     @property
     @tf.function(jit_compile=True)
-    def fused_bias(self):
+    def fused_bias(self):  # type: ignore
         if not self.center:
             return self.bias
         scale = self.bn_gamma * tf.math.rsqrt(self.moving_variance + self.epsilon)
diff --git a/src/HGQ/proxy/plugins/qkeras.py b/src/HGQ/proxy/plugins/qkeras.py
index a978986..f034bdd 100644
--- a/src/HGQ/proxy/plugins/qkeras.py
+++ b/src/HGQ/proxy/plugins/qkeras.py
@@ -161,5 +161,5 @@ class QKerasBaseLayer(metaclass=abc.ABCMeta):
 
 
 def init():
-    ProxyLayerXFormer.__call__.register(QKerasBaseLayer, qlayer_to_proxy_layer)
+    ProxyLayerXFormer.__call__.register(QKerasBaseLayer, qlayer_to_proxy_layer)  # type: ignore
     get_produced_kif.register(qkeras.QActivation, get_produced_kif.registry[keras.layers.Activation])
diff --git a/src/HGQ/proxy/precision_derivation.py b/src/HGQ/proxy/precision_derivation.py
index 74c6ac6..fdcc403 100644
--- a/src/HGQ/proxy/precision_derivation.py
+++ b/src/HGQ/proxy/precision_derivation.py
@@ -110,7 +110,7 @@ def _(layer: keras.layers.Activation | keras.layers.ReLU | keras.layers.LeakyReL
         return 0, 1, 65535
     if isinstance(layer, keras.layers.Softmax):
         return 0, 1, 65535
-    k, i, f = activation_kif_forward(layer, *np.max(kifs, axis=0))
+    k, i, f = activation_kif_forward(layer, *np.max(kifs, axis=0))  # type: ignore
     return k, i, f
 
 
@@ -127,7 +127,7 @@ def _(layer: AvgPool1D | AvgPool2D | AvgPool3D):
 @get_produced_kif.register
 def _(layer: keras.layers.Add):
     kifs = get_input_kifs(layer)
-    k, i, f = np.max(kifs, axis=0)
+    k, i, f = np.max(kifs, axis=0)  # type: ignore
     # being lazy here. But this will never overflow.
     i += int(np.ceil(np.log2(len(kifs))))
     return k, i, f
@@ -136,7 +136,7 @@ def _(layer: keras.layers.Add):
 @get_produced_kif.register
 def _(layer: keras.layers.Concatenate):
     kifs = get_input_kifs(layer)
-    k, i, f = np.max(kifs, axis=0)
+    k, i, f = np.max(kifs, axis=0)  # type: ignore
     return k, i, f
 
 
@@ -322,7 +322,7 @@ def get_config_table_tablesize_result(layer: keras.layers.Activation):
     i_kifs = get_input_kifs(layer)
     if len(i_kifs) > 1:
         warn(f'Activation layer {layer.name} has more than one input. Did you just make a activation with multiple inputs? table_size set in this way may make no sense. Proceed only if you know what you are doing.')
-    i_k, i_i, i_f = np.max(i_kifs, axis=0)
+    i_k, i_i, i_f = np.max(i_kifs, axis=0)  # type: ignore
     table_size = int(2**(i_k + i_i + i_f))  # ...the ideal case. Will be the case if we have universal LUT-based activation.
     if layer.activation is keras.activations.tanh:
         table_size = int(8 / 2.**-i_f)  # LUT Range hardcoded to -4 ~ 4, match #fractional bits