
Commit

"type: ignore"
calad0i committed Jun 17, 2024
1 parent eafebc7 commit 717b56f
Showing 3 changed files with 7 additions and 7 deletions.
4 changes: 2 additions & 2 deletions src/HGQ/layers/batchnorm_base.py
@@ -93,15 +93,15 @@ def _post_build(self, input_shape):

    @property
    @tf.function(jit_compile=True)
-   def fused_kernel(self):
+   def fused_kernel(self): # type: ignore
        if not self.scale:
            return self.kernel
        scale = self.bn_gamma * tf.math.rsqrt(self.moving_variance + self.epsilon)
        return self.kernel * scale

    @property
    @tf.function(jit_compile=True)
-   def fused_bias(self):
+   def fused_bias(self): # type: ignore
        if not self.center:
            return self.bias
        scale = self.bn_gamma * tf.math.rsqrt(self.moving_variance + self.epsilon)
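A minimal sketch of the pattern touched in this hunk, under the assumption (not stated in the commit) that the suppression is needed because stacking @property on top of @tf.function hands property() a traced-function object rather than a plain method, which static checkers flag. The class, names, and values below are illustrative only, not taken from HGQ:

import tensorflow as tf

class FusedDemo:
    # Illustrative stand-in for the HGQ fused-batchnorm layer; not the real class.
    def __init__(self):
        self.kernel = tf.constant([1.0, 2.0])
        self.scale = tf.constant(0.5)

    @property
    @tf.function(jit_compile=True)
    def fused_kernel(self):  # type: ignore  # assumed reason: checker sees the tf.function wrapper, not a method
        # Traced and XLA-compiled on first access; reads like a plain attribute afterwards.
        return self.kernel * self.scale

print(FusedDemo().fused_kernel.numpy())  # [0.5 1. ]

Accessing the property calls the tf.function with the instance as its only argument, so behaviour is unchanged; the comment only silences the checker.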
2 changes: 1 addition & 1 deletion src/HGQ/proxy/plugins/qkeras.py
@@ -161,5 +161,5 @@ class QKerasBaseLayer(metaclass=abc.ABCMeta):


def init():
-   ProxyLayerXFormer.__call__.register(QKerasBaseLayer, qlayer_to_proxy_layer)
+   ProxyLayerXFormer.__call__.register(QKerasBaseLayer, qlayer_to_proxy_layer) # type: ignore
    get_produced_kif.register(qkeras.QActivation, get_produced_kif.registry[keras.layers.Activation])
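For context: get_produced_kif behaves like a functools.singledispatch function here (it exposes .register and .registry), and the suppression on the ProxyLayerXFormer.__call__ line is plausibly because the checker cannot see a .register attribute on what it types as an ordinary method. A self-contained sketch of the same register-by-reuse pattern; every class name below is invented for illustration:

from functools import singledispatch

class PlainActivation:  # stand-in for keras.layers.Activation
    pass

class QActivationLike:  # stand-in for qkeras.QActivation; deliberately not a subclass here
    pass

@singledispatch
def get_kif(layer):
    raise NotImplementedError(f'no handler for {type(layer).__name__}')

@get_kif.register
def _(layer: PlainActivation):
    return 0, 1, 65535  # same result shape as the handlers in the next file

# Reuse the handler already registered for the plain class, as the diff does via .registry[...].
get_kif.register(QActivationLike, get_kif.registry[PlainActivation])

print(get_kif(QActivationLike()))  # (0, 1, 65535)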
8 changes: 4 additions & 4 deletions src/HGQ/proxy/precision_derivation.py
@@ -110,7 +110,7 @@ def _(layer: keras.layers.Activation | keras.layers.ReLU | keras.layers.LeakyReL
        return 0, 1, 65535
    if isinstance(layer, keras.layers.Softmax):
        return 0, 1, 65535
-   k, i, f = activation_kif_forward(layer, *np.max(kifs, axis=0))
+   k, i, f = activation_kif_forward(layer, *np.max(kifs, axis=0)) # type: ignore
    return k, i, f


@@ -127,7 +127,7 @@ def _(layer: AvgPool1D | AvgPool2D | AvgPool3D):
@get_produced_kif.register
def _(layer: keras.layers.Add):
    kifs = get_input_kifs(layer)
-   k, i, f = np.max(kifs, axis=0)
+   k, i, f = np.max(kifs, axis=0) # type: ignore
    # being lazy here. But this will never overflow.
    i += int(np.ceil(np.log2(len(kifs))))
    return k, i, f
@@ -136,7 +136,7 @@ def _(layer: keras.layers.Add):
@get_produced_kif.register
def _(layer: keras.layers.Concatenate):
    kifs = get_input_kifs(layer)
-   k, i, f = np.max(kifs, axis=0)
+   k, i, f = np.max(kifs, axis=0) # type: ignore
    return k, i, f


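The recurring suppression on np.max(kifs, axis=0) is most likely a numpy-stubs issue: the annotated return type is an array-or-scalar union, so unpacking it into exactly three names cannot be verified statically. A small sketch of what the element-wise maximum computes for these merge layers, with made-up precision tuples:

import numpy as np

# Hypothetical per-input (k, i, f) precision tuples for a two-input Add/Concatenate.
kifs = [(1, 4, 8), (0, 6, 3)]

# Element-wise maximum: the merged output must cover the widest requirement of any input.
k, i, f = np.max(kifs, axis=0)  # type: ignore  # stubs cannot prove a 3-element result
print(k, i, f)  # 1 6 8

# For Add (hunk above), one extra integer bit per doubling of summands:
i = int(i) + int(np.ceil(np.log2(len(kifs))))
print(i)  # 7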
@@ -322,7 +322,7 @@ def get_config_table_tablesize_result(layer: keras.layers.Activation):
    i_kifs = get_input_kifs(layer)
    if len(i_kifs) > 1:
        warn(f'Activation layer {layer.name} has more than one input. Did you just make a activation with multiple inputs? table_size set in this way may make no sense. Proceed only if you know what you are doing.')
-   i_k, i_i, i_f = np.max(i_kifs, axis=0)
+   i_k, i_i, i_f = np.max(i_kifs, axis=0) # type: ignore
    table_size = int(2**(i_k + i_i + i_f))  # ...the ideal case. Will be the case if we have universal LUT-based activation.
    if layer.activation is keras.activations.tanh:
        table_size = int(8 / 2.**-i_f)  # LUT Range hardcoded to -4 ~ 4, match #fractional bits
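For a sense of scale (input precision chosen purely for illustration): with i_k = 1, i_i = 3, i_f = 10, the generic branch gives table_size = 2**(1 + 3 + 10) = 16384 entries, while the tanh branch with its hardcoded -4 to 4 input range gives table_size = 8 / 2**-10 = 8192, i.e. one entry per representable fractional step across that range.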
