fixup! Add trainable theta and discretization options
gsmalik committed May 26, 2021
1 parent 408047e commit c9fcd99
Showing 1 changed file with 21 additions and 17 deletions.
38 changes: 21 additions & 17 deletions keras_lmu/layers.py
@@ -197,10 +197,13 @@ def _cont2discrete_zoh(A, B, dt=1.0):
             )
         # A and B can be decoupled from theta when using euler. hence, only
         # generated once in ``build``, regardless if theta is trainable or not.
-        # set as weights without theta. division by theta, according to euler
-        # discretization formula, is done in ``call``..
+        # if theta is trainable, A and B weights are set without theta and
+        # division by theta, according to euler discretization formula, is done
+        # in ``call``.
         elif self.discretizer == "euler":
             self._A, self._B = self.CONST_A.T, self.CONST_B.T
+            if not self.train_theta:
+                self._A, self._B = self._A * self.theta_inv, self._B * self.theta_inv
 
     def build(self, input_shape):
         """
@@ -317,22 +320,23 @@ def call(self, inputs, states, training=None):
         u = tf.expand_dims(u, -1)
 
         # update memory
-        if self.discretizer == "zoh":
-            if training and self.train_theta:
-                # ``theta`` cannot be decoupled. generate new A and B matrices
-                # and assign them as weights.
-                self._gen_AB()
-                self.A.assign(self._A)
-                self.B.assign(self._B)
-                A, B = self._A, self._B
-            else:
-                A, B = self.A, self.B
-            m = tf.matmul(m, A) + tf.matmul(u, B)
+        if self.discretizer == "zoh" and training and self.train_theta:
+            # ``theta`` cannot be decoupled. generate new A and B matrices
+            # and assign them as weights.
+            self._gen_AB()
+            self.A.assign(self._A)
+            self.B.assign(self._B)
+            A, B = self._A, self._B
+        else:
+            A, B = self.A, self.B
+        _m = tf.matmul(m, A) + tf.matmul(u, B)
 
-        elif self.discretizer == "euler":
-            # ``theta`` can be decopled when using euler by dividing original A
-            # and B weights.
-            m = m + (self.theta_inv * (tf.matmul(m, self.A) + tf.matmul(u, self.B)))
+        if self.discretizer == "euler":
+            # when training theta, ``theta`` can be decoupled when using euler;
+            # the division by theta is applied here via ``theta_inv``.
+            m = m + ((self.theta_inv if self.train_theta else 1) * _m)
+        else:
+            m = _m
 
         # re-combine memory/order dimensions
         m = tf.reshape(m, (-1, self.memory_d * self.order))
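The second hunk reorganizes ``call`` so the matmul is computed once as ``_m``; the Euler branch then adds it back onto the memory, scaled by ``theta_inv`` only when theta is trainable (otherwise the factor is already baked into the weights). A simplified, framework-free sketch of that control flow; the function and argument names here are illustrative, not part of keras_lmu.

import numpy as np

def memory_update(m, u, A, B, discretizer, train_theta, theta_inv):
    """Illustrative NumPy stand-in for the memory update in ``call`` (not TF)."""
    # For "zoh", A and B are assumed to already be the discretized matrices
    # (regenerated each step when theta is trainable, as in the diff above).
    _m = m @ A + u @ B
    if discretizer == "euler":
        # Euler step: m <- m + (1/theta) * (A m + B u); apply 1/theta here only
        # when it has not already been folded into the weights.
        return m + (theta_inv if train_theta else 1) * _m
    # ZOH: the matmul result is the new memory directly.
    return _m

# Example usage with placeholder weights.
order = 4
m = np.zeros((1, order))
u = np.ones((1, 1))
A = np.eye(order)
B = np.ones((1, order))
print(memory_update(m, u, A, B, "euler", True, 0.1))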
