diff --git a/.ci/hardware.sh.template b/.ci/hardware.sh.template index 7431e358d..3ddbd617d 100644 --- a/.ci/hardware.sh.template +++ b/.ci/hardware.sh.template @@ -21,7 +21,7 @@ pip install -e .[tests] pip install $NENGO_VERSION --upgrade pip install ~/travis-ci/nxsdk-0.8.0.tar.gz - SLURM=1 pytest --target loihi --no-hang -v --durations 50 --color=yes -n 2 --cov=nengo_loihi --cov-report=xml --cov-report=term-missing || HW_STATUS=1 + SLURM=1 pytest --target loihi --no-hang -v --durations 50 --color=yes -n 1 --cov=nengo_loihi --cov-report=xml --cov-report=term-missing || HW_STATUS=1 exit \$HW_STATUS EOF {% endblock %} diff --git a/.nengobones.yml b/.nengobones.yml index 3eea01be2..b6e946608 100644 --- a/.nengobones.yml +++ b/.nengobones.yml @@ -47,7 +47,7 @@ ci_scripts: pip_install: - flake8 post_commands: - - flake8 nengo + - flake8 nengo_loihi - flake8 --ignore=E226,E703,W391,W503 docs - template: deploy diff --git a/CHANGES.rst b/CHANGES.rst index c36c82bac..c2c272328 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -34,6 +34,30 @@ Release history - Switched to nengo-bones templating system for TravisCI config/scripts. (`#204 `__) +- It is no longer possible to pass ``network=None`` to ``Simulator``. + Previously this was possible, but unlikely to work as expected. + (`#202 `__) +- Better error messages are raised when attempting to simulate networks + in which certain objects participating in a learning rule are on-chip. + (`#202 `__, + `#208 `__, + `#209 `__) + +**Fixed** + +- The splitting and passthrough removal procedures were significantly + refactored, which fixed an issue in which networks could be modified + in the splitting process. + (`#202 `__, + `#211 `__) +- It is now possible to make connections and probes with object slices + (e.g., ``nengo.Probe(my_ensemble[0])``). + (`#202 `__, + `#205 `__, + `#206 `__) +- We no longer disable the Nengo decoder cache for all models. 
+ (`#202 `__, + `#207 `__) 0.6.0 (February 22, 2019) ========================= diff --git a/docs/examples/adaptive_motor_control.ipynb b/docs/examples/adaptive_motor_control.ipynb index 3708dee62..72a44006c 100644 --- a/docs/examples/adaptive_motor_control.ipynb +++ b/docs/examples/adaptive_motor_control.ipynb @@ -122,7 +122,7 @@ " # Create node that calculates the OSC signal\n", " model.osc_node = nengo.Node(\n", " output=lambda t, x: ctrlr.generate(\n", - " q=x[:2], dq=x[2:4], target_pos=np.hstack([x[4:6], 0])),\n", + " q=x[:2], dq=x[2:4], target=np.hstack([x[4:6], np.zeros(4)])),\n", " size_in=6, size_out=2)\n", "\n", " # Create node that runs the arm simulation and gets feedback\n", diff --git a/nengo_loihi/builder/builder.py b/nengo_loihi/builder/builder.py index 3fdc196e4..f7f1e0c6a 100644 --- a/nengo_loihi/builder/builder.py +++ b/nengo_loihi/builder/builder.py @@ -1,7 +1,8 @@ from collections import defaultdict, OrderedDict import logging -from nengo import Network +from nengo import Ensemble, Network, Node, Probe +from nengo.builder import Model as NengoModel from nengo.builder.builder import Builder as NengoBuilder from nengo.builder.network import build_network from nengo.cache import NoDecoderCache @@ -106,10 +107,27 @@ def __init__(self, dt=0.001, label=None, builder=None): self.build_callback = None self.decoder_cache = NoDecoderCache() + # TODO: these models may not look/behave exactly the same as + # standard nengo models, because they don't have a toplevel network + # built into them or configs set + self.host_pre = NengoModel( + dt=float(dt), + label="%s:host_pre, dt=%f" % (label, dt), + decoder_cache=NoDecoderCache(), + ) + self.host = NengoModel( + dt=float(dt), + label="%s:host, dt=%f" % (label, dt), + decoder_cache=NoDecoderCache(), + ) + # Objects created by the model for simulation on Loihi self.inputs = OrderedDict() self.blocks = OrderedDict() + # Will be filled in by the simulator __init__ + self.split = None + # Will be filled in by the network builder self.toplevel = None self.config = None @@ -145,8 +163,11 @@ def __init__(self, dt=0.001, label=None, builder=None): # magnitude/weight resolution) self.pes_wgt_exp = 4 - # Will be provided by Simulator + # Used to track interactions between host models self.chip2host_params = {} + self.chip2host_receivers = OrderedDict() + self.host2chip_senders = OrderedDict() + self.needs_sender = {} def __getstate__(self): raise NotImplementedError("Can't pickle nengo_loihi.builder.Model") @@ -168,7 +189,25 @@ def add_block(self, block): self.blocks[block] = len(self.blocks) def build(self, obj, *args, **kwargs): - built = self.builder.build(self, obj, *args, **kwargs) + # Don't build the objects marked as "to_remove" by PassthroughSplit + if obj in self.split.passthrough.to_remove: + return None + + if not isinstance(obj, (Node, Ensemble, Probe)): + model = self + elif self.split.on_chip(obj): + model = self + else: + # Note: callbacks for the host_model will not be invoked + model = self.host_model(obj) + + # done for compatibility with nengo<=2.8.0 + # otherwise we could just copy over the initial + # seeding to all other models + model.seeds[obj] = self.seeds[obj] + model.seeded[obj] = self.seeded[obj] + + built = model.builder.build(model, obj, *args, **kwargs) if self.build_callback is not None: self.build_callback(obj) return built @@ -176,6 +215,13 @@ def build(self, obj, *args, **kwargs): def has_built(self, obj): return obj in self.params + def host_model(self, obj): + """Returns the Model corresponding to where obj 
should be built.""" + if self.split.is_precomputable(obj): + return self.host_pre + else: + return self.host + class Builder(NengoBuilder): """Fills in the Loihi Model object based on the Nengo Network. diff --git a/nengo_loihi/builder/connection.py b/nengo_loihi/builder/connection.py index 0b36502bf..31dc58252 100644 --- a/nengo_loihi/builder/connection.py +++ b/nengo_loihi/builder/connection.py @@ -1,5 +1,8 @@ +import copy +import logging + import nengo -from nengo import Ensemble, Connection, Node +from nengo import Ensemble, Connection, Node, Probe as NengoProbe from nengo.builder.connection import ( build_no_solver as _build_no_solver, BuiltConnection, @@ -7,18 +10,276 @@ get_targets, multiply, ) +from nengo.connection import LearningRule from nengo.ensemble import Neurons from nengo.exceptions import BuildError, ValidationError from nengo.solvers import NoSolver, Solver import numpy as np -from nengo_loihi import conv from nengo_loihi.block import Axon, LoihiBlock, Probe, Synapse from nengo_loihi.builder.builder import Builder +from nengo_loihi.builder.inputs import ( + ChipReceiveNode, + ChipReceiveNeurons, + HostSendNode, + HostReceiveNode, + PESModulatoryTarget, +) from nengo_loihi.compat import ( nengo_transforms, sample_transform, conn_solver) -from nengo_loihi.inputs import ChipReceiveNeurons, LoihiInput +from nengo_loihi.conv import channel_idxs, conv2d_loihi_weights, pixel_idxs +from nengo_loihi.inputs import LoihiInput from nengo_loihi.neurons import loihi_rates +from nengo_loihi.passthrough import base_obj + +logger = logging.getLogger(__name__) + + +def _inherit_seed(dest_model, dest_obj, src_model, src_obj): + dest_model.seeded[dest_obj] = src_model.seeded[src_obj] + dest_model.seeds[dest_obj] = src_model.seeds[src_obj] + + +@Builder.register(Connection) +def build_connection(model, conn): + pre_onchip = model.split.on_chip(base_obj(conn.pre)) + + if isinstance(conn.post_obj, LearningRule): + assert not pre_onchip + return build_host_to_learning_rule(model, conn) + + post_onchip = model.split.on_chip(base_obj(conn.post)) + + if pre_onchip and post_onchip: + build_chip_connection(model, conn) + + elif not pre_onchip and post_onchip: + if isinstance(conn.pre_obj, Neurons): + build_host_neurons_to_chip(model, conn) + else: + build_host_to_chip(model, conn) + + elif pre_onchip and not post_onchip: + build_chip_to_host(model, conn) + + else: + assert not pre_onchip and not post_onchip + host = model.host_model(base_obj(conn.pre)) + assert host is model.host_model(base_obj(conn.post)) + _inherit_seed(host, conn, model, conn) + host.build(conn) + + +def build_host_neurons_to_chip(model, conn): + """Send spikes over and do the rest of the connection on-chip""" + + assert not isinstance(conn.post, LearningRule) + dim = conn.size_in + host = model.host_model(base_obj(conn.pre)) + + logger.debug("Creating ChipReceiveNeurons for %s", conn) + receive = ChipReceiveNeurons( + dim, + neuron_type=conn.pre_obj.ensemble.neuron_type, + label=None if conn.label is None else "%s_neurons" % conn.label, + add_to_container=False, + ) + _inherit_seed(model, receive, model, conn) + model.builder.build(model, receive) + + receive2post = Connection( + receive, + conn.post, + transform=conn.transform, + synapse=conn.synapse, + label=None if conn.label is None else "%s_chip" % conn.label, + add_to_container=False, + ) + _inherit_seed(model, receive2post, model, conn) + build_chip_connection(model, receive2post) + + logger.debug("Creating HostSendNode for %s", conn) + send = HostSendNode( + dim, + 
label=None if conn.label is None else "%s_send" % conn.label, + add_to_container=False, + ) + host.build(send) + + pre2send = Connection( + conn.pre, + send, + synapse=None, + label=None if conn.label is None else "%s_host" % conn.label, + add_to_container=False, + ) + model.host2chip_senders[send] = receive + _inherit_seed(host, pre2send, model, conn) + host.build(pre2send) + + +def build_host_to_chip(model, conn): + rng = np.random.RandomState(model.seeds[conn]) + dim = conn.size_out + host = model.host_model(base_obj(conn.pre)) + + logger.debug("Creating ChipReceiveNode for %s", conn) + receive = ChipReceiveNode( + dim * 2, + size_out=dim, + label=None if conn.label is None else "%s_node" % conn.label, + add_to_container=False, + ) + model.builder.build(model, receive) + + receive2post = Connection( + receive, + conn.post, + synapse=model.decode_tau, + label=None if conn.label is None else "%s_chip" % conn.label, + add_to_container=False, + ) + _inherit_seed(model, receive2post, model, conn) + build_chip_connection(model, receive2post) + + logger.debug("Creating DecodeNeuron ensemble for %s", conn) + ens = model.node_neurons.get_ensemble(dim) + ens.label = None if conn.label is None else "%s_ens" % conn.label + _inherit_seed(host, ens, model, conn) + host.build(ens) + + if nengo_transforms is not None and isinstance( + conn.transform, nengo_transforms.Convolution): + raise BuildError( + "Conv2D transforms not supported for off-chip to " + "on-chip connections where `pre` is not a Neurons object.") + + # Scale the input spikes based on the radius of the target ensemble + weights = sample_transform(conn, rng=rng) + + if isinstance(conn.post_obj, Ensemble): + weights = weights / conn.post_obj.radius + + if nengo_transforms is None: + transform = weights + else: + # copy the Transform information, setting `init` to the sampled weights + transform = copy.copy(conn.transform) + type(transform).init.data[transform] = weights + + pre2ens = Connection( + conn.pre, + ens, + function=conn.function, + solver=conn.solver, + eval_points=conn.eval_points, + scale_eval_points=conn.scale_eval_points, + synapse=conn.synapse, + transform=transform, + label=None if conn.label is None else "%s_enc" % conn.label, + add_to_container=False, + ) + _inherit_seed(host, pre2ens, model, conn) + host.build(pre2ens) + + logger.debug("Creating HostSendNode for %s", conn) + send = HostSendNode( + dim * 2, + label=None if conn.label is None else "%s_send" % conn.label, + add_to_container=False, + ) + host.build(send) + + ensneurons2send = Connection( + ens.neurons, + send, + synapse=None, + label=None if conn.label is None else "%s_host" % conn.label, + add_to_container=False, + ) + _inherit_seed(host, ensneurons2send, model, conn) + model.host2chip_senders[send] = receive + host.build(ensneurons2send) + + +def build_chip_to_host(model, conn): + rng = np.random.RandomState(model.seeds[conn]) + dim = conn.size_out + host = model.host_model(base_obj(conn.post)) + + logger.debug("Creating HostReceiveNode for %s", conn) + receive = HostReceiveNode( + dim, + label=None if conn.label is None else "%s_receive" % conn.label, + add_to_container=False, + ) + host.build(receive) + + receive2post = Connection( + receive, + conn.post, + synapse=conn.synapse, + label=None if conn.label is None else "%s_host" % conn.label, + add_to_container=False, + ) + _inherit_seed(host, receive2post, model, conn) + host.build(receive2post) + + logger.debug("Creating Probe for %s", conn) + transform = sample_transform(conn, rng=rng) + + probe = 
NengoProbe(conn.pre, + synapse=None, + solver=conn.solver, + add_to_container=False) + model.chip2host_params[probe] = dict( + learning_rule_type=conn.learning_rule_type, + function=conn.function, + eval_points=conn.eval_points, + scale_eval_points=conn.scale_eval_points, + transform=transform, + label=None if conn.label is None else "%s_probe" % conn.label, + ) + model.chip2host_receivers[probe] = receive + _inherit_seed(model, probe, model, conn) + model.builder.build(model, probe) + + if conn.learning_rule_type is not None: + if not isinstance(conn.pre_obj, Ensemble): + raise NotImplementedError( + "Learning rule presynaptic object must be an Ensemble " + "(got %r)" % type(conn.pre_obj).__name__) + model.needs_sender[conn.learning_rule] = PESModulatoryTarget(probe) + + +def build_host_to_learning_rule(model, conn): + dim = conn.size_out + host = model.host_model(base_obj(conn.pre)) + + logger.debug("Creating HostSendNode for %s", conn) + send = HostSendNode( + dim, + label=None if conn.label is None else "%s_send" % conn.label, + add_to_container=False, + ) + host.build(send) + + pre2send = Connection( + conn.pre, + send, + function=conn.function, + solver=conn.solver, + eval_points=conn.eval_points, + scale_eval_points=conn.scale_eval_points, + synapse=conn.synapse, + transform=conn.transform, + label=conn.label, + add_to_container=False, + ) + pes_target = model.needs_sender[conn.post_obj] + model.host2chip_senders[send] = pes_target + _inherit_seed(host, pre2send, model, conn) + host.build(pre2send) def build_decoders(model, conn, rng, sampled_transform): @@ -114,13 +375,10 @@ def build_no_solver(model, solver, conn, rng, sampled_transform): return _build_no_solver(model, solver, conn, rng) -@Builder.register(Connection) # noqa: C901 -def build_connection(model, conn): +def build_chip_connection(model, conn): # noqa: C901 if nengo_transforms is not None: if isinstance(conn.transform, nengo_transforms.Convolution): - # TODO: integrate these into the same function - conv.build_conv2d_connection(model, conn) - return + return build_conv2d_connection(model, conn) elif not isinstance(conn.transform, nengo_transforms.Dense): raise NotImplementedError( "nengo-loihi does not yet support %s transforms" @@ -388,3 +646,90 @@ def build_connection(model, conn): solver_info=solver_info, transform=transform, weights=weights) + + +def build_conv2d_connection(model, conn): + if nengo_transforms is None: + # It should not be possible to reach this, because this function is + # only called for a Convolution transform, which can exist only if + # nengo_transforms exists. 
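+        # (with nengo<=2.8.0 there is no nengo.transforms module, so
+        # nengo_transforms is None and Convolution is unavailable)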
+ raise NotImplementedError("Convolution requires newer Nengo") + + if conn.transform.dimensions != 2: + raise NotImplementedError("nengo-loihi only supports 2D convolution") + if conn.transform.padding != "valid": + raise NotImplementedError( + "nengo-loihi only supports convolution with 'valid' padding") + + # Create random number generator + rng = np.random.RandomState(model.seeds[conn]) + + pre_cx = model.objs[conn.pre_obj]['out'] + post_cx = model.objs[conn.post_obj]['in'] + assert isinstance(pre_cx, (LoihiInput, LoihiBlock)) + assert isinstance(post_cx, LoihiBlock) + + tau_s = 0.0 + if isinstance(conn.synapse, nengo.synapses.Lowpass): + tau_s = conn.synapse.tau + elif conn.synapse is not None: + raise NotImplementedError("Cannot handle non-Lowpass synapses") + + # --- pre + assert isinstance(conn.pre_obj, (Neurons, ChipReceiveNeurons)) + assert conn.pre_slice == slice(None) + + assert isinstance(conn.transform, nengo_transforms.Convolution) + + weights = conn.transform.sample(rng=rng) + input_shape = conn.transform.input_shape + + # Account for nengo spike height of 1/dt + weights = weights / model.dt + + if isinstance(conn.pre_obj, ChipReceiveNeurons): + neuron_type = conn.pre_obj.neuron_type + elif isinstance(conn.pre_obj, Neurons): + neuron_type = conn.pre_obj.ensemble.neuron_type + + if neuron_type is not None and hasattr(neuron_type, 'amplitude'): + weights = weights * neuron_type.amplitude + + # --- post + assert isinstance(conn.post_obj, Neurons) + assert conn.post_slice == slice(None) + + gain = model.params[conn.post_obj.ensemble].gain + if not np.all(gain == gain[0]): + # TODO: support this? + raise ValidationError( + "All neurons targeted by a Convolution connection must " + "have the same gain", "gain", obj=conn.post_obj.ensemble) + weights = weights * gain[0] + + pop_type = 32 # TODO: pick this + new_transform = copy.copy(conn.transform) + type(new_transform).init.data[new_transform] = weights + weights, indices, axon_to_weight_map, cx_bases = conv2d_loihi_weights( + new_transform) + + synapse = Synapse(np.prod(input_shape.spatial_shape), + label="conv2d_weights") + synapse.set_population_weights( + weights, indices, axon_to_weight_map, cx_bases, pop_type=pop_type) + post_cx.add_synapse(synapse) + model.objs[conn]['weights'] = synapse + + ax = Axon(np.prod(input_shape.spatial_shape), label="conv2d_weights") + ax.target = synapse + ax.cx_to_axon_map = pixel_idxs(input_shape) + ax.cx_atoms = channel_idxs(input_shape) + pre_cx.add_axon(ax) + + post_cx.compartment.configure_filter(tau_s, dt=model.dt) + + model.params[conn] = BuiltConnection( + eval_points=None, + solver_info=None, + transform=None, + weights=weights) diff --git a/nengo_loihi/builder/inputs.py b/nengo_loihi/builder/inputs.py new file mode 100644 index 000000000..74862587f --- /dev/null +++ b/nengo_loihi/builder/inputs.py @@ -0,0 +1,101 @@ +from collections import OrderedDict + +from nengo import Node +from nengo.exceptions import SimulationError +from nengo.params import Default +import numpy as np + + +class HostSendNode(Node): + """For sending host->chip messages""" + + def __init__(self, dimensions, label=Default): + self.queue = [] + super(HostSendNode, self).__init__( + self.update, + size_in=dimensions, + size_out=0, + label=label, + ) + + def update(self, t, x): + assert len(self.queue) == 0 or t > self.queue[-1][0] + self.queue.append((t, x)) + + +class HostReceiveNode(Node): + """For receiving chip->host messages""" + + def __init__(self, dimensions, label=Default): + self.queue = [(0, 
np.zeros(dimensions))] + self.queue_index = 0 + super(HostReceiveNode, self).__init__( + self.update, + size_in=0, + size_out=dimensions, + label=label, + ) + + def update(self, t): + while (len(self.queue) > self.queue_index + 1 + and self.queue[self.queue_index][0] < t): + self.queue_index += 1 + return self.queue[self.queue_index][1] + + def receive(self, t, x): + self.queue.append((t, x)) + + +class ChipReceiveNode(Node): + """For receiving host->chip messages""" + + def __init__(self, dimensions, size_out, label=Default): + self.raw_dimensions = dimensions + self.spikes = [] + self.spike_input = None # set by builder + super(ChipReceiveNode, self).__init__( + self.update, size_in=0, size_out=size_out, label=label) + + def clear(self): + self.spikes.clear() + + def receive(self, t, x): + assert len(self.spikes) == 0 or t > self.spikes[-1][0] + assert x.ndim == 1 + self.spikes.append((t, x.nonzero()[0])) + + def update(self, t): + raise SimulationError("ChipReceiveNodes should not be run") + + def collect_spikes(self): + assert self.spike_input is not None + for t, x in self.spikes: + yield (self.spike_input, t, x) + + +class ChipReceiveNeurons(ChipReceiveNode): + """Passes spikes directly (no on-off neuron encoding)""" + def __init__(self, dimensions, neuron_type=None, label=Default): + self.neuron_type = neuron_type + super(ChipReceiveNeurons, self).__init__( + dimensions, dimensions, label=label) + + +class PESModulatoryTarget: + def __init__(self, target): + self.target = target + self.errors = OrderedDict() + + def clear(self): + self.errors.clear() + + def receive(self, t, x): + assert len(self.errors) == 0 or t >= next(reversed(self.errors)) + if t in self.errors: + self.errors[t] += x + else: + self.errors[t] = np.array(x) + + def collect_errors(self): + for t, x in self.errors.items(): + yield (self.target, t, x) diff --git a/nengo_loihi/builder/node.py b/nengo_loihi/builder/node.py index 44a15d064..2ec4f7807 100644 --- a/nengo_loihi/builder/node.py +++ b/nengo_loihi/builder/node.py @@ -1,7 +1,8 @@ from nengo import Node from nengo_loihi.builder.builder import Builder -from nengo_loihi.inputs import ChipReceiveNode, SpikeInput +from nengo_loihi.builder.inputs import ChipReceiveNode +from nengo_loihi.inputs import SpikeInput @Builder.register(Node) diff --git a/nengo_loihi/builder/probe.py b/nengo_loihi/builder/probe.py index fda71d5ba..a2280a87f 100644 --- a/nengo_loihi/builder/probe.py +++ b/nengo_loihi/builder/probe.py @@ -1,5 +1,6 @@ import nengo from nengo import Ensemble, Connection, Node +from nengo.base import ObjView from nengo.connection import LearningRule from nengo.ensemble import Neurons from nengo.exceptions import BuildError @@ -45,6 +46,10 @@ def conn_probe(model, nengo_probe): raise NotImplementedError() target = nengo.Node(size_in=output_dim, add_to_container=False) + # TODO: This is a hack so that the builder can properly delegate the + # connection build to the right method + model.split._seen_objects.add(target) + model.split._chip_objects.add(target) conn = Connection( nengo_probe.target, @@ -70,10 +75,15 @@ def conn_probe(model, nengo_probe): model.seeded[conn] = model.seeded[nengo_probe] model.seeds[conn] = model.seeds[nengo_probe] + if isinstance(nengo_probe.target, ObjView): + target_obj = nengo_probe.target.obj + else: + target_obj = nengo_probe.target + d = conn.size_out - if isinstance(nengo_probe.target, Ensemble): + if isinstance(target_obj, Ensemble): # probed values are scaled by the target ensemble's radius - scale = nengo_probe.target.radius + 
scale = target_obj.radius w = np.diag(scale * np.ones(d)) weights = np.vstack([w, -w]) else: diff --git a/nengo_loihi/builder/tests/test_connection.py b/nengo_loihi/builder/tests/test_connection.py new file mode 100644 index 000000000..f4e5ccd78 --- /dev/null +++ b/nengo_loihi/builder/tests/test_connection.py @@ -0,0 +1,67 @@ +from distutils.version import LooseVersion + +import nengo +from nengo.exceptions import BuildError +import numpy as np +import pytest + + +@pytest.mark.skipif(LooseVersion(nengo.__version__) <= LooseVersion('2.8.0'), + reason="requires more recent Nengo version") +def test_split_conv2d_transform_error(Simulator): + with nengo.Network() as net: + node_offchip = nengo.Node([1]) + ens_onchip = nengo.Ensemble(10, 1) + conv2d = nengo.Convolution( + n_filters=1, input_shape=(1, 1, 1), kernel_size=(1, 1)) + nengo.Connection(node_offchip, ens_onchip, transform=conv2d) + + with pytest.raises(BuildError, match="Conv2D"): + with Simulator(net): + pass + + +@pytest.mark.parametrize("pre_dims", [1, 3]) +@pytest.mark.parametrize("post_dims", [1, 3]) +@pytest.mark.parametrize("learn", [True, False]) +@pytest.mark.parametrize("use_solver", [True, False]) +def test_manual_decoders( + seed, Simulator, pre_dims, post_dims, learn, use_solver): + + with nengo.Network(seed=seed) as model: + pre = nengo.Ensemble(50, dimensions=pre_dims, + gain=np.ones(50), + bias=np.ones(50) * 5) + post = nengo.Node(size_in=post_dims) + + learning_rule_type = nengo.PES() if learn else None + weights = np.zeros((post_dims, 50)) + if use_solver: + conn = nengo.Connection(pre, post, + function=lambda x: np.zeros(post_dims), + learning_rule_type=learning_rule_type, + solver=nengo.solvers.NoSolver(weights.T)) + else: + conn = nengo.Connection(pre.neurons, post, + learning_rule_type=learning_rule_type, + transform=weights) + + if learn: + error = nengo.Node(np.zeros(post_dims)) + nengo.Connection(error, conn.learning_rule) + + pre_probe = nengo.Probe(pre.neurons, synapse=None) + post_probe = nengo.Probe(post, synapse=None) + + if not use_solver and learn: + with pytest.raises(NotImplementedError): + with Simulator(model) as sim: + pass + else: + with Simulator(model) as sim: + sim.run(0.1) + + # Ensure pre population has a lot of activity + assert np.mean(sim.data[pre_probe]) > 100 + # But that post has no activity due to the zero weights + assert np.all(sim.data[post_probe] == 0) diff --git a/nengo_loihi/builder/tests/test_inputs.py b/nengo_loihi/builder/tests/test_inputs.py new file mode 100644 index 000000000..00802dd75 --- /dev/null +++ b/nengo_loihi/builder/tests/test_inputs.py @@ -0,0 +1,47 @@ +import nengo +from nengo.exceptions import SimulationError +import numpy as np +import pytest + +from nengo_loihi.builder.inputs import ChipReceiveNode, PESModulatoryTarget + + +def test_chipreceivenode_run_error(): + with nengo.Network() as net: + ChipReceiveNode(dimensions=1, size_out=1) + + with pytest.raises(SimulationError, match="should not be run"): + with nengo.Simulator(net) as sim: + sim.step() + + +def test_pesmodulatorytarget_interface(): + target = "target" + p = PESModulatoryTarget(target) + + t0 = 4 + e0 = [1.8, 2.4, 3.3] + t1 = t0 + 3 + e1 = [7.2, 2.2, 4.1] + e01 = np.array(e0) + np.array(e1) + + p.receive(t0, e0) + assert isinstance(p.errors[t0], np.ndarray) + assert np.allclose(p.errors[t0], e0) + + p.receive(t0, e1) + assert np.allclose(p.errors[t0], e01) + + with pytest.raises(AssertionError): + p.receive(t0 - 1, e0) # time needs to be >= last time + + p.receive(t1, e1) + assert 
np.allclose(p.errors[t1], e1) + + errors = list(p.collect_errors()) + assert len(errors) == 2 + assert errors[0][:2] == (target, t0) and np.allclose(errors[0][2], e01) + assert errors[1][:2] == (target, t1) and np.allclose(errors[1][2], e1) + + p.clear() + assert len(list(p.collect_errors())) == 0 diff --git a/nengo_loihi/conv.py b/nengo_loihi/conv.py index 228545ec9..1855115c3 100644 --- a/nengo_loihi/conv.py +++ b/nengo_loihi/conv.py @@ -1,15 +1,8 @@ -import copy import itertools -import nengo -from nengo.builder.connection import BuiltConnection -from nengo.ensemble import Neurons -from nengo.exceptions import ValidationError import numpy as np -from nengo_loihi.block import Axon, LoihiBlock, Synapse from nengo_loihi.compat import nengo_transforms -from nengo_loihi.inputs import ChipReceiveNeurons, LoihiInput class ImageSlice: @@ -105,93 +98,6 @@ def pixel_idxs(shape): (idxs % np.prod(shape.spatial_shape))) -def build_conv2d_connection(model, conn): - if nengo_transforms is None: - # It should not be possible to reach this, because this function is - # only called for a Convolution transform, which can exist only if - # nengo_transforms exists. - raise NotImplementedError("Convolution requires newer Nengo") - - if conn.transform.dimensions != 2: - raise NotImplementedError("nengo-loihi only supports 2D convolution") - if conn.transform.padding != "valid": - raise NotImplementedError( - "nengo-loihi only supports convolution with 'valid' padding") - - # Create random number generator - rng = np.random.RandomState(model.seeds[conn]) - - pre_cx = model.objs[conn.pre_obj]['out'] - post_cx = model.objs[conn.post_obj]['in'] - assert isinstance(pre_cx, (LoihiInput, LoihiBlock)) - assert isinstance(post_cx, LoihiBlock) - - tau_s = 0.0 - if isinstance(conn.synapse, nengo.synapses.Lowpass): - tau_s = conn.synapse.tau - elif conn.synapse is not None: - raise NotImplementedError("Cannot handle non-Lowpass synapses") - - # --- pre - assert isinstance(conn.pre_obj, (Neurons, ChipReceiveNeurons)) - assert conn.pre_slice == slice(None) - - assert isinstance(conn.transform, nengo_transforms.Convolution) - - weights = conn.transform.sample(rng=rng) - input_shape = conn.transform.input_shape - - # Account for nengo spike height of 1/dt - weights = weights / model.dt - - if isinstance(conn.pre_obj, ChipReceiveNeurons): - neuron_type = conn.pre_obj.neuron_type - elif isinstance(conn.pre_obj, Neurons): - neuron_type = conn.pre_obj.ensemble.neuron_type - - if neuron_type is not None and hasattr(neuron_type, 'amplitude'): - weights = weights * neuron_type.amplitude - - # --- post - assert isinstance(conn.post_obj, Neurons) - assert conn.post_slice == slice(None) - - gain = model.params[conn.post_obj.ensemble].gain - if not np.all(gain == gain[0]): - # TODO: support this? 
- raise ValidationError( - "All neurons targeted by a Convolution connection must " - "have the same gain", "gain", obj=conn.post_obj.ensemble) - weights = weights * gain[0] - - pop_type = 32 # TODO: pick this - new_transform = copy.copy(conn.transform) - type(new_transform).init.data[new_transform] = weights - weights, indices, axon_to_weight_map, cx_bases = conv2d_loihi_weights( - new_transform) - - synapse = Synapse(np.prod(input_shape.spatial_shape), - label="conv2d_weights") - synapse.set_population_weights( - weights, indices, axon_to_weight_map, cx_bases, pop_type=pop_type) - post_cx.add_synapse(synapse) - model.objs[conn]['weights'] = synapse - - ax = Axon(np.prod(input_shape.spatial_shape), label="conv2d_weights") - ax.target = synapse - ax.cx_to_axon_map = pixel_idxs(input_shape) - ax.cx_atoms = channel_idxs(input_shape) - pre_cx.add_axon(ax) - - post_cx.compartment.configure_filter(tau_s, dt=model.dt) - - model.params[conn] = BuiltConnection( - eval_points=None, - solver_info=None, - transform=None, - weights=weights) - - def conv2d_loihi_weights(transform): # TODO: It appears from that there is an upper limit on # CxBase of 256 (bug), so I had to make extra sets of redundant weights diff --git a/nengo_loihi/emulator/interface.py b/nengo_loihi/emulator/interface.py index 198818eec..e4dc4d28c 100644 --- a/nengo_loihi/emulator/interface.py +++ b/nengo_loihi/emulator/interface.py @@ -4,7 +4,7 @@ import logging import warnings -from nengo.exceptions import SimulationError +from nengo.exceptions import SimulationError, ValidationError from nengo.utils.compat import is_array, is_number import numpy as np @@ -294,7 +294,8 @@ def decay_float(x, u, d, s): def overflow(x, bits, name=None): pass # do not do overflow in floating point else: - raise ValueError("dtype %r not supported" % self.dtype) + raise ValidationError("dtype %r not supported" % self.dtype, + attr='dtype', obj=block_info) self._overflow = overflow @@ -367,7 +368,8 @@ def uniform(rng, n=self.n_compartments): def uniform(rng, n=self.n_compartments): return rng.uniform(-1, 1, size=n).astype(np.float32) else: - raise ValueError("dtype %r not supported" % self.dtype) + raise ValidationError("dtype %r not supported" % self.dtype, + attr='dtype', obj=block_info) assert not np.any(np.isnan(self.enabled)) assert not np.any(np.isnan(self.exp)) @@ -469,7 +471,8 @@ def weight_update(synapse, delta_ws, rng=None): for w, delta_w in zip(synapse.weights, delta_ws): w += synapse.learning_rate * delta_w else: - raise ValueError("dtype %r not supported" % self.dtype) + raise ValidationError("dtype %r not supported" % self.dtype, + attr='dtype', obj=block_info) self._trace_round = trace_round self._weight_update = weight_update diff --git a/nengo_loihi/hardware/builder.py b/nengo_loihi/hardware/builder.py index 998b5b43a..9d734f703 100644 --- a/nengo_loihi/hardware/builder.py +++ b/nengo_loihi/hardware/builder.py @@ -388,7 +388,7 @@ def build_synapse(n2core, core, block, synapse, cx_idxs): # noqa C901 n2core.synapseMap[axon_id].population32MapEntry.configure( cxBase=cx_base) else: - raise ValueError("Synapse: unrecognized pop_type: %s" % ( + raise BuildError("Synapse: unrecognized pop_type: %s" % ( synapse.pop_type,)) if synapse.learning: @@ -445,7 +445,7 @@ def collect_axons(n2core, core, block, axon, cx_ids): "with a multi-chip allocator" % ( tchip_id_source, tchip_id)) else: - raise ValueError("Axon: unrecognized pop_type: %s" % ( + raise BuildError("Axon: unrecognized pop_type: %s" % ( synapse.pop_type,)) return all_axons diff --git 
a/nengo_loihi/hardware/nxsdk_shim.py b/nengo_loihi/hardware/nxsdk_shim.py index b181f53cd..2d3e88f9d 100644 --- a/nengo_loihi/hardware/nxsdk_shim.py +++ b/nengo_loihi/hardware/nxsdk_shim.py @@ -1,8 +1,6 @@ from distutils.version import LooseVersion import os -import shutil import sys -import tempfile try: import nxsdk @@ -15,27 +13,6 @@ def assert_nxsdk(): pass - from nxsdk.graph import graph - - class PatchedGraph(graph.Graph): - def __init__(self, *args, **kwargs): - super(PatchedGraph, self).__init__(*args, **kwargs) - self.nengo_tmp_dirs = [] - - def createProcess(self, name, cFilePath, *args, **kwargs): - # copy the c file to a temporary directory (so that multiple - # simulations can use the same snip files without running into - # problems) - tmp = tempfile.TemporaryDirectory() - self.nengo_tmp_dirs.append(tmp) - - tmp_path = os.path.join(tmp.name, os.path.basename(cFilePath)) - shutil.copyfile(cFilePath, tmp_path) - - return super(PatchedGraph, self).createProcess( - name, tmp_path, *args, **kwargs) - - graph.Graph = PatchedGraph except ImportError: HAS_NXSDK = False nxsdk_dir = None diff --git a/nengo_loihi/inputs.py b/nengo_loihi/inputs.py index 902254206..931d5b69d 100644 --- a/nengo_loihi/inputs.py +++ b/nengo_loihi/inputs.py @@ -1,10 +1,4 @@ -from __future__ import division - -from nengo import Node -from nengo.exceptions import SimulationError -from nengo.params import Default from nengo.utils.compat import is_integer -import numpy as np class LoihiInput: @@ -34,78 +28,3 @@ def spike_times(self): def spike_idxs(self, ti): return self.spikes.get(ti, []) - - -class HostSendNode(Node): - """For sending host->chip messages""" - - def __init__(self, dimensions, label=Default): - self.queue = [] - super(HostSendNode, self).__init__( - self.update, - size_in=dimensions, - size_out=0, - label=label, - ) - - def update(self, t, x): - assert len(self.queue) == 0 or t > self.queue[-1][0] - self.queue.append((t, x)) - - -class HostReceiveNode(Node): - """For receiving chip->host messages""" - - def __init__(self, dimensions, label=Default): - self.queue = [(0, np.zeros(dimensions))] - self.queue_index = 0 - super(HostReceiveNode, self).__init__( - self.update, - size_in=0, - size_out=dimensions, - label=label, - ) - - def update(self, t): - while (len(self.queue) > self.queue_index + 1 - and self.queue[self.queue_index][0] < t): - self.queue_index += 1 - return self.queue[self.queue_index][1] - - def receive(self, t, x): - self.queue.append((t, x)) - - -class ChipReceiveNode(Node): - """For receiving host->chip messages""" - - def __init__(self, dimensions, size_out, label=Default): - self.raw_dimensions = dimensions - self.spikes = [] - self.spike_input = None # set by builder - super(ChipReceiveNode, self).__init__( - self.update, size_in=0, size_out=size_out, label=label) - - def clear(self): - self.spikes.clear() - - def receive(self, t, x): - assert len(self.spikes) == 0 or t > self.spikes[-1][0] - assert x.ndim == 1 - self.spikes.append((t, x.nonzero()[0])) - - def update(self, t): - raise SimulationError("ChipReceiveNodes should not be run") - - def collect_spikes(self): - assert self.spike_input is not None - for t, x in self.spikes: - yield (self.spike_input, t, x) - - -class ChipReceiveNeurons(ChipReceiveNode): - """Passes spikes directly (no on-off neuron encoding)""" - def __init__(self, dimensions, neuron_type=None, label=Default): - self.neuron_type = neuron_type - super(ChipReceiveNeurons, self).__init__( - dimensions, dimensions, label=label) diff --git 
a/nengo_loihi/passthrough.py b/nengo_loihi/passthrough.py index c2ee42be2..3b6e6f99e 100644 --- a/nengo_loihi/passthrough.py +++ b/nengo_loihi/passthrough.py @@ -1,13 +1,13 @@ from collections import OrderedDict import warnings -from nengo import Connection, Lowpass, Node +from nengo import Connection, Lowpass, Node, Probe +from nengo.base import ObjView from nengo.connection import LearningRule from nengo.ensemble import Neurons from nengo.exceptions import BuildError, NengoException import numpy as np - from nengo_loihi.compat import nengo_transforms, transform_array @@ -16,7 +16,9 @@ def is_passthrough(obj): def base_obj(obj): - """Returns the Ensemble or Node underlying an object""" + """Returns the object underlying some view or neurons.""" + if isinstance(obj, ObjView): + obj = obj.obj if isinstance(obj, Neurons): return obj.ensemble return obj @@ -210,51 +212,7 @@ def generate_conns(self): ) -def find_clusters(net, offchip): - """Create the Clusters for a given nengo Network.""" - - # find which objects have Probes, as we need to make sure to keep them - probed_objs = set(base_obj(p.target) for p in net.all_probes) - - clusters = OrderedDict() # mapping from object to its Cluster - for c in net.all_connections: - base_pre = base_obj(c.pre_obj) - base_post = base_obj(c.post_obj) - - pass_pre = is_passthrough(c.pre_obj) and c.pre_obj not in offchip - if pass_pre and c.pre_obj not in clusters: - # add new objects to their own initial Cluster - clusters[c.pre_obj] = Cluster(c.pre_obj) - if c.pre_obj in probed_objs: - clusters[c.pre_obj].probed_objs.add(c.pre_obj) - - pass_post = is_passthrough(c.post_obj) and c.post_obj not in offchip - if pass_post and c.post_obj not in clusters: - # add new objects to their own initial Cluster - clusters[c.post_obj] = Cluster(c.post_obj) - if c.post_obj in probed_objs: - clusters[c.post_obj].probed_objs.add(c.post_obj) - - if pass_pre and pass_post: - # both pre and post are passthrough, so merge the two - # clusters into one cluster - cluster = clusters[base_pre] - cluster.merge_with(clusters[base_post]) - for obj in cluster.objs: - clusters[obj] = cluster - cluster.conns_mid.add(c) - elif pass_pre: - # pre is passthrough but post is not, so this is an output - cluster = clusters[base_pre] - cluster.conns_out.add(c) - elif pass_post: - # pre is not a passthrough but post is, so this is an input - cluster = clusters[base_post] - cluster.conns_in.add(c) - return clusters - - -def convert_passthroughs(network, offchip): +class PassthroughSplit: """Create a set of Connections that could replace the passthrough Nodes. This does not actually modify the Network; instead, the passthrough Nodes and Connections to be removed are collected in ``to_remove``, and the Connections that should be added to replace them are collected in ``to_add``. - The parameter offchip provides a list of objects that should be considered - to be offchip. The system will only remove passthrough Nodes that go - between two onchip objects. + The parameter ignore provides a list of objects (i.e., ensembles and nodes) + that should not be considered by the passthrough removal process. + The system will only remove passthrough Nodes where neither pre nor post + is ignored.
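+
+    A rough usage sketch (``offchip_objs`` is a placeholder for whatever
+    set of host-side objects the caller has already computed)::
+
+        split = PassthroughSplit(network, ignore=offchip_objs)
+        # the builder then skips everything in split.to_remove and builds
+        # the replacement Connections in split.to_add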
""" - clusters = find_clusters(network, offchip=offchip) - - removed_passthroughs = set() - removed_connections = set() - added_connections = set() - handled_clusters = set() - for cluster in clusters.values(): - if cluster not in handled_clusters: - handled_clusters.add(cluster) - onchip_input = False - onchip_output = False - for c in cluster.conns_in: - if base_obj(c.pre_obj) not in offchip: - onchip_input = True - break - for c in cluster.conns_out: - if base_obj(c.post_obj) not in offchip: - onchip_output = True - break - has_input = len(cluster.conns_in) > 0 - no_output = len(cluster.conns_out) + len(cluster.probed_objs) == 0 - - if has_input and ((onchip_input and onchip_output) or no_output): - try: - new_conns = list(cluster.generate_conns()) - except ClusterError: - # this Cluster has an issue, so don't remove it - continue - - removed_passthroughs.update(cluster.objs - cluster.probed_objs) - removed_connections.update(cluster.conns_in - | cluster.conns_mid - | cluster.conns_out) - added_connections.update(new_conns) - return removed_passthroughs, removed_connections, added_connections + + def __init__(self, network, ignore=None): + self.network = network + self.ignore = ignore if ignore is not None else set() + + self.to_remove = set() + self.to_add = set() + + if self.network is not None: + self.clusters = self._find_clusters() + self._already_split = set() + for cluster in self.clusters.values(): + if cluster not in self._already_split: + self._split_cluster(cluster) + + def _find_clusters(self): + """Find Clusters for the given Network.""" + + # find which objects have Probes, as we need to make sure to keep them + probed_objs = set(base_obj(p.target) for p in self.network.all_probes) + + clusters = OrderedDict() # mapping from object to its Cluster + for c in self.network.all_connections: + # We assume that neither pre nor post can be a probe which + # simplifies things slightly because we don't need to be + # concerned with any underlying target. 
+ base_pre = base_obj(c.pre) + base_post = base_obj(c.post) + assert not isinstance(base_pre, Probe) + assert not isinstance(base_post, Probe) + + pass_pre = (is_passthrough(c.pre_obj) + and c.pre_obj not in self.ignore) + if pass_pre and c.pre_obj not in clusters: + # add new objects to their own initial Cluster + clusters[c.pre_obj] = Cluster(c.pre_obj) + if c.pre_obj in probed_objs: + clusters[c.pre_obj].probed_objs.add(c.pre_obj) + + pass_post = (is_passthrough(c.post_obj) + and c.post_obj not in self.ignore) + if pass_post and c.post_obj not in clusters: + # add new objects to their own initial Cluster + clusters[c.post_obj] = Cluster(c.post_obj) + if c.post_obj in probed_objs: + clusters[c.post_obj].probed_objs.add(c.post_obj) + + if pass_pre and pass_post: + # both pre and post are passthrough, so merge the two + # clusters into one cluster + cluster = clusters[base_pre] + cluster.merge_with(clusters[base_post]) + for obj in cluster.objs: + clusters[obj] = cluster + cluster.conns_mid.add(c) + elif pass_pre: + # pre is passthrough but post is not, so this is an output + cluster = clusters[base_pre] + cluster.conns_out.add(c) + elif pass_post: + # pre is not a passthrough but post is, so this is an input + cluster = clusters[base_post] + cluster.conns_in.add(c) + return clusters + + def _split_cluster(self, cluster): + """Split a Cluster.""" + assert cluster not in self._already_split + self._already_split.add(cluster) + + onchip_input = any(base_obj(c.pre) not in self.ignore + for c in cluster.conns_in) + onchip_output = any(base_obj(c.post) not in self.ignore + for c in cluster.conns_out) + + has_input = len(cluster.conns_in) > 0 + no_output = len(cluster.conns_out) + len(cluster.probed_objs) == 0 + + if has_input and ((onchip_input and onchip_output) or no_output): + try: + new_conns = list(cluster.generate_conns()) + except ClusterError: + # this Cluster has an issue, so don't remove it + return + + self.to_remove.update(cluster.objs - cluster.probed_objs) + self.to_remove.update( + cluster.conns_in | cluster.conns_mid | cluster.conns_out) + self.to_add.update(new_conns) diff --git a/nengo_loihi/simulator.py b/nengo_loihi/simulator.py index 2e25d7cd2..6537317db 100644 --- a/nengo_loihi/simulator.py +++ b/nengo_loihi/simulator.py @@ -4,7 +4,6 @@ import warnings import nengo -from nengo.cache import get_default_decoder_cache from nengo.exceptions import ( ReadonlyError, SimulatorClosed, @@ -21,7 +20,7 @@ from nengo_loihi.discretize import discretize_model from nengo_loihi.emulator import EmulatorInterface from nengo_loihi.hardware import HardwareInterface, HAS_NXSDK -from nengo_loihi.splitter import split +from nengo_loihi.splitter import Split logger = logging.getLogger(__name__) @@ -48,7 +47,7 @@ class Simulator: Parameters ---------- - network : Network or None + network : Network - A network object to be built and then simulated. If None, then the - *model* parameter must be provided instead. + A network object to be built and then simulated. dt : float, optional (Default: 0.001) @@ -62,7 +61,7 @@ want to build the network manually, or you want to inject build artifacts in the model before building the network, then you can pass in a `.Model` instance. - precompute : bool, optional (Default: True) + precompute : bool, optional (Default: False) Whether model inputs should be precomputed to speed up simulation.
When *precompute* is False, the simulator will be run one step at a time in order to use model outputs as inputs in other parts @@ -110,7 +109,7 @@ def __init__( # noqa: C901 # initialize values used in __del__ and close() first self.closed = True self.precompute = precompute - self.networks = None + self.network = network self.sims = OrderedDict() self._run_steps = None @@ -131,60 +130,60 @@ def __init__( # noqa: C901 self.model = model assert self.model.dt == dt - if network is not None: - nengo.rc.set("decoder_cache", "enabled", "False") - config.add_params(network) - - # ensure seeds are identical to nengo - seed_network(network, seeds=self.model.seeds, - seeded=self.model.seeded) - - # split the host into one, two or three networks - self.networks = split( - network, - precompute=precompute, - node_neurons=self.model.node_neurons, - node_tau=self.model.decode_tau, - remove_passthrough=remove_passthrough, - ) - network = self.networks.chip - - self.model.chip2host_params = self.networks.chip2host_params - - self.chip = self.networks.chip - self.host = self.networks.host - self.host_pre = self.networks.host_pre - - if len(self.host_pre.all_objects) > 0: - host_pre_model = self._get_host_model( - self.host_pre, dt=dt, seeds=self.model.seeds, - seeded=self.model.seeded) - self.sims["host_pre"] = nengo.Simulator(self.host_pre, - dt=self.dt, - model=host_pre_model, - progress_bar=False, - optimize=False) - - if len(self.host.all_objects) > 0: - host_model = self._get_host_model( - self.host, dt=dt, seeds=self.model.seeds, - seeded=self.model.seeded) - self.sims["host"] = nengo.Simulator( - self.host, - dt=self.dt, - model=host_model, - progress_bar=False, - optimize=False) - elif not precompute: - # If there is no host and precompute=False, then all objects - # must be on the chip, which is precomputable in the sense that - # no communication has to happen with the host. - # We could warn about this, but we want to avoid people having - # to specify `precompute` unless they absolutely have to. - self.precompute = True - - # Build the network into the model - self.model.build(network) + if network is None: + raise ValidationError("network parameter must not be None", + attr="network") + + config.add_params(network) + + # ensure seeds are identical to nengo + # this has no effect for nengo<=2.8.0 + seed_network(network, seeds=self.model.seeds, + seeded=self.model.seeded) + + # determine how to split the host into one, two or three models + self.model.split = Split(network, + precompute=precompute, + remove_passthrough=remove_passthrough) + + # Build the network into the model + self.model.build(network) + + # Build the extra passthrough connections into the model + passthrough = self.model.split.passthrough + for conn in passthrough.to_add: + # Note: connections added by the passthrough splitter do not + # respect seeds + self.model.seeds[conn] = None + self.model.seeded[conn] = False + self.model.build(conn) + + if len(self.model.host_pre.params): + assert precompute + self.sims["host_pre"] = nengo.Simulator( + network=None, + dt=self.dt, + model=self.model.host_pre, + progress_bar=False, + optimize=False) + elif precompute: + warnings.warn("No precomputable objects. 
Setting " + "precompute=True has no effect.") + + if len(self.model.host.params): + self.sims["host"] = nengo.Simulator( + network=None, + dt=self.dt, + model=self.model.host, + progress_bar=False, + optimize=False) + elif not precompute: + # If there is no host and precompute=False, then all objects + # must be on the chip, which is precomputable in the sense that + # no communication has to happen with the host. + # We could warn about this, but we want to avoid people having + # to specify `precompute` unless they absolutely have to. + self.precompute = True self._probe_outputs = self.model.params self.data = ProbeDict(self._probe_outputs) @@ -223,16 +222,6 @@ def __init__( # noqa: C901 self.closed = False self.reset(seed=seed) - @staticmethod - def _get_host_model(network, dt, seeds, seeded): - model = nengo.builder.Model( - dt=float(dt), - label="%s, dt=%f" % (network, dt), - decoder_cache=get_default_decoder_cache()) - model.seeds.update(seeds) - model.seeded.update(seeded) - return model - def __del__(self): """Raise a ResourceWarning if we are deallocated while open.""" if not self.closed: @@ -288,7 +277,7 @@ def _probe(self): self._probe_step_time() for probe in self.model.probes: - if probe in self.networks.chip2host_params: + if probe in self.model.chip2host_params: continue assert probe.sample_every is None, ( "probe.sample_every not implemented") @@ -371,7 +360,7 @@ def step(self): def _collect_receiver_info(self): spikes = [] errors = OrderedDict() - for sender, receiver in self.networks.host2chip_senders.items(): + for sender, receiver in self.model.host2chip_senders.items(): receiver.clear() for t, x in sender.queue: receiver.receive(t, x) @@ -404,7 +393,7 @@ def _host2chip(self, sim): def _chip2host(self, sim): probes_receivers = OrderedDict( # map probes to receivers (self.model.objs[probe]['out'], receiver) - for probe, receiver in self.networks.chip2host_receivers.items()) + for probe, receiver in self.model.chip2host_receivers.items()) sim.chip2host(probes_receivers) def _make_run_steps(self): diff --git a/nengo_loihi/splitter.py b/nengo_loihi/splitter.py index 920faa3a9..639f4426a 100644 --- a/nengo_loihi/splitter.py +++ b/nengo_loihi/splitter.py @@ -1,543 +1,159 @@ -from collections import defaultdict, OrderedDict -import copy -import logging -import warnings +from collections import defaultdict -from nengo import Connection, Direct, Ensemble, Network, Node, Probe -from nengo.base import ObjView -from nengo.connection import LearningRule -from nengo.ensemble import Neurons +from nengo import Direct, Ensemble, Node, Probe from nengo.exceptions import BuildError -import numpy as np - -from nengo_loihi.compat import nengo_transforms, sample_transform -from nengo_loihi.inputs import ( - ChipReceiveNode, - ChipReceiveNeurons, - HostSendNode, - HostReceiveNode, -) -from nengo_loihi.passthrough import convert_passthroughs - -logger = logging.getLogger(__name__) - - -class PESModulatoryTarget: - def __init__(self, target): - self.target = target - self.errors = OrderedDict() - - def clear(self): - self.errors.clear() - - def receive(self, t, x): - assert len(self.errors) == 0 or t >= next(reversed(self.errors)) - if t in self.errors: - self.errors[t] += x - else: - self.errors[t] = np.array(x) - - def collect_errors(self): - for t, x in self.errors.items(): - yield (self.target, t, x) - - -def base_obj(obj): - if isinstance(obj, ObjView): - obj = obj.obj - if isinstance(obj, Neurons): - return obj.ensemble - elif isinstance(obj, LearningRule): - return obj.connection - 
return obj - - -class SplitNetworks: - def __init__(self, original, node_neurons=None, node_tau=0.005): - self.original = original - self.node_neurons = node_neurons - self.node_tau = node_tau - - self.host = Network(seed=original.seed) - self.chip = Network(seed=original.seed) - self.host_pre = Network(seed=original.seed) - - self.targets = ("host", "chip", "host_pre") - - # Interactions between rules - self.needs_sender = {} - - # Used later in the build process - self.chip2host_params = {} - self.chip2host_receivers = OrderedDict() - self.host2chip_senders = OrderedDict() - - self.adds = OrderedDict() - self.moves = OrderedDict() - self.removes = [] - - def __contains__(self, obj): - obj = base_obj(obj) - return (obj in self.moves - or obj in self.adds - or obj in self.removes) - - def add(self, obj, target): - assert target in self.targets, "invalid target" - obj = base_obj(obj) - assert obj not in self, "obj already moved" - self.adds[obj] = target - - def finalize(self): - def _add(obj, net): - for cls in type(obj).__mro__: - if cls in net.objects: - net.objects[cls].append(obj) - break - else: - assert False, "cannot handle type %r" % (type(obj).__name__,) - - # Ensure that all objects have been dealt with - for obj in self.original.all_objects: - if not isinstance(obj, Network): - assert obj in self, ( - "%s not moved or explicitly removed" % (obj,)) - - # Process moves and adds - for obj, target in self.moves.items(): - _add(obj, getattr(self, target)) - for obj, target in self.adds.items(): - _add(obj, getattr(self, target)) - - def location(self, obj, default=None): - obj = base_obj(obj) - return self.moves.get(obj, self.adds.get(obj, default)) +from nengo.connection import LearningRule - def move(self, obj, target, force=False): - obj = base_obj(obj) - if not force: - assert obj not in self, "already moved" - assert target in self.targets, "invalid target" - logger.debug("Moving %s to %s", obj, target) - if obj in self.adds: - self.adds[obj] = target +from nengo_loihi.passthrough import base_obj, is_passthrough, PassthroughSplit + + +class Split: + """Creates a set of directives to guide the builder.""" + + def __init__(self, network, precompute=False, remove_passthrough=True): + self.network = network + + # subset of network: only nodes and ensembles; + # probes are handled dynamically + self._seen_objects = set() + + # subset of seen, marking which are run on the hardware; + # those running on the host are "seen - chip" + self._chip_objects = set() + + # Step 1. Place nodes on host + self._seen_objects.update(network.all_nodes) + + # Step 2. Place all possible ensembles on chip + # Note: assumes add_params already called by the simulator + for ens in network.all_ensembles: + if (network.config[ens].on_chip in (None, True) + and not isinstance(ens.neuron_type, Direct)): + self._chip_objects.add(ens) + self._seen_objects.add(ens) + + # Step 3. Move learning ensembles (post and error) to host + for conn in network.all_connections: + pre = base_obj(conn.pre) + post = base_obj(conn.post) + if (conn.learning_rule_type is not None + and isinstance(post, Ensemble) + and post in self._chip_objects): + if network.config[post].on_chip: + raise BuildError("Post ensemble (%r) of learned " + "connection (%r) must not be configured " + "as on_chip." 
% (post, conn)) + self._chip_objects.remove(post) + elif (isinstance(post, LearningRule) + and isinstance(pre, Ensemble) + and pre in self._chip_objects): + if network.config[pre].on_chip: + raise BuildError("Pre ensemble (%r) of error " + "connection (%r) must not be configured " + "as on_chip." % (pre, conn)) + self._chip_objects.remove(pre) + + # Step 4. Mark passthrough nodes for removal + if remove_passthrough: + passthroughs = set( + obj for obj in network.all_nodes if is_passthrough(obj)) + ignore = self._seen_objects - self._chip_objects - passthroughs + self.passthrough = PassthroughSplit(network, ignore) else: - self.moves[obj] = target - - def remove(self, obj): - obj = base_obj(obj) - logger.debug("Removing %s", obj) - self.removes.append(obj) - if obj in self.adds: - del self.adds[obj] - elif obj in self.moves: - del self.moves[obj] - - -def split(net, precompute, node_neurons, node_tau, remove_passthrough=False): - logger.info("Splitting model into host and chip parts") - networks = SplitNetworks(net, node_neurons=node_neurons, - node_tau=node_tau) - - # --- Step 1: place ensembles and nodes - place_nodes(networks) - place_ensembles(networks) - - # --- Step 1b: remove passthrough nodes - if remove_passthrough: - conns = merge_passthrough_nodes(networks) - else: - conns = networks.original.all_connections - - # --- Step 2: place simple connections - place_internetwork_connections(networks, conns) - - # --- Step 3: split complex connections - split_host_to_chip_connections(networks, conns) - split_chip_to_host_connections(networks, conns) - split_host_to_learning_rules(networks, conns) - - # --- Step 4: place precomputable parts of host - if precompute: - split_pre_from_host(networks) - - # --- Step 5: place probes - place_probes(networks) - - # Commit to the moves marked in the previous steps - networks.finalize() - if precompute: - if len(networks.host_pre.all_objects) == 0: - warnings.warn("No precomputable objects. Setting precompute=True " - "has no effect.") - else: - assert len(networks.host_pre.all_objects) == 0, ( - "Object erroneously added to host_pre") - - return networks + self.passthrough = PassthroughSplit(None) - -def place_nodes(networks): - # Only ChipReceiveNodes can be run on chip - for node in networks.original.all_nodes: - if isinstance(node, ChipReceiveNode): - # Typically ChipReceiveNodes are created by the splitter, but - # it's conceivable that advanced users might make them manually - networks.move(node, "chip") + # Step 5. 
Split precomputable parts of host + # This is a subset of host, marking which are precomputable + if precompute: + self._host_precomputable_objects = self._preclosure() else: - networks.move(node, "host") - - -def place_ensembles(networks): - config = networks.original.config - - for ens in networks.original.all_ensembles: - # User-specified config takes precedence - if config[ens].on_chip is not None: - networks.move(ens, "chip" if config[ens].on_chip else "host") - # Direct mode ensembles must be off-chip - elif isinstance(ens.neuron_type, Direct): - networks.move(ens, "host") - - for conn in networks.original.all_connections: - # `post` of learning rules must be off chip - if (conn.learning_rule_type is not None - and isinstance(base_obj(conn.post_obj), Ensemble) - and conn.post_obj not in networks): - networks.move(conn.post_obj, "host") - # `error` of learning rules must be off chip - elif (isinstance(conn.post_obj, LearningRule) - and isinstance(base_obj(conn.pre_obj), Ensemble) - and conn.pre_obj not in networks): - networks.move(conn.pre_obj, "host") - - # All other ensembles are placed on chip - for ens in networks.original.all_ensembles: - if ens not in networks: - networks.move(ens, "chip") - - -def merge_passthrough_nodes(networks): - offchip = set() - for obj, target in networks.moves.items(): - if isinstance(obj, Node) and obj.output is None: - # this is a passthrough Node so don't force it to be offchip - continue - elif target == 'host': - offchip.add(obj) - - remove_nodes, remove_conns, add_conns = convert_passthroughs( - networks.original, offchip) - for n in remove_nodes: - networks.remove(n) - for c in remove_conns: - networks.remove(c) - - conns = networks.original.all_connections - for c in remove_conns: - conns.remove(c) - conns.extend(add_conns) - - return conns - - -def place_internetwork_connections(networks, conns): - """Connections from two objects placed in the same location go there. - - That is, connections from two objects on the host are done on the host, - and connections from two objects on the chip are done on the chip. 
- """ - for conn in conns: - pre_loc = networks.location(conn.pre_obj) - post_loc = networks.location(conn.post_obj) - if pre_loc == post_loc: - if pre_loc == "chip": - assert conn.learning_rule_type is None - networks.move(conn, pre_loc) - - -def split_host_to_chip_connections(networks, conns): - for conn in conns: - if conn in networks: - # Already processed - continue - - pre_loc = networks.location(conn.pre_obj) - post_loc = networks.location(conn.post_obj) - if pre_loc == "host" and post_loc == "chip": - if isinstance(conn.pre_obj, Neurons): - split_host_neurons_to_chip(networks, conn) - else: - split_host_to_chip(networks, conn) - assert conn in networks - - -def split_host_neurons_to_chip(networks, conn): - """Send spikes over and do the rest of the connection on-chip""" - - assert not isinstance(conn.post, LearningRule) - dim = conn.size_in - - logger.debug("Creating ChipReceiveNeurons for %s", conn) - receive = ChipReceiveNeurons( - dim, - neuron_type=conn.pre_obj.ensemble.neuron_type, - label=None if conn.label is None else "%s_neurons" % conn.label, - add_to_container=False, - ) - networks.add(receive, "chip") - receive2post = Connection( - receive, - conn.post, - transform=conn.transform, - synapse=conn.synapse, - label=None if conn.label is None else "%s_chip" % conn.label, - add_to_container=False, - ) - networks.add(receive2post, "chip") - - logger.debug("Creating HostSendNode for %s", conn) - send = HostSendNode( - dim, - label=None if conn.label is None else "%s_send" % conn.label, - add_to_container=False, - ) - networks.add(send, "host") - pre2send = Connection( - conn.pre, - send, - synapse=None, - label=None if conn.label is None else "%s_host" % conn.label, - add_to_container=False, - ) - networks.add(pre2send, "host") - - networks.host2chip_senders[send] = receive - networks.remove(conn) - - -def split_host_to_chip(networks, conn): - dim = conn.size_out - logger.debug("Creating ChipReceiveNode for %s", conn) - receive = ChipReceiveNode( - dim * 2, - size_out=dim, - label=None if conn.label is None else "%s_node" % conn.label, - add_to_container=False, - ) - networks.add(receive, "chip") - receive2post = Connection( - receive, - conn.post, - synapse=networks.node_tau, - label=None if conn.label is None else "%s_chip" % conn.label, - add_to_container=False, - ) - networks.add(receive2post, "chip") - - logger.debug("Creating DecodeNeuron ensemble for %s", conn) - if networks.node_neurons is None: - raise BuildError( - "DecodeNeurons must be specified for host->chip connection.") - ens = networks.node_neurons.get_ensemble(dim) - ens.label = None if conn.label is None else "%s_ens" % conn.label - networks.add(ens, "host") - - if nengo_transforms is not None and isinstance( - conn.transform, nengo_transforms.Convolution): - raise BuildError( - "Conv2D transforms not supported for off-chip to " - "on-chip connections where `pre` is not a Neurons object.") - - # Scale the input spikes based on the radius of the target ensemble - seed = networks.original.seed if conn.seed is None else conn.seed - weights = sample_transform(conn, rng=np.random.RandomState(seed=seed)) - - if isinstance(conn.post_obj, Ensemble): - weights = weights / conn.post_obj.radius - - if nengo_transforms is None: - transform = weights - else: - # copy the Transform information, setting `init` to the sampled weights - transform = copy.copy(conn.transform) - type(transform).init.data[transform] = weights - - pre2ens = Connection( - conn.pre, - ens, - function=conn.function, - solver=conn.solver, - 
eval_points=conn.eval_points, - scale_eval_points=conn.scale_eval_points, - synapse=conn.synapse, - transform=transform, - label=None if conn.label is None else "%s_enc" % conn.label, - add_to_container=False, - ) - networks.add(pre2ens, "host") - - logger.debug("Creating HostSendNode for %s", conn) - send = HostSendNode( - dim * 2, - label=None if conn.label is None else "%s_send" % conn.label, - add_to_container=False, - ) - networks.add(send, "host") - ensneurons2send = Connection( - ens.neurons, - send, - synapse=None, - label=None if conn.label is None else "%s_host" % conn.label, - add_to_container=False, - ) - networks.add(ensneurons2send, "host") - networks.remove(conn) - - networks.host2chip_senders[send] = receive - - -def split_chip_to_host_connections(networks, conns): - for conn in conns: - if conn in networks: - # Already processed - continue - - pre_loc = networks.location(conn.pre_obj) - post_loc = networks.location(conn.post_obj) - # All other connections should be processed by this point - if pre_loc == "chip" and post_loc == "host": - split_chip_to_host(networks, conn) - assert conn in networks - - -def split_chip_to_host(networks, conn): - dim = conn.size_out - - logger.debug("Creating HostReceiveNode for %s", conn) - receive = HostReceiveNode( - dim, - label=None if conn.label is None else "%s_receive" % conn.label, - add_to_container=False, - ) - networks.add(receive, "host") - receive2post = Connection( - receive, - conn.post, - synapse=conn.synapse, - label=None if conn.label is None else "%s_host" % conn.label, - add_to_container=False, - ) - networks.add(receive2post, "host") - - logger.debug("Creating Probe for %s", conn) - seed = networks.original.seed if conn.seed is None else conn.seed - transform = sample_transform(conn, rng=np.random.RandomState(seed=seed)) - - probe = Probe(conn.pre, - synapse=None, - solver=conn.solver, - add_to_container=False) - networks.chip2host_params[probe] = dict( - learning_rule_type=conn.learning_rule_type, - function=conn.function, - eval_points=conn.eval_points, - scale_eval_points=conn.scale_eval_points, - transform=transform, - label=None if conn.label is None else "%s_probe" % conn.label, - ) - networks.add(probe, "chip") - networks.chip2host_receivers[probe] = receive - - if conn.learning_rule_type is not None: - if not isinstance(conn.pre_obj, Ensemble): - raise NotImplementedError( - "Learning rule presynaptic object must be an Ensemble " - "(got %r)" % type(conn.pre_obj).__name__) - networks.needs_sender[conn.learning_rule] = PESModulatoryTarget(probe) - networks.remove(conn) - - -def split_host_to_learning_rules(networks, conns): - for conn in conns: - if conn in networks: - # Already processed - continue - - pre_loc = networks.location(conn.pre_obj) - if (pre_loc == "host" - and isinstance(conn.post_obj, LearningRule)): - split_host_to_learning_rule(networks, conn) - assert conn in networks - - -def split_host_to_learning_rule(networks, conn): - dim = conn.size_out - logger.debug("Creating HostSendNode for %s", conn) - send = HostSendNode( - dim, - label=None if conn.label is None else "%s_send" % conn.label, - add_to_container=False, - ) - networks.add(send, "host") - - pre2send = Connection( - conn.pre, - send, - function=conn.function, - solver=conn.solver, - eval_points=conn.eval_points, - scale_eval_points=conn.scale_eval_points, - synapse=conn.synapse, - transform=conn.transform, - label=conn.label, - add_to_container=False, - ) - networks.add(pre2send, "host") - pes_target = networks.needs_sender[conn.post_obj] - 
networks.host2chip_senders[send] = pes_target - networks.remove(conn) - - -def place_probes(networks): - for probe in networks.original.all_probes: - target = base_obj(probe.target) - networks.move(probe, networks.location(target)) - - -def split_pre_from_host(networks): # noqa: C901 - logger.info("Splitting pre model from host") - - inputs = defaultdict(list) - outputs = defaultdict(list) - queue = [] - - for d in [networks.moves, networks.adds]: - for obj in d: - if isinstance(obj, Connection): - inputs[base_obj(obj.post_obj)].append(obj) - outputs[base_obj(obj.pre_obj)].append(obj) - elif isinstance(obj, HostSendNode): - networks.move(obj, "host_pre", force=True) + self._host_precomputable_objects = set() + + def _preclosure(self): # noqa: C901 + """Return all objects that directly or indirectly send data to the chip.""" + # Performs a "transitive closure" on all host objects that + # send data to the chip. If any of these objects receive + # output from the chip, a BuildError is raised. + precomputable = set() + + # forward and backward adjacency lists + pre_to_conn = defaultdict(list) + post_to_conn = defaultdict(list) + + # data structure for breadth-first search + queue = [] + head = 0 + + def mark_precomputable(obj): + assert isinstance(obj, (Node, Ensemble)) + if obj not in precomputable: + precomputable.add(obj) queue.append(obj) - while len(queue) > 0: - node_or_ens = queue.pop() - - for conn in inputs[node_or_ens] + outputs[node_or_ens]: - if networks.location(conn) != "host": - continue - networks.move(conn, "host_pre", force=True) - - if conn in inputs[node_or_ens]: - obj = base_obj(conn.pre_obj) - elif conn in outputs[node_or_ens]: - obj = base_obj(conn.post_obj) - - if (isinstance(obj, (Node, Ensemble)) - and networks.location(obj) == "host"): - if isinstance(obj, HostReceiveNode): + # determine which connections will actually be built + conns = ((set(self.network.all_connections) + | self.passthrough.to_add) - self.passthrough.to_remove) + + # Initialize queue with the pre objects on host->chip connections. + # We assume that all `conn.pre` objects are precomputable, and then + # raise an error later if one of them turns out to rely on chip output. + # Learning rules are not supported with precompute=True because + # this would require a hybrid simulation where some parts of the + # model interact with the host while other parts are precomputed + # ahead of time. The simulator assumes that precompute=True means + # there is no interaction between host and chip from one time step + # to the next. + # Also see issue #214. 
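+ # As a hypothetical illustration of the closure computed below, consider
+ #     stim = nengo.Node(np.sin)      # runs on the host
+ #     ens = nengo.Ensemble(100, 1)   # runs on the chip
+ #     out = nengo.Node(size_in=1)    # runs on the host
+ #     nengo.Connection(stim, ens)
+ #     nengo.Connection(ens, out)
+ # `stim` drives a host->chip connection, so it seeds the queue and is
+ # marked precomputable; `out` is never reached from `stim`, so it is
+ # not precomputable. If the model also contained
+ #     nengo.Connection(out, ens)
+ # then `out` would be queued as well, and the backward traversal would
+ # find that its input comes from `ens` (on-chip), raising a BuildError.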
+ for conn in conns: + pre, post = base_obj(conn.pre), base_obj(conn.post) + pre_to_conn[pre].append(conn) + post_to_conn[post].append(conn) + assert pre not in self.passthrough.to_remove + assert post not in self.passthrough.to_remove + + if (isinstance(post, LearningRule) + or conn.learning_rule is not None): + raise BuildError("precompute=True not supported when using " + "learning rules") + + if self.on_chip(post) and not self.on_chip(pre): + mark_precomputable(pre) + + # traverse all connected objects breadth-first + while head < len(queue): + node_or_ens = queue[head] + head += 1 + + # handle forward adjacencies + for conn in pre_to_conn[node_or_ens]: + assert base_obj(conn.pre) is node_or_ens + post = base_obj(conn.post) + if not self.on_chip(post): + mark_precomputable(post) + + # handle backward adjacencies + for conn in post_to_conn[node_or_ens]: + assert base_obj(conn.post) is node_or_ens + pre = base_obj(conn.pre) + if self.on_chip(pre): raise BuildError("Cannot precompute input, " "as it is dependent on output") - networks.move(obj, "host_pre", force=True) - queue.append(obj) + mark_precomputable(pre) + + return precomputable + + def is_precomputable(self, obj): + if isinstance(obj, Probe): + obj = base_obj(obj.target) + return (not self.on_chip(obj) + and obj in self._host_precomputable_objects) + + def on_chip(self, obj): + if isinstance(obj, Probe): + obj = base_obj(obj.target) + if not isinstance(obj, (Ensemble, Node)): + raise BuildError("Locations are only established for ensembles, " + "nodes, and probes -- not for %r" % (obj,)) + if obj not in self._seen_objects: + raise BuildError("Object (%r) is not a part of the network" + % (obj,)) + return obj in self._chip_objects diff --git a/nengo_loihi/tests/test_conv.py b/nengo_loihi/tests/test_conv.py index 960f0d025..993669f04 100644 --- a/nengo_loihi/tests/test_conv.py +++ b/nengo_loihi/tests/test_conv.py @@ -697,3 +697,28 @@ def test_conv_round_robin_unsupported(Simulator, seed): hardware_options={'allocator': RoundRobin(n_chips=8)}, precompute=True): pass + + +@pytest.mark.skipif(nengo_transforms is None, + reason="Requires new nengo.transforms") +def test_conv_non_lowpass(Simulator): + k = 10 + d = 5 + with nengo.Network() as model: + a = nengo.Ensemble(n_neurons=k**2, dimensions=k) + + x = nengo.Ensemble(n_neurons=d, dimensions=d, + gain=np.ones(d), bias=np.ones(d)) + + conv = nengo.Convolution( + n_filters=d, input_shape=(k, k, 1), + strides=(1, 1), kernel_size=(k, k)) + assert conv.size_in == k**2 + assert conv.size_out == d + + nengo.Connection(a.neurons, x.neurons, transform=conv, + synapse=nengo.Alpha(0.005)) + + with pytest.raises(NotImplementedError, match="non-Lowpass synapses"): + with Simulator(model): + pass diff --git a/nengo_loihi/tests/test_examples.py b/nengo_loihi/tests/test_examples.py index d7504dac1..74cff0500 100644 --- a/nengo_loihi/tests/test_examples.py +++ b/nengo_loihi/tests/test_examples.py @@ -5,7 +5,7 @@ from nengo.utils.stdlib import execfile try: from nengo.utils.ipython import iter_cells, load_notebook -except ImportError as err: +except ImportError: def iter_cells(nb, cell_type="code"): return (cell for cell in nb.cells if cell.cell_type == cell_type) diff --git a/nengo_loihi/tests/test_inputs.py b/nengo_loihi/tests/test_inputs.py deleted file mode 100644 index d50e5ccf5..000000000 --- a/nengo_loihi/tests/test_inputs.py +++ /dev/null @@ -1,14 +0,0 @@ -import nengo -from nengo.exceptions import SimulationError -import pytest - -from nengo_loihi.inputs import ChipReceiveNode - - -def 
test_chipreceivenode_run_error(): - with nengo.Network() as net: - ChipReceiveNode(dimensions=1, size_out=1) - - with pytest.raises(SimulationError, match="should not be run"): - with nengo.Simulator(net) as sim: - sim.step() diff --git a/nengo_loihi/tests/test_learning.py b/nengo_loihi/tests/test_learning.py index 07a348e8f..b9cf562b7 100644 --- a/nengo_loihi/tests/test_learning.py +++ b/nengo_loihi/tests/test_learning.py @@ -91,14 +91,14 @@ def test_pes_comm_channel(dims, allclose, plt, seed, Simulator): x_loihi = loihi_sim.data[probes['pre']] assert allclose(x_loihi[pre_tmask], y_dpre[pre_tmask], - atol=0.1, rtol=0.05) + atol=0.15, rtol=0.05) assert allclose(y_loihi[post_tmask], y_dpost[post_tmask], - atol=0.1, rtol=0.05) + atol=0.15, rtol=0.05) assert allclose(y_loihi, y_nengo, atol=0.2, rtol=0.2) assert allclose(y_real[post_tmask], y_dpost[post_tmask], - atol=0.1, rtol=0.05) + atol=0.15, rtol=0.05) assert allclose(y_real, y_nengo, atol=0.2, rtol=0.2) diff --git a/nengo_loihi/tests/test_passthrough.py b/nengo_loihi/tests/test_passthrough.py index 6f31c8a45..0742e8b5c 100644 --- a/nengo_loihi/tests/test_passthrough.py +++ b/nengo_loihi/tests/test_passthrough.py @@ -3,11 +3,9 @@ import numpy as np import pytest -import nengo_loihi from nengo_loihi.compat import transform_array from nengo_loihi.decode_neurons import OnOffDecodeNeurons -from nengo_loihi.inputs import ChipReceiveNode -from nengo_loihi.splitter import split +from nengo_loihi.passthrough import PassthroughSplit default_node_neurons = OnOffDecodeNeurons() @@ -24,32 +22,20 @@ def test_passthrough_placement(): g = nengo.Node(None, size_in=1) # should be off-chip nengo.Connection(stim, a) nengo.Connection(a, b) - nengo.Connection(b, c) - nengo.Connection(c, d) - nengo.Connection(d, e) - nengo.Connection(e, f) + conn_bc = nengo.Connection(b, c) + conn_cd = nengo.Connection(c, d) + conn_de = nengo.Connection(d, e) + conn_ef = nengo.Connection(e, f) nengo.Connection(f, g) nengo.Probe(g) - nengo_loihi.add_params(model) - networks = split(model, - precompute=False, - node_neurons=default_node_neurons, - node_tau=0.005, - remove_passthrough=True) - chip = networks.chip - host = networks.host - - assert a in host.nodes - assert a not in chip.nodes - assert c not in host.nodes - assert c not in chip.nodes - assert d not in host.nodes - assert d not in chip.nodes - assert e not in host.nodes - assert e not in chip.nodes - assert g in host.nodes - assert g not in chip.nodes + split = PassthroughSplit(model, ignore={stim}) + + assert split.to_remove == {c, d, e, conn_bc, conn_cd, conn_de, conn_ef} + assert len(split.to_add) == 1 + conn = next(iter(split.to_add)) + assert conn.pre is b + assert conn.post is f @pytest.mark.parametrize("d1", [1, 3]) @@ -64,19 +50,15 @@ def test_transform_merging(d1, d2, d3): t1 = np.random.uniform(-1, 1, (d2, d1)) t2 = np.random.uniform(-1, 1, (d3, d2)) - nengo.Connection(a, b, transform=t1) - nengo.Connection(b, c, transform=t2) + conn_ab = nengo.Connection(a, b, transform=t1) + conn_bc = nengo.Connection(b, c, transform=t2) + + split = PassthroughSplit(model) - nengo_loihi.add_params(model) - networks = split(model, - precompute=False, - node_neurons=default_node_neurons, - node_tau=0.005, - remove_passthrough=True) - chip = networks.chip + assert split.to_remove == {b, conn_ab, conn_bc} - assert len(chip.connections) == 1 - conn = chip.connections[0] + assert len(split.to_add) == 1 + conn = next(iter(split.to_add)) assert np.allclose(transform_array(conn.transform), np.dot(t2, t1)) @@ -88,22 +70,13 @@ def 
test_identity_array(n_ensembles, ens_dimensions): b = nengo.networks.EnsembleArray(10, n_ensembles, ens_dimensions) nengo.Connection(a.output, b.input) - nengo_loihi.add_params(model) - networks = split(model, - precompute=False, - node_neurons=default_node_neurons, - node_tau=0.005, - remove_passthrough=True) + split = PassthroughSplit(model) - # ignore the a.input -> a.ensemble connections - connections = [conn for conn in networks.chip.connections - if not (isinstance(conn.pre_obj, ChipReceiveNode) - and conn.post_obj in a.ensembles)] + assert len(split.to_add) == n_ensembles - assert len(connections) == n_ensembles pre = set() post = set() - for conn in connections: + for conn in split.to_add: assert conn.pre in a.all_ensembles or conn.pre_obj is a.input assert conn.post in b.all_ensembles assert np.allclose(transform_array(conn.transform), @@ -123,21 +96,12 @@ def test_full_array(n_ensembles, ens_dimensions): D = n_ensembles * ens_dimensions nengo.Connection(a.output, b.input, transform=np.ones((D, D))) - nengo_loihi.add_params(model) - networks = split(model, - precompute=False, - node_neurons=default_node_neurons, - node_tau=0.005, - remove_passthrough=True) + split = PassthroughSplit(model) - # ignore the a.input -> a.ensemble connections - connections = [conn for conn in networks.chip.connections - if not (isinstance(conn.pre_obj, ChipReceiveNode) - and conn.post_obj in a.ensembles)] + assert len(split.to_add) == n_ensembles ** 2 - assert len(connections) == n_ensembles ** 2 pairs = set() - for conn in connections: + for conn in split.to_add: assert conn.pre in a.all_ensembles assert conn.post in b.all_ensembles assert np.allclose(transform_array(conn.transform), @@ -158,26 +122,17 @@ def test_synapse_merging(Simulator, seed): nengo.Connection(b[1], c.input[0], synapse=None) nengo.Connection(b[1], c.input[1], synapse=0.2) - nengo_loihi.add_params(model) - networks = split(model, - precompute=False, - node_neurons=default_node_neurons, - node_tau=0.005, - remove_passthrough=True) + split = PassthroughSplit(model) - # ignore the a.input -> a.ensemble connections - connections = [conn for conn in networks.chip.connections - if not (isinstance(conn.pre_obj, ChipReceiveNode) - and conn.post_obj in a.ensembles)] + assert len(split.to_add) == 4 - assert len(connections) == 4 desired_filters = { ('0', '0'): None, ('0', '1'): 0.2, ('1', '0'): 0.1, ('1', '1'): 0.3, } - for conn in connections: + for conn in split.to_add: if desired_filters[(conn.pre.label, conn.post.label)] is None: assert conn.synapse is None else: @@ -186,9 +141,13 @@ def test_synapse_merging(Simulator, seed): conn.synapse.tau, desired_filters[(conn.pre.label, conn.post.label)]) - # check that model builds/runs correctly - with Simulator(model, remove_passthrough=True) as sim: - sim.step() + # check that model builds/runs, and issues the warning + with pytest.warns(UserWarning) as record: + with Simulator(model, remove_passthrough=True) as sim: + sim.step() + + assert any("Combining two Lowpass synapses" in r.message.args[0] + for r in record) def test_no_input(Simulator, seed, allclose): @@ -268,7 +227,7 @@ def make_net(learn_error=False, loop=False): return net, probes - # Since `convert_passthroughs` catches its own cluster errors, we won't see + # Since `PassthroughSplit` catches its own cluster errors, we won't see # the error here. We ensure identical behaviour (so nodes are not removed). 
# Test learning rule node input diff --git a/nengo_loihi/tests/test_simulator.py b/nengo_loihi/tests/test_simulator.py index b9e1e2d96..d5068db1c 100644 --- a/nengo_loihi/tests/test_simulator.py +++ b/nengo_loihi/tests/test_simulator.py @@ -1,7 +1,8 @@ import inspect import nengo -from nengo.exceptions import ReadonlyError, ValidationError, SimulationError +from nengo.exceptions import ( + BuildError, ReadonlyError, SimulationError, ValidationError) import numpy as np import pytest @@ -15,6 +16,11 @@ from nengo_loihi.inputs import SpikeInput +def test_none_network(Simulator): + with pytest.raises(ValidationError, match="network parameter"): + Simulator(None) + + def test_model_validate_notempty(Simulator): with nengo.Network() as model: nengo_loihi.add_params(model) @@ -22,10 +28,15 @@ def test_model_validate_notempty(Simulator): a = nengo.Ensemble(10, 1) model.config[a].on_chip = False - with pytest.raises(nengo.exceptions.BuildError): + assert nengo.rc.getboolean("decoder_cache", "enabled") + + with pytest.raises(BuildError, match="No neurons marked"): with Simulator(model): pass + # Ensure the cache config was not changed + assert nengo.rc.getboolean("decoder_cache", "enabled") + @pytest.mark.parametrize("precompute", [True, False]) def test_probedict_fallbacks(precompute, Simulator): @@ -337,11 +348,11 @@ def test_tau_s_warning(Simulator): with pytest.warns(UserWarning) as record: with Simulator(net): pass - # The 0.001 synapse is applied first due to splitting rules putting - # the stim -> ens connection later than the ens -> ens connection + assert any(rec.message.args[0] == ( - "tau_s is currently 0.001, which is smaller than 0.005. " - "Overwriting tau_s with 0.005.") for rec in record) + "tau_s is already set to 0.005, which is larger than 0.001. " + "Using 0.005." + ) for rec in record) with net: nengo.Connection(ens, ens, @@ -350,9 +361,10 @@ def test_tau_s_warning(Simulator): with pytest.warns(UserWarning) as record: with Simulator(net): pass + assert any(rec.message.args[0] == ( - "tau_s is already set to 0.1, which is larger than 0.005. Using 0.1." - ) for rec in record) + "tau_s is currently 0.005, which is smaller than 0.1. 
" + "Overwriting tau_s with 0.1.") for rec in record) @pytest.mark.xfail(nengo.version.version_info <= (2, 8, 0), @@ -362,63 +374,44 @@ def test_seeds(precompute, Simulator, seed): with nengo.Network(seed=seed) as net: nengo_loihi.add_params(net) - e0 = nengo.Ensemble(1, 1) - e1 = nengo.Ensemble(1, 1, seed=2) - e2 = nengo.Ensemble(1, 1) + e0 = nengo.Ensemble(1, 1, label="e0") + e1 = nengo.Ensemble(1, 1, seed=2, label="e1") + e2 = nengo.Ensemble(1, 1, label="e2") net.config[e2].on_chip = False nengo.Connection(e0, e1) nengo.Connection(e0, e2) with nengo.Network(): n = nengo.Node(0) - e = nengo.Ensemble(1, 1) + e = nengo.Ensemble(1, 1, label="e") nengo.Node(1) nengo.Connection(n, e) nengo.Probe(e) with nengo.Network(seed=8): - nengo.Ensemble(8, 1, seed=3) + nengo.Ensemble(8, 1, seed=3, label="unnamed") nengo.Node(1) + def get_seed(sim, obj): + return sim.model.seeds.get( + obj, sim.model.host.seeds.get( + obj, sim.model.host_pre.seeds.get(obj, None))) + # --- test that seeds are the same as nengo ref simulator ref = nengo.Simulator(net) with Simulator(net, precompute=precompute) as sim: for obj in net.all_objects: - on_chip = (not isinstance(obj, nengo.Node) and ( - not isinstance(obj, nengo.Ensemble) - or net.config[obj].on_chip)) - - seed = sim.model.seeds.get(obj, None) - assert seed is None or seed == ref.model.seeds[obj] - if on_chip: - assert seed is not None - if obj in sim.model.seeded: - assert sim.model.seeded[obj] == ref.model.seeded[obj] - - if precompute: - seed0 = sim.sims["host_pre"].model.seeds.get(obj, None) - assert seed0 is None or seed0 == ref.model.seeds[obj] - seed1 = sim.sims["host"].model.seeds.get(obj, None) - assert seed1 is None or seed1 == ref.model.seeds[obj] - else: - seed0 = sim.sims["host"].model.seeds.get(obj, None) - assert seed0 is None or seed0 == ref.model.seeds[obj] - seed1 = None - - if not on_chip: - assert seed0 is not None or seed1 is not None + assert get_seed(sim, obj) == ref.model.seeds.get(obj, None) # --- test that seeds that we set are preserved after splitting model = nengo_loihi.builder.Model() - for i, o in enumerate(net.all_objects): - model.seeds[o] = i + for i, obj in enumerate(net.all_objects): + model.seeds[obj] = i with Simulator(net, model=model, precompute=precompute) as sim: - for i, o in enumerate(net.all_objects): - for name, subsim in sim.sims.items(): - if name.startswith("host"): - assert subsim.model.seeds[o] == i + for i, obj in enumerate(net.all_objects): + assert get_seed(sim, obj) == i def test_interface(Simulator, allclose): @@ -634,8 +627,6 @@ def test_population_input(request, allclose): assert allclose(z[[1, 3, 5]], weights[0], atol=4e-2, rtol=0) -@pytest.mark.skipif(pytest.config.getoption("--target") != "loihi", - reason="Loihi only test") def test_precompute(allclose, Simulator, seed, plt): simtime = 0.2 @@ -671,9 +662,43 @@ def test_precompute(allclose, Simulator, seed, plt): plt.plot(sim2.trange(), sim2.data[p_out]) plt.title('precompute=True') + # check that each is using the right placement + assert stim in sim1.model.host.params + assert stim not in sim1.model.host_pre.params + assert stim not in sim2.model.host.params + assert stim in sim2.model.host_pre.params + + assert p_stim not in sim1.model.params + assert p_stim in sim1.model.host.params + assert p_stim not in sim1.model.host_pre.params + + assert p_stim not in sim2.model.params + assert p_stim not in sim2.model.host.params + assert p_stim in sim2.model.host_pre.params + + for sim in (sim1, sim2): + assert a in sim.model.params + assert a not in 
sim.model.host.params + assert a not in sim.model.host_pre.params + + assert output not in sim.model.params + assert output in sim.model.host.params + assert output not in sim.model.host_pre.params + + assert p_a in sim.model.params + assert p_a not in sim.model.host.params + assert p_a not in sim.model.host_pre.params + + assert p_out not in sim.model.params + assert p_out in sim.model.host.params + assert p_out not in sim.model.host_pre.params + assert np.array_equal(sim1.data[p_stim], sim2.data[p_stim]) - assert allclose(sim1.data[p_a], sim2.data[p_a], atol=0.2) - assert allclose(sim1.data[p_out], sim2.data[p_out], atol=0.2) + assert sim1.target == sim2.target + + # precompute should not make a difference in outputs + assert allclose(sim1.data[p_a], sim2.data[p_a]) + assert allclose(sim1.data[p_out], sim2.data[p_out]) @pytest.mark.skipif(pytest.config.getoption("--target") != "loihi", @@ -726,3 +751,105 @@ def test_input_node_precompute(allclose, Simulator, plt): plt.legend(loc='best') assert allclose(x['sim'], x['loihi'], atol=0.1, rtol=0.01) + + +@pytest.mark.parametrize("remove_passthrough", [True, False]) +def test_simulator_passthrough(remove_passthrough, Simulator): + with nengo.Network() as model: + host_input = nengo.Node(0) + host_a = nengo.Node(size_in=1) + host_b = nengo.Node(size_in=1) + + chip_x = nengo.Ensemble(10, 1) + remove_c = nengo.Node(size_in=1) + chip_y = nengo.Ensemble(10, 1) + + host_d = nengo.Node(size_in=1) + + conn_input_a = nengo.Connection(host_input, host_a) + conn_a_b = nengo.Connection(host_a, host_b) + conn_b_x = nengo.Connection(host_b, chip_x) + conn_x_c = nengo.Connection(chip_x, remove_c) + conn_c_y = nengo.Connection(remove_c, chip_y) + conn_y_d = nengo.Connection(chip_y, host_d) + + probe_y = nengo.Probe(chip_y) + probe_d = nengo.Probe(host_d) + + with Simulator(model, remove_passthrough=remove_passthrough) as sim: + pass + + assert host_input in sim.model.host.params + assert probe_d in sim.model.host.params + + assert chip_x in sim.model.params + assert chip_y in sim.model.params + assert probe_y in sim.model.params + + # Passthrough nodes are not removed on the host + assert host_a in sim.model.host.params + assert host_b in sim.model.host.params + assert host_d in sim.model.host.params + assert conn_input_a in sim.model.host.params + assert conn_a_b in sim.model.host.params + + if remove_passthrough: + assert remove_c not in sim.model.host.params + else: + assert remove_c in sim.model.host.params + + # These connections currently aren't built in either case + for model in (sim.model, sim.model.host): + assert conn_b_x not in model.params + assert conn_x_c not in model.params + assert conn_c_y not in model.params + assert conn_y_d not in model.params + + +def test_slicing_bugs(Simulator, seed): + + n = 50 + with nengo.Network() as model: + a = nengo.Ensemble(n, 1, label="a") + p0 = nengo.Probe(a[0]) + p = nengo.Probe(a) + + with Simulator(model) as sim: + sim.run(0.1) + + assert np.allclose(sim.data[p0], sim.data[p]) + assert a in sim.model.params + assert a not in sim.model.host.params + + with nengo.Network() as model: + nengo_loihi.add_params(model) + + a = nengo.Ensemble(n, 1, label="a") + + b0 = nengo.Ensemble(n, 1, label="b0", seed=seed) + model.config[b0].on_chip = False + nengo.Connection(a[0], b0) + + b = nengo.Ensemble(n, 1, label="b", seed=seed) + model.config[b].on_chip = False + nengo.Connection(a, b) + + p0 = nengo.Probe(b0) + p = nengo.Probe(b) + + with Simulator(model) as sim: + sim.run(0.1) + + assert np.allclose(sim.data[p0], 
sim.data[p]) + assert a in sim.model.params + assert a not in sim.model.host.params + assert b not in sim.model.params + assert b in sim.model.host.params + + +def test_network_unchanged(Simulator): + with nengo.Network() as model: + nengo.Ensemble(100, 1) + with Simulator(model): + pass + assert model.all_networks == [] diff --git a/nengo_loihi/tests/test_splitter.py b/nengo_loihi/tests/test_splitter.py index c0a483366..d02b8f807 100644 --- a/nengo_loihi/tests/test_splitter.py +++ b/nengo_loihi/tests/test_splitter.py @@ -1,99 +1,40 @@ -from distutils.version import LooseVersion - import pytest import nengo from nengo.exceptions import BuildError import numpy as np -from nengo_loihi.decode_neurons import OnOffDecodeNeurons from nengo_loihi.config import add_params -from nengo_loihi.inputs import ( - ChipReceiveNeurons, - ChipReceiveNode, - HostReceiveNode, - HostSendNode, -) -from nengo_loihi.splitter import ( - PESModulatoryTarget, - place_ensembles, - place_internetwork_connections, - place_nodes, - place_probes, - SplitNetworks, - split, - split_chip_to_host, - split_host_neurons_to_chip, - split_host_to_chip, - split_host_to_learning_rules, - split_pre_from_host, -) - -default_node_neurons = OnOffDecodeNeurons() - - -@pytest.mark.parametrize("pre_dims", [1, 3]) -@pytest.mark.parametrize("post_dims", [1, 3]) -@pytest.mark.parametrize("learn", [True, False]) -@pytest.mark.parametrize("use_solver", [True, False]) -def test_manual_decoders( - seed, Simulator, pre_dims, post_dims, learn, use_solver): - - with nengo.Network(seed=seed) as model: - pre = nengo.Ensemble(50, dimensions=pre_dims, - gain=np.ones(50), - bias=np.ones(50) * 5) - post = nengo.Node(size_in=post_dims) - - learning_rule_type = nengo.PES() if learn else None - weights = np.zeros((post_dims, 50)) - if use_solver: - conn = nengo.Connection(pre, post, - function=lambda x: np.zeros(post_dims), - learning_rule_type=learning_rule_type, - solver=nengo.solvers.NoSolver(weights.T)) - else: - conn = nengo.Connection(pre.neurons, post, - learning_rule_type=learning_rule_type, - transform=weights) - - if learn: - error = nengo.Node(np.zeros(post_dims)) - nengo.Connection(error, conn.learning_rule) - - pre_probe = nengo.Probe(pre.neurons, synapse=None) - post_probe = nengo.Probe(post, synapse=None) - - if not use_solver and learn: - with pytest.raises(NotImplementedError): - with Simulator(model) as sim: - pass - else: - with Simulator(model) as sim: - sim.run(0.1) - - # Ensure pre population has a lot of activity - assert np.mean(sim.data[pre_probe]) > 100 - # But that post has no activity due to the zero weights - assert np.all(sim.data[post_probe] == 0) +from nengo_loihi.splitter import Split def test_place_nodes(): + # all nodes go on the host + # ChipReceiveNodes and HostSendNodes are created later by the builder + with nengo.Network() as net: + add_params(net) offchip1 = nengo.Node(0) with nengo.Network(): offchip2 = nengo.Node(np.sin) - offchip3 = HostSendNode(dimensions=1) - onchip = ChipReceiveNode(dimensions=1, size_out=1) + ensemble = nengo.Ensemble(100, 1) + offchip3 = nengo.Node(size_in=1) + nengo.Connection(ensemble, offchip3) + + with nengo.Network(): + nowhere = nengo.Node(0) - networks = SplitNetworks(net, node_neurons=default_node_neurons) - place_nodes(networks) - assert networks.moves[offchip1] == "host" - assert networks.moves[offchip2] == "host" - assert networks.moves[offchip3] == "host" - assert networks.moves[onchip] == "chip" + split = Split(net) + assert not split.on_chip(offchip1) + assert not 
split.on_chip(offchip2) + assert not split.on_chip(offchip3) + + with pytest.raises(BuildError, match="not a part of the network"): + split.on_chip(nowhere) def test_place_ensembles(): + # builder will move the learning stuff onto the host + with nengo.Network() as net: add_params(net) offchip = nengo.Ensemble(10, 1, label="offchip") @@ -108,352 +49,159 @@ def test_place_ensembles(): conn = nengo.Connection(pre, post, learning_rule_type=nengo.PES()) nengo.Connection(error, conn.learning_rule) - networks = SplitNetworks(net, node_neurons=default_node_neurons) - place_ensembles(networks) - assert networks.moves[offchip] == "host" - assert networks.moves[direct] == "host" - assert networks.moves[onchip] == "chip" - assert networks.moves[pre] == "chip" - assert networks.moves[post] == "host" - assert networks.moves[error] == "host" + split = Split(net) + assert not split.on_chip(offchip) + assert not split.on_chip(direct) + assert split.on_chip(onchip) + assert split.on_chip(pre) + assert not split.on_chip(post) + assert not split.on_chip(error) + + for obj in net.all_ensembles + net.all_nodes: + assert not split.is_precomputable(obj) + with pytest.raises(BuildError, match="Locations are only established"): + split.on_chip(conn) -def test_place_inter_network_connection(): + +def test_place_internetwork_connections(): with nengo.Network() as net: + add_params(net) offchip = nengo.Ensemble(10, 1) + net.config[offchip].on_chip = False onchip = nengo.Ensemble(10, 1) + onon = nengo.Connection(onchip, onchip) onoff = nengo.Connection(onchip, offchip) offon = nengo.Connection(offchip, onchip) offoff = nengo.Connection(offchip, offchip) - networks = SplitNetworks(net, node_neurons=default_node_neurons) - networks.move(onchip, "chip") - networks.move(offchip, "host") - - place_internetwork_connections(networks, networks.original.all_connections) - assert onoff not in networks - assert offon not in networks - assert networks.location(onon) == "chip" - assert networks.location(offoff) == "host" - - -def test_split_host_neurons_to_chip(): - with nengo.Network() as net: - offchip = nengo.Ensemble(10, 1) - onchip = nengo.Ensemble(10, 1) - neurons2neurons = nengo.Connection( - offchip.neurons, onchip.neurons, transform=np.ones((10, 10))) - neurons2ensemble = nengo.Connection( - offchip.neurons, onchip, transform=np.ones((1, 10))) - - networks = SplitNetworks(net, node_neurons=default_node_neurons) - networks.move(offchip, "host") - networks.move(onchip, "chip") - - def assert_split_correctly(split_conn): - assert len(networks.adds) == 4 - added_types = sorted([(type(obj).__name__, location) - for obj, location in networks.adds.items()]) - assert added_types == [ - ("ChipReceiveNeurons", "chip"), - ("Connection", "chip"), - ("Connection", "host"), - ("HostSendNode", "host"), - ] - assert split_conn in networks.removes - - send = next(obj for obj in networks.adds - if isinstance(obj, HostSendNode)) - receive = next(obj for obj in networks.adds - if isinstance(obj, ChipReceiveNeurons)) - assert networks.host2chip_senders[send] is receive - - split_host_neurons_to_chip(networks, neurons2neurons) - assert_split_correctly(neurons2neurons) - networks.adds.clear() # Makes testing subsequent adds easier - split_host_neurons_to_chip(networks, neurons2ensemble) - assert_split_correctly(neurons2ensemble) + split = Split(net) + assert split.on_chip(onon.pre) + assert split.on_chip(onon.post) -def test_split_host_to_chip(): - with nengo.Network() as net: - ens_offchip = nengo.Ensemble(10, 1) - node_offchip = 
nengo.Node(np.sin) - ens_onchip = nengo.Ensemble(10, 1) - connections = [ - nengo.Connection(ens_offchip, ens_onchip), - nengo.Connection(node_offchip, ens_onchip), - nengo.Connection( - ens_offchip, ens_onchip.neurons, transform=np.ones((10, 1))), - nengo.Connection( - node_offchip, ens_onchip.neurons, transform=np.ones((10, 1))), - ] - - networks = SplitNetworks(net, node_neurons=default_node_neurons) - networks.move(ens_offchip, "host") - networks.move(node_offchip, "host") - networks.move(ens_onchip, "chip") - - for conn in connections: - split_host_to_chip(networks, conn) - for added in networks.adds: - if isinstance(added, nengo.Ensemble): - ens = added - elif isinstance(added, ChipReceiveNode): - receive = added - elif isinstance(added, HostSendNode): - send = added - # Otherwise must be connection - elif added.pre is conn.pre: - pre2ens = added - elif added.post is conn.post: - receive2post = added - else: - ensneurons2send = added - - assert networks.location(ens) == "host" - assert isinstance(ens.neuron_type, nengo.SpikingRectifiedLinear) - assert pre2ens.post is ens - - assert networks.location(receive) == "chip" - assert networks.location(receive2post) == "chip" - assert receive2post.pre is receive - - assert networks.location(send) == "host" - assert networks.location(ensneurons2send) == "host" - assert ensneurons2send.pre == ens.neurons - assert ensneurons2send.post is send - - assert conn in networks.removes - networks.adds.clear() # makes next loop iteration easier - - -def test_split_no_node_neuron_error(): - with nengo.Network() as net: - add_params(net) - node_offchip = nengo.Node(np.sin) - ens_onchip = nengo.Ensemble(10, 1) - nengo.Connection(node_offchip, ens_onchip) - - with pytest.raises(BuildError, match="DecodeNeurons"): - split(net, precompute=False, node_neurons=None, node_tau=None) + assert split.on_chip(onoff.pre) + assert not split.on_chip(onoff.post) + assert not split.on_chip(offon.pre) + assert split.on_chip(offon.post) -def test_split_chip_to_host(): - with nengo.Network() as net: - ens_onchip = nengo.Ensemble(10, 1) - ens_offchip = nengo.Ensemble(10, 1) - node_offchip = nengo.Node(size_in=1) - connections = [ - nengo.Connection(ens_onchip, ens_offchip), - nengo.Connection( - ens_onchip, ens_offchip, learning_rule_type=nengo.PES()), - nengo.Connection(ens_onchip, node_offchip), - nengo.Connection( - ens_onchip.neurons, ens_offchip, transform=np.ones((1, 10))), - nengo.Connection( - ens_onchip.neurons, node_offchip, transform=np.ones((1, 10))), - ] - connections.append( - nengo.Connection(ens_onchip, connections[1].learning_rule) - ) - - networks = SplitNetworks(net, node_neurons=default_node_neurons) - networks.move(ens_onchip, "chip") - networks.move(ens_offchip, "host") - networks.move(node_offchip, "host") - - for conn in connections: - split_chip_to_host(networks, conn) - for added in networks.adds: - if isinstance(added, HostReceiveNode): - receive = added - elif isinstance(added, nengo.Probe): - probe = added - else: - assert added.post is conn.post - receive2post = added - - assert networks.location(receive) == "host" - assert networks.location(receive2post) == "host" - assert receive2post.pre is receive - - assert networks.location(probe) == "chip" - assert probe.target is conn.pre or probe.target is conn.pre.ensemble - assert probe.synapse is None - assert probe in networks.chip2host_params - assert probe in networks.chip2host_receivers - assert networks.chip2host_receivers[probe] is receive - if conn.learning_rule_type is not None: - assert 
conn.learning_rule in networks.needs_sender - assert isinstance(networks.needs_sender[conn.learning_rule], - PESModulatoryTarget) - - assert conn in networks.removes - networks.adds.clear() # makes next loop iteration easier + assert not split.on_chip(offoff.pre) + assert not split.on_chip(offoff.post) def test_split_host_to_learning_rule(): with nengo.Network() as net: + add_params(net) pre = nengo.Ensemble(10, 1, label="pre") post = nengo.Ensemble(10, 1, label="post") err_onchip = nengo.Ensemble(10, 1, label="err_onchip") err_offchip = nengo.Ensemble(10, 1, label="err_offchip") + net.config[err_offchip].on_chip = False ens_conn = nengo.Connection(pre, post, learning_rule_type=nengo.PES()) neurons_conn = nengo.Connection(pre.neurons, post.neurons, learning_rule_type=nengo.PES()) - on2on_ens = nengo.Connection(err_onchip, ens_conn.learning_rule) - on2on_neurons = nengo.Connection( + nengo.Connection(err_onchip, ens_conn.learning_rule) + nengo.Connection( err_onchip, neurons_conn.learning_rule) - off2on_ens = nengo.Connection(err_offchip, ens_conn.learning_rule) - off2on_neurons = nengo.Connection( + nengo.Connection(err_offchip, ens_conn.learning_rule) + nengo.Connection( err_offchip, neurons_conn.learning_rule) - networks = SplitNetworks(net, node_neurons=default_node_neurons) - networks.move(pre, "chip") - networks.move(post, "chip") - networks.move(err_onchip, "chip") - networks.move(err_offchip, "host") - networks.move(ens_conn, "chip") - networks.move(neurons_conn, "chip") - networks.needs_sender[ens_conn.learning_rule] = "ens_pes_target" - networks.needs_sender[neurons_conn.learning_rule] = "neurons_pes_target" - - split_host_to_learning_rules(networks, networks.original.all_connections) - assert on2on_ens not in networks - assert on2on_neurons not in networks - assert sorted([type(obj).__name__ for obj in networks.adds]) == [ - "Connection", "Connection", "HostSendNode", "HostSendNode", - ] - assert off2on_ens in networks.removes - assert "ens_pes_target" in list(networks.host2chip_senders.values()) - assert off2on_neurons in networks.removes - assert "neurons_pes_target" in list(networks.host2chip_senders.values()) + split = Split(net) + + assert split.on_chip(pre) + assert not split.on_chip(post) + + assert not split.on_chip(err_onchip) + assert not split.on_chip(err_offchip) + + +def test_precompute_host_to_learning_rule_unsupported(): + with nengo.Network() as net: + add_params(net) + + pre = nengo.Ensemble(10, 1, label="pre") + post = nengo.Ensemble(10, 1, label="post") + nengo.Connection(pre, post, learning_rule_type=nengo.PES()) + + with pytest.raises(BuildError, match="learning rules"): + Split(net, precompute=True) def test_place_probes(): with nengo.Network() as net: + add_params(net) offchip1 = nengo.Node(0) with nengo.Network(): onchip1 = nengo.Ensemble(10, 1) offchip2 = nengo.Ensemble(10, 1) + net.config[offchip2].on_chip = False onchip2 = nengo.Ensemble(10, 1) - onchip3 = nengo.Connection(onchip1, onchip2) - offchip3 = nengo.Connection(offchip1, offchip2) + nengo.Connection(onchip1, onchip2) + nengo.Connection(offchip1, offchip2) offchip_probes = [ nengo.Probe(offchip1), nengo.Probe(offchip2), - nengo.Probe(offchip3), ] onchip_probes = [ nengo.Probe(onchip1), nengo.Probe(onchip2), - nengo.Probe(onchip3), ] - networks = SplitNetworks(net, node_neurons=default_node_neurons) - for obj in [offchip1, offchip2, offchip3]: - networks.move(obj, "host") - for obj in [onchip1, onchip2, onchip3]: - networks.move(obj, "chip") - place_probes(networks) - assert 
all(networks.location(p) == "host" for p in offchip_probes) - assert all(networks.location(p) == "chip" for p in onchip_probes) + split = Split(net) + assert split.on_chip(onchip1) + assert split.on_chip(onchip2) + assert not split.on_chip(offchip1) + assert not split.on_chip(offchip2) + assert not any(split.on_chip(p) for p in offchip_probes) + assert all(split.on_chip(p) for p in onchip_probes) def test_split_pre_from_host(): with nengo.Network() as net: + add_params(net) pre_1 = nengo.Node(0, label="pre_1") pre_2 = nengo.Ensemble(10, 1, label="pre_2") pre_3 = nengo.Node(size_in=1, label="pre_3") pre_4 = nengo.Ensemble(1, 1, label="pre_4") - send = HostSendNode(dimensions=1) + pre_5 = nengo.Probe(pre_4) + onchip = nengo.Ensemble(1, 1, label="onchip") post1 = nengo.Ensemble(10, 1, label="post1") post2 = nengo.Node(size_in=1, label="post2") - pre_connections = [ - nengo.Connection(pre_1, pre_2), - nengo.Connection(pre_2, pre_3), - nengo.Connection(pre_3, pre_4), - nengo.Connection(pre_4.neurons, send), - ] - post_connections = [ - nengo.Connection(onchip, post1), - nengo.Connection(post1, post2), - ] + post3 = nengo.Probe(post2, label="post3") - networks = SplitNetworks(net, node_neurons=default_node_neurons) - for obj in [pre_1, pre_3, send, post1, post2]: - networks.move(obj, "host") - for obj in [pre_2, pre_4]: - networks.add(obj, "host") - for conn in pre_connections + post_connections: - networks.move(conn, "host") - networks.move(onchip, "chip") + nengo.Connection(pre_1, pre_2) + nengo.Connection(pre_2, pre_3) + nengo.Connection(pre_3, pre_4) + nengo.Connection(pre_4.neurons, onchip) + nengo.Connection(onchip, post1) + nengo.Connection(post1, post2) - split_pre_from_host(networks) - for obj in [pre_1, pre_2, pre_3, pre_4, send] + pre_connections: - assert networks.location(obj) == "host_pre", obj - for obj in [post1, post2] + post_connections: - assert networks.location(obj) == "host", obj - assert networks.location(onchip) == "chip" + net.config[pre_2].on_chip = False + net.config[pre_4].on_chip = False + net.config[post1].on_chip = False + split = Split(net, precompute=True) -def test_consistent_order(): - with nengo.Network() as model: - add_params(model) + host_precomputable = {pre_1, pre_2, pre_3, pre_4, pre_5} + for obj in host_precomputable: + assert not split.on_chip(obj) + assert split.is_precomputable(obj) - u0 = nengo.Node(0, label="u0") - for i in range(5): - e = nengo.Ensemble(i+1, 1, label="e%d" % i) - f = nengo.Ensemble(i+1, 1, label="f%d" % i) - nengo.Connection(u0, e, label="c0%d" % i) - nengo.Connection(e, f, label="cf%d" % i) - nengo.Probe(e) - nengo.Probe(f.neurons) - - # Test splitting a number of times, making sure the order of things matches - # the original network each time - split_params = dict( - precompute=False, - node_neurons=OnOffDecodeNeurons(dt=0.001), - node_tau=0.005, - remove_passthrough=False, - ) - - networks0 = split(model, **split_params) - for _ in range(5): - networks = split(model, **split_params) - - # --- order matches original network - assert len(model.all_ensembles) == len(networks.chip.all_ensembles) - for ea, eb in zip(model.all_ensembles, networks.chip.all_ensembles): - assert ea.n_neurons == eb.n_neurons and ea.label == eb.label - - # --- order matches previous split - for attr in ('connections', 'ensembles', 'nodes', 'probes'): - for net in ('host_pre', 'host', 'chip'): - aa = getattr(getattr(networks0, net), 'all_' + attr) - bb = getattr(getattr(networks, net), 'all_' + attr) - for a, b in zip(aa, bb): - assert a.label == b.label 
- - -@pytest.mark.skipif(LooseVersion(nengo.__version__) <= LooseVersion('2.8.0'), - reason="requires more recent Nengo version") -def test_split_conv2d_transform_error(): - with nengo.Network() as net: - add_params(net) - node_offchip = nengo.Node([1]) - ens_onchip = nengo.Ensemble(10, 1) - conv2d = nengo.Convolution( - n_filters=1, input_shape=(1, 1, 1), kernel_size=(1, 1)) - nengo.Connection(node_offchip, ens_onchip, transform=conv2d) + host_nonprecomputable = {post1, post2, post3} + for obj in host_nonprecomputable: + assert not split.on_chip(obj) + assert not split.is_precomputable(obj) - with pytest.raises(BuildError, match="Conv2D"): - split(net, precompute=False, node_neurons=default_node_neurons, - node_tau=0.005) + assert split.on_chip(onchip) + assert not split.is_precomputable(onchip) + + with pytest.raises(BuildError, match="not a part of the network"): + split.is_precomputable( + nengo.Node(0, add_to_container=False)) def test_split_precompute_loop_error(): @@ -464,62 +212,128 @@ def test_split_precompute_loop_error(): nengo.Connection(node_offchip, ens_onchip) nengo.Connection(ens_onchip, node_offchip) - with pytest.raises(BuildError, match="precompute"): - split(net, precompute=True, node_neurons=default_node_neurons, - node_tau=0.005) + with pytest.raises(BuildError, match="Cannot precompute"): + Split(net, precompute=True) + + +def test_chip_learning_errors(): + with nengo.Network() as net: + add_params(net) + + a = nengo.Ensemble(100, 1) + b = nengo.Ensemble(100, 1) + net.config[b].on_chip = True + + nengo.Connection(a, b, learning_rule_type=nengo.PES()) + + with pytest.raises(BuildError, match="Post ensemble"): + Split(net) + + with nengo.Network() as net: + add_params(net) + + a = nengo.Ensemble(100, 1) + b = nengo.Ensemble(100, 1) + error = nengo.Ensemble(100, 1) + net.config[error].on_chip = True + + conn = nengo.Connection(a, b, learning_rule_type=nengo.PES()) + nengo.Connection(error, conn.learning_rule) + + with pytest.raises(BuildError, match="Pre ensemble"): + Split(net) + + +@pytest.mark.parametrize("remove_passthrough", [True, False]) +def test_split_remove_passthrough(remove_passthrough): + with nengo.Network() as net: + add_params(net) + + keep1 = nengo.Node(0, label="keep1") + keep2 = nengo.Node(lambda t, x: x, size_in=1, label="keep2") + keep3 = nengo.Node(size_in=1, label="keep3") + + chip1 = nengo.Ensemble(10, 1, label="chip1") + discard1 = nengo.Node(size_in=1, label="discard1") + chip2 = nengo.Ensemble(10, 1, label="chip2") + discard2 = nengo.Node(size_in=1, label="discard2") + chip3 = nengo.Ensemble(10, 1, label="chip3") + + keep4 = nengo.Node(size_in=1, label="keep4") + probe = nengo.Probe(keep4) + nengo.Connection(keep1, keep2) + nengo.Connection(keep2, keep3) + nengo.Connection(keep3, chip1) + conn1 = nengo.Connection(chip1, discard1) + conn2 = nengo.Connection(discard1, chip2) + conn3 = nengo.Connection(chip2, discard2) + conn4 = nengo.Connection(discard2, chip3) + nengo.Connection(chip3, keep4) -def test_splitnetwork_bad_add_type(): - net = nengo.Network() - networks = SplitNetworks(net) - networks.add(1, "chip") - with pytest.raises(AssertionError): - networks.finalize() + split = Split(net, remove_passthrough=remove_passthrough) + assert not split.on_chip(probe) + if remove_passthrough: + assert split.passthrough.to_remove == { + conn1, conn2, conn3, conn4, discard1, discard2, + } -def test_splitnetwork_remove_add(): - net = nengo.Network() - networks = SplitNetworks(net) - e = nengo.Ensemble(1, 1, add_to_container=False) - networks.add(e, 
"chip") - networks.remove(e) - assert e not in networks.adds + conns = list(split.passthrough.to_add) + assert len(conns) == 2 + prepost = {(conn.pre, conn.post) for conn in conns} + assert prepost == {(chip1, chip2), (chip2, chip3)} -def test_pesmodulatorytarget_interface(): - target = "target" - p = PESModulatoryTarget(target) + else: + assert split.passthrough.to_remove == set() + assert split.passthrough.to_add == set() + + +def test_sliced_passthrough_bug(): + with nengo.Network() as model: + add_params(model) + + a = nengo.Ensemble(1, 1, label="a") + passthrough = nengo.Node(size_in=1, label="passthrough") - t0 = 4 - e0 = [1.8, 2.4, 3.3] - t1 = t0 + 3 - e1 = [7.2, 2.2, 4.1] - e01 = np.array(e0) + np.array(e1) + nengo.Connection(a, passthrough) + p = nengo.Probe(passthrough[0]) - p.receive(t0, e0) - assert isinstance(p.errors[t0], np.ndarray) - assert np.allclose(p.errors[t0], e0) + split = Split(model, remove_passthrough=True) - p.receive(t0, e1) - assert np.allclose(p.errors[t0], e01) + assert len(split.passthrough.to_add) == 0 + assert len(split.passthrough.to_remove) == 0 + + assert split.on_chip(a) + assert not split.on_chip(passthrough) + assert not split.on_chip(p) + + +def test_precompute_remove_passthrough(): + with nengo.Network() as net: + add_params(net) - with pytest.raises(AssertionError): - p.receive(t0 - 1, e0) # time needs to be >= last time + host = nengo.Node(0, label="host") + onchip1 = nengo.Ensemble(1, 1, label="onchip1") + passthrough1 = nengo.Node(size_in=1, label="passthrough1") + onchip2 = nengo.Ensemble(1, 1, label="onchip2") + passthrough2 = nengo.Node(size_in=1, label="passthrough2") + onchip3 = nengo.Ensemble(1, 1, label="onchip3") - p.receive(t1, e1) - assert np.allclose(p.errors[t1], e1) + nengo.Connection(host, onchip1) + nengo.Connection(onchip1, passthrough1) + nengo.Connection(passthrough1, onchip2) + nengo.Connection(onchip2, passthrough2) + nengo.Connection(passthrough2, onchip3) - errors = list(p.collect_errors()) - assert len(errors) == 2 - assert errors[0][:2] == (target, t0) and np.allclose(errors[0][2], e01) - assert errors[1][:2] == (target, t1) and np.allclose(errors[1][2], e1) + split = Split(net, precompute=True, remove_passthrough=True) - p.clear() - assert len(list(p.collect_errors())) == 0 + assert split.is_precomputable(host) + assert not split.on_chip(host) + for obj in (onchip1, passthrough1, onchip2, passthrough2, onchip3): + assert not split.is_precomputable(obj) -def test_bad_obj_type(): - split = SplitNetworks(nengo.Network()) - split.adds = {"woops": "host"} - with pytest.raises(AssertionError, match="cannot handle type"): - split.finalize() + for obj in (onchip1, onchip2, onchip3): + assert split.on_chip(obj)