Update lrp_opts presets in utils.py for the lrp_python_demo
Maximilian Kohlbrenner committed Aug 17, 2018
1 parent c2568e5 commit 2ea37a4
Showing 2 changed files with 81 additions and 21 deletions.
23 changes: 15 additions & 8 deletions caffe-master-lrp/demonstrator/lrp_python_demo.py
@@ -22,7 +22,7 @@
def main():
simple_lrp_demo()

def simple_lrp_demo(num_images = 3):
def simple_lrp_demo(num_images = 1):
"""
Simple example to demonstrate the LRP methods using the Caffe python interface.
Calculates the prediction and LRP heatmap for num_images example images from the EXAMPLE_IMAGE_FOLDER
@@ -63,7 +63,7 @@ def simple_lrp_demo(num_images = 3):
## ############# ##
# LRP parameters: #
## ############# ##
lrp_type = 'epsilon'
lrp_type = 'epsilon'
# lrp_type | meaning of lrp_param | uses switch_layer | description
# ---------------------------------------------------------------------------
# epsilon | epsilon | no | epsilon lrp
@@ -72,19 +72,21 @@ def simple_lrp_demo(num_images = 3):
# eps_n_wsquare | epsilon | yes | epsilon lrp until switch_layer, wsquare lrp for all layers below
# ab_n_flat | beta | yes | alphabeta lrp until switch_layer, wflat lrp for all layers below
# ab_n_wsquare | beta | yes | alphabeta lrp until switch_layer, wsquare lrp for all layers below
# std_n_ab | beta | yes | standard lrp (epsilon with eps=0) until switch_layer, alphabeta lrp for all layers below
# layer_dep | - | no | standard lrp (epsilon with eps=0) for all fully-connected layers, alphabeta lrp with alpha=1 for all convolution layers
# layer_dep_n_flat | - | yes | layer_dep (see above) until switch_layer, wflat lrp for all layers below
# layer_dep_n_wsquare | - | yes | layer_dep (see above) until switch_layer, wsquare lrp for all layers below
# eps_n_ab | (epsilon, beta) | yes | epsilon lrp until switch_layer, alphabeta lrp for all layers below
# layer_dep | (epsilon, beta) | no | epsilon lrp for all fully-connected layers, alphabeta lrp with alpha=1 for all convolution layers
# layer_dep_n_flat | (epsilon, beta) | yes | layer_dep (see above) until switch_layer, wflat lrp for all layers below
# layer_dep_n_wsquare | (epsilon, beta) | yes | layer_dep (see above) until switch_layer, wsquare lrp for all layers below

lrp_param = 0.000001 # (epsilon | beta | epsilon | epsilon | beta )
classind = -1 # (class index | -1 for top_class)
# depending on lrp_type, lrp_param needs to be a scalar or a tuple (see table above). If a scalar is given to an lrp_type that expects a tuple, the defaults epsilon=1e-10 and beta=0. are used.
lrp_param = 1e-10

# switch_layer param only needed for the composite methods
# the parameter depicts the first layer for which the second formula type is used.
# interesting values for caffenet are: 0, 4, 8, 10, 12 | 15, 18, 21 (convolution layers | innerproduct layers)
switch_layer = 13

classind = -1 # (class index | -1 for top_class)


## ################################## ##
# Heatmap calculation and presentation #
@@ -93,6 +95,11 @@
# LRP
backward = lrp_hm(net, transformed_input, lrp_method=lrp_type, lrp_param=lrp_param, target_class_inds=classind, switch_layer=switch_layer)

if backward is None:
print('----------ERROR-------------')
print('LRP result is None, check lrp_type and lrp_param for correctness')
return

sum_over_channels = True
normalize_heatmap = False
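
For orientation, here is a minimal sketch of how one of the composite presets from the table above could be selected in this demo. It assumes the variables already defined in the script (lrp_type, lrp_param, switch_layer, classind, net, transformed_input) and the lrp_hm signature from utils.py; the concrete values are illustrative only, not part of this commit.

# Sketch: composite preset with an explicit (epsilon, beta) tuple; values are illustrative.
lrp_type = 'eps_n_ab'        # epsilon lrp until switch_layer, alphabeta lrp for all layers below
lrp_param = (1e-10, 0.)      # (epsilon, beta); a plain scalar would fall back to the defaults
switch_layer = 13            # first layer for which the second formula type is used
classind = -1                # -1 explains the top-scoring class

backward = lrp_hm(net, transformed_input, lrp_method=lrp_type, lrp_param=lrp_param, target_class_inds=classind, switch_layer=switch_layer)
if backward is None:
    print('LRP result is None, check lrp_type and lrp_param for correctness')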

79 changes: 66 additions & 13 deletions caffe-master-lrp/demonstrator/utils.py
@@ -137,16 +137,32 @@ def lrp_hm(net, input_images, lrp_method = 'epsilon', lrp_param = 0.0000001, tar
if single_mode:

if switch_layer > 0 and lrp_method != 'epsilon' and lrp_method != 'alphabeta':
relevance = net.lrp_single(int(target_class[0]), lrp_opts(lrp_method, lrp_param, switch_layer = switch_layer))
lrpopts = lrp_opts(lrp_method, lrp_param, switch_layer = switch_layer)
if lrpopts is None:
print('Invalid lrp parameter setting, check lrp_type and lrp_param')
return None
relevance = net.lrp_single(int(target_class[0]), lrpopts)
else:
relevance = net.lrp_single(int(target_class[0]), lrp_opts(lrp_method, lrp_param))
lrpopts = lrp_opts(lrp_method, lrp_param)
if lrpopts is None:
print('Invalid lrp parameter setting, check lrp_type and lrp_param')
return None
relevance = net.lrp_single(int(target_class[0]), lrpopts)

else:

if switch_layer > 0 and lrp_method != 'epsilon' and lrp_method != 'alphabeta':
relevance = net.lrp(target_class, lrp_opts(lrp_method, lrp_param, switch_layer = switch_layer))
lrpopts = lrp_opts(lrp_method, lrp_param, switch_layer = switch_layer)
if lrpopts is None:
print('Invalid lrp parameter setting, check lrp_type and lrp_param')
return None
relevance = net.lrp(target_class, lrpopts)
else:
relevance = net.lrp(target_class, lrp_opts(lrp_method, lrp_param))
lrpopts = lrp_opts(lrp_method, lrp_param)
if lrpopts is None:
print('Invalid lrp parameter setting, check lrp_type and lrp_param')
return None
relevance = net.lrp(target_class, lrpopts)

output.append(relevance)
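
All four branches above follow the same pattern: build the option object with lrp_opts, bail out if it comes back None, then run the single- or batch-mode backward pass. Purely as a sketch of that pattern (a hypothetical helper, not part of this commit), the check could be written once:

def build_lrp_opts_checked(lrp_method, lrp_param, switch_layer=-1):
    # Hypothetical helper mirroring the branching in lrp_hm above:
    # composite methods pass switch_layer through, plain epsilon/alphabeta do not.
    if switch_layer > 0 and lrp_method not in ('epsilon', 'alphabeta'):
        lrpopts = lrp_opts(lrp_method, lrp_param, switch_layer=switch_layer)
    else:
        lrpopts = lrp_opts(lrp_method, lrp_param)
    if lrpopts is None:
        print('Invalid lrp parameter setting, check lrp_type and lrp_param')
    return lrpopts

lrp_hm could then call this helper once per image and return None when it does, instead of repeating the check in every branch.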

@@ -255,26 +271,63 @@ def lrp_opts(method = 'epsilon', param = 0., switch_layer = -1):
lrp_opts.alphabeta_beta = param
lrp_opts.auxiliaryvariable_maxlayerindexforflatdistinconv = switch_layer

elif method == 'std_n_ab':
elif method == 'eps_n_ab':

if isinstance(param, tuple) and len(param) == 2:
epsilon = param[0]
beta = param[1]
else:
epsilon = 1e-10
beta = 0.

lrp_opts.alphabeta_beta = beta
lrp_opts.epsstab = epsilon
lrp_opts.relpropformulatype = 114
lrp_opts.alphabeta_beta = param
lrp_opts.epsstab = 0.0000000001
lrp_opts.auxiliaryvariable_maxlayerindexforflatdistinconv = switch_layer

elif method == 'layer_dep':

if isinstance(param, tuple) and len(param) == 2:
epsilon = param[0]
beta = param[1]
else:
epsilon = 1e-10
beta = 0.

lrp_opts.alphabeta_beta = beta
lrp_opts.epsstab = epsilon

lrp_opts.relpropformulatype = 100
lrp_opts.epsstab = 0.0000000001
lrp_opts.alphabeta_beta = 0.
lrp_opts.auxiliaryvariable_maxlayerindexforflatdistinconv = switch_layer

elif method == 'layer_dep_n_flat':

if isinstance(param, tuple) and len(param) == 2:
epsilon = param[0]
beta = param[1]
else:
epsilon = 1e-10
beta = 0.

lrp_opts.alphabeta_beta = beta
lrp_opts.epsstab = epsilon

lrp_opts.relpropformulatype = 102
lrp_opts.epsstab = 0.0000000001
lrp_opts.alphabeta_beta = 0.
lrp_opts.auxiliaryvariable_maxlayerindexforflatdistinconv = switch_layer

elif method == 'layer_dep_n_wsquare':
if isinstance(param, tuple) and len(param) == 2:
epsilon = param[0]
beta = param[1]
else:
epsilon = 1e-10
beta = 0.

lrp_opts.alphabeta_beta = beta
lrp_opts.epsstab = epsilon

lrp_opts.relpropformulatype = 104
lrp_opts.epsstab = 0.0000000001
lrp_opts.alphabeta_beta = 0.
lrp_opts.auxiliaryvariable_maxlayerindexforflatdistinconv = switch_layer

elif method == 'deconv':
lrp_opts.relpropformulatype = 26
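
The four new presets (eps_n_ab, layer_dep, layer_dep_n_flat, layer_dep_n_wsquare) all unpack lrp_param the same way: an (epsilon, beta) tuple if one is given, otherwise the defaults epsilon=1e-10 and beta=0. As a sketch only (a hypothetical helper, not part of this commit), that shared step could be factored out:

def unpack_eps_beta(param, default_eps=1e-10, default_beta=0.):
    # Hypothetical helper mirroring the per-branch unpacking above:
    # accept an (epsilon, beta) tuple, otherwise fall back to the defaults.
    if isinstance(param, tuple) and len(param) == 2:
        return param[0], param[1]
    return default_eps, default_beta

Each preset branch could then start with "epsilon, beta = unpack_eps_beta(param)" before setting lrp_opts.epsstab and lrp_opts.alphabeta_beta.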