remove numpy from attribution tests #760

Closed · wants to merge 1 commit
4 changes: 2 additions & 2 deletions tests/attr/layer/test_layer_conductance.py
@@ -153,7 +153,7 @@ def _conductance_test_assert(
             internal_batch_size=internal_batch_size,
             return_convergence_delta=True,
         )
-        delta_condition = all(abs(delta.numpy().flatten()) < 0.01)
+        delta_condition = (delta.abs() < 0.01).all()
         self.assertTrue(
             delta_condition,
             "Sum of attributions does {}"
@@ -195,7 +195,7 @@ def forward_hook(module, inp, out):
                 return_convergence_delta=True,
             ),
         )
-        delta_condition = all(abs(delta.numpy().flatten()) < 0.005)
+        delta_condition = (delta.abs() < 0.005).all()
        self.assertTrue(
             delta_condition,
             "Sum of attribution values does {} "
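Note (not part of the diff): the delta_condition rewrite above is the core pattern of this PR. A minimal standalone sketch of why the two checks agree, and why the torch version is preferable (Tensor.numpy() raises on CUDA tensors and on tensors that require grad):

    import torch

    delta = torch.tensor([0.001, -0.002, 0.0005])
    old = all(abs(delta.numpy().flatten()) < 0.01)  # detours through numpy
    new = (delta.abs() < 0.01).all()                # stays in torch
    assert bool(new) == old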
5 changes: 2 additions & 3 deletions tests/attr/layer/test_layer_deeplift.py
@@ -9,7 +9,6 @@
 from tests.helpers.basic import (
     BaseTest,
     assert_delta,
-    assertArraysAlmostEqual,
     assertTensorAlmostEqual,
     assertTensorTuplesAlmostEqual,
 )
@@ -243,8 +242,8 @@ def test_lin_maxpool_lin_classification(self) -> None:
         )
         expected = [[[-8.0]], [[-7.0]]]
         expected_delta = [0.0, 0.0]
-        assertArraysAlmostEqual(cast(Tensor, attrs).detach().numpy(), expected)
-        assertArraysAlmostEqual(delta.detach().numpy(), expected_delta)
+        assertTensorAlmostEqual(self, cast(Tensor, attrs), expected, 0.0001, "max")
+        assertTensorAlmostEqual(self, delta, expected_delta, 0.0001, "max")

     def test_convnet_maxpool2d_classification(self) -> None:
         inputs = 100 * torch.randn(2, 1, 10, 10)
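Note (not part of the diff): several hunks in this PR pass an explicit tolerance and a "max" mode to assertTensorAlmostEqual, which is defined in tests/helpers/basic.py (the last file on this page). Roughly, and only as an assumption about that helper, "sum" bounds the total absolute error while "max" bounds the largest per-element error:

    import torch

    def tensor_almost_equal(actual, expected, delta=0.0001, mode="sum"):
        # Expected values may be plain Python lists, as in the tests above.
        diff = (torch.as_tensor(actual) - torch.as_tensor(expected)).abs()
        reduced = diff.sum() if mode == "sum" else diff.max()
        return reduced.item() <= delta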
10 changes: 5 additions & 5 deletions tests/attr/test_deeplift_basic.py
@@ -291,10 +291,10 @@ def test_lin_maxpool_lin_classification(self) -> None:
         attrs, delta = dl.attribute(
             inputs, baselines, target=0, return_convergence_delta=True
         )
-        expected = [[0.0, 0.0, 0.0, -8.0], [0.0, -7.0, 0.0, 0.0]]
-        expected_delta = [0.0, 0.0]
-        assertArraysAlmostEqual(attrs.detach().numpy(), expected)
-        assertArraysAlmostEqual(delta.detach().numpy(), expected_delta)
+        expected = torch.Tensor([[0.0, 0.0, 0.0, -8.0], [0.0, -7.0, 0.0, 0.0]])
+        expected_delta = torch.Tensor([0.0, 0.0])
+        assertTensorAlmostEqual(self, attrs, expected, 0.0001)
+        assertTensorAlmostEqual(self, delta, expected_delta, 0.0001)

     def _deeplift_assert(
         self,
@@ -345,7 +345,7 @@ def _deeplift_assert(
         )
         assertArraysAlmostEqual(delta, delta_external, 0.0)

-        delta_condition = all(abs(delta.numpy().flatten()) < 0.00001)
+        delta_condition = (delta.abs() < 0.00001).all()
         self.assertTrue(
             delta_condition,
             "The sum of attribution values {} is not "
2 changes: 1 addition & 1 deletion tests/attr/test_deeplift_classification.py
@@ -179,7 +179,7 @@ def _assert_attributions(
     ) -> None:
         self.assertEqual(inputs.shape, attributions.shape)

-        delta_condition = all(abs(delta.numpy().flatten()) < 0.003)
+        delta_condition = (delta.abs() < 0.003).all()
         self.assertTrue(
             delta_condition,
             "The sum of attribution values {} is not "
2 changes: 1 addition & 1 deletion tests/attr/test_feature_ablation.py
@@ -457,7 +457,7 @@ def forward_func(inp):
             target=None,
             feature_mask=mask,
             perturbations_per_eval=(1,),
-            expected_ablation=torch.zeros((5 * 3 * 2,) + inp[0].shape).numpy().tolist(),
+            expected_ablation=torch.zeros((5 * 3 * 2,) + inp[0].shape),
         )

     def test_single_inp_ablation_multi_output_aggr(self) -> None:
9 changes: 3 additions & 6 deletions tests/attr/test_gradient_shap.py
@@ -10,7 +10,6 @@
 from numpy import ndarray
 from tests.helpers.basic import (
     BaseTest,
-    assertArraysAlmostEqual,
     assertTensorAlmostEqual,
 )
 from tests.helpers.basic_models import BasicLinearModel, BasicModel2
@@ -244,10 +243,8 @@ def _assert_shap_ig_comparision(
         self, attributions1: Tuple[Tensor, ...], attributions2: Tuple[Tensor, ...]
     ) -> None:
         for attribution1, attribution2 in zip(attributions1, attributions2):
-            for attr_row1, attr_row2 in zip(
-                attribution1.detach().numpy(), attribution2.detach().numpy()
-            ):
-                assertArraysAlmostEqual(attr_row1, attr_row2, delta=0.005)
+            for attr_row1, attr_row2 in zip(attribution1, attribution2):
+                assertTensorAlmostEqual(self, attr_row1, attr_row2, 0.005, "max")


 def _assert_attribution_delta(
@@ -272,7 +269,7 @@ def _assert_attribution_delta(


 def _assert_delta(test: BaseTest, delta: Tensor) -> None:
-    delta_condition = all(abs(delta.numpy().flatten()) < 0.0006)
+    delta_condition = (delta.abs() < 0.0006).all()
     test.assertTrue(
         delta_condition,
         "Sum of SHAP values {} does"
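Note (not part of the diff): the _assert_shap_ig_comparision rewrite above drops the .detach().numpy() round-trip because iterating a tensor already yields slices along its first dimension, exactly as iterating the corresponding ndarray would:

    import torch

    t = torch.arange(6.0).reshape(2, 3)
    rows = list(t)  # two 1-D tensors: t[0] and t[1]
    assert torch.equal(rows[0], t[0]) and torch.equal(rows[1], t[1])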
13 changes: 7 additions & 6 deletions tests/attr/test_input_x_gradient.py
@@ -6,7 +6,11 @@
 from captum.attr._core.input_x_gradient import InputXGradient
 from captum.attr._core.noise_tunnel import NoiseTunnel
 from tests.attr.test_saliency import _get_basic_config, _get_multiargs_basic_config
-from tests.helpers.basic import BaseTest, assertArraysAlmostEqual
+from tests.helpers.basic import (
+    BaseTest,
+    assertArraysAlmostEqual,
+    assertTensorAlmostEqual,
+)
 from tests.helpers.classification_models import SoftmaxModel
 from torch import Tensor
 from torch.nn import Module
@@ -100,11 +104,8 @@ def _input_x_gradient_classification_assert(self, nt_type: str = "vanilla") -> None:
             attributions = input_x_grad.attribute(input, target)
             output = model(input)[:, target]
             output.backward()
-            expercted = input.grad * input
-            self.assertEqual(
-                expercted.detach().numpy().tolist(),
-                attributions.detach().numpy().tolist(),
-            )
+            expected = input.grad * input
+            assertTensorAlmostEqual(self, attributions, expected, 0.00001, "max")
         else:
             nt = NoiseTunnel(input_x_grad)
             attributions = nt.attribute(
2 changes: 1 addition & 1 deletion tests/attr/test_integrated_gradients_basic.py
@@ -464,7 +464,7 @@ def _compute_attribution_and_evaluate(
         for input, attribution in zip(inputs, attributions):
             self.assertEqual(attribution.shape, input.shape)
         if multiply_by_inputs:
-            self.assertTrue(all(abs(delta.numpy().flatten()) < 0.07))
+            assertTensorAlmostEqual(self, delta, torch.zeros(delta.shape), 0.07, "max")

         # compare attributions retrieved with and without
         # `return_convergence_delta` flag
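Note (not part of the diff): comparing delta against torch.zeros(delta.shape) with mode "max" and tolerance 0.07 bounds the largest |delta| element, which reproduces the old all(|delta| < 0.07) check up to the strict-versus-non-strict boundary case.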
4 changes: 2 additions & 2 deletions tests/attr/test_integrated_gradients_classification.py
@@ -106,7 +106,7 @@ def _validate_completness(
         )
         assertTensorAlmostEqual(self, delta_expected, delta)

-        delta_condition = all(abs(delta.numpy().flatten()) < 0.005)
+        delta_condition = (delta.abs() < 0.005).all()
         self.assertTrue(
             delta_condition,
             "The sum of attribution values {} is not "
@@ -130,7 +130,7 @@ def _validate_completness(
         )
         self.assertEqual([input.shape[0] * n_samples], list(delta.shape))

-        self.assertTrue(all(abs(delta.numpy().flatten()) < 0.05))
+        self.assertTrue((delta.abs() < 0.05).all())
         self.assertEqual(attributions.shape, input.shape)
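Note (not part of the diff): (delta.abs() < 0.05).all() returns a zero-dimensional bool tensor rather than a Python bool; assertTrue still works because bool() is well defined for single-element tensors:

    import torch

    delta = torch.tensor([0.01, -0.02])
    cond = (delta.abs() < 0.05).all()
    assert isinstance(cond, torch.Tensor) and bool(cond)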
21 changes: 11 additions & 10 deletions tests/attr/test_saliency.py
@@ -10,6 +10,7 @@
     BaseTest,
     assertArraysAlmostEqual,
     assertTensorTuplesAlmostEqual,
+    assertTensorAlmostEqual,
 )
 from tests.helpers.basic_models import BasicModel, BasicModel5_MultiArgs
 from tests.helpers.classification_models import SoftmaxModel
@@ -18,7 +19,7 @@


 def _get_basic_config() -> Tuple[Module, Tensor, Tensor, Any]:
-    input = torch.tensor([1.0, 2.0, 3.0, 0.0, -1.0, 7.0], requires_grad=True)
+    input = torch.tensor([1.0, 2.0, 3.0, 0.0, -1.0, 7.0], requires_grad=True).T
     # manually percomputed gradients
     grads = torch.tensor([-0.0, -0.0, -0.0, 1.0, 1.0, -0.0])
     return BasicModel(), input, grads, None
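Note (not part of the diff): the .T added above is a shape no-op for this input, since transposing a 1-D tensor leaves it unchanged (recent PyTorch releases warn when .T is used on anything but a 2-D tensor):

    import torch

    v = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
    assert v.T.shape == v.shape  # still torch.Size([3])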
@@ -179,11 +180,14 @@ def _saliency_base_assert(

     def _assert_attribution(self, attribution: Tensor, expected: Tensor) -> None:
         expected = torch.abs(expected)
-        assertArraysAlmostEqual(
-            expected.detach().numpy().flatten().tolist(),
-            attribution.detach().numpy().flatten().tolist(),
-            delta=0.5,
-        )
+        if len(attribution.shape) == 0:
+            assert (attribution - expected).abs() < 0.001
+        else:
+            assertArraysAlmostEqual(
+                expected.flatten(),
+                attribution.flatten(),
+                delta=0.5,
+            )

     def _saliency_classification_assert(self, nt_type: str = "vanilla") -> None:
         num_in = 5
@@ -199,10 +203,7 @@ def _saliency_classification_assert(self, nt_type: str = "vanilla") -> None:
             output = model(input)[:, target]
             output.backward()
             expected = torch.abs(cast(Tensor, input.grad))
-            self.assertEqual(
-                expected.detach().numpy().tolist(),
-                attributions.detach().numpy().tolist(),
-            )
+            assertTensorAlmostEqual(self, attributions, expected)
         else:
             nt = NoiseTunnel(saliency)
             attributions = nt.attribute(
11 changes: 3 additions & 8 deletions tests/helpers/basic.py
@@ -62,17 +62,12 @@ def assertTensorTuplesAlmostEqual(test, actual, expected, delta=0.0001, mode="sum"):

 def assertAttributionComparision(test, attributions1, attributions2):
     for attribution1, attribution2 in zip(attributions1, attributions2):
-        for attr_row1, attr_row2 in zip(
-            attribution1.detach().numpy(), attribution2.detach().numpy()
-        ):
-            if isinstance(attr_row1, np.ndarray):
-                assertArraysAlmostEqual(attr_row1, attr_row2, delta=0.05)
-            else:
-                test.assertAlmostEqual(attr_row1, attr_row2, delta=0.05)
+        for attr_row1, attr_row2 in zip(attribution1, attribution2):
+            assertTensorAlmostEqual(test, attr_row1, attr_row2, 0.05, "max")


 def assert_delta(test, delta):
-    delta_condition = all(abs(delta.numpy().flatten()) < 0.00001)
+    delta_condition = (delta.abs() < 0.00001).all()
     test.assertTrue(
         delta_condition,
         "The sum of attribution values {} for relu layer is not "