Add MIFGSM Attack (#717)
* Add MIFGSM Attack

* test mifgsm in test_attack

* add docstring for mi_fgsm

---------

Co-authored-by: lcy <1539275856@qq.com>
lcycode and lcy committed Nov 9, 2023
1 parent 475cb98 commit b3fcc73
Showing 3 changed files with 167 additions and 0 deletions.
6 changes: 6 additions & 0 deletions foolbox/attacks/__init__.py
@@ -20,6 +20,11 @@
L2AdamBasicIterativeAttack,
LinfAdamBasicIterativeAttack,
)
from .mi_fgsm import ( # noqa: F401
L1MomentumIterativeFastGradientMethod,
L2MomentumIterativeFastGradientMethod,
LinfMomentumIterativeFastGradientMethod,
)
from .fast_gradient_method import ( # noqa: F401
L1FastGradientAttack,
L2FastGradientAttack,
@@ -93,6 +98,7 @@
L2PGD = L2ProjectedGradientDescentAttack
LinfPGD = LinfProjectedGradientDescentAttack
PGD = LinfPGD
MIFGSM = LinfMomentumIterativeFastGradientMethod

L1AdamPGD = L1AdamProjectedGradientDescentAttack
L2AdamPGD = L2AdamProjectedGradientDescentAttack
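With this change, all three variants are importable from foolbox.attacks, and MIFGSM resolves to the Linf variant, mirroring the existing PGD = LinfPGD alias. A minimal import sketch (assuming a foolbox installation that includes this commit; the fa alias follows the convention used in the tests below):

import foolbox.attacks as fa

# MIFGSM is an alias for the Linf variant, analogous to PGD = LinfPGD.
attack = fa.MIFGSM()
assert isinstance(attack, fa.LinfMomentumIterativeFastGradientMethod)

# The L1 and L2 variants are exposed under their full class names.
l2_attack = fa.L2MomentumIterativeFastGradientMethod(momentum=1.0, steps=10)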
143 changes: 143 additions & 0 deletions foolbox/attacks/mi_fgsm.py
@@ -0,0 +1,143 @@
from functools import partial
from typing import Callable, Optional

from foolbox.attacks.gradient_descent_base import normalize_lp_norms

from .basic_iterative_method import (
Optimizer,
L1BasicIterativeAttack,
L2BasicIterativeAttack,
LinfBasicIterativeAttack,
)
import eagerpy as ep


class GDMOptimizer(Optimizer):
"""Momentum-based Gradient Descent Optimizer
Args:
x : Optimization variable for initialization of accumulation grad
stepsize : Stepsize for gradient descent
momentum : Momentum factor for accumulation grad
normalize_fn : Function to normalize the gradient
"""

def __init__(
self,
x: ep.Tensor,
stepsize: float,
momentum: float = 1.0,
normalize_fn: Callable[[ep.Tensor], ep.Tensor] = lambda x: x.sign(),
):
self.stepsize = stepsize
self.momentum = momentum
self.normalize = normalize_fn
self.accumulation_grad = ep.zeros_like(x)

def __call__(self, gradient: ep.Tensor) -> ep.Tensor:
self.accumulation_grad = self.momentum * self.accumulation_grad + gradient
return self.stepsize * self.normalize(self.accumulation_grad)
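The update rule is the momentum accumulation from MI-FGSM: the accumulated gradient becomes momentum * previous_accumulation + gradient, and the returned step is stepsize * normalize(accumulated gradient). A quick standalone check of this behavior (a sketch on eagerpy's NumPy backend, assuming this commit is installed so GDMOptimizer is importable):

import numpy as np
import eagerpy as ep

from foolbox.attacks.mi_fgsm import GDMOptimizer

x = ep.astensor(np.zeros(3))
opt = GDMOptimizer(x, stepsize=0.1, momentum=1.0)  # default normalize_fn is sign

g = ep.astensor(np.array([0.5, -2.0, 0.0]))
step1 = opt(g)  # accumulated gradient = g, step = 0.1 * sign(g)
step2 = opt(g)  # accumulated gradient = 2 * g: same signs, so the same step
assert (step1 == step2).all().item()

With momentum, gradient directions that persist across iterations dominate the accumulation, which stabilizes the update direction compared to plain iterative FGSM.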


class L1MomentumIterativeFastGradientMethod(L1BasicIterativeAttack):
"""L1 Momentum Iterative Fast Gradient Sign Method (MI-FGSM) [#Dong18]
Args:
momentum : Momentum factor for accumulation grad
rel_stepsize : Stepsize relative to epsilon
abs_stepsize : If given, it takes precedence over rel_stepsize.
steps : Number of update steps to perform.
random_start : Whether the perturbation is initialized randomly or starts at zero.
"""

def __init__(
self,
*,
momentum: float = 1.0,
rel_stepsize: float = 0.2,
abs_stepsize: Optional[float] = None,
steps: int = 10,
random_start: bool = False,
):
self.momentum = momentum
super().__init__(
rel_stepsize=rel_stepsize,
abs_stepsize=abs_stepsize,
steps=steps,
random_start=random_start,
)

def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer:
return GDMOptimizer(
x, stepsize, self.momentum, partial(normalize_lp_norms, p=1)
)


class L2MomentumIterativeFastGradientMethod(L2BasicIterativeAttack):
"""L2 Momentum Iterative Fast Gradient Sign Method (MI-FGSM) [#Dong18]
Args:
momentum : Momentum factor for accumulation grad
rel_stepsize : Stepsize relative to epsilon
abs_stepsize : If given, it takes precedence over rel_stepsize.
steps : Number of update steps to perform.
random_start : Whether the perturbation is initialized randomly or starts at zero.
"""

def __init__(
self,
*,
momentum: float = 1.0,
rel_stepsize: float = 0.2,
abs_stepsize: Optional[float] = None,
steps: int = 10,
random_start: bool = False,
):
self.momentum = momentum
super().__init__(
rel_stepsize=rel_stepsize,
abs_stepsize=abs_stepsize,
steps=steps,
random_start=random_start,
)

def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer:
return GDMOptimizer(
x, stepsize, self.momentum, partial(normalize_lp_norms, p=2)
)


class LinfMomentumIterativeFastGradientMethod(LinfBasicIterativeAttack):
"""Linf Momentum Iterative Fast Gradient Sign Method (MI-FGSM) [#Dong18]
Args:
momentum : Momentum factor for accumulation grad
rel_stepsize : Stepsize relative to epsilon
abs_stepsize : If given, it takes precedence over rel_stepsize.
steps : Number of update steps to perform.
random_start : Whether the perturbation is initialized randomly or starts at zero.
References: .. [#Dong18] Dong Y, Liao F, Pang T, et al. Boosting adversarial attacks with momentum[
C]//Proceedings of the IEEE conference on computer vision and pattern recognition. 2018: 9185-9193.
https://arxiv.org/abs/1710.06081
"""

def __init__(
self,
*,
momentum: float = 1.0,
rel_stepsize: float = 0.2,
abs_stepsize: Optional[float] = None,
steps: int = 10,
random_start: bool = False,
):
self.momentum = momentum
super().__init__(
rel_stepsize=rel_stepsize,
abs_stepsize=abs_stepsize,
steps=steps,
random_start=random_start,
)

def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer:
return GDMOptimizer(x, stepsize, self.momentum)
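End-to-end, the new classes follow the standard foolbox attack interface. A minimal usage sketch with a toy PyTorch model (the model, data, and epsilon below are illustrative placeholders, not part of this commit):

import torch
import foolbox as fb

# Toy linear classifier standing in for a trained model.
net = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10)).eval()
model = fb.PyTorchModel(net, bounds=(0, 1))

images = torch.rand(8, 3, 32, 32)
labels = torch.randint(0, 10, (8,))

attack = fb.attacks.LinfMomentumIterativeFastGradientMethod(momentum=1.0, steps=10)
# Returns raw adversarials, epsilon-clipped adversarials, and per-sample success flags.
raw, clipped, is_adv = attack(model, images, labels, epsilons=8 / 255)
print(f"success rate at eps=8/255: {is_adv.float().mean().item():.1%}")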
18 changes: 18 additions & 0 deletions tests/test_attacks.py
@@ -94,6 +94,15 @@ def get_attack_id(x: AttackTestTarget) -> str:
AttackTestTarget(fa.FGSM(), Linf(100.0), uses_grad=True),
AttackTestTarget(FGSM_GE(), Linf(100.0)),
AttackTestTarget(fa.FGM(), L2(100.0), uses_grad=True),
AttackTestTarget(
fa.LinfMomentumIterativeFastGradientMethod(), Linf(1.0), uses_grad=True
),
AttackTestTarget(
fa.L2MomentumIterativeFastGradientMethod(), L2(50.0), uses_grad=True
),
AttackTestTarget(
fa.L1MomentumIterativeFastGradientMethod(), 5000.0, uses_grad=True
),
AttackTestTarget(fa.L1FastGradientAttack(), 5000.0, uses_grad=True),
AttackTestTarget(
fa.GaussianBlurAttack(steps=10), uses_grad=True, requires_real_model=True
@@ -243,6 +252,15 @@ def test_untargeted_attacks(
),
AttackTestTarget(fa.L2AdamBasicIterativeAttack(), L2(50.0), uses_grad=True),
AttackTestTarget(fa.L1AdamBasicIterativeAttack(), 5000.0, uses_grad=True),
AttackTestTarget(
fa.LinfMomentumIterativeFastGradientMethod(), Linf(1.0), uses_grad=True
),
AttackTestTarget(
fa.L2MomentumIterativeFastGradientMethod(), L2(50.0), uses_grad=True
),
AttackTestTarget(
fa.L1MomentumIterativeFastGradientMethod(), 5000.0, uses_grad=True
),
AttackTestTarget(fa.SparseL1DescentAttack(), 5000.0, uses_grad=True),
]

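Both parametrized lists above now cover the three new classes, with per-norm epsilon budgets matching the neighboring basic iterative targets. To exercise just these targets locally, something like pytest tests/test_attacks.py -k Momentum should work (an assumption: the generated test ids include the attack's class name via get_attack_id, making it matchable with -k).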
