From 5b46ddfa003e738f02b3842c3b9a5a7270e0bbc1 Mon Sep 17 00:00:00 2001
From: Cory Cornelius
Date: Wed, 28 Jun 2023 13:50:02 -0700
Subject: [PATCH 001/126] Initial implementation of adversarial training callback

---
 mart/callbacks/__init__.py | 1 +
 mart/callbacks/adversarial_training.py | 46 +++++++++++++++++++
 .../callbacks/adversarial_training.yaml | 3 ++
 mart/configs/experiment/CIFAR10_CNN_Adv.yaml | 39 ++++++++++------
 4 files changed, 76 insertions(+), 13 deletions(-)
 create mode 100644 mart/callbacks/adversarial_training.py
 create mode 100644 mart/configs/callbacks/adversarial_training.yaml

diff --git a/mart/callbacks/__init__.py b/mart/callbacks/__init__.py
index 8e117180..0dccb7f7 100644
--- a/mart/callbacks/__init__.py
+++ b/mart/callbacks/__init__.py
@@ -1,3 +1,4 @@
+from .adversarial_training import *
 from .eval_mode import *
 from .gradients import *
 from .no_grad_mode import *
diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py
new file mode 100644
index 00000000..7453e52e
--- /dev/null
+++ b/mart/callbacks/adversarial_training.py
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2022 Intel Corporation
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+from pytorch_lightning.callbacks import Callback
+
+from mart.models import LitModular
+
+__all__ = ["AdversarialTraining"]
+
+
+class AdversarialTraining(Callback):
+    """Perturbs inputs to be adversarial."""
+
+    def __init__(
+        self, adversary=None, train_adversary=None, validation_adversary=None, test_adversary=None
+    ):
+        adversary = adversary or train_adversary
+
+        self.train_adversary = train_adversary or adversary
+        self.validation_adversary = validation_adversary or adversary
+        self.test_adversary = test_adversary or adversary
+
+    def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
+        input, target = batch
+
+        # FIXME: We reach into LitModular here...how can we get rid of this?
+        assert isinstance(pl_module, LitModular)
+        model = pl_module.model
+        sequence = model._sequences["training"]
+
+        # FIXME: This doesn't work because sequence does not include the Adversary module. How can we fix that?
+        # Because this is a callback, we can safely assume the Adversary module should live before the model.
+        # We should be able to "manually" insert it into the sequence here.
+        out = self.train_adversary(input=input, target=target, model=model, sequence=sequence)
+        print("out =", out)
+
+    def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
+        # FIXME: Copy on_train_batch_start
+        pass
+
+    def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
+        # FIXME: Copy on_train_batch_start
+        pass
diff --git a/mart/configs/callbacks/adversarial_training.yaml b/mart/configs/callbacks/adversarial_training.yaml
new file mode 100644
index 00000000..0f6a7b47
--- /dev/null
+++ b/mart/configs/callbacks/adversarial_training.yaml
@@ -0,0 +1,3 @@
+adversarial_training:
+  _target_: mart.callbacks.AdversarialTraining
+  adversary: ???
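For orientation, the intended wiring at this stage is to hand an adversary to the callback and let it perturb batches before the LitModular model consumes them. A minimal usage sketch (the adversary instance, `lit_modular_model`, and `datamodule` are placeholders, not part of this patch):

    import pytorch_lightning as pl
    from mart.callbacks import AdversarialTraining

    adversary = ...  # e.g. instantiated from mart/configs/attack/classification_eps1.75_fgsm.yaml
    trainer = pl.Trainer(callbacks=[AdversarialTraining(adversary=adversary)])
    trainer.fit(lit_modular_model, datamodule=datamodule)  # a mart.models.LitModular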
diff --git a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml index c254669b..ced39cd1 100644 --- a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml +++ b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml @@ -1,23 +1,36 @@ # @package _global_ defaults: - - CIFAR10_CNN - - /attack@model.modules.input_adv_training: classification_eps1.75_fgsm - - /attack@model.modules.input_adv_test: classification_eps2_pgd10_step1 + - /attack@callbacks.adversarial_training.adversary: classification_eps1.75_fgsm + - /attack@callbacks.adversarial_training.test_adversary: classification_eps2_pgd10_step1 + - override /datamodule: cifar10 + - override /model: classifier_cifar10_cnn + - override /metric: accuracy + - override /optimization: super_convergence + - override /callbacks: [model_checkpoint, lr_monitor, adversarial_training] task_name: "CIFAR10_CNN_Adv" tags: ["adv", "fat"] -model: - training_sequence: - seq005: input_adv_training +optimized_metric: "test_metrics/acc" + +callbacks: + model_checkpoint: + monitor: "validation_metrics/acc" + mode: "max" - seq010: - preprocessor: - _call_with_args_: ["input_adv_training"] +trainer: + # 50K training images, batch_size=128, drop_last, 15 epochs. + max_steps: 5850 + precision: 32 - test_sequence: - seq005: input_adv_test +datamodule: + ims_per_batch: 128 + world_size: 1 + num_workers: 8 - seq010: - preprocessor: ["input_adv_test"] +model: + optimizer: + lr: 0.1 + momentum: 0.9 + weight_decay: 1e-4 From 849f07136e1f31986f6e3ffb60bdfa3a2f952695 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 29 Jun 2023 13:42:56 -0700 Subject: [PATCH 002/126] Make callback work in CPU --- mart/attack/adversary.py | 23 +++++++++++++++-------- mart/callbacks/adversarial_training.py | 25 ++++++++++--------------- mart/configs/attack/adversary.yaml | 1 + 3 files changed, 26 insertions(+), 23 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 8c5513d2..376f8429 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -152,22 +152,21 @@ def configure_gradient_clipping( self.gradient_modifier(group["params"]) @silent() - def forward(self, *, model=None, sequence=None, **batch): + def forward(self, *, model=None, **batch): batch["model"] = model - batch["sequence"] = sequence - # Adversary lives within a sequence of model. To signal the adversary should attack, one - # must pass a model to attack when calling the adversary. Since we do not know where the - # Adversary lives inside the model, we also need the remaining sequence to be able to - # get a loss. - if model and sequence: + # Adversary can live within a sequence of model. To signal the adversary should + # attack, one must pass a model to attack when calling the adversary. Since we + # do not know where the Adversary lives inside the model, we also need the + # remaining sequence to be able to get a loss. + if model: self._attack(**batch) perturbation = self.perturber(**batch) input_adv = self.composer(perturbation, **batch) # Enforce constraints after the attack optimization ends. - if model and sequence: + if model: self.enforcer(input_adv, **batch) return input_adv @@ -211,3 +210,11 @@ def cpu(self): # This is a problem when this LightningModule has parameters, so we stop this from # happening by ignoring the call to cpu(). 
pass
+
+    def attack(adversary, model, **batch):
+        # Create attacked model where the adversary executes before the model
+        def attacked_model(*, input, **batch):
+            adv_input = adversary(input=input, **batch)
+            return model(input=adv_input, **batch)
+
+        return adversary(**batch, model=attacked_model)
diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py
index 7453e52e..875bea18 100644
--- a/mart/callbacks/adversarial_training.py
+++ b/mart/callbacks/adversarial_training.py
@@ -23,24 +23,19 @@ def __init__(
         self.validation_adversary = validation_adversary or adversary
         self.test_adversary = test_adversary or adversary
 
+    # FIXME: These are hacks. Ideally we would use on_after_batch_transfer but that isn't exposed to
+    # callbacks, only to LightningModules. But maybe we can forward those to callbacks?
     def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
         input, target = batch
-
-        # FIXME: We reach into LitModular here...how can we get rid of this?
-        assert isinstance(pl_module, LitModular)
-        model = pl_module.model
-        sequence = model._sequences["training"]
-
-        # FIXME: This doesn't work because sequence does not include the Adversary module. How can we fix that?
-        # Because this is a callback, we can safely assume the Adversary module should live before the model.
-        # We should be able to "manually" insert it into the sequence here.
-        out = self.train_adversary(input=input, target=target, model=model, sequence=sequence)
-        print("out =", out)
+        input_adv = self.train_adversary.attack(pl_module, input=input, target=target, step="training")
+        input[:] = input_adv  # XXX: hack
 
     def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
-        # FIXME: Copy on_train_batch_start
-        pass
+        input, target = batch
+        input_adv = self.validation_adversary.attack(pl_module, input=input, target=target, step="validation")
+        input[:] = input_adv  # XXX: hack
 
     def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
-        # FIXME: Copy on_train_batch_start
-        pass
+        input, target = batch
+        input_adv = self.test_adversary.attack(pl_module, input=input, target=target, step="test")
+        input[:] = input_adv  # XXX: hack
diff --git a/mart/configs/attack/adversary.yaml b/mart/configs/attack/adversary.yaml
index 40188b5a..bbf52433 100644
--- a/mart/configs/attack/adversary.yaml
+++ b/mart/configs/attack/adversary.yaml
@@ -2,6 +2,7 @@ defaults:
   - /callbacks@callbacks: [progress_bar]
 
 _target_: mart.attack.Adversary
+_convert_: all
 perturber: ???
 composer: ???
 optimizer:
From feffbe3d5d9e384d22aca0a30d8866b683ac72c0 Mon Sep 17 00:00:00 2001
From: Cory Cornelius
Date: Thu, 29 Jun 2023 13:45:30 -0700
Subject: [PATCH 003/126] style

---
 mart/callbacks/adversarial_training.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py
index 875bea18..b633b3c1 100644
--- a/mart/callbacks/adversarial_training.py
+++ b/mart/callbacks/adversarial_training.py
@@ -27,12 +27,16 @@ def __init__(
     # callbacks, only to LightningModules. But maybe we can forward those to callbacks?
    def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
         input, target = batch
-        input_adv = self.train_adversary.attack(pl_module, input=input, target=target, step="training")
+        input_adv = self.train_adversary.attack(
+            pl_module, input=input, target=target, step="training"
+        )
         input[:] = input_adv  # XXX: hack
 
     def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
         input, target = batch
-        input_adv = self.validation_adversary.attack(pl_module, input=input, target=target, step="validation")
+        input_adv = self.validation_adversary.attack(
+            pl_module, input=input, target=target, step="validation"
+        )
         input[:] = input_adv  # XXX: hack
 
     def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
From 5e6c272273c63bb7e1391e1ee2aaae03ab66c3ca Mon Sep 17 00:00:00 2001
From: Cory Cornelius
Date: Thu, 29 Jun 2023 16:58:04 -0700
Subject: [PATCH 004/126] Add and use on_after_batch_transfer hook

---
 mart/callbacks/adversarial_training.py | 38 ++++++++++++++------------
 mart/models/modular.py | 8 ++++++
 2 files changed, 29 insertions(+), 17 deletions(-)

diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py
index b633b3c1..de4392fd 100644
--- a/mart/callbacks/adversarial_training.py
+++ b/mart/callbacks/adversarial_training.py
@@ -23,23 +23,27 @@ def __init__(
         self.validation_adversary = validation_adversary or adversary
         self.test_adversary = test_adversary or adversary
 
-    # FIXME: These are hacks. Ideally we would use on_after_batch_transfer but that isn't exposed to
-    # callbacks, only to LightningModules. But maybe we can forward those to callbacks?
-    def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
+    def on_after_batch_transfer(self, trainer, pl_module, batch, dataloader_idx):
+        # FIXME: Would be nice if batch was a structured object (or a dict)
         input, target = batch
-        input_adv = self.train_adversary.attack(
-            pl_module, input=input, target=target, step="training"
-        )
-        input[:] = input_adv  # XXX: hack
 
-    def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
-        input, target = batch
-        input_adv = self.validation_adversary.attack(
-            pl_module, input=input, target=target, step="validation"
-        )
-        input[:] = input_adv  # XXX: hack
+        if trainer.training:
+            adversary = self.train_adversary
+            step = "training"  # FIXME: Use pl_module.training_step?
 
-    def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
-        input, target = batch
-        input_adv = self.test_adversary.attack(pl_module, input=input, target=target, step="test")
-        input[:] = input_adv  # XXX: hack
+        elif trainer.validating:
+            adversary = self.validation_adversary
+            step = "validation"  # FIXME: Use pl_module.validation_step?
+
+        elif trainer.testing:
+            adversary = self.test_adversary
+            step = "test"  # FIXME: Use pl_module.test_step?
+
+        else:
+            return batch
+
+        # Move adversary to same device as pl_module
+        adversary.to(pl_module.device)
+        input = adversary.attack(pl_module, input=input, target=target, step=step)
+
+        return [input, target]
diff --git a/mart/models/modular.py b/mart/models/modular.py
index a27c6867..461fd701 100644
--- a/mart/models/modular.py
+++ b/mart/models/modular.py
@@ -120,6 +120,14 @@ def configure_optimizers(self):
 
         return config
 
+    # FIXME: This is a hack to make callbacks have an on_after_batch_transfer hook.
+ def on_after_batch_transfer(self, batch, dataloader_idx): + for callback in self.trainer.callbacks: + if not hasattr(callback, "on_after_batch_transfer"): + continue + + return callback.on_after_batch_transfer(self.trainer, self, batch, dataloader_idx) + def forward(self, **kwargs): return self.model(**kwargs) From 59003c25cc6b1ea95e6ec7a03710a17c76bfdf68 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 30 Jun 2023 08:17:34 -0700 Subject: [PATCH 005/126] Override on_after_batch_transfer in callback setup --- mart/callbacks/adversarial_training.py | 19 ++++++++++++++++--- mart/models/modular.py | 8 -------- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index de4392fd..c8ac22fd 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -4,6 +4,8 @@ # SPDX-License-Identifier: BSD-3-Clause # +import types + from pytorch_lightning.callbacks import Callback from mart.models import LitModular @@ -23,21 +25,32 @@ def __init__( self.validation_adversary = validation_adversary or adversary self.test_adversary = test_adversary or adversary - def on_after_batch_transfer(self, trainer, pl_module, batch, dataloader_idx): + def setup(self, trainer, pl_module, stage=None): + pl_module.on_after_batch_transfer = types.MethodType( + self.on_after_batch_transfer, pl_module + ) + + def teardown(self, trainer, pl_module, start=None): + # FIXME: remove on_after_batch_transfer + pass + + def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): # FIXME: Would be nice if batch was a structured object (or a dict) input, target = batch + trainer = pl_module.trainer + if trainer.training: adversary = self.train_adversary step = "training" # FIXME: Use pl_module.training_step? elif trainer.validating: adversary = self.validation_adversary - step = "validation" # FIXME: Use pl_module.validation_step? + step = "validation" # FIXME: Use pl_module.training_step? elif trainer.testing: adversary = self.test_adversary - step = "test" # FIXME: Use pl_module.test_step? + step = "test" # FIXME: Use pl_module.training_step? else: return batch diff --git a/mart/models/modular.py b/mart/models/modular.py index 461fd701..a27c6867 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -120,14 +120,6 @@ def configure_optimizers(self): return config - # FIXME: This is a hack to make callbacks have an on_after_batch_transfer hook. 
- def on_after_batch_transfer(self, batch, dataloader_idx): - for callback in self.trainer.callbacks: - if not hasattr(callback, "on_after_batch_transfer"): - continue - - return callback.on_after_batch_transfer(self.trainer, self, batch, dataloader_idx) - def forward(self, **kwargs): return self.model(**kwargs) From 8218f6516d5c8683a6b0c12d044d171b43bcfbbb Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 30 Jun 2023 09:12:58 -0700 Subject: [PATCH 006/126] Restore on_after_batch_transfer hook in teardown --- mart/callbacks/adversarial_training.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index c8ac22fd..1dfe741e 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -26,13 +26,13 @@ def __init__( self.test_adversary = test_adversary or adversary def setup(self, trainer, pl_module, stage=None): + self._on_after_batch_transfer = pl_module.on_after_batch_transfer pl_module.on_after_batch_transfer = types.MethodType( self.on_after_batch_transfer, pl_module ) - def teardown(self, trainer, pl_module, start=None): - # FIXME: remove on_after_batch_transfer - pass + def teardown(self, trainer, pl_module, stage=None): + pl_module.on_after_batch_transfer = self._on_after_batch_transfer def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): # FIXME: Would be nice if batch was a structured object (or a dict) From 226664d216ee608c29f9c7697a88ac4bf98ecb67 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 30 Jun 2023 09:13:26 -0700 Subject: [PATCH 007/126] cleanup --- mart/callbacks/adversarial_training.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 1dfe741e..f19cbb04 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -39,19 +39,15 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): input, target = batch trainer = pl_module.trainer - if trainer.training: adversary = self.train_adversary - step = "training" # FIXME: Use pl_module.training_step? - + step = "training" elif trainer.validating: adversary = self.validation_adversary - step = "validation" # FIXME: Use pl_module.training_step? - + step = "validation" elif trainer.testing: adversary = self.test_adversary - step = "test" # FIXME: Use pl_module.training_step? 
- + step = "test" else: return batch From d1187c90fc8a791d29d82057bad35c4f64a6030c Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 30 Jun 2023 09:14:04 -0700 Subject: [PATCH 008/126] Call original on_after_batch_transfer hook --- mart/callbacks/adversarial_training.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index f19cbb04..5b6fc430 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -35,6 +35,8 @@ def teardown(self, trainer, pl_module, stage=None): pl_module.on_after_batch_transfer = self._on_after_batch_transfer def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): + batch = self._on_after_batch_transfer(batch, dataloader_idx) + # FIXME: Would be nice if batch was a structured object (or a dict) input, target = batch From 9850a7862897944381d7c647537920b422674666 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 30 Jun 2023 09:18:21 -0700 Subject: [PATCH 009/126] Move Adversary.attack into callback --- mart/attack/adversary.py | 8 -------- mart/callbacks/adversarial_training.py | 20 ++++++++++++++------ 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 376f8429..ff822673 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -210,11 +210,3 @@ def cpu(self): # This is a problem when this LightningModule has parameters, so we stop this from # happening by ignoring the call to cpu(). pass - - def attack(adversary, model, **batch): - # Create attacked model where the adversary executes before the model - def attacked_model(*, input, **batch): - adv_input = adversary(input=input, **batch) - return model(input=adv_input, **batch) - - return adversary(**batch, model=attacked_model) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 5b6fc430..65b90b98 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -37,9 +37,7 @@ def teardown(self, trainer, pl_module, stage=None): def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): batch = self._on_after_batch_transfer(batch, dataloader_idx) - # FIXME: Would be nice if batch was a structured object (or a dict) - input, target = batch - + # FIXME: Remove use of step trainer = pl_module.trainer if trainer.training: adversary = self.train_adversary @@ -53,8 +51,18 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): else: return batch - # Move adversary to same device as pl_module + # Create attacked model where the adversary executes before the model + # FIXME: Should we just use pl_module.training_step? Ideally we would not decompose batch + # and instead pass batch directly to the underlying pl_module since it knows how to + # interpret batch. + def attacked_model(input, **batch): + input_adv = adversary(input=input, **batch) + return pl_module(input=input_adv, **batch) + + # Move adversary to same device as pl_module and run attack + # FIXME: Directly pass batch instead of assuming it has a structure? 
+ input, target = batch adversary.to(pl_module.device) - input = adversary.attack(pl_module, input=input, target=target, step=step) + input_adv = adversary(input=input, target=target, step=step, model=attacked_model) - return [input, target] + return [input_adv, target] From d71cb43591e813d687029c1432809b8ce182f4de Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 10:11:15 -0700 Subject: [PATCH 010/126] Use training sequence by default. --- mart/models/modular.py | 1 + mart/nn/nn.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/mart/models/modular.py b/mart/models/modular.py index 192204a2..e814fee9 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -62,6 +62,7 @@ def __init__( "training": training_sequence, "validation": validation_sequence, "test": test_sequence, + None: training_sequence, # use training sequence with losses by default. } self.model = SequentialDict(modules, sequences) diff --git a/mart/nn/nn.py b/mart/nn/nn.py index 02113899..67d82c1b 100644 --- a/mart/nn/nn.py +++ b/mart/nn/nn.py @@ -57,7 +57,8 @@ def __init__(self, modules, sequences=None): self._sequences = { name: self.parse_sequence(sequence) for name, sequence in sequences.items() } - self._sequences[None] = self + # We intend to make training sequence as the default sequence. + # self._sequences[None] = self def parse_sequence(self, sequence): if sequence is None: From 83f017e4c0fdf9c4acddd664214376d42837b7c8 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 10:11:54 -0700 Subject: [PATCH 011/126] Assume a simple model interface in adversary. --- mart/attack/adversary.py | 60 +++++++++++++++----------- mart/callbacks/adversarial_training.py | 25 ++++------- 2 files changed, 43 insertions(+), 42 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 4a884c1c..c6d05bb7 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -101,6 +101,9 @@ def __init__( assert self._attacker.max_epochs == 0 assert self._attacker.limit_train_batches > 0 + # TODO: Make this configurable. E.g. [0,1] <-> [0,255] + self.transform = self.untransform = lambda x: x + @property def perturber(self) -> Perturber: # Hide the perturber module in a list, so that perturbation is not exported as a parameter in the model checkpoint, @@ -110,13 +113,26 @@ def perturber(self) -> Perturber: def configure_optimizers(self): return self.optimizer(self.perturber) + def get_input_adv(self, *, input, target, untransform=True): + perturbation = self.perturber(input=input, target=target) + input_adv = self.composer(perturbation, input=input, target=target) + + if untransform: + input_adv = self.untransform(input_adv) + + return input_adv + def training_step(self, batch, batch_idx): + # TODO: We shouldn't need to copy because it is never changed? # copy batch since we modify it and it is used internally - batch = batch.copy() + # batch = batch.copy() + + # Compose un-transformed input_adv from batch["input"], then give to model for updated gain. + # Note: Only input and target are required by perturber.projector and composer. + input_adv = self.get_input_adv(**batch) - # We need to evaluate the perturbation against the whole model, so call it normally to get a gain. - model = batch.pop("model") - outputs = model(**batch) + # A model that returns output dictionary. + outputs = self.model(input=input_adv, target=batch["target"]) # FIXME: This should really be just `return outputs`. But this might require a new sequence? 
# FIXME: Everything below here should live in the model as modules. @@ -150,36 +166,30 @@ def configure_gradient_clipping( self.gradient_modifier(group["params"]) @silent() - def forward(self, *, model=None, **batch): - batch["model"] = model - - # Adversary can live within a sequence of model. To signal the adversary should - # attack, one must pass a model to attack when calling the adversary. Since we - # do not know where the Adversary lives inside the model, we also need the - # remaining sequence to be able to get a loss. - if model: - self._attack(**batch) + def forward(self, *, input, target, model): + # What we need is a frozen model that returns (a dictionary of) logits, or losses. + self.model = model - perturbation = self.perturber(**batch) - input_adv = self.composer(perturbation, **batch) - - # Enforce constraints after the attack optimization ends. - if model: - self.enforcer(input_adv, **batch) - - return input_adv - - def _attack(self, *, input, **batch): - batch["input"] = input + # Transform input so that it's easier to work with by adversary. + input_transformed = self.transform(input) + batch = {"input": input_transformed, "target": target} # Configure and reset perturbation for current inputs - self.perturber.configure_perturbation(input) + self.perturber.configure_perturbation(input_transformed) # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. self.attacker.fit_loop.max_epochs += 1 self.attacker.fit(self, train_dataloaders=cycle([batch])) + # Get the transformed input_adv for enforcer checking. + input_adv_transformed = self.get_input_adv(untransform=False, **batch) + self.enforcer(input_adv_transformed, **batch) + # Un-transform to the same format as input. + input_adv = self.untransform(input_adv_transformed) + + return input_adv + @property def attacker(self): if not isinstance(self._attacker, partial): diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 65b90b98..4ef44b16 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -6,9 +6,7 @@ import types -from pytorch_lightning.callbacks import Callback - -from mart.models import LitModular +from lightning.pytorch.callbacks import Callback __all__ = ["AdversarialTraining"] @@ -16,6 +14,7 @@ class AdversarialTraining(Callback): """Perturbs inputs to be adversarial.""" + # TODO: training/validation/test or train/val/test def __init__( self, adversary=None, train_adversary=None, validation_adversary=None, test_adversary=None ): @@ -37,32 +36,24 @@ def teardown(self, trainer, pl_module, stage=None): def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): batch = self._on_after_batch_transfer(batch, dataloader_idx) - # FIXME: Remove use of step trainer = pl_module.trainer if trainer.training: adversary = self.train_adversary - step = "training" elif trainer.validating: adversary = self.validation_adversary - step = "validation" elif trainer.testing: adversary = self.test_adversary - step = "test" else: return batch - # Create attacked model where the adversary executes before the model - # FIXME: Should we just use pl_module.training_step? Ideally we would not decompose batch - # and instead pass batch directly to the underlying pl_module since it knows how to - # interpret batch. 
- def attacked_model(input, **batch): - input_adv = adversary(input=input, **batch) - return pl_module(input=input_adv, **batch) - # Move adversary to same device as pl_module and run attack + adversary.to(pl_module.device) + # FIXME: Directly pass batch instead of assuming it has a structure? input, target = batch - adversary.to(pl_module.device) - input_adv = adversary(input=input, target=target, step=step, model=attacked_model) + input_adv = adversary(input=input, target=target, model=pl_module) + + # Replace the adversarial trainer with the original trainer. + pl_module.trainer = trainer return [input_adv, target] From 88a807be66bdb208b09ad9bea8cc5e7f5c756abb Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 10:12:13 -0700 Subject: [PATCH 012/126] Update configs. --- mart/configs/callbacks/adversarial_training.yaml | 2 +- mart/configs/experiment/CIFAR10_CNN_Adv.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mart/configs/callbacks/adversarial_training.yaml b/mart/configs/callbacks/adversarial_training.yaml index 0f6a7b47..232e2278 100644 --- a/mart/configs/callbacks/adversarial_training.yaml +++ b/mart/configs/callbacks/adversarial_training.yaml @@ -1,3 +1,3 @@ adversarial_training: _target_: mart.callbacks.AdversarialTraining - adversary: ??? + # adversary: ??? diff --git a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml index ced39cd1..3d4b1c12 100644 --- a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml +++ b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml @@ -1,7 +1,7 @@ # @package _global_ defaults: - - /attack@callbacks.adversarial_training.adversary: classification_eps1.75_fgsm + - /attack@callbacks.adversarial_training.train_adversary: classification_eps1.75_fgsm - /attack@callbacks.adversarial_training.test_adversary: classification_eps2_pgd10_step1 - override /datamodule: cifar10 - override /model: classifier_cifar10_cnn From 9dfd7f6c413a9c0aafac42c41e0d47599bd95b6e Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 10:44:02 -0700 Subject: [PATCH 013/126] Log original gain on progress bar. --- mart/attack/adversary.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index c6d05bb7..650af474 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -139,6 +139,9 @@ def training_step(self, batch, batch_idx): # Use CallWith to dispatch **outputs. gain = self.gain_fn(**outputs) + # Log original gain as a metric for LR scheduler to monitor, and show gain on progress bar. + self.log("gain", gain.sum(), prog_bar=True) + # objective_fn is optional, because adversaries may never reach their objective. if self.objective_fn is not None: found = self.objective_fn(**outputs) @@ -147,13 +150,7 @@ def training_step(self, batch, batch_idx): if len(gain.shape) > 0: gain = gain[~found] - if len(gain.shape) > 0: - gain = gain.sum() - - # Log gain as a metric for LR scheduler to monitor, and show gain on progress bar. - self.log("gain", gain, prog_bar=True) - - return gain + return gain.sum() def configure_gradient_clipping( self, optimizer, gradient_clip_val=None, gradient_clip_algorithm=None From 2cd7ca86bf1f8886b6f0e110347696d5fd908736 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 12:07:45 -0700 Subject: [PATCH 014/126] Hide model in Adversary so that it's not tampered. 
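Assigning the wrapped model to a regular attribute of the Adversary (itself a LightningModule) would register it as a submodule, letting the inner attack Trainer move it between devices or sweep it into checkpoints. A plain Python list is invisible to nn.Module attribute registration, which is the mechanism this change relies on. A rough illustration (not code from this patch):

    import torch.nn as nn

    m = nn.Module()
    m.child = nn.Linear(2, 2)     # registered: appears in m.modules() and the state_dict
    m.hidden = [nn.Linear(2, 2)]  # hidden in a list: ignored by nn.Module.__setattr__
    assert len(list(m.modules())) == 2  # only m itself and m.child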
--- mart/attack/adversary.py | 7 ++++++- mart/callbacks/adversarial_training.py | 5 ++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 650af474..86740653 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -162,10 +162,15 @@ def configure_gradient_clipping( for group in optimizer.param_groups: self.gradient_modifier(group["params"]) + @property + def model(self): + # Hide model in a list, so that it won't be tampered by the inner Trainer. + return self._model[0] + @silent() def forward(self, *, input, target, model): # What we need is a frozen model that returns (a dictionary of) logits, or losses. - self.model = model + self._model = [model] # Transform input so that it's easier to work with by adversary. input_transformed = self.transform(input) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 4ef44b16..a68ca277 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -51,9 +51,8 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): # FIXME: Directly pass batch instead of assuming it has a structure? input, target = batch - input_adv = adversary(input=input, target=target, model=pl_module) - # Replace the adversarial trainer with the original trainer. - pl_module.trainer = trainer + # TODO: We may need to do model.eval() if there's BN-like layers in the model. + input_adv = adversary(input=input, target=target, model=pl_module) return [input_adv, target] From e64247428fb3ca7d24cff61e53db0196f2896509 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 14:58:27 -0700 Subject: [PATCH 015/126] Skip adversary if not defined. --- mart/callbacks/adversarial_training.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index a68ca277..253b930f 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -36,6 +36,8 @@ def teardown(self, trainer, pl_module, stage=None): def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): batch = self._on_after_batch_transfer(batch, dataloader_idx) + adversary = None + trainer = pl_module.trainer if trainer.training: adversary = self.train_adversary @@ -43,7 +45,9 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): adversary = self.validation_adversary elif trainer.testing: adversary = self.test_adversary - else: + + # Skip if adversary is not defined for the phase train/validation/test. + if adversary is None: return batch # Move adversary to same device as pl_module and run attack From f222aefe2f22ab94e170e1345fd4d019dfa82eca Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 14:58:47 -0700 Subject: [PATCH 016/126] Update adversarial experiment on COCO. 
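With the adversary now living in a callback, this experiment no longer splices an input_adv_test module into the model's test_sequence; it binds the mask adversary as the callback's test_adversary instead. Because train_adversary and validation_adversary remain unset, the skip added in PATCH 015 leaves training and validation batches untouched. Roughly equivalent imperative wiring (the mask_adversary instance is a placeholder):

    from mart.callbacks import AdversarialTraining

    # Only test batches are attacked; train/validation adversaries stay None,
    # so on_after_batch_transfer returns those batches unchanged.
    adv_callback = AdversarialTraining(test_adversary=mask_adversary)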
--- .../experiment/COCO_TorchvisionFasterRCNN_Adv.yaml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml index 398394bf..a1860696 100644 --- a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml +++ b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml @@ -2,15 +2,9 @@ defaults: - COCO_TorchvisionFasterRCNN - - /attack@model.modules.input_adv_test: object_detection_mask_adversary + - /attack@callbacks.adversarial_training.test_adversary: object_detection_mask_adversary - override /datamodule: coco_perturbable_mask + - override /callbacks: [model_checkpoint, lr_monitor, adversarial_training] task_name: "COCO_TorchvisionFasterRCNN_Adv" tags: ["adv"] - -model: - test_sequence: - seq005: input_adv_test - - seq010: - preprocessor: ["input_adv_test"] From 1d9b18211e8a9906d8e43ae906d5d9e3cb6f9278 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 14:59:02 -0700 Subject: [PATCH 017/126] Fix test. --- tests/test_experiments.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_experiments.py b/tests/test_experiments.py index 65b27a5d..404a98ef 100644 --- a/tests/test_experiments.py +++ b/tests/test_experiments.py @@ -80,7 +80,7 @@ def test_cifar10_cnn_adv_experiment(classification_cfg, tmp_path): "-m", "experiment=CIFAR10_CNN_Adv", "hydra.sweep.dir=" + str(tmp_path), - "model.modules.input_adv_test.max_iters=10", + "callbacks.adversarial_training.test_adversary.max_iters=10", "optimized_metric=training_metrics/acc", "++datamodule.train_dataset.image_size=[3,32,32]", "++datamodule.train_dataset.num_classes=10", From 45b5bf0d52aff0b3b56038091637238b1de5ad9f Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 15:22:43 -0700 Subject: [PATCH 018/126] Hide model in adversar's batch. --- mart/attack/adversary.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 86740653..f5c34eeb 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -127,12 +127,16 @@ def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally # batch = batch.copy() + input = batch["input"] + target = batch["target"] + # What we need is a frozen model that returns (a dictionary of) logits, or losses. + model = batch["model"] + # Compose un-transformed input_adv from batch["input"], then give to model for updated gain. - # Note: Only input and target are required by perturber.projector and composer. - input_adv = self.get_input_adv(**batch) + input_adv = self.get_input_adv(input=input, target=target) # A model that returns output dictionary. - outputs = self.model(input=input_adv, target=batch["target"]) + outputs = model(input=input_adv, target=target) # FIXME: This should really be just `return outputs`. But this might require a new sequence? # FIXME: Everything below here should live in the model as modules. @@ -162,19 +166,12 @@ def configure_gradient_clipping( for group in optimizer.param_groups: self.gradient_modifier(group["params"]) - @property - def model(self): - # Hide model in a list, so that it won't be tampered by the inner Trainer. - return self._model[0] - @silent() def forward(self, *, input, target, model): - # What we need is a frozen model that returns (a dictionary of) logits, or losses. 
- self._model = [model] - # Transform input so that it's easier to work with by adversary. input_transformed = self.transform(input) - batch = {"input": input_transformed, "target": target} + + batch = {"input": input_transformed, "target": target, "model": model} # Configure and reset perturbation for current inputs self.perturber.configure_perturbation(input_transformed) @@ -185,8 +182,9 @@ def forward(self, *, input, target, model): self.attacker.fit(self, train_dataloaders=cycle([batch])) # Get the transformed input_adv for enforcer checking. - input_adv_transformed = self.get_input_adv(untransform=False, **batch) - self.enforcer(input_adv_transformed, **batch) + input_adv_transformed = self.get_input_adv(input=input, target=target, untransform=False) + self.enforcer(input_adv_transformed, input=input, target=target) + # Un-transform to the same format as input. input_adv = self.untransform(input_adv_transformed) From 9a2531f14a3a29c8dcb5ba953eb22480fab4f1da Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 15:52:23 -0700 Subject: [PATCH 019/126] Fix test. --- tests/test_adversary.py | 73 ++++++++++------------------------------- 1 file changed, 17 insertions(+), 56 deletions(-) diff --git a/tests/test_adversary.py b/tests/test_adversary.py index 448baa48..0c363902 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -17,35 +17,6 @@ from mart.attack.gradient_modifier import Sign -def test_adversary(input_data, target_data, perturbation): - perturber = Mock(spec=Perturber, return_value=perturbation) - composer = mart.attack.composer.Additive() - gain = Mock() - enforcer = Mock() - attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) - - adversary = Adversary( - perturber=perturber, - composer=composer, - optimizer=None, - gain=gain, - enforcer=enforcer, - attacker=attacker, - ) - - output_data = adversary(input=input_data, target=target_data) - - # The enforcer and attacker should only be called when model is not None. - enforcer.assert_not_called() - attacker.fit.assert_not_called() - assert attacker.fit_loop.max_epochs == 0 - - perturber.assert_called_once() - gain.assert_not_called() - - torch.testing.assert_close(output_data, input_data + perturbation) - - def test_with_model(input_data, target_data, perturbation): perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() @@ -53,7 +24,6 @@ def test_with_model(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() - sequence = Mock() adversary = Adversary( perturber=perturber, @@ -64,7 +34,7 @@ def test_with_model(input_data, target_data, perturbation): attacker=attacker, ) - output_data = adversary(input=input_data, target=target_data, model=model, sequence=sequence) + output_data = adversary(input=input_data, target=target_data, model=model) # The enforcer is only called when model is not None. 
enforcer.assert_called_once() @@ -78,7 +48,7 @@ def test_with_model(input_data, target_data, perturbation): torch.testing.assert_close(output_data, input_data + perturbation) -def test_hidden_params(input_data, target_data, perturbation): +def test_hidden_params(): initializer = Mock() composer = mart.attack.composer.Additive() projector = Mock() @@ -88,8 +58,6 @@ def test_hidden_params(input_data, target_data, perturbation): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) - model = Mock() - sequence = Mock() adversary = Adversary( perturber=perturber, @@ -100,8 +68,6 @@ def test_hidden_params(input_data, target_data, perturbation): attacker=attacker, ) - # output_data = adversary(input=input_data, target=target_data, model=model, sequence=sequence) - # Adversarial perturbation should not be updated by a regular training optimizer. params = [p for p in adversary.parameters()] assert len(params) == 0 @@ -122,7 +88,6 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() - sequence = Mock() adversary = Adversary( perturber=perturber, @@ -133,7 +98,7 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): attacker=attacker, ) - output_data = adversary(input=input_data, target=target_data, model=model, sequence=sequence) + output_data = adversary(input=input_data, target=target_data, model=model) # Adversary will have no parameter even after forward is called, because we hide Perturber in a list. params = [p for p in adversary.parameters()] @@ -180,7 +145,6 @@ def test_perturbation(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() - sequence = Mock() adversary = Adversary( perturber=perturber, @@ -191,15 +155,15 @@ def test_perturbation(input_data, target_data, perturbation): attacker=attacker, ) - _ = adversary(input=input_data, target=target_data, model=model, sequence=sequence) - output_data = adversary(input=input_data, target=target_data) + output_data = adversary(input=input_data, target=target_data, model=model) # The enforcer is only called when model is not None. enforcer.assert_called_once() attacker.fit.assert_called_once() - # Once with model and sequence and once without - assert perturber.call_count == 2 + # Perturber is called once for generating initial input_adv. + # The fit() doesn't run because max_epochs=0. 
+ assert perturber.call_count == 1 torch.testing.assert_close(output_data, input_data + perturbation) @@ -236,20 +200,17 @@ def initializer(x): max_iters=1, ) - def model(input, target, model=None, **kwargs): - return {"logits": adversary(input=input, target=target)} - - sequence = Mock() + def model(input, target): + return {"logits": input} - adversary(input=input_data, target=target_data, model=model, sequence=sequence) - input_adv = adversary(input=input_data, target=target_data) + input_adv = adversary(input=input_data, target=target_data, model=model) perturbation = input_data - input_adv torch.testing.assert_close(perturbation.unique(), torch.Tensor([-1, 0, 1])) -def test_configure_optimizers(input_data, target_data): +def test_configure_optimizers(): perturber = Mock() composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) @@ -268,8 +229,8 @@ def test_configure_optimizers(input_data, target_data): gain.assert_not_called() -def test_training_step(input_data, target_data): - perturber = Mock() +def test_training_step(input_data, target_data, perturbation): + perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor(1337)) @@ -290,8 +251,8 @@ def test_training_step(input_data, target_data): assert output == 1337 -def test_training_step_with_many_gain(input_data, target_data): - perturber = Mock() +def test_training_step_with_many_gain(input_data, target_data, perturbation): + perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor([1234, 5678])) @@ -311,8 +272,8 @@ def test_training_step_with_many_gain(input_data, target_data): assert output == 1234 + 5678 -def test_training_step_with_objective(input_data, target_data): - perturber = Mock() +def test_training_step_with_objective(input_data, target_data, perturbation): + perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor([1234, 5678])) From 704fc018a144b9c207ec985acb0d54204b08d8fe Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 18:28:36 -0700 Subject: [PATCH 020/126] Hide transform in untransform in this PR. --- mart/attack/adversary.py | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index f5c34eeb..e0106689 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -101,9 +101,6 @@ def __init__( assert self._attacker.max_epochs == 0 assert self._attacker.limit_train_batches > 0 - # TODO: Make this configurable. E.g. 
[0,1] <-> [0,255] - self.transform = self.untransform = lambda x: x - @property def perturber(self) -> Perturber: # Hide the perturber module in a list, so that perturbation is not exported as a parameter in the model checkpoint, @@ -113,13 +110,9 @@ def perturber(self) -> Perturber: def configure_optimizers(self): return self.optimizer(self.perturber) - def get_input_adv(self, *, input, target, untransform=True): + def get_input_adv(self, *, input, target): perturbation = self.perturber(input=input, target=target) input_adv = self.composer(perturbation, input=input, target=target) - - if untransform: - input_adv = self.untransform(input_adv) - return input_adv def training_step(self, batch, batch_idx): @@ -132,7 +125,7 @@ def training_step(self, batch, batch_idx): # What we need is a frozen model that returns (a dictionary of) logits, or losses. model = batch["model"] - # Compose un-transformed input_adv from batch["input"], then give to model for updated gain. + # Compose input_adv from input, then give to model for updated gain. input_adv = self.get_input_adv(input=input, target=target) # A model that returns output dictionary. @@ -168,13 +161,10 @@ def configure_gradient_clipping( @silent() def forward(self, *, input, target, model): - # Transform input so that it's easier to work with by adversary. - input_transformed = self.transform(input) - - batch = {"input": input_transformed, "target": target, "model": model} + batch = {"input": input, "target": target, "model": model} # Configure and reset perturbation for current inputs - self.perturber.configure_perturbation(input_transformed) + self.perturber.configure_perturbation(input) # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. @@ -182,11 +172,8 @@ def forward(self, *, input, target, model): self.attacker.fit(self, train_dataloaders=cycle([batch])) # Get the transformed input_adv for enforcer checking. - input_adv_transformed = self.get_input_adv(input=input, target=target, untransform=False) - self.enforcer(input_adv_transformed, input=input, target=target) - - # Un-transform to the same format as input. - input_adv = self.untransform(input_adv_transformed) + input_adv = self.get_input_adv(input=input, target=target) + self.enforcer(input_adv, input=input, target=target) return input_adv From 5bb36e70ea85b12502413bdd342193c58cc7f387 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 18:39:59 -0700 Subject: [PATCH 021/126] Revert changes in gain logging. --- mart/attack/adversary.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index e0106689..0669d95e 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -136,9 +136,6 @@ def training_step(self, batch, batch_idx): # Use CallWith to dispatch **outputs. gain = self.gain_fn(**outputs) - # Log original gain as a metric for LR scheduler to monitor, and show gain on progress bar. - self.log("gain", gain.sum(), prog_bar=True) - # objective_fn is optional, because adversaries may never reach their objective. if self.objective_fn is not None: found = self.objective_fn(**outputs) @@ -147,7 +144,13 @@ def training_step(self, batch, batch_idx): if len(gain.shape) > 0: gain = gain[~found] - return gain.sum() + if len(gain.shape) > 0: + gain = gain.sum() + + # Log gain as a metric for LR scheduler to monitor, and show gain on progress bar. 
+ self.log("gain", gain, prog_bar=True) + + return gain def configure_gradient_clipping( self, optimizer, gradient_clip_val=None, gradient_clip_algorithm=None From c85182dda0b6b77461efe1c8d6e37c2095e1db76 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 17 Jul 2023 22:07:44 -0700 Subject: [PATCH 022/126] Add LitModular.attack_step() for adversary to run model.forward() to get gradients. --- mart/callbacks/adversarial_training.py | 105 ++++++++++++++++++++++++- mart/models/modular.py | 7 +- mart/nn/nn.py | 3 +- 3 files changed, 109 insertions(+), 6 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 253b930f..3947ccdc 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -4,10 +4,15 @@ # SPDX-License-Identifier: BSD-3-Clause # +from __future__ import annotations + import types +from typing import Callable from lightning.pytorch.callbacks import Callback +from ..utils import MonkeyPatch + __all__ = ["AdversarialTraining"] @@ -16,14 +21,30 @@ class AdversarialTraining(Callback): # TODO: training/validation/test or train/val/test def __init__( - self, adversary=None, train_adversary=None, validation_adversary=None, test_adversary=None + self, + adversary: Callable = None, + train_adversary: Callable = None, + validation_adversary: Callable = None, + test_adversary: Callable = None, + batch_input_key: str | int = 0, ): + """AdversaryConnector. + + Args: + adversary (Callable, optional): _description_. Defaults to None. + train_adversary (Callable, optional): _description_. Defaults to None. + validation_adversary (Callable, optional): _description_. Defaults to None. + test_adversary (Callable, optional): _description_. Defaults to None. + batch_input_key (str | int, optional): Input locator in a batch. Defaults to 0. + """ adversary = adversary or train_adversary self.train_adversary = train_adversary or adversary self.validation_adversary = validation_adversary or adversary self.test_adversary = test_adversary or adversary + self.batch_input_key = batch_input_key + def setup(self, trainer, pl_module, stage=None): self._on_after_batch_transfer = pl_module.on_after_batch_transfer pl_module.on_after_batch_transfer = types.MethodType( @@ -33,6 +54,81 @@ def setup(self, trainer, pl_module, stage=None): def teardown(self, trainer, pl_module, stage=None): pl_module.on_after_batch_transfer = self._on_after_batch_transfer + def get_input_target_batcher(self, batch_orig): + if isinstance(batch_orig, tuple): + # Convert tuple to list + batch = list(batch_orig).copy() + else: + batch = batch_orig.copy() + + batch_input_key = self.batch_input_key + + # pop() works for both list and dict. 
+        input = batch.pop(batch_input_key)
+
+        if isinstance(batch, list) and len(batch) == 1:
+            target = batch[0]
+
+            def batch_constructor(input, target):
+                batch = [target]
+                batch.insert(batch_input_key, input)
+                return batch
+
+        elif isinstance(batch, list) and len(batch) > 2:
+            target = batch.copy()
+
+            def batch_constructor(input, target):
+                batch = target.copy()
+                batch.insert(batch_input_key, input)
+                return batch
+
+        elif isinstance(batch, dict) and "target" in batch:
+            target = batch["target"]
+
+            def batch_constructor(input, target):
+                return {batch_input_key: input, "target": target}
+
+        elif isinstance(batch, dict) and "target" not in batch:
+            # Example in anomalib: dict_keys(['image_path', 'label', 'image', 'mask_path', 'mask'])
+            # image: NCHW; label: N,
+            target = batch
+
+            def batch_constructor(input, target):
+                # Besides input and target, add others back to batch.
+                return target | {batch_input_key: input}
+
+        else:
+            raise NotImplementedError()
+
+        return input, target, batch_constructor
+
+    def wrap_model(self, model, batch_constructor, dataloader_idx):
+        """Make a model, such that output = model(input, target)."""
+
+        # Consume dataloader_idx
+        if hasattr(model, "attack_step"):
+
+            def model_forward(batch):
+                output = model.attack_step(batch, dataloader_idx)
+                return output
+
+        elif hasattr(model, "training_step"):
+            # Monkey-patch model.log to avoid spamming.
+            @MonkeyPatch(model, "log", lambda *args, **kwargs: None)
+            def model_forward(batch):
+                output = model.training_step(batch, dataloader_idx)
+                return output
+
+        else:
+            model_forward = model
+
+        def wrapped_model(*, input, target):
+            batch = batch_constructor(input, target)
+            output = model_forward(batch)
+            return output
+
+        return wrapped_model
+
     def on_after_batch_transfer(self, pl_module, batch, dataloader_idx):
         batch = self._on_after_batch_transfer(batch, dataloader_idx)
 
         adversary = None
 
         trainer = pl_module.trainer
         if trainer.training:
             adversary = self.train_adversary
         elif trainer.validating:
             adversary = self.validation_adversary
         elif trainer.testing:
             adversary = self.test_adversary
 
         # Skip if adversary is not defined for the phase train/validation/test.
         if adversary is None:
             return batch
 
         # Move adversary to same device as pl_module and run attack
         adversary.to(pl_module.device)
 
         # FIXME: Directly pass batch instead of assuming it has a structure?
-        input, target = batch
+        input, target, batch_constructor = self.get_input_target_batcher(batch)
+
+        # We also need to construct a batch for model during attack iterations.
+        model = self.wrap_model(pl_module, batch_constructor, dataloader_idx)
 
         # TODO: We may need to do model.eval() if there's BN-like layers in the model.
-        input_adv = adversary(input=input, target=target, model=pl_module)
+        input_adv = adversary(input=input, target=target, model=model)
 
         return [input_adv, target]
diff --git a/mart/models/modular.py b/mart/models/modular.py
index e814fee9..c63c9fd7 100644
--- a/mart/models/modular.py
+++ b/mart/models/modular.py
@@ -62,7 +62,6 @@ def __init__(
             "training": training_sequence,
             "validation": validation_sequence,
             "test": test_sequence,
-            None: training_sequence,  # use training sequence with losses by default.
         }
 
         self.model = SequentialDict(modules, sequences)
@@ -127,6 +126,12 @@ def configure_optimizers(self):
     def forward(self, **kwargs):
         return self.model(**kwargs)
 
+    def attack_step(self, batch, batch_idx):
+        # Use the training sequence in attack.
+ input, target = batch + output = self(input=input, target=target, model=self.model, step="training") + return output + # # Training # diff --git a/mart/nn/nn.py b/mart/nn/nn.py index 67d82c1b..02113899 100644 --- a/mart/nn/nn.py +++ b/mart/nn/nn.py @@ -57,8 +57,7 @@ def __init__(self, modules, sequences=None): self._sequences = { name: self.parse_sequence(sequence) for name, sequence in sequences.items() } - # We intend to make training sequence as the default sequence. - # self._sequences[None] = self + self._sequences[None] = self def parse_sequence(self, sequence): if sequence is None: From 558fc7b7b0782f6224df1d5cfb183561fb133137 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 18 Jul 2023 17:15:58 -0700 Subject: [PATCH 023/126] Add batch_converter in Adversary that supports transform/untransform. --- mart/attack/__init__.py | 1 + mart/attack/adversary.py | 35 +++-- mart/attack/batch_converter.py | 128 ++++++++++++++++++ mart/callbacks/adversarial_training.py | 72 ++-------- mart/configs/attack/adversary.yaml | 1 + mart/configs/attack/batch_converter/dict.yaml | 2 + mart/configs/attack/batch_converter/list.yaml | 2 + .../attack/batch_converter/tensor.yaml | 1 + .../configs/attack/batch_converter/tuple.yaml | 2 + .../attack/classification_eps1.75_fgsm.yaml | 1 + .../classification_eps2_pgd10_step1.yaml | 1 + .../classification_eps8_pgd10_step1.yaml | 1 + .../object_detection_mask_adversary.yaml | 1 + tests/test_adversary.py | 43 +++++- 14 files changed, 211 insertions(+), 80 deletions(-) create mode 100644 mart/attack/batch_converter.py create mode 100644 mart/configs/attack/batch_converter/dict.yaml create mode 100644 mart/configs/attack/batch_converter/list.yaml create mode 100644 mart/configs/attack/batch_converter/tensor.yaml create mode 100644 mart/configs/attack/batch_converter/tuple.yaml diff --git a/mart/attack/__init__.py b/mart/attack/__init__.py index 843ce9bd..2a55d648 100644 --- a/mart/attack/__init__.py +++ b/mart/attack/__init__.py @@ -1,5 +1,6 @@ from .adversary import * from .adversary_wrapper import * +from .batch_converter import * from .composer import * from .enforcer import * from .gain import * diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 0669d95e..d24caf5b 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -42,6 +42,7 @@ def __init__( objective: Objective | None = None, enforcer: Enforcer | None = None, attacker: pl.Trainer | None = None, + batch_converter: Callable, **kwargs, ): """_summary_ @@ -55,6 +56,7 @@ def __init__( objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. enforcer (Enforcer): A Callable that enforce constraints on the adversarial input. attacker (Trainer): A PyTorch-Lightning Trainer object used to fit the perturbation. + batch_converter (Callable): Convert batch into convenient format and reverse. 
""" super().__init__() @@ -101,6 +103,8 @@ def __init__( assert self._attacker.max_epochs == 0 assert self._attacker.limit_train_batches > 0 + self.batch_converter = batch_converter + @property def perturber(self) -> Perturber: # Hide the perturber module in a list, so that perturbation is not exported as a parameter in the model checkpoint, @@ -120,16 +124,18 @@ def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally # batch = batch.copy() - input = batch["input"] + input_transformed = batch["input"] target = batch["target"] # What we need is a frozen model that returns (a dictionary of) logits, or losses. model = batch["model"] - # Compose input_adv from input, then give to model for updated gain. - input_adv = self.get_input_adv(input=input, target=target) + # Compose un-transformed input_adv from batch["input"], then give to model for updated gain. + input_adv_transformed = self.get_input_adv(input=input_transformed, target=target) + # Target model expects input in the original format. + batch_adv = self.batch_converter.revert(input_adv_transformed, target) # A model that returns output dictionary. - outputs = model(input=input_adv, target=target) + outputs = model(batch_adv) # FIXME: This should really be just `return outputs`. But this might require a new sequence? # FIXME: Everything below here should live in the model as modules. @@ -163,22 +169,29 @@ def configure_gradient_clipping( self.gradient_modifier(group["params"]) @silent() - def forward(self, *, input, target, model): - batch = {"input": input, "target": target, "model": model} + def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): + # Extract and transform input so that is convenient for Adversary. + input_transformed, target = self.batch_converter(batch) + + # Optimization loop only sees the transformed input in batches. + batch_transformed = {"input": input_transformed, "target": target, "model": model} # Configure and reset perturbation for current inputs - self.perturber.configure_perturbation(input) + self.perturber.configure_perturbation(input_transformed) # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. self.attacker.fit_loop.max_epochs += 1 - self.attacker.fit(self, train_dataloaders=cycle([batch])) + self.attacker.fit(self, train_dataloaders=cycle([batch_transformed])) # Get the transformed input_adv for enforcer checking. - input_adv = self.get_input_adv(input=input, target=target) - self.enforcer(input_adv, input=input, target=target) + input_adv_transformed = self.get_input_adv(input=input_transformed, target=target) + self.enforcer(input_adv_transformed, input=input_transformed, target=target) - return input_adv + # Revert to the original format of batch. + batch_adv = self.batch_converter.revert(input_adv_transformed, target) + + return batch_adv @property def attacker(self): diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py new file mode 100644 index 00000000..0eb19e01 --- /dev/null +++ b/mart/attack/batch_converter.py @@ -0,0 +1,128 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +import abc +from typing import Callable + +# TODO: Do we need to copy batch? 
+
+__all__ = [
+    "TensorBatchConverter",
+    "DictBatchConverter",
+    "ListBatchConverter",
+    "TupleBatchConverter",
+]
+
+
+class BatchConverter(abc.ABC):
+    def __init__(self, *, transform: Callable = None, untransform: Callable = None):
+        """Convert a batch between its original format and the attack's convenient format.
+
+        Args:
+            transform (Callable): Transform input into a convenient format, e.g. [0,1]->[0,255].
+            untransform (Callable): Transform adversarial input in the convenient format back into the original format of input, e.g. [0,255]->[0,1].
+        """
+        self.transform = transform if transform is not None else lambda x: x
+        self.untransform = untransform if untransform is not None else lambda x: x
+
+    def __call__(self, batch):
+        input, target = self._convert(batch)
+        input_transformed = self.transform(input)
+        return input_transformed, target
+
+    def revert(self, input_transformed, target):
+        input = self.untransform(input_transformed)
+        batch = self._revert(input, target)
+        return batch
+
+    @abc.abstractmethod
+    def _revert(self, input, target):
+        pass
+
+    @abc.abstractmethod
+    def _convert(self, batch):
+        pass
+
+
+class TensorBatchConverter(BatchConverter):
+    def _convert(self, batch):
+        input = batch
+        target = None
+        return input, target
+
+    def _revert(self, input, target):
+        batch = input
+        return batch
+
+
+class DictBatchConverter(BatchConverter):
+    def __init__(self, input_key: str = "input", **kwargs):
+        """Locate input and target in a dict batch.
+
+        Args:
+            input_key (str): Input locator in a batch. Defaults to "input".
+        """
+        super().__init__(**kwargs)
+
+        self.input_key = input_key
+        self.rest = {}
+
+    def _convert(self, batch):
+        input = batch.pop(self.input_key)
+        if "target" in batch:
+            target = batch.pop("target")
+            self.rest = batch
+        else:
+            target = batch
+        return input, target
+
+    def _revert(self, input, target):
+        if not self.rest:
+            batch = {self.input_key: input} | target
+        else:
+            batch = {self.input_key: input, "target": target} | self.rest
+
+        return batch
+
+
+class ListBatchConverter(BatchConverter):
+    def __init__(self, input_key: int = 0, **kwargs):
+        super().__init__(**kwargs)
+
+        self.input_key = input_key
+        self.target_size = None
+
+    def _convert(self, batch: list):
+        input = batch.pop(self.input_key)
+        self.target_size = len(batch)
+
+        if self.target_size == 1:
+            target = batch[0]
+        else:
+            target = batch
+
+        return input, target
+
+    def _revert(self, input, target):
+        if self.target_size == 1:
+            batch = [target]
+            batch.insert(self.input_key, input)
+        else:
+            batch = target.copy()  # copy so we don't mutate the caller's target list
+            batch.insert(self.input_key, input)
+        return batch
+
+
+class TupleBatchConverter(ListBatchConverter):
+    def _convert(self, batch: tuple):
+        batch_list = list(batch)
+        input, target = super()._convert(batch_list)
+        return input, target
+
+    def _revert(self, input, target):
+        batch_list = super()._revert(input, target)
+        batch = tuple(batch_list)
+        return batch
diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py
index 3947ccdc..22b7a6a4 100644
--- a/mart/callbacks/adversarial_training.py
+++ b/mart/callbacks/adversarial_training.py
@@ -54,56 +54,8 @@ def setup(self, trainer, pl_module, stage=None):

     def teardown(self, trainer, pl_module, stage=None):
         pl_module.on_after_batch_transfer = self._on_after_batch_transfer

-    def get_input_target_batcher(self, batch_orig):
-        if isinstance(batch_orig, tuple):
-            # Convert tuple to list
-            batch = list(batch_orig).copy()
-        else:
-            batch = batch_orig.copy()
-
-        batch_input_key = self.batch_input_key
-
-        # pop() works for both list and dict.
-        input = batch.pop(batch_input_key)
-
-        if isinstance(batch, list) and len(batch) == 1:
-            target = batch[0]
-
-            def batch_constructor(input, target):
-                batch = [target]
-                batch.insert(batch_input_key, input)
-                return batch
-
-        elif isinstance(batch, list) and len(batch) > 1:
-            target = batch.copy()
-
-            def batch_constructor(input, target):
-                batch = target.copy()
-                batch.insert(batch_input_key, input)
-                return batch
-
-        elif isinstance(batch, dict) and "target" in batch:
-            target = batch["target"]
-
-            def batch_constructor(input, target):
-                return {batch_input_key: input, "target": target}
-
-        elif isinstance(batch, dict) and "target" not in batch:
-            # Example in anomalib: dict_keys(['image_path', 'label', 'image', 'mask_path', 'mask'])
-            # image: NCHW; label: N,
-            target = batch
-
-            def batch_constructor(input, target):
-                # Besides input and target, add others back to batch.
-                return target | {batch_input_key: input}
-
-        else:
-            raise NotImplementedError()
-
-        return input, target, batch_constructor
-
-    def wrap_model(self, model, batch_constructor, dataloader_idx):
-        """Make a model, such that output = model(input, target)."""
+    def wrap_model(self, model, dataloader_idx):
+        """Make a model, such that `output = model(batch)`."""

         # Consume dataloader_idx
         if hasattr(model, "attack_step"):
@@ -122,12 +74,7 @@ def model_forward(batch):
         else:
             model_forward = model

-        def wrapped_model(*, input, target):
-            batch = batch_constructor(input, target)
-            output = model_forward(batch)
-            return output
-
-        return wrapped_model
+        return model_forward

     def on_after_batch_transfer(self, pl_module, batch, dataloader_idx):
         batch = self._on_after_batch_transfer(batch, dataloader_idx)
@@ -149,13 +96,12 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx):
         # Move adversary to same device as pl_module and run attack
         adversary.to(pl_module.device)

-        # FIXME: Directly pass batch instead of assuming it has a structure?
-        input, target, batch_constructor = self.get_input_target_batcher(batch)
-
-        # We also need to construct a batch for model during attack iterations.
-        model = self.wrap_model(pl_module, batch_constructor, dataloader_idx)
+        # We assume Adversary is not aware of PyTorch Lightning,
+        # so wrap the model as `output=model(batch)`.
+        model = self.wrap_model(pl_module, dataloader_idx)

         # TODO: We may need to do model.eval() if there's BN-like layers in the model.
-        input_adv = adversary(input=input, target=target, model=model)
+        # Directly pass batch instead of assuming it has a structure.
+        batch_adv = adversary(batch=batch, model=model)

-        return [input_adv, target]
+        return batch_adv
diff --git a/mart/configs/attack/adversary.yaml b/mart/configs/attack/adversary.yaml
index bbf52433..480e3a5b 100644
--- a/mart/configs/attack/adversary.yaml
+++ b/mart/configs/attack/adversary.yaml
@@ -12,3 +12,4 @@ gradient_modifier: null
 objective: null
 enforcer: ???
 attacker: null
+batch_converter: ???
diff --git a/mart/configs/attack/batch_converter/dict.yaml b/mart/configs/attack/batch_converter/dict.yaml new file mode 100644 index 00000000..db421039 --- /dev/null +++ b/mart/configs/attack/batch_converter/dict.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.batch_converter.DictBatchConverter +input_key: input diff --git a/mart/configs/attack/batch_converter/list.yaml b/mart/configs/attack/batch_converter/list.yaml new file mode 100644 index 00000000..53da9fae --- /dev/null +++ b/mart/configs/attack/batch_converter/list.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.batch_converter.ListBatchConverter +input_key: 0 diff --git a/mart/configs/attack/batch_converter/tensor.yaml b/mart/configs/attack/batch_converter/tensor.yaml new file mode 100644 index 00000000..47697bfd --- /dev/null +++ b/mart/configs/attack/batch_converter/tensor.yaml @@ -0,0 +1 @@ +_target_: mart.attack.batch_converter.TensorBatchConverter diff --git a/mart/configs/attack/batch_converter/tuple.yaml b/mart/configs/attack/batch_converter/tuple.yaml new file mode 100644 index 00000000..25ff65b5 --- /dev/null +++ b/mart/configs/attack/batch_converter/tuple.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.batch_converter.TupleBatchConverter +input_key: 0 diff --git a/mart/configs/attack/classification_eps1.75_fgsm.yaml b/mart/configs/attack/classification_eps1.75_fgsm.yaml index 7c300e2d..c3c0ec46 100644 --- a/mart/configs/attack/classification_eps1.75_fgsm.yaml +++ b/mart/configs/attack/classification_eps1.75_fgsm.yaml @@ -10,6 +10,7 @@ defaults: - objective: misclassification - enforcer: default - enforcer/constraints: [lp, pixel_range] + - batch_converter: list enforcer: constraints: diff --git a/mart/configs/attack/classification_eps2_pgd10_step1.yaml b/mart/configs/attack/classification_eps2_pgd10_step1.yaml index b98cf407..7dd30548 100644 --- a/mart/configs/attack/classification_eps2_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps2_pgd10_step1.yaml @@ -10,6 +10,7 @@ defaults: - objective: misclassification - enforcer: default - enforcer/constraints: [lp, pixel_range] + - batch_converter: list enforcer: constraints: diff --git a/mart/configs/attack/classification_eps8_pgd10_step1.yaml b/mart/configs/attack/classification_eps8_pgd10_step1.yaml index f1b6242a..7b9577a7 100644 --- a/mart/configs/attack/classification_eps8_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps8_pgd10_step1.yaml @@ -10,6 +10,7 @@ defaults: - objective: misclassification - enforcer: default - enforcer/constraints: [lp, pixel_range] + - batch_converter: list enforcer: constraints: diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index ad99dda0..cedbd9eb 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -10,6 +10,7 @@ defaults: - objective: zero_ap - enforcer: default - enforcer/constraints: [mask, pixel_range] + - batch_converter: tuple # Make a 5-step attack for the demonstration purpose. 
max_iters: 5 diff --git a/tests/test_adversary.py b/tests/test_adversary.py index 0c363902..2986f48d 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -24,6 +24,8 @@ def test_with_model(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() + batch = {"input": input_data, "target": target_data} + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -32,9 +34,11 @@ def test_with_model(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) - output_data = adversary(input=input_data, target=target_data, model=model) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv["input"] # The enforcer is only called when model is not None. enforcer.assert_called_once() @@ -58,6 +62,7 @@ def test_hidden_params(): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -66,6 +71,7 @@ def test_hidden_params(): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) # Adversarial perturbation should not be updated by a regular training optimizer. @@ -88,6 +94,8 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() + batch = {"input": input_data, "target": target_data} + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -96,9 +104,11 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) - output_data = adversary(input=input_data, target=target_data, model=model) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv["input"] # Adversary will have no parameter even after forward is called, because we hide Perturber in a list. params = [p for p in adversary.parameters()] @@ -119,6 +129,7 @@ def test_loading_perturbation_from_state_dict(): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -127,6 +138,7 @@ def test_loading_perturbation_from_state_dict(): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) # We should be able to load arbitrary state_dict, because Adversary ignores state_dict. @@ -145,6 +157,8 @@ def test_perturbation(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() + batch = {"input": input_data, "target": target_data} + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -153,9 +167,11 @@ def test_perturbation(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) - output_data = adversary(input=input_data, target=target_data, model=model) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv["input"] # The enforcer is only called when model is not None. 
enforcer.assert_called_once() @@ -190,6 +206,9 @@ def initializer(x): projector=None, ) + batch = {"input": input_data, "target": target_data} + batch_converter = mart.attack.DictBatchConverter() + adversary = Adversary( perturber=perturber, composer=composer, @@ -198,12 +217,14 @@ def initializer(x): gradient_modifier=Sign(), enforcer=enforcer, max_iters=1, + batch_converter=batch_converter, ) - def model(input, target): - return {"logits": input} + def model(batch): + return {"logits": batch["input"]} - input_adv = adversary(input=input_data, target=target_data, model=model) + batch_adv = adversary(batch=batch, model=model) + input_adv = batch_adv["input"] perturbation = input_data - input_adv @@ -215,12 +236,14 @@ def test_configure_optimizers(): composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock() + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, + batch_converter=batch_converter, ) adversary.configure_optimizers() @@ -235,12 +258,14 @@ def test_training_step(input_data, target_data, perturbation): optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor(1337)) model = Mock(return_value={}) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, + batch_converter=batch_converter, ) output = adversary.training_step( @@ -257,12 +282,14 @@ def test_training_step_with_many_gain(input_data, target_data, perturbation): optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor([1234, 5678])) model = Mock(return_value={}) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, + batch_converter=batch_converter, ) output = adversary.training_step( @@ -279,6 +306,7 @@ def test_training_step_with_objective(input_data, target_data, perturbation): gain = Mock(return_value=torch.tensor([1234, 5678])) model = Mock(return_value={}) objective = Mock(return_value=torch.tensor([True, False], dtype=torch.bool)) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -286,6 +314,7 @@ def test_training_step_with_objective(input_data, target_data, perturbation): optimizer=optimizer, objective=objective, gain=gain, + batch_converter=batch_converter, ) output = adversary.training_step( @@ -305,6 +334,7 @@ def test_configure_gradient_clipping(): ) gradient_modifier = Mock() gain = Mock() + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -312,6 +342,7 @@ def test_configure_gradient_clipping(): optimizer=optimizer, gradient_modifier=gradient_modifier, gain=gain, + batch_converter=batch_converter, ) # We need to mock a trainer since LightningModule does some checks adversary.trainer = Mock(gradient_clip_val=1.0, gradient_clip_algorithm="norm") From 7ed32acfcd4a8a9a12e25abfd3713fc1d409816f Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 18 Jul 2023 17:17:36 -0700 Subject: [PATCH 024/126] Clean up. 
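
With batch structure now owned by the Adversary's `batch_converter`, the callback can drop its
own `batch_input_key` bookkeeping. For reference, a minimal sketch of the round trip the
converters perform (the tensors here are made up; `TupleBatchConverter` is the class added in
the previous patch):

```python
import torch

from mart.attack import TupleBatchConverter

images, labels = torch.rand(2, 3, 32, 32), torch.tensor([1, 7])

converter = TupleBatchConverter()  # input lives at index 0 by default
input, target = converter((images, labels))  # split the batch for the attack
batch_again = converter.revert(input, target)  # back to (images, labels) for the model

assert torch.equal(batch_again[0], images) and torch.equal(batch_again[1], labels)
```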
--- mart/callbacks/adversarial_training.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 22b7a6a4..31879c59 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -26,7 +26,6 @@ def __init__( train_adversary: Callable = None, validation_adversary: Callable = None, test_adversary: Callable = None, - batch_input_key: str | int = 0, ): """AdversaryConnector. @@ -35,7 +34,6 @@ def __init__( train_adversary (Callable, optional): _description_. Defaults to None. validation_adversary (Callable, optional): _description_. Defaults to None. test_adversary (Callable, optional): _description_. Defaults to None. - batch_input_key (str | int, optional): Input locator in a batch. Defaults to 0. """ adversary = adversary or train_adversary @@ -43,8 +41,6 @@ def __init__( self.validation_adversary = validation_adversary or adversary self.test_adversary = test_adversary or adversary - self.batch_input_key = batch_input_key - def setup(self, trainer, pl_module, stage=None): self._on_after_batch_transfer = pl_module.on_after_batch_transfer pl_module.on_after_batch_transfer = types.MethodType( From 6e434964d5c78173ee285adad601a3f28d0b82a7 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 18 Jul 2023 17:31:56 -0700 Subject: [PATCH 025/126] Fix adv. visualizer. --- mart/callbacks/visualizer.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py index 39409143..34e7c5cc 100644 --- a/mart/callbacks/visualizer.py +++ b/mart/callbacks/visualizer.py @@ -6,6 +6,7 @@ import os +import torch from lightning.pytorch.callbacks import Callback from torchvision.transforms import ToPILImage @@ -32,7 +33,8 @@ def on_train_batch_end(self, trainer, model, outputs, batch, batch_idx): def on_train_end(self, trainer, model): # FIXME: We should really just save this to outputs instead of recomputing adv_input - adv_input = model(input=self.input, target=self.target) + with torch.no_grad(): + adv_input = model.get_input_adv(input=self.input, target=self.target) for img, tgt in zip(adv_input, self.target): fname = tgt["file_name"] From 254e1680ee6c9898b6caa33ee70b3a726667a975 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 18 Jul 2023 17:41:02 -0700 Subject: [PATCH 026/126] Fix visualizer test. --- tests/test_visualizer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_visualizer.py b/tests/test_visualizer.py index 5c25e930..cb188591 100644 --- a/tests/test_visualizer.py +++ b/tests/test_visualizer.py @@ -24,7 +24,7 @@ def perturb(input): return result trainer = Mock() - model = Mock(return_value=perturb(input_list)) + model = Mock(get_input_adv=Mock(return_value=perturb(input_list))) outputs = Mock() batch = {"input": input_list, "target": target_list} adversary = Mock(spec=Adversary, side_effect=perturb) From 46b5cb7e75258b27702a606ea22e8eccda8327a6 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Wed, 19 Jul 2023 13:43:08 -0700 Subject: [PATCH 027/126] Make adversary optional in some phases. --- mart/callbacks/adversarial_training.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 31879c59..3437c74b 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -35,8 +35,6 @@ def __init__( validation_adversary (Callable, optional): _description_. 
Defaults to None. test_adversary (Callable, optional): _description_. Defaults to None. """ - adversary = adversary or train_adversary - self.train_adversary = train_adversary or adversary self.validation_adversary = validation_adversary or adversary self.test_adversary = test_adversary or adversary @@ -85,7 +83,7 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): elif trainer.testing: adversary = self.test_adversary - # Skip if adversary is not defined for the phase train/validation/test. + # Skip if adversary is not defined for all phases train/validation/test. if adversary is None: return batch From ace60a110ada331a661bdb7941f3f970db01bc82 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 20 Jul 2023 19:48:42 -0700 Subject: [PATCH 028/126] Add a minimal PyTorch wrapper. --- examples/armory_attack/pytorch_wrapper.py | 79 +++++++++++++++++++++++ examples/armory_attack/requirements.txt | 2 + 2 files changed, 81 insertions(+) create mode 100644 examples/armory_attack/pytorch_wrapper.py create mode 100644 examples/armory_attack/requirements.txt diff --git a/examples/armory_attack/pytorch_wrapper.py b/examples/armory_attack/pytorch_wrapper.py new file mode 100644 index 00000000..ca10b863 --- /dev/null +++ b/examples/armory_attack/pytorch_wrapper.py @@ -0,0 +1,79 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from __future__ import annotations + +import hydra +import numpy as np +import torch +from multimethod import multimethod +from omegaconf import OmegaConf + + +# A recursive function to convert all np.ndarray in an object to torch.Tensor, or vice versa. +@multimethod +def convert(obj: dict, device=None): + return {key: convert(value) for key, value in obj.items()} + + +@multimethod +def convert(obj: list, device=None): # noqa: F811 + return [convert(item) for item in obj] + + +@multimethod +def convert(obj: tuple, device=None): # noqa: F811 + return tuple(convert(obj)) + + +@multimethod +def convert(obj: np.ndarray, device=None): # noqa: F811 + return torch.tensor(obj, device=device) + + +@multimethod +def convert(obj: torch.Tensor, device=None): # noqa: F811 + return obj.detach().cpu().numpy() + + +# All other types, no change. +@multimethod +def convert(obj, device=None): # noqa: F811 + return obj + + +class MartAttack: + """A minimal wrapper to run PyTorch-based MART adversary in Armory against PyTorch-based + models. + + 1. Extract the PyTorch model from an ART Estimator; + 2. Convert np.ndarray to torch.Tensor; + 3. Run PyTorch-based MART adversary and get result as torch.Tensor; + 4. Convert torch.Tensor back to np.ndarray. + """ + + def __init__(self, model, mart_adv_config_yaml): + # TODO: Automatically search for torch.nn.Module within model. + # Extract PyTorch model from an ART Estimator. + self.model = model._model + self.device = self.model.device + + # Instantiate a MART adversary. + adv_cfg = OmegaConf.load(mart_adv_config_yaml) + self.adversary = hydra.utils.instantiate(adv_cfg) + + def generate(self, **batch_np): + # Convert np.ndarray to torch.Tensor. + # Specify a device to place PyTorch tensors. + batch_pth = convert(batch_np, device=self.device) + batch_adv_pytorch = self.adversary(**batch_pth) + + # Convert torch.Tensor to np.ndarray. + batch_adv_np = convert(batch_adv_pytorch) + + # Only return adversarial input in the original numpy format. 
+ input_adv_np = batch_adv_np["x"] + return input_adv_np diff --git a/examples/armory_attack/requirements.txt b/examples/armory_attack/requirements.txt new file mode 100644 index 00000000..95ad6eb8 --- /dev/null +++ b/examples/armory_attack/requirements.txt @@ -0,0 +1,2 @@ +mart @ git+https://github.com/IntelLabs/MART.git@example_armory_attack +multimethod From f076be127a35c315d4bc2f5eb703391be4f50680 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 20 Jul 2023 21:04:16 -0700 Subject: [PATCH 029/126] Add batch_converter for object detection tasks in Armory. --- examples/armory_attack/batch_converter.py | 70 +++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 examples/armory_attack/batch_converter.py diff --git a/examples/armory_attack/batch_converter.py b/examples/armory_attack/batch_converter.py new file mode 100644 index 00000000..9217b84c --- /dev/null +++ b/examples/armory_attack/batch_converter.py @@ -0,0 +1,70 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from functools import reduce + +import torch + +from mart.attack.batch_converter import BatchConverter + + +class ObjectDetectionBatchConverter(BatchConverter): + def __init__( + self, + input_key: str = "x", + target_keys: dict = { + "y": ["area", "boxes", "id", "image_id", "is_crowd", "labels"], + "y_patch_metadata": [ + "avg_patch_depth", + "gs_coords", + "mask", + "max_depth_perturb_meters", + ], + }, + **kwargs, + ): + super().__init__(**kwargs) + self.input_key = input_key + self.target_keys = target_keys + + def _convert(self, batch: dict): + input = batch[self.input_key] + + target = [] + all_targets = [batch[key] for key in self.target_keys] + + # Merge several target keys. + for dicts in zip(*all_targets): + joint_target = reduce(lambda a, b: a | b, dicts) + target.append(joint_target) + + # NHWC -> NCHW, the PyTorch format. + input = input.permute((0, 3, 1, 2)) + # NCHW -> tuple[CHW] + input = tuple(inp_ for inp_ in input) + + target = tuple(target) + + return input, target + + def _revert(self, input: tuple[torch.Tensor], target: tuple[dict]) -> dict: + batch = {} + + # tuple[CHW] -> NCHW + input = torch.stack(input) + # NCHW -> NHWC, the TensorFlow format used in ART. + input = input.permute((0, 2, 3, 1)) + + batch[self.input_key] = input + + # Split target into several self.target_keys + for target_key, sub_keys in self.target_keys.items(): + batch[target_key] = [] + for target_i_dict in target: + target_key_i = {sub_key: target_i_dict[sub_key] for sub_key in sub_keys} + batch[target_key].append(target_key_i) + + return batch From ee9dd36f18c8768ba718c33297d1d3fc35414456 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 09:21:56 -0700 Subject: [PATCH 030/126] Fix adversary call. --- examples/armory_attack/pytorch_wrapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/armory_attack/pytorch_wrapper.py b/examples/armory_attack/pytorch_wrapper.py index ca10b863..1a1803b3 100644 --- a/examples/armory_attack/pytorch_wrapper.py +++ b/examples/armory_attack/pytorch_wrapper.py @@ -69,7 +69,7 @@ def generate(self, **batch_np): # Convert np.ndarray to torch.Tensor. # Specify a device to place PyTorch tensors. batch_pth = convert(batch_np, device=self.device) - batch_adv_pytorch = self.adversary(**batch_pth) + batch_adv_pytorch = self.adversary(batch_pth, model=self.model) # Convert torch.Tensor to np.ndarray. 
batch_adv_np = convert(batch_adv_pytorch) From 570a8b5f32285df54683cb3cc0d12034a2df2711 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 10:21:56 -0700 Subject: [PATCH 031/126] Implement wrapper MartAttackObjectDetection. --- examples/armory_attack/pytorch_wrapper.py | 56 +++++++++++++++++++---- 1 file changed, 46 insertions(+), 10 deletions(-) diff --git a/examples/armory_attack/pytorch_wrapper.py b/examples/armory_attack/pytorch_wrapper.py index 1a1803b3..df81e733 100644 --- a/examples/armory_attack/pytorch_wrapper.py +++ b/examples/armory_attack/pytorch_wrapper.py @@ -12,6 +12,8 @@ from multimethod import multimethod from omegaconf import OmegaConf +from .batch_converter import ObjectDetectionBatchConverter + # A recursive function to convert all np.ndarray in an object to torch.Tensor, or vice versa. @multimethod @@ -55,25 +57,59 @@ class MartAttack: 4. Convert torch.Tensor back to np.ndarray. """ - def __init__(self, model, mart_adv_config_yaml): + def __init__(self, model, batch_converter, mart_adv_config_yaml): # TODO: Automatically search for torch.nn.Module within model. # Extract PyTorch model from an ART Estimator. self.model = model._model self.device = self.model.device + self.batch_converter = batch_converter + # Instantiate a MART adversary. adv_cfg = OmegaConf.load(mart_adv_config_yaml) self.adversary = hydra.utils.instantiate(adv_cfg) - def generate(self, **batch_np): - # Convert np.ndarray to torch.Tensor. - # Specify a device to place PyTorch tensors. - batch_pth = convert(batch_np, device=self.device) - batch_adv_pytorch = self.adversary(batch_pth, model=self.model) - - # Convert torch.Tensor to np.ndarray. - batch_adv_np = convert(batch_adv_pytorch) + def convert_batch_armory_to_torchvision(self, batch_armory_np): + # np.ndarray -> torch.Tensor, on a device. + batch_armory_pth = convert(batch_armory_np, device=self.device) + # armory format -> torchvision format. + batch_tv_pth = self.batch_converter(batch_armory_pth) + return batch_tv_pth + + def convert_batch_torchvision_to_armory(self, batch_tv_pth): + # torchvision format -> armory format. + batch_armory_pth = self.batch_converter.revert(batch_tv_pth) + # torch.Tensor -> np.ndarray + batch_armory_np = convert(batch_armory_pth) + return batch_armory_np + + def generate(self, **batch_armory_np): + batch_tv_pth = self.convert_batch_armory_to_torchvision(batch_armory_np) + batch_adv_tv_pth = self.adversary(batch_tv_pth, model=self.model) + batch_adv_armory_np = self.convert_batch_torchvision_to_armory(batch_adv_tv_pth) # Only return adversarial input in the original numpy format. - input_adv_np = batch_adv_np["x"] + input_key = self.batch_converter.input_key + input_adv_np = batch_adv_armory_np[input_key] return input_adv_np + + +class MartAttackObjectDetection(MartAttack): + def __init__(self, model, mart_adv_config_yaml): + batch_config = { + "input_key": "x", + "target_keys": { + "y": ["area", "boxes", "id", "image_id", "is_crowd", "labels"], + "y_patch_metadata": [ + "avg_patch_depth", + "gs_coords", + "mask", + "max_depth_perturb_meters", + ], + }, + } + + batch_converter = ObjectDetectionBatchConverter(**batch_config) + super().__init__( + model=model, batch_converter=batch_converter, mart_adv_config_yaml=mart_adv_config_yaml + ) From 7c52e8c1d10d63d962f5b83cf4d92469bc6ef3d2 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 11:00:10 -0700 Subject: [PATCH 032/126] Make a package. 
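
Packaging the wrapper lets Armory load it by module name (`"module": "mart_armory"`).
Assuming MART and the other dependencies are already installed, a local editable install
should behave roughly like this (a sketch, not verified in CI):

```shell
cd examples/armory_attack
pip install -e .
python -c "import mart_armory; print(mart_armory.__version__)"
```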
--- examples/armory_attack/mart_armory/__init__.py | 5 +++++ .../armory_attack/mart_armory/attack_config_generator.py | 0 .../armory_attack/{ => mart_armory}/batch_converter.py | 0 .../armory_attack/{ => mart_armory}/pytorch_wrapper.py | 0 examples/armory_attack/setup.py | 9 +++++++++ 5 files changed, 14 insertions(+) create mode 100644 examples/armory_attack/mart_armory/__init__.py create mode 100644 examples/armory_attack/mart_armory/attack_config_generator.py rename examples/armory_attack/{ => mart_armory}/batch_converter.py (100%) rename examples/armory_attack/{ => mart_armory}/pytorch_wrapper.py (100%) create mode 100644 examples/armory_attack/setup.py diff --git a/examples/armory_attack/mart_armory/__init__.py b/examples/armory_attack/mart_armory/__init__.py new file mode 100644 index 00000000..b15e4be1 --- /dev/null +++ b/examples/armory_attack/mart_armory/__init__.py @@ -0,0 +1,5 @@ +import importlib + +from mart_armory.pytorch_wrapper import MartAttackObjectDetection + +__version__ = importlib.metadata.version(__package__ or __name__) diff --git a/examples/armory_attack/mart_armory/attack_config_generator.py b/examples/armory_attack/mart_armory/attack_config_generator.py new file mode 100644 index 00000000..e69de29b diff --git a/examples/armory_attack/batch_converter.py b/examples/armory_attack/mart_armory/batch_converter.py similarity index 100% rename from examples/armory_attack/batch_converter.py rename to examples/armory_attack/mart_armory/batch_converter.py diff --git a/examples/armory_attack/pytorch_wrapper.py b/examples/armory_attack/mart_armory/pytorch_wrapper.py similarity index 100% rename from examples/armory_attack/pytorch_wrapper.py rename to examples/armory_attack/mart_armory/pytorch_wrapper.py diff --git a/examples/armory_attack/setup.py b/examples/armory_attack/setup.py new file mode 100644 index 00000000..35456b63 --- /dev/null +++ b/examples/armory_attack/setup.py @@ -0,0 +1,9 @@ +import setuptools + +setuptools.setup( + name="mart_armory", + version="0.0.1", + description="A wrapper for running MART attack in Armory.", + url="https://github.com/IntelLabs/MART", + packages=["mart_armory"], +) From 0ab2170cb0c1fbb08b9389f08df27691b3750865 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 11:00:28 -0700 Subject: [PATCH 033/126] Add test data. 
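
The fixture below mimics a single CARLA object-detection batch from Armory: an NHWC float
image in `x`, COCO-style annotations in `y`, and patch metadata in `y_patch_metadata`. A
sketch of the round trip it is meant to exercise, assuming it runs from the example directory
and that the numpy arrays are first moved to torch with the `convert` helper from
`pytorch_wrapper`:

```python
from mart_armory.batch_converter import ObjectDetectionBatchConverter
from mart_armory.pytorch_wrapper import convert
from tests.test_batch_converter import batch

converter = ObjectDetectionBatchConverter()  # default keys match this fixture
input, target = converter(convert(batch))  # NHWC ndarray -> tuple of CHW tensors + merged target dicts
batch_again = converter.revert(input, target)  # back to the Armory {x, y, y_patch_metadata} layout

assert batch_again.keys() == batch.keys()
```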
--- .../tests/test_batch_converter.py | 56 +++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 examples/armory_attack/tests/test_batch_converter.py diff --git a/examples/armory_attack/tests/test_batch_converter.py b/examples/armory_attack/tests/test_batch_converter.py new file mode 100644 index 00000000..05a1dc30 --- /dev/null +++ b/examples/armory_attack/tests/test_batch_converter.py @@ -0,0 +1,56 @@ +import numpy as np + +x = np.random.rand(1, 960, 1280, 3).astype(np.float32) + +y = [ + { + "area": np.array( + [ + 154, + 286, + 226, + ] + ), + "boxes": np.array( + [ + [1238.0, 59.0, 1259.0, 85.0], + [739.0, 405.0, 762.0, 438.0], + [838.0, 361.0, 853.0, 393.0], + ], + dtype=np.float32, + ), + "id": np.array( + [ + 80, + 81, + 82, + ] + ), + "image_id": np.array( + [ + 16681727, + 16681727, + 16681727, + ] + ), + "is_crowd": np.array( + [ + False, + False, + False, + ] + ), + "labels": np.array([1, 1, 1]), + } +] + +y_patch_metadata = [ + { + "avg_patch_depth": np.array(25.20819092), + "gs_coords": np.array([[969, 64], [1033, 92], [469, 214], [439, 166]], dtype=np.int32), + "mask": np.zeros((960, 1280, 3), dtype=np.uint8), + "max_depth_perturb_meters": np.array(3.0), + } +] + +batch = {"x": x, "y": y, "y_patch_metadata": y_patch_metadata} From 59bd9d7172f0fe1fa90f868fd4d42c22a034beb1 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 11:08:20 -0700 Subject: [PATCH 034/126] Rename as mart_armory. --- examples/{armory_attack => mart_armory}/mart_armory/__init__.py | 0 .../mart_armory/attack_config_generator.py | 0 .../{armory_attack => mart_armory}/mart_armory/batch_converter.py | 0 .../{armory_attack => mart_armory}/mart_armory/pytorch_wrapper.py | 0 examples/{armory_attack => mart_armory}/requirements.txt | 0 examples/{armory_attack => mart_armory}/setup.py | 0 .../{armory_attack => mart_armory}/tests/test_batch_converter.py | 0 7 files changed, 0 insertions(+), 0 deletions(-) rename examples/{armory_attack => mart_armory}/mart_armory/__init__.py (100%) rename examples/{armory_attack => mart_armory}/mart_armory/attack_config_generator.py (100%) rename examples/{armory_attack => mart_armory}/mart_armory/batch_converter.py (100%) rename examples/{armory_attack => mart_armory}/mart_armory/pytorch_wrapper.py (100%) rename examples/{armory_attack => mart_armory}/requirements.txt (100%) rename examples/{armory_attack => mart_armory}/setup.py (100%) rename examples/{armory_attack => mart_armory}/tests/test_batch_converter.py (100%) diff --git a/examples/armory_attack/mart_armory/__init__.py b/examples/mart_armory/mart_armory/__init__.py similarity index 100% rename from examples/armory_attack/mart_armory/__init__.py rename to examples/mart_armory/mart_armory/__init__.py diff --git a/examples/armory_attack/mart_armory/attack_config_generator.py b/examples/mart_armory/mart_armory/attack_config_generator.py similarity index 100% rename from examples/armory_attack/mart_armory/attack_config_generator.py rename to examples/mart_armory/mart_armory/attack_config_generator.py diff --git a/examples/armory_attack/mart_armory/batch_converter.py b/examples/mart_armory/mart_armory/batch_converter.py similarity index 100% rename from examples/armory_attack/mart_armory/batch_converter.py rename to examples/mart_armory/mart_armory/batch_converter.py diff --git a/examples/armory_attack/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py similarity index 100% rename from examples/armory_attack/mart_armory/pytorch_wrapper.py rename to 
examples/mart_armory/mart_armory/pytorch_wrapper.py diff --git a/examples/armory_attack/requirements.txt b/examples/mart_armory/requirements.txt similarity index 100% rename from examples/armory_attack/requirements.txt rename to examples/mart_armory/requirements.txt diff --git a/examples/armory_attack/setup.py b/examples/mart_armory/setup.py similarity index 100% rename from examples/armory_attack/setup.py rename to examples/mart_armory/setup.py diff --git a/examples/armory_attack/tests/test_batch_converter.py b/examples/mart_armory/tests/test_batch_converter.py similarity index 100% rename from examples/armory_attack/tests/test_batch_converter.py rename to examples/mart_armory/tests/test_batch_converter.py From 244522375d1e693d10b860a525904871bcba2d08 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 11:15:40 -0700 Subject: [PATCH 035/126] Update package. --- examples/mart_armory/README.md | 7 +++++++ examples/mart_armory/pyproject.toml | 21 +++++++++++++++++++++ examples/mart_armory/setup.py | 9 --------- 3 files changed, 28 insertions(+), 9 deletions(-) create mode 100644 examples/mart_armory/README.md create mode 100644 examples/mart_armory/pyproject.toml delete mode 100644 examples/mart_armory/setup.py diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md new file mode 100644 index 00000000..a64f7bff --- /dev/null +++ b/examples/mart_armory/README.md @@ -0,0 +1,7 @@ +## Installation + +git+https://github.com/IntelLabs/MART.git@example_armory_attack + +```shell +pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg=mart_armory&subdirectory=examples/mart_armory' # install a python package from a repo subdirectory +``` diff --git a/examples/mart_armory/pyproject.toml b/examples/mart_armory/pyproject.toml new file mode 100644 index 00000000..6bcd02ad --- /dev/null +++ b/examples/mart_armory/pyproject.toml @@ -0,0 +1,21 @@ +[project] +name = "mart_armory" +version = "0.0.1a0" +description = "A wrapper for running MART attack in Armory." +readme = "README.md" +license = {file = "LICENSE"} +authors = [ + { name = "Intel Corporation", email = "weilin.xu@intel.com" }, +] + +requires-python = ">=3.9" + +dependencies = [ + "mart@git+https://github.com/IntelLabs/MART.git@example_armory_attack", +] + +[project.urls] +Source = "https://github.com/IntelLabs/MART/tree/example_armory_attack/examples/mart_armory" + +[tool.setuptools.packages.find] +include = ["mart_armory*"] diff --git a/examples/mart_armory/setup.py b/examples/mart_armory/setup.py deleted file mode 100644 index 35456b63..00000000 --- a/examples/mart_armory/setup.py +++ /dev/null @@ -1,9 +0,0 @@ -import setuptools - -setuptools.setup( - name="mart_armory", - version="0.0.1", - description="A wrapper for running MART attack in Armory.", - url="https://github.com/IntelLabs/MART", - packages=["mart_armory"], -) From f89f129f4725fa3f70b892571f8b2e165e79e66a Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 11:19:04 -0700 Subject: [PATCH 036/126] Update dependency. 
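
Moving the pins from requirements.txt into pyproject.toml means a single `pip install`
resolves everything: `mart` comes in as a PEP 508 direct reference to the example branch, and
`multimethod` backs the `convert` dispatcher in pytorch_wrapper. Equivalent to the README's
egg/subdirectory form, the direct-reference spelling would be (assuming network access to
GitHub):

```shell
pip install 'mart_armory @ git+https://github.com/IntelLabs/MART.git@example_armory_attack#subdirectory=examples/mart_armory'
```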
--- examples/mart_armory/pyproject.toml | 3 ++- examples/mart_armory/requirements.txt | 2 -- 2 files changed, 2 insertions(+), 3 deletions(-) delete mode 100644 examples/mart_armory/requirements.txt diff --git a/examples/mart_armory/pyproject.toml b/examples/mart_armory/pyproject.toml index 6bcd02ad..44a071b0 100644 --- a/examples/mart_armory/pyproject.toml +++ b/examples/mart_armory/pyproject.toml @@ -11,7 +11,8 @@ authors = [ requires-python = ">=3.9" dependencies = [ - "mart@git+https://github.com/IntelLabs/MART.git@example_armory_attack", + "mart@git+https://github.com/IntelLabs/MART.git@example_armory_attack", + "multimethod ~= 1.9.1", ] [project.urls] diff --git a/examples/mart_armory/requirements.txt b/examples/mart_armory/requirements.txt deleted file mode 100644 index 95ad6eb8..00000000 --- a/examples/mart_armory/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -mart @ git+https://github.com/IntelLabs/MART.git@example_armory_attack -multimethod From 2b8080cf90f801918003765e932c83e5dd0c9e98 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 11:20:19 -0700 Subject: [PATCH 037/126] Update README. --- examples/mart_armory/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index a64f7bff..8b554167 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -1,7 +1,7 @@ ## Installation -git+https://github.com/IntelLabs/MART.git@example_armory_attack +Install the `mart_armory` package from a repo subdirectory. ```shell -pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg=mart_armory&subdirectory=examples/mart_armory' # install a python package from a repo subdirectory +pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg=mart_armory&subdirectory=examples/mart_armory' ``` From b48e85b7d6885b3ef9eb68ce8be51bd24f16e579 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 11:48:08 -0700 Subject: [PATCH 038/126] Update README --- examples/mart_armory/README.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index 8b554167..111d2846 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -5,3 +5,25 @@ Install the `mart_armory` package from a repo subdirectory. ```shell pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg=mart_armory&subdirectory=examples/mart_armory' ``` + +## Usage + +1. Generate a YAML configuration of attack. + +NOT IMPLEMENTED YET. + +```shell +python -m mart.tools.generate_adversary_config --output=path/to/adversary.yaml +``` + +2. Update the attack section in the Armory scenario configuration. + +```json +"attack": { + "kwargs": { + "mart_adv_config_yaml": "path/to/adversary.yaml", + }, + "module": "mart_armory", + "name": "MartAttackObjectDetection", +}, +``` From 4c05c0fa4eca32371f57ede4f4c03ae384a3101f Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 12:22:43 -0700 Subject: [PATCH 039/126] Update README --- examples/mart_armory/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index 111d2846..066082b7 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -13,7 +13,9 @@ pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg NOT IMPLEMENTED YET. 
```shell -python -m mart.tools.generate_adversary_config --output=path/to/adversary.yaml +python -m mart.generate_adversary_config \ +attack=[object_detection_mask_adversary,data_coco] \ +output=path/to/adversary.yaml ``` 2. Update the attack section in the Armory scenario configuration. From 7453025377af775879b04308470cfb3ca0f82713 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 12:37:52 -0700 Subject: [PATCH 040/126] Add an attack config generator. --- examples/mart_armory/README.md | 6 +-- .../mart_armory/configs/assemble_attack.yaml | 15 ++++++ .../mart_armory/attack_config_generator.py | 0 .../mart_armory/generate_attack_config.py | 53 +++++++++++++++++++ 4 files changed, 71 insertions(+), 3 deletions(-) create mode 100644 examples/mart_armory/configs/assemble_attack.yaml delete mode 100644 examples/mart_armory/mart_armory/attack_config_generator.py create mode 100644 examples/mart_armory/mart_armory/generate_attack_config.py diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index 066082b7..4fc088de 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -13,9 +13,9 @@ pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg NOT IMPLEMENTED YET. ```shell -python -m mart.generate_adversary_config \ +python -m mart_armory.generate_attack_config \ attack=[object_detection_mask_adversary,data_coco] \ -output=path/to/adversary.yaml +output=path/to/attack.yaml ``` 2. Update the attack section in the Armory scenario configuration. @@ -23,7 +23,7 @@ output=path/to/adversary.yaml ```json "attack": { "kwargs": { - "mart_adv_config_yaml": "path/to/adversary.yaml", + "mart_adv_config_yaml": "path/to/attack.yaml", }, "module": "mart_armory", "name": "MartAttackObjectDetection", diff --git a/examples/mart_armory/configs/assemble_attack.yaml b/examples/mart_armory/configs/assemble_attack.yaml new file mode 100644 index 00000000..1b6da4ec --- /dev/null +++ b/examples/mart_armory/configs/assemble_attack.yaml @@ -0,0 +1,15 @@ +# @package _global_ + +# specify here default training configuration +defaults: + - _self_ + - attack: null + - override hydra/hydra_logging: disabled + - override hydra/job_logging: disabled + +output: ??? + +hydra: + output_subdir: null + run: + dir: . diff --git a/examples/mart_armory/mart_armory/attack_config_generator.py b/examples/mart_armory/mart_armory/attack_config_generator.py deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/mart_armory/mart_armory/generate_attack_config.py b/examples/mart_armory/mart_armory/generate_attack_config.py new file mode 100644 index 00000000..a9ef9a49 --- /dev/null +++ b/examples/mart_armory/mart_armory/generate_attack_config.py @@ -0,0 +1,53 @@ +# this file acts as a robust starting point for launching hydra runs and multiruns +# can be run from any place + +import os +import sys +from pathlib import Path + +import hydra +import pyrootutils +from omegaconf import DictConfig, OmegaConf + +from mart import utils + +log = utils.get_pylogger(__name__) + +# project root setup +# uses the current working directory as root. +# sets PROJECT_ROOT environment variable (used in `configs/paths/default.yaml`) +# loads environment variables from ".env" if exists +# adds root dir to the PYTHONPATH (so this file can be run from any place) +# https://github.com/ashleve/pyrootutils +# FIXME: Get rid of pyrootutils if we don't infer config.paths.root from PROJECT_ROOT. 
+root = Path(os.getcwd())
+pyrootutils.set_root(path=root, dotenv=True, pythonpath=True)
+
+config_path = root / "configs"
+if not config_path.exists():
+    log.warning(f"No config directory found at {config_path}!")
+    config_path = "configs"
+
+
+@hydra.main(version_base="1.2", config_path=config_path, config_name="assemble_attack.yaml")
+def main(cfg: DictConfig) -> float:
+    if "attack" not in cfg:
+        print(
+            "Please assemble an attack, e.g., `attack=[object_detection_mask_adversary,data_coco]`"
+        )
+    else:
+        print(OmegaConf.to_yaml(cfg.attack))
+
+    if "output" not in cfg:
+        print("You can output the config as a yaml file with `output=path/to/file.yaml`")
+    else:
+        OmegaConf.save(config=cfg.attack, f=cfg.output)
+        print(f"Saved config to {cfg.output}")
+
+
+if __name__ == "__main__":
+    ret = main()
+    if ret is not None and ret < 0:
+        sys.exit(ret)
+    else:
+        sys.exit(0)

From 409f1db1cee3cf27fa75e8deda052366aaa26db9 Mon Sep 17 00:00:00 2001
From: Weilin Xu <weilin.xu@intel.com>
Date: Fri, 21 Jul 2023 17:58:19 -0700
Subject: [PATCH 041/126] Move configs to package.

---
 .../hydra_plugins/hydra_mart_armory/__init__.py | 0
 .../hydra_plugins/hydra_mart_armory/mart_armory.py | 8 ++++++++
 .../{ => mart_armory}/configs/assemble_attack.yaml | 0
 examples/mart_armory/pyproject.toml | 5 ++++-
 4 files changed, 12 insertions(+), 1 deletion(-)
 create mode 100644 examples/mart_armory/hydra_plugins/hydra_mart_armory/__init__.py
 create mode 100644 examples/mart_armory/hydra_plugins/hydra_mart_armory/mart_armory.py
 rename examples/mart_armory/{ => mart_armory}/configs/assemble_attack.yaml (100%)

diff --git a/examples/mart_armory/hydra_plugins/hydra_mart_armory/__init__.py b/examples/mart_armory/hydra_plugins/hydra_mart_armory/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/examples/mart_armory/hydra_plugins/hydra_mart_armory/mart_armory.py b/examples/mart_armory/hydra_plugins/hydra_mart_armory/mart_armory.py
new file mode 100644
index 00000000..cecddb9d
--- /dev/null
+++ b/examples/mart_armory/hydra_plugins/hydra_mart_armory/mart_armory.py
@@ -0,0 +1,8 @@
+from hydra.core.config_search_path import ConfigSearchPath
+from hydra.plugins.search_path_plugin import SearchPathPlugin
+
+
+class HydraMartSearchPathPlugin(SearchPathPlugin):
+    def manipulate_search_path(self, search_path: ConfigSearchPath) -> None:
+        # Add mart_armory.configs to the search path
+        search_path.append("hydra-mart", "pkg://mart_armory.configs")
diff --git a/examples/mart_armory/configs/assemble_attack.yaml b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml
similarity index 100%
rename from examples/mart_armory/configs/assemble_attack.yaml
rename to examples/mart_armory/mart_armory/configs/assemble_attack.yaml
diff --git a/examples/mart_armory/pyproject.toml b/examples/mart_armory/pyproject.toml
index 44a071b0..7e20d3bd 100644
--- a/examples/mart_armory/pyproject.toml
+++ b/examples/mart_armory/pyproject.toml
@@ -19,4 +19,7 @@ dependencies = [
 Source = "https://github.com/IntelLabs/MART/tree/example_armory_attack/examples/mart_armory"

 [tool.setuptools.packages.find]
-include = ["mart_armory*"]
+include = ["mart_armory*", "hydra_plugins*"]
+
+[tool.setuptools.package-data]
+"*" = ["*.yaml"]

From 9fa464a5ec75e2609b3b87b943454abe04fa5d52 Mon Sep 17 00:00:00 2001
From: Weilin Xu <weilin.xu@intel.com>
Date: Fri, 21 Jul 2023 18:09:12 -0700
Subject: [PATCH 042/126] Add configs from range01.
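
Armory hands the attack images in [0, 1], while these configs let the optimization run in
[0, 255] pixel space. A quick numeric check of what the two transforms added below compute,
with plain tensor ops standing in for `Denormalize(center=0, scale=255)` (values chosen to
round-trip exactly):

```python
import torch
from torchvision.transforms import Normalize

x01 = torch.tensor([[[0.0, 100.0, 255.0]]]) / 255  # a tiny CHW "image" in [0, 1]

# transform (to_pixel_range_255): scale to [0, 255], then snap to integer pixel values.
x255 = x01 * 255
x255 = torch.fake_quantize_per_tensor_affine(x255, scale=1, zero_point=0, quant_min=0, quant_max=255)

# untransform (to_pixel_range_1): Normalize(mean=0, std=255) divides by 255.
x01_again = Normalize(mean=0, std=255)(x255)

assert torch.allclose(x01, x01_again)
```

In `data_coco.yaml` both directions are wrapped in `mart.transforms.TupleTransforms`, which
presumably applies the wrapped transform to each image in the tuple batch.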
--- .../batch_converter/transform/to_pixel_range_1.yaml | 3 +++ .../transform/to_pixel_range_255.yaml | 13 +++++++++++++ .../mart_armory/configs/attack/data_coco.yaml | 11 +++++++++++ 3 files changed, 27 insertions(+) create mode 100644 examples/mart_armory/mart_armory/configs/attack/batch_converter/transform/to_pixel_range_1.yaml create mode 100644 examples/mart_armory/mart_armory/configs/attack/batch_converter/transform/to_pixel_range_255.yaml create mode 100644 examples/mart_armory/mart_armory/configs/attack/data_coco.yaml diff --git a/examples/mart_armory/mart_armory/configs/attack/batch_converter/transform/to_pixel_range_1.yaml b/examples/mart_armory/mart_armory/configs/attack/batch_converter/transform/to_pixel_range_1.yaml new file mode 100644 index 00000000..92a63b7c --- /dev/null +++ b/examples/mart_armory/mart_armory/configs/attack/batch_converter/transform/to_pixel_range_1.yaml @@ -0,0 +1,3 @@ +_target_: torchvision.transforms.Normalize +mean: 0 +std: 255 diff --git a/examples/mart_armory/mart_armory/configs/attack/batch_converter/transform/to_pixel_range_255.yaml b/examples/mart_armory/mart_armory/configs/attack/batch_converter/transform/to_pixel_range_255.yaml new file mode 100644 index 00000000..dbeff64d --- /dev/null +++ b/examples/mart_armory/mart_armory/configs/attack/batch_converter/transform/to_pixel_range_255.yaml @@ -0,0 +1,13 @@ +_target_: torchvision.transforms.Compose +transforms: + - _target_: mart.transforms.Denormalize + center: 0 + scale: 255 + # Fix potential numeric error. + - _target_: torch.fake_quantize_per_tensor_affine + _partial_: true + # (x/1+0).round().clamp(0, 255) * 1 + scale: 1 + zero_point: 0 + quant_min: 0 + quant_max: 255 diff --git a/examples/mart_armory/mart_armory/configs/attack/data_coco.yaml b/examples/mart_armory/mart_armory/configs/attack/data_coco.yaml new file mode 100644 index 00000000..52bae686 --- /dev/null +++ b/examples/mart_armory/mart_armory/configs/attack/data_coco.yaml @@ -0,0 +1,11 @@ +defaults: + - batch_converter/transform@batch_converter.transform.transforms: to_pixel_range_255 + - batch_converter/transform@batch_converter.untransform.transforms: to_pixel_range_1 + - override batch_converter: tuple + +batch_converter: + transform: + _target_: mart.transforms.TupleTransforms + + untransform: + _target_: mart.transforms.TupleTransforms From b827ef16ae70d5535e7c7650d3fc82e40a7d49c7 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 18:09:42 -0700 Subject: [PATCH 043/126] Update README. --- examples/mart_armory/README.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index 4fc088de..1408f804 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -10,12 +10,10 @@ pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg 1. Generate a YAML configuration of attack. -NOT IMPLEMENTED YET. - ```shell python -m mart_armory.generate_attack_config \ attack=[object_detection_mask_adversary,data_coco] \ -output=path/to/attack.yaml +output=path_to_attack.yaml ``` 2. Update the attack section in the Armory scenario configuration. From 8f3d725ac01880d9d86e85cb7fc4743e0c078275 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 18:25:58 -0700 Subject: [PATCH 044/126] Update configs. 
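
Two more fields matter in the Armory attack stanza: `knowledge` is required by Armory's config
schema, and `"use_label": true` makes the scenario pass the ground-truth `y` into `generate()`.
Roughly, the resulting call into the wrapper looks like this (a sketch of the calling
convention, not Armory's actual code; the arrays are whatever the scenario draws from the
dataset):

```python
x_adv = attack.generate(x=x, y=y, y_patch_metadata=y_patch_metadata)
```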
--- examples/mart_armory/README.md | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index 1408f804..905e361b 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -13,6 +13,7 @@ pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg ```shell python -m mart_armory.generate_attack_config \ attack=[object_detection_mask_adversary,data_coco] \ +attack.objective=null \ output=path_to_attack.yaml ``` @@ -20,10 +21,14 @@ output=path_to_attack.yaml ```json "attack": { - "kwargs": { - "mart_adv_config_yaml": "path/to/attack.yaml", - }, "module": "mart_armory", "name": "MartAttackObjectDetection", + "kwargs": { + "mart_adv_config_yaml": "path/to/attack.yaml" + }, + "knowledge": "white", + "use_label": true }, ``` + +Armory requires the argument `knowledge`. The statement `"use_label": true` gets `y` for the attack. From 142b02f3495b909310e8c56859beb4ea1e5909d6 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 18:26:07 -0700 Subject: [PATCH 045/126] Bugfix. --- examples/mart_armory/mart_armory/pytorch_wrapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py index df81e733..198f66e5 100644 --- a/examples/mart_armory/mart_armory/pytorch_wrapper.py +++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py @@ -61,7 +61,7 @@ def __init__(self, model, batch_converter, mart_adv_config_yaml): # TODO: Automatically search for torch.nn.Module within model. # Extract PyTorch model from an ART Estimator. self.model = model._model - self.device = self.model.device + self.device = model.device self.batch_converter = batch_converter From 1c43a3a3d025d1b892f7bebfc1a5f2386025a01a Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 18:27:58 -0700 Subject: [PATCH 046/126] Fix Adversary(**kwarg). --- examples/mart_armory/mart_armory/pytorch_wrapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py index 198f66e5..1a483cce 100644 --- a/examples/mart_armory/mart_armory/pytorch_wrapper.py +++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py @@ -85,7 +85,7 @@ def convert_batch_torchvision_to_armory(self, batch_tv_pth): def generate(self, **batch_armory_np): batch_tv_pth = self.convert_batch_armory_to_torchvision(batch_armory_np) - batch_adv_tv_pth = self.adversary(batch_tv_pth, model=self.model) + batch_adv_tv_pth = self.adversary(batch=batch_tv_pth, model=self.model) batch_adv_armory_np = self.convert_batch_torchvision_to_armory(batch_adv_tv_pth) # Only return adversarial input in the original numpy format. From b9a055ed6a8d28b6d64268ce0151588bb3d76d8d Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 21 Jul 2023 18:41:51 -0700 Subject: [PATCH 047/126] Fix device placement. --- examples/mart_armory/mart_armory/pytorch_wrapper.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py index 1a483cce..db71f38c 100644 --- a/examples/mart_armory/mart_armory/pytorch_wrapper.py +++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py @@ -18,17 +18,17 @@ # A recursive function to convert all np.ndarray in an object to torch.Tensor, or vice versa. 
 @multimethod
 def convert(obj: dict, device=None):
-    return {key: convert(value) for key, value in obj.items()}
+    return {key: convert(value, device=device) for key, value in obj.items()}


 @multimethod
 def convert(obj: list, device=None):  # noqa: F811
-    return [convert(item) for item in obj]
+    return [convert(item, device=device) for item in obj]


 @multimethod
 def convert(obj: tuple, device=None):  # noqa: F811
-    return tuple(convert(obj))
+    return tuple(convert(item, device=device) for item in obj)


 @multimethod

From c29f85854fa1de130509653473a9c7f910100466 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 21 Jul 2023 19:06:17 -0700
Subject: [PATCH 048/126] Bugfix.

---
 examples/mart_armory/mart_armory/pytorch_wrapper.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py
index db71f38c..6e2cb24f 100644
--- a/examples/mart_armory/mart_armory/pytorch_wrapper.py
+++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py
@@ -60,7 +60,8 @@ class MartAttack:
     def __init__(self, model, batch_converter, mart_adv_config_yaml):
         # TODO: Automatically search for torch.nn.Module within model.
         # Extract PyTorch model from an ART Estimator.
-        self.model = model._model
+        # Make the model accept batch as an argument parameter.
+        self.model = lambda batch: model._model(*batch)
         self.device = model.device

         self.batch_converter = batch_converter
@@ -69,6 +70,9 @@ def __init__(self, model, batch_converter, mart_adv_config_yaml):
         adv_cfg = OmegaConf.load(mart_adv_config_yaml)
         self.adversary = hydra.utils.instantiate(adv_cfg)

+        # Move adversary to the same device.
+        self.adversary.to(self.device)
+
     def convert_batch_armory_to_torchvision(self, batch_armory_np):
         # np.ndarray -> torch.Tensor, on a device.
         batch_armory_pth = convert(batch_armory_np, device=self.device)
@@ -85,6 +89,10 @@ def convert_batch_torchvision_to_armory(self, batch_tv_pth):

     def generate(self, **batch_armory_np):
         batch_tv_pth = self.convert_batch_armory_to_torchvision(batch_armory_np)
+
+        # FIXME: Convert perturbable_mask in conversion.
+        batch_tv_pth[1][0]["perturbable_mask"] = batch_tv_pth[1][0]["mask"].permute((2, 0, 1))
+
         batch_adv_tv_pth = self.adversary(batch=batch_tv_pth, model=self.model)
         batch_adv_armory_np = self.convert_batch_torchvision_to_armory(batch_adv_tv_pth)

From 6622e57d80d3c49271960c927b00d9f459e40b8d Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 21 Jul 2023 19:24:17 -0700
Subject: [PATCH 049/126] Change lambda to a real wrapper.

---
 .../mart_armory/pytorch_wrapper.py | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py
index 6e2cb24f..8bf5c11d 100644
--- a/examples/mart_armory/mart_armory/pytorch_wrapper.py
+++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py
@@ -47,6 +47,20 @@ def convert(obj, device=None):  # noqa: F811
     return obj


+class ModelWrapper(torch.nn.Module):
+    def __init__(self, model):
+        super().__init__()
+
+        # FIXME: We need an interface to modify the model, because the model only returns prediction in eval() mode.
+        self.model = model
+
+    def forward(self, batch):
+
+        # Make the model accept batch as an argument parameter.
+        output = self.model(*batch)
+        return output
+
+
 class MartAttack:
     """A minimal wrapper to run PyTorch-based MART adversary in Armory against PyTorch-based
     models.
@@ -58,10 +72,9 @@ class MartAttack:
     """

     def __init__(self, model, batch_converter, mart_adv_config_yaml):
-        # TODO: Automatically search for torch.nn.Module within model.
         # Extract PyTorch model from an ART Estimator.
-        # Make the model accept batch as an argument parameter.
-        self.model = lambda batch: model._model(*batch)
+        # TODO: Automatically search for torch.nn.Module within model.
+        self.model = ModelWrapper(model._model)
         self.device = model.device

         self.batch_converter = batch_converter

From 2c8ae224cc4238b65e9697999aec54eb145d6603 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 21 Jul 2023 19:26:43 -0700
Subject: [PATCH 050/126] Update README.

---
 examples/mart_armory/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md
index 905e361b..506d097e 100644
--- a/examples/mart_armory/README.md
+++ b/examples/mart_armory/README.md
@@ -14,7 +14,7 @@ pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg
 python -m mart_armory.generate_attack_config \
 attack=[object_detection_mask_adversary,data_coco] \
 attack.objective=null \
-output=path_to_attack.yaml
+output=path/to/attack.yaml

From 8f7570ae4ee072c26368713445a70b7281fde701 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 21 Jul 2023 20:02:20 -0700
Subject: [PATCH 051/126] Use DualModel and fix bugs.

---
 .../configs/attack/gain/rcnn_training_loss.yaml     |  8 ++++++++
 examples/mart_armory/mart_armory/pytorch_wrapper.py | 12 +++++++-----
 2 files changed, 15 insertions(+), 5 deletions(-)
 create mode 100644 examples/mart_armory/mart_armory/configs/attack/gain/rcnn_training_loss.yaml

diff --git a/examples/mart_armory/mart_armory/configs/attack/gain/rcnn_training_loss.yaml b/examples/mart_armory/mart_armory/configs/attack/gain/rcnn_training_loss.yaml
new file mode 100644
index 00000000..c9e3439e
--- /dev/null
+++ b/examples/mart_armory/mart_armory/configs/attack/gain/rcnn_training_loss.yaml
@@ -0,0 +1,8 @@
+_target_: mart.nn.CallWith
+module:
+  _target_: mart.nn.Sum
+_call_with_args_:
+  - "training.loss_objectness"
+  - "training.loss_rpn_box_reg"
+  - "training.loss_classifier"
+  - "training.loss_box_reg"
diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py
index 8bf5c11d..30fa1a67 100644
--- a/examples/mart_armory/mart_armory/pytorch_wrapper.py
+++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py
@@ -12,6 +12,8 @@
 from multimethod import multimethod
 from omegaconf import OmegaConf

+from mart.models.dual_mode import DualModeGeneralizedRCNN
+
 from .batch_converter import ObjectDetectionBatchConverter


@@ -52,10 +54,9 @@ class ModelWrapper(torch.nn.Module):
     def __init__(self, model):
         super().__init__()

         # FIXME: We need an interface to modify the model, because the model only returns prediction in eval() mode.
-        self.model = model
+        self.model = DualModeGeneralizedRCNN(model)

     def forward(self, batch):
-
         # Make the model accept batch as an argument parameter.
         output = self.model(*batch)
         return output
@@ -95,7 +96,8 @@ def convert_batch_armory_to_torchvision(self, batch_armory_np):

     def convert_batch_torchvision_to_armory(self, batch_tv_pth):
         # torchvision format -> armory format.
-        batch_armory_pth = self.batch_converter.revert(batch_tv_pth)
+        # Note: revert(input, target)
+        batch_armory_pth = self.batch_converter.revert(*batch_tv_pth)
         # torch.Tensor -> np.ndarray
         batch_armory_np = convert(batch_armory_pth)
         return batch_armory_np
@@ -103,8 +105,8 @@ def convert_batch_torchvision_to_armory(self, batch_tv_pth):

     def generate(self, **batch_armory_np):
         batch_tv_pth = self.convert_batch_armory_to_torchvision(batch_armory_np)

-        # FIXME: Convert perturbable_mask in conversion.
-        batch_tv_pth[1][0]["perturbable_mask"] = batch_tv_pth[1][0]["mask"].permute((2, 0, 1))
+        # FIXME: Convert perturbable_mask somewhere else.
+        batch_tv_pth[1][0]["perturbable_mask"] = batch_tv_pth[1][0]["mask"].permute((2, 0, 1)) > 0

From 8e949d322b3e13d4f7edd8fc4a53d16978fe3844 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 21 Jul 2023 20:04:25 -0700
Subject: [PATCH 052/126] Bugfix.

---
 examples/mart_armory/mart_armory/pytorch_wrapper.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py
index 30fa1a67..d1c23b2f 100644
--- a/examples/mart_armory/mart_armory/pytorch_wrapper.py
+++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py
@@ -106,7 +106,9 @@ def generate(self, **batch_armory_np):
         batch_tv_pth = self.convert_batch_armory_to_torchvision(batch_armory_np)

         # FIXME: Convert perturbable_mask somewhere else.
-        batch_tv_pth[1][0]["perturbable_mask"] = batch_tv_pth[1][0]["mask"].permute((2, 0, 1)) > 0
+        batch_tv_pth[1][0]["perturbable_mask"] = (
+            batch_tv_pth[1][0]["mask"].permute((2, 0, 1)) / 255
+        )

From 3e8f8c9701cfc82b75700ebd3a264d6013b2e71e Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Mon, 24 Jul 2023 11:10:20 -0700
Subject: [PATCH 053/126] Comment.

---
 .../mart_armory/mart_armory/batch_converter.py | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/examples/mart_armory/mart_armory/batch_converter.py b/examples/mart_armory/mart_armory/batch_converter.py
index 9217b84c..9fde8604 100644
--- a/examples/mart_armory/mart_armory/batch_converter.py
+++ b/examples/mart_armory/mart_armory/batch_converter.py
@@ -41,18 +41,26 @@ def _convert(self, batch: dict):
             joint_target = reduce(lambda a, b: a | b, dicts)
             target.append(joint_target)

+        target = tuple(target)
+
+        # TODO: Move to transform() that works on both input and target.
+        # 1. input permute
+        # 2. tuplize input
+        # 3. permute and scale target["mask"]
         # NHWC -> NCHW, the PyTorch format.
         input = input.permute((0, 3, 1, 2))
         # NCHW -> tuple[CHW]
-        input = tuple(inp_ for inp_ in input)
-
-        target = tuple(target)
+        input = tuple(input)

         return input, target

     def _revert(self, input: tuple[torch.Tensor], target: tuple[dict]) -> dict:
         batch = {}

+        # TODO: Move to untransform().
+        # 1. permute and scale target["mask"]
+        # 2. input stack
+        # 3. input permute
         # tuple[CHW] -> NCHW
         input = torch.stack(input)
         # NCHW -> NHWC, the TensorFlow format used in ART.

From 342fef29f00aac2bf298d4a7f7816bc0d8c43773 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Mon, 24 Jul 2023 11:28:09 -0700
Subject: [PATCH 054/126] Add target_transform and target_untransform.
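The converter now applies a hook on each leg of the trip: `transform`/`target_transform` on the way into the attack, and `untransform`/`target_untransform` on the way back. A minimal sketch of the intended round trip, using `DictBatchConverter` from this module with toy lambdas (the class details may shift later in this series):

```python
import torch

from mart.attack.batch_converter import DictBatchConverter

converter = DictBatchConverter(
    transform=lambda x: x / 255,    # input -> [0, 1] for the attack
    untransform=lambda x: x * 255,  # adversarial input -> the original range
)

batch = {"input": torch.full((3, 2, 2), 255.0), "target": {"labels": torch.tensor([1])}}
input_t, target_t = converter(batch)             # input_t is in [0, 1]
batch_adv = converter.revert(input_t, target_t)  # input back in [0, 255]
```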
---
 mart/attack/batch_converter.py | 30 ++++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)

diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py
index 0eb19e01..58cc784c 100644
--- a/mart/attack/batch_converter.py
+++ b/mart/attack/batch_converter.py
@@ -18,23 +18,41 @@


 class BatchConverter(abc.ABC):
-    def __init__(self, *, transform: Callable = None, untransform: Callable = None):
-        """_summary_
+    def __init__(
+        self,
+        *,
+        transform: Callable = None,
+        untransform: Callable = None,
+        target_transform: Callable = None,
+        target_untransform: Callable = None,
+    ):
+        """Convert batch into (input, target), and vice versa.

         Args:
             transform (Callable): Transform input into a convenient format, e.g. [0,1]->[0,255].
             untransform (Callable): Transform adversarial input in the convenient format back
                 into the original format of input, e.g. [0,255]->[0,1].
+            target_transform (Callable): Transform target.
+            target_untransform (Callable): Untransform target.
         """
-        self.transform = transform if transform is not None else lambda x: x
-        self.untransform = untransform if untransform is not None else lambda x: x
+        self.transform = transform if isinstance(transform, Callable) else lambda x: x
+        self.untransform = untransform if isinstance(untransform, Callable) else lambda x: x
+
+        self.target_transform = (
+            target_transform if isinstance(target_transform, Callable) else lambda x: x
+        )
+        self.target_untransform = (
+            target_untransform if isinstance(target_untransform, Callable) else lambda x: x
+        )

     def __call__(self, batch):
         input, target = self._convert(batch)
         input_transformed = self.transform(input)
-        return input_transformed, target
+        target_transformed = self.target_transform(target)
+        return input_transformed, target_transformed

-    def revert(self, input_transformed, target):
+    def revert(self, input_transformed, target_transformed):
         input = self.untransform(input_transformed)
+        target = self.target_untransform(target_transformed)
         batch = self._revert(input, target)
         return batch

From 08a66e434cfc0f05b44d756372f0a62d772400ee Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Mon, 24 Jul 2023 12:28:19 -0700
Subject: [PATCH 055/126] Make batch_converter configurable.
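With the converter described in YAML, the Armory wrapper can build it the same way it builds the adversary, so nothing task-specific stays hard-coded in Python. A sketch of the consuming side, mirroring the wrapper code in this patch:

```python
import hydra
from omegaconf import OmegaConf

cfg = OmegaConf.load("path/to/attack.yaml")
adv = hydra.utils.instantiate(cfg)

batch_converter = adv.batch_converter  # e.g. ObjectDetectionBatchConverter
adversary = adv.attack                 # the MART adversary
```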
--- examples/mart_armory/README.md | 3 +- examples/mart_armory/mart_armory/__init__.py | 2 +- .../mart_armory/configs/assemble_attack.yaml | 1 + .../batch_converter/object_detection.yaml | 6 ++++ .../mart_armory/generate_attack_config.py | 4 +-- .../mart_armory/pytorch_wrapper.py | 30 ++++--------------- 6 files changed, 17 insertions(+), 29 deletions(-) create mode 100644 examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index 506d097e..b4c67fd4 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -12,6 +12,7 @@ pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg ```shell python -m mart_armory.generate_attack_config \ +batch_converter=object_detection \ attack=[object_detection_mask_adversary,data_coco] \ attack.objective=null \ output=path/to/attack.yaml @@ -22,7 +23,7 @@ output=path/to/attack.yaml ```json "attack": { "module": "mart_armory", - "name": "MartAttackObjectDetection", + "name": "MartAttack", "kwargs": { "mart_adv_config_yaml": "path/to/attack.yaml" }, diff --git a/examples/mart_armory/mart_armory/__init__.py b/examples/mart_armory/mart_armory/__init__.py index b15e4be1..926f7956 100644 --- a/examples/mart_armory/mart_armory/__init__.py +++ b/examples/mart_armory/mart_armory/__init__.py @@ -1,5 +1,5 @@ import importlib -from mart_armory.pytorch_wrapper import MartAttackObjectDetection +from mart_armory.pytorch_wrapper import MartAttack __version__ = importlib.metadata.version(__package__ or __name__) diff --git a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml index 1b6da4ec..19a4410c 100644 --- a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml +++ b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml @@ -4,6 +4,7 @@ defaults: - _self_ - attack: null + - batch_converter: null - override hydra/hydra_logging: disabled - override hydra/job_logging: disabled diff --git a/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml b/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml new file mode 100644 index 00000000..a9c7b154 --- /dev/null +++ b/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml @@ -0,0 +1,6 @@ +_target_: mart_armory.batch_converter.ObjectDetectionBatchConverter +input_key: "x" +target_keys: + y: ["area", "boxes", "id", "image_id", "is_crowd", "labels"] + y_patch_metadata: + ["avg_patch_depth", "gs_coords", "mask", "max_depth_perturb_meters"] diff --git a/examples/mart_armory/mart_armory/generate_attack_config.py b/examples/mart_armory/mart_armory/generate_attack_config.py index a9ef9a49..6376381f 100644 --- a/examples/mart_armory/mart_armory/generate_attack_config.py +++ b/examples/mart_armory/mart_armory/generate_attack_config.py @@ -36,12 +36,12 @@ def main(cfg: DictConfig) -> float: "Please assemble an attack, e.g., `attack=[object_detection_mask_adversary,data_coco]`" ) else: - print(OmegaConf.to_yaml(cfg.attack)) + print(OmegaConf.to_yaml(cfg)) if "output" not in cfg: print("You can output config as a yaml file by `output=path/to/file.yaml`") else: - OmegaConf.save(config=cfg.attack, f=cfg.output) + OmegaConf.save(config=cfg, f=cfg.output) print(f"Saved config to {cfg.output}") diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py index d1c23b2f..095ef1df 
100644 --- a/examples/mart_armory/mart_armory/pytorch_wrapper.py +++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py @@ -72,17 +72,18 @@ class MartAttack: 4. Convert torch.Tensor back to np.ndarray. """ - def __init__(self, model, batch_converter, mart_adv_config_yaml): + def __init__(self, model, mart_adv_config_yaml): # Extract PyTorch model from an ART Estimator. # TODO: Automatically search for torch.nn.Module within model. self.model = ModelWrapper(model._model) self.device = model.device - self.batch_converter = batch_converter - # Instantiate a MART adversary. adv_cfg = OmegaConf.load(mart_adv_config_yaml) - self.adversary = hydra.utils.instantiate(adv_cfg) + adv = hydra.utils.instantiate(adv_cfg) + + self.batch_converter = adv.batch_converter + self.adversary = adv.attack # Move adversary to the same device. self.adversary.to(self.device) @@ -117,24 +118,3 @@ def generate(self, **batch_armory_np): input_key = self.batch_converter.input_key input_adv_np = batch_adv_armory_np[input_key] return input_adv_np - - -class MartAttackObjectDetection(MartAttack): - def __init__(self, model, mart_adv_config_yaml): - batch_config = { - "input_key": "x", - "target_keys": { - "y": ["area", "boxes", "id", "image_id", "is_crowd", "labels"], - "y_patch_metadata": [ - "avg_patch_depth", - "gs_coords", - "mask", - "max_depth_perturb_meters", - ], - }, - } - - batch_converter = ObjectDetectionBatchConverter(**batch_config) - super().__init__( - model=model, batch_converter=batch_converter, mart_adv_config_yaml=mart_adv_config_yaml - ) From a8b2c22d85590eafa7256399ceac8bc2049195c5 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 24 Jul 2023 12:46:37 -0700 Subject: [PATCH 056/126] Make transform configurable. --- .../mart_armory/batch_converter.py | 32 +++++++++---------- .../batch_converter/object_detection.yaml | 19 +++++++++++ 2 files changed, 35 insertions(+), 16 deletions(-) diff --git a/examples/mart_armory/mart_armory/batch_converter.py b/examples/mart_armory/mart_armory/batch_converter.py index 9fde8604..a9c124f6 100644 --- a/examples/mart_armory/mart_armory/batch_converter.py +++ b/examples/mart_armory/mart_armory/batch_converter.py @@ -43,28 +43,28 @@ def _convert(self, batch: dict): target = tuple(target) - # TODO: Move to transform() that works on both input and target. - # 1. input permute - # 2. tuplize input - # 3. permute and scale target["mask"] - # NHWC -> NCHW, the PyTorch format. - input = input.permute((0, 3, 1, 2)) - # NCHW -> tuple[CHW] - input = tuple(input) + # # TODO: Move to transform() that works on both input and target. + # # 1. input permute + # # 2. tuplize input + # # 3. permute and scale target["mask"] + # # NHWC -> NCHW, the PyTorch format. + # input = input.permute((0, 3, 1, 2)) + # # NCHW -> tuple[CHW] + # input = tuple(input) return input, target def _revert(self, input: tuple[torch.Tensor], target: tuple[dict]) -> dict: batch = {} - # TODO: Move to untransform(). - # 1. permute and scale target["mask"] - # 2. input stack - # 3. input permute - # tuple[CHW] -> NCHW - input = torch.stack(input) - # NCHW -> NHWC, the TensorFlow format used in ART. - input = input.permute((0, 2, 3, 1)) + # # TODO: Move to untransform(). + # # 1. permute and scale target["mask"] + # # 2. input stack + # # 3. input permute + # # tuple[CHW] -> NCHW + # input = torch.stack(input) + # # NCHW -> NHWC, the TensorFlow format used in ART. 
+        # input = input.permute((0, 2, 3, 1))

         batch[self.input_key] = input

diff --git a/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml b/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml
index a9c7b154..87537bb2 100644
--- a/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml
+++ b/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml
@@ -4,3 +4,22 @@ target_keys:
   y: ["area", "boxes", "id", "image_id", "is_crowd", "labels"]
   y_patch_metadata:
     ["avg_patch_depth", "gs_coords", "mask", "max_depth_perturb_meters"]
+transform:
+  _target_: torchvision.transforms.Compose
+  _convert_: partial
+  transforms: # NHWC -> NCHW, the PyTorch format.
+    - _target_: torch.permute
+      _partial_: true
+      dims: [0, 3, 1, 2]
+    - _target_: builtins.tuple
+      _partial_: true
+untransform:
+  _target_: torchvision.transforms.Compose
+  _convert_: partial
+  transforms: # NCHW -> NHWC, the TensorFlow format used in ART.
+    - _target_: torch.stack
+      _partial_: true
+      dim: 0
+    - _target_: torch.permute
+      _partial_: true
+      dims: [0, 2, 3, 1]

From ac3c5e8da43ff554b766faa6ff58c4d82daf926a Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Mon, 24 Jul 2023 12:49:10 -0700
Subject: [PATCH 057/126] Simplify config.

---
 .../mart_armory/configs/batch_converter/object_detection.yaml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml b/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml
index 87537bb2..919da605 100644
--- a/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml
+++ b/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml
@@ -1,4 +1,5 @@
 _target_: mart_armory.batch_converter.ObjectDetectionBatchConverter
+_convert_: partial
 input_key: "x"
 target_keys:
   y: ["area", "boxes", "id", "image_id", "is_crowd", "labels"]
   y_patch_metadata:
     ["avg_patch_depth", "gs_coords", "mask", "max_depth_perturb_meters"]
 transform:
   _target_: torchvision.transforms.Compose
-  _convert_: partial
   transforms: # NHWC -> NCHW, the PyTorch format.
     - _target_: torch.permute
       _partial_: true
       dims: [0, 3, 1, 2]
@@ -15,7 +15,6 @@ transform:
     _partial_: true
 untransform:
   _target_: torchvision.transforms.Compose
-  _convert_: partial
   transforms: # NCHW -> NHWC, the TensorFlow format used in ART.
     - _target_: torch.stack
       _partial_: true
       dim: 0
     - _target_: torch.permute
       _partial_: true
       dims: [0, 2, 3, 1]

From 5898fc7de892a2d91c2e7c497b9c7103393e9960 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Mon, 24 Jul 2023 15:33:04 -0700
Subject: [PATCH 058/126] Make target_transform configurable.
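`target_transform` only needs to rewrite one key of each target dict, so this patch adds two small composable helpers, `SelectKeyTransform` and `Method` (defined below). A sketch of them in isolation; the shapes and the 255 scale follow the CARLA mask convention:

```python
import torch

from mart_armory.batch_converter import Method, SelectKeyTransform

# Scale target["mask"] to [0, 1] and rename it; the YAML config additionally
# permutes HWC -> CHW before this step.
rewrite = SelectKeyTransform(
    key="mask",
    rename="perturbable_mask",
    transform=Method(255, name="div"),  # calls tensor.div(255)
)

target = {"mask": torch.full((8, 8, 3), 255, dtype=torch.uint8)}
target = rewrite(target)
assert "perturbable_mask" in target and "mask" not in target
```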
--- .../mart_armory/batch_converter.py | 28 +++++++++++++ .../batch_converter/object_detection.yaml | 40 +++++++++++++++++++ .../mart_armory/pytorch_wrapper.py | 5 --- 3 files changed, 68 insertions(+), 5 deletions(-) diff --git a/examples/mart_armory/mart_armory/batch_converter.py b/examples/mart_armory/mart_armory/batch_converter.py index a9c124f6..cda0176f 100644 --- a/examples/mart_armory/mart_armory/batch_converter.py +++ b/examples/mart_armory/mart_armory/batch_converter.py @@ -76,3 +76,31 @@ def _revert(self, input: tuple[torch.Tensor], target: tuple[dict]) -> dict: batch[target_key].append(target_key_i) return batch + + +class SelectKeyTransform: + def __init__(self, *, key, transform, rename=None): + self.key = key + self.transform = transform + self.rename = rename + + def __call__(self, target: dict): + new_key = self.rename or self.key + + target[new_key] = self.transform(target[self.key]) + if self.rename is not None: + del target[self.key] + + return target + + +class Method: + def __init__(self, *args, name, **kwargs): + self.name = name + self.args = args + self.kwargs = kwargs + + def __call__(self, obj): + method = getattr(obj, self.name) + ret = method(*self.args, **self.kwargs) + return ret diff --git a/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml b/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml index 919da605..632e7a30 100644 --- a/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml +++ b/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml @@ -1,3 +1,4 @@ +# Convert Armory data batch to the format that is comprehensible by torchvision RCNN. _target_: mart_armory.batch_converter.ObjectDetectionBatchConverter _convert_: partial input_key: "x" @@ -5,6 +6,7 @@ target_keys: y: ["area", "boxes", "id", "image_id", "is_crowd", "labels"] y_patch_metadata: ["avg_patch_depth", "gs_coords", "mask", "max_depth_perturb_meters"] + transform: _target_: torchvision.transforms.Compose transforms: # NHWC -> NCHW, the PyTorch format. @@ -13,6 +15,7 @@ transform: dims: [0, 3, 1, 2] - _target_: builtins.tuple _partial_: true + untransform: _target_: torchvision.transforms.Compose transforms: # NCHW -> NHWC, the TensorFlow format used in ART. @@ -22,3 +25,40 @@ untransform: - _target_: torch.permute _partial_: true dims: [0, 2, 3, 1] + +target_transform: + _target_: mart.transforms.TupleTransforms + transforms: + _target_: mart_armory.batch_converter.SelectKeyTransform + # Apply this to target["mask"] only + key: "mask" + rename: "perturbable_mask" + transform: + _target_: torchvision.transforms.Compose + transforms: + # HWC -> CHW + - _target_: torch.permute + _partial_: true + dims: [2, 0, 1] + # Normalize() does not work with uint8. 
+ - _target_: mart_armory.batch_converter.Method + name: div + _args_: [255] + +target_untransform: + _target_: mart.transforms.TupleTransforms + transforms: + _target_: mart_armory.batch_converter.SelectKeyTransform + # Apply this to target["mask"] only + key: "perturbable_mask" + rename: "mask" + transform: + _target_: torchvision.transforms.Compose + transforms: + - _target_: mart_armory.batch_converter.Method + name: mul + _args_: [255] + # CHW -> HWC + - _target_: torch.permute + _partial_: true + dims: [1, 2, 0] diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py index 095ef1df..a0378e6e 100644 --- a/examples/mart_armory/mart_armory/pytorch_wrapper.py +++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py @@ -106,11 +106,6 @@ def convert_batch_torchvision_to_armory(self, batch_tv_pth): def generate(self, **batch_armory_np): batch_tv_pth = self.convert_batch_armory_to_torchvision(batch_armory_np) - # FIXME: Convert perturbable_mask somewhere else. - batch_tv_pth[1][0]["perturbable_mask"] = ( - batch_tv_pth[1][0]["mask"].permute((2, 0, 1)) / 255 - ) - batch_adv_tv_pth = self.adversary(batch=batch_tv_pth, model=self.model) batch_adv_armory_np = self.convert_batch_torchvision_to_armory(batch_adv_tv_pth) From a682d00e7f1cc4236b01f3212c20fa4089584848 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 24 Jul 2023 15:48:32 -0700 Subject: [PATCH 059/126] Make model_wrapper configurable. --- examples/mart_armory/README.md | 1 + .../mart_armory/configs/assemble_attack.yaml | 1 + .../configs/model_wrapper/art_rcnn.yaml | 2 ++ .../mart_armory/pytorch_wrapper.py | 26 ++++++++++++------- 4 files changed, 20 insertions(+), 10 deletions(-) create mode 100644 examples/mart_armory/mart_armory/configs/model_wrapper/art_rcnn.yaml diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index b4c67fd4..75b798dc 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -13,6 +13,7 @@ pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg ```shell python -m mart_armory.generate_attack_config \ batch_converter=object_detection \ +model_wrapper=art_rcnn \ attack=[object_detection_mask_adversary,data_coco] \ attack.objective=null \ output=path/to/attack.yaml diff --git a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml index 19a4410c..565944af 100644 --- a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml +++ b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml @@ -5,6 +5,7 @@ defaults: - _self_ - attack: null - batch_converter: null + - model_wrapper: null - override hydra/hydra_logging: disabled - override hydra/job_logging: disabled diff --git a/examples/mart_armory/mart_armory/configs/model_wrapper/art_rcnn.yaml b/examples/mart_armory/mart_armory/configs/model_wrapper/art_rcnn.yaml new file mode 100644 index 00000000..8dcacd37 --- /dev/null +++ b/examples/mart_armory/mart_armory/configs/model_wrapper/art_rcnn.yaml @@ -0,0 +1,2 @@ +_target_: mart_armory.pytorch_wrapper.ArtRcnnModelWrapper +_partial_: true diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py index a0378e6e..da455e86 100644 --- a/examples/mart_armory/mart_armory/pytorch_wrapper.py +++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py @@ -14,8 +14,6 @@ from mart.models.dual_mode import DualModeGeneralizedRCNN -from .batch_converter 
import ObjectDetectionBatchConverter


-class ModelWrapper(torch.nn.Module):
+class ArtRcnnModelWrapper(torch.nn.Module):
+    """Modify the model so that it is convenient to attack.
+
+    Common issues:
+    1. Make the model accept a single argument `output=model(batch)`;
+    2. Make the model return loss in eval mode;
+    3. Change non-differentiable operations.
+    """
+
     def __init__(self, model):
         super().__init__()

-        # FIXME: We need an interface to modify the model, because the model only returns prediction in eval() mode.
-        self.model = DualModeGeneralizedRCNN(model)
+        # Extract PyTorch model from an ART Estimator.
+        # TODO: Automatically search for torch.nn.Module within model.
+        self.model = DualModeGeneralizedRCNN(model._model)

     def forward(self, batch):
         # Make the model accept batch as an argument parameter.
@@ -73,17 +80,16 @@ class MartAttack:
     """

     def __init__(self, model, mart_adv_config_yaml):
-        # Extract PyTorch model from an ART Estimator.
-        # TODO: Automatically search for torch.nn.Module within model.
-        self.model = ModelWrapper(model._model)
-        self.device = model.device
-
         # Instantiate a MART adversary.
         adv_cfg = OmegaConf.load(mart_adv_config_yaml)
-        self.adversary = hydra.utils.instantiate(adv_cfg)
+        adv = hydra.utils.instantiate(adv_cfg)
+
+        self.batch_converter = adv.batch_converter
+        self.adversary = adv.attack
+        self.model_wrapper = adv.model_wrapper
+
+        self.model = self.model_wrapper(model)
+        self.device = model.device

         # Move adversary to the same device.
         self.adversary.to(self.device)

From 228523a0b29d2423303ef3959c421ac4c0c6c997 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Mon, 24 Jul 2023 15:55:08 -0700
Subject: [PATCH 060/126] Clean up.

---
 .../mart_armory/mart_armory/configs/assemble_attack.yaml | 6 +++---
 examples/mart_armory/mart_armory/pytorch_wrapper.py      | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml
index 565944af..4fca0e84 100644
--- a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml
+++ b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml
@@ -3,9 +3,9 @@
 # specify here default training configuration
 defaults:
   - _self_
-  - attack: null
-  - batch_converter: null
-  - model_wrapper: null
+  - attack: ???
+  - batch_converter: ???
+  - model_wrapper: ???

   - override hydra/hydra_logging: disabled
   - override hydra/job_logging: disabled
diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py
index da455e86..72520340 100644
--- a/examples/mart_armory/mart_armory/pytorch_wrapper.py
+++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py
@@ -88,8 +88,8 @@ def __init__(self, model, mart_adv_config_yaml):
         self.adversary = adv.attack
         self.model_wrapper = adv.model_wrapper

-        self.model = self.model_wrapper(model)
         self.device = model.device
+        self.model = self.model_wrapper(model)

         # Move adversary to the same device.
         self.adversary.to(self.device)

From b6a5149bf563f84d6897c33382600dd0472e0a97 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Tue, 25 Jul 2023 10:27:01 -0700
Subject: [PATCH 061/126] Update README.
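The `jq` pipeline added below is intended to leave an attack block equivalent to the hand-edited JSON above, roughly (a sketched result, abbreviated):

```json
"attack": {
  "knowledge": "white",
  "use_label": true,
  "module": "mart_armory",
  "name": "MartAttack",
  "kwargs": {
    "mart_adv_config_yaml": "path/to/attack.yaml"
  }
}
```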
---
 examples/mart_armory/README.md | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md
index 75b798dc..22373be1 100644
--- a/examples/mart_armory/README.md
+++ b/examples/mart_armory/README.md
@@ -33,4 +33,17 @@ output=path/to/attack.yaml
 },
 ```

-Armory requires the argument `knowledge`. The statement `"use_label": true` gets `y` for the attack.
+Note that Armory requires the argument `knowledge`, and that `"use_label": true` makes Armory pass the ground-truth label `y` to the attack.
+
+Alternatively, we can use `jq` to update existing scenario json files, for example
+
+```bash
+cat scenario_configs/eval7/carla_overhead_object_detection/carla_obj_det_adversarialpatch_undefended.json \
+| jq 'del(.attack)' \
+| jq '.attack.knowledg="white"' \
+| jq '.attack.use_label=true' \
+| jq '.attack.module="mart_armory"' \
+| jq '.attack.name="MartAttack"' \
+| jq '.attack.kwargs.mart_adv_config_yaml="path/to/attack.yaml"' \
+> carla_obj_det_adversarialpatch_undefended_mart_attack.json
+```

From 1efd2430d97e6576ca40129d351e57f42ac05e4a Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Tue, 25 Jul 2023 10:28:41 -0700
Subject: [PATCH 062/126] Fix typo.

---
 examples/mart_armory/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md
index 22373be1..dfe5c10f 100644
--- a/examples/mart_armory/README.md
+++ b/examples/mart_armory/README.md
@@ -40,7 +40,7 @@ Alternatively, we can use `jq` to update existing scenario json files, for examp
 ```bash
 cat scenario_configs/eval7/carla_overhead_object_detection/carla_obj_det_adversarialpatch_undefended.json \
 | jq 'del(.attack)' \
-| jq '.attack.knowledg="white"' \
+| jq '.attack.knowledge="white"' \
 | jq '.attack.use_label=true' \
 | jq '.attack.module="mart_armory"' \
 | jq '.attack.name="MartAttack"' \
 | jq '.attack.kwargs.mart_adv_config_yaml="path/to/attack.yaml"' \
 > carla_obj_det_adversarialpatch_undefended_mart_attack.json

From ea7cfcef39e241d49b7006e76abeeda0a6a44867 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Wed, 26 Jul 2023 17:16:22 -0700
Subject: [PATCH 063/126] Rename variable: target -> target_transformed

---
 mart/attack/adversary.py | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py
index d24caf5b..8505cfd0 100644
--- a/mart/attack/adversary.py
+++ b/mart/attack/adversary.py
@@ -125,14 +125,16 @@ def training_step(self, batch, batch_idx):
         # batch = batch.copy()
         input_transformed = batch["input"]
-        target = batch["target"]
+        target_transformed = batch["target"]
         # What we need is a frozen model that returns (a dictionary of) logits, or losses.
         model = batch["model"]

         # Compose un-transformed input_adv from batch["input"], then give to model for updated gain.
-        input_adv_transformed = self.get_input_adv(input=input_transformed, target=target)
+        input_adv_transformed = self.get_input_adv(
+            input=input_transformed, target=target_transformed
+        )
         # Target model expects input in the original format.
-        batch_adv = self.batch_converter.revert(input_adv_transformed, target)
+        batch_adv = self.batch_converter.revert(input_adv_transformed, target_transformed)

         # A model that returns output dictionary.
         outputs = model(batch_adv)
@@ -171,10 +173,14 @@ def configure_gradient_clipping(

     @silent()
     def forward(self, *, batch: torch.Tensor | list | dict, model: Callable):
         # Extract and transform input so that it is convenient for Adversary.
-        input_transformed, target = self.batch_converter(batch)
+        input_transformed, target_transformed = self.batch_converter(batch)

         # Optimization loop only sees the transformed input in batches.
-        batch_transformed = {"input": input_transformed, "target": target, "model": model}
+        batch_transformed = {
+            "input": input_transformed,
+            "target": target_transformed,
+            "model": model,
+        }

         # Configure and reset perturbation for current inputs
         self.perturber.configure_perturbation(input_transformed)
@@ -185,11 +191,13 @@ def forward(self, *, batch: torch.Tensor | list | dict, model: Callable):
         self.attacker.fit(self, train_dataloaders=cycle([batch_transformed]))

         # Get the transformed input_adv for enforcer checking.
-        input_adv_transformed = self.get_input_adv(input=input_transformed, target=target)
-        self.enforcer(input_adv_transformed, input=input_transformed, target=target)
+        input_adv_transformed = self.get_input_adv(
+            input=input_transformed, target=target_transformed
+        )
+        self.enforcer(input_adv_transformed, input=input_transformed, target=target_transformed)

         # Revert to the original format of batch.
-        batch_adv = self.batch_converter.revert(input_adv_transformed, target)
+        batch_adv = self.batch_converter.revert(input_adv_transformed, target_transformed)

         return batch_adv

From f187adbae93e89ec15fd1986b47b492c1d613d03 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Wed, 26 Jul 2023 17:35:49 -0700
Subject: [PATCH 064/126] Replace the multimethod package with functools.singledispatch.

---
 .../mart_armory/pytorch_wrapper.py | 35 ++++++++++---------
 examples/mart_armory/pyproject.toml | 1 -
 2 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py
index 72520340..61fdd1c0 100644
--- a/examples/mart_armory/mart_armory/pytorch_wrapper.py
+++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py
@@ -6,47 +6,48 @@

 from __future__ import annotations

+from functools import singledispatch
+
 import hydra
 import numpy as np
 import torch
-from multimethod import multimethod
 from omegaconf import OmegaConf

 from mart.models.dual_mode import DualModeGeneralizedRCNN


 # A recursive function to convert all np.ndarray in an object to torch.Tensor, or vice versa.
-@multimethod
-def convert(obj: dict, device=None):
+@singledispatch
+def convert(obj, device=None):
+    """All other types, no change."""
+    return obj
+
+
+@convert.register
+def _(obj: dict, device=None):
     return {key: convert(value, device=device) for key, value in obj.items()}


-@multimethod
-def convert(obj: list, device=None):  # noqa: F811
+@convert.register
+def _(obj: list, device=None):
     return [convert(item, device=device) for item in obj]


-@multimethod
-def convert(obj: tuple, device=None):  # noqa: F811
+@convert.register
+def _(obj: tuple, device=None):
     return tuple(convert(item, device=device) for item in obj)


-@multimethod
-def convert(obj: np.ndarray, device=None):  # noqa: F811
+@convert.register
+def _(obj: np.ndarray, device=None):
     return torch.tensor(obj, device=device)


-@multimethod
-def convert(obj: torch.Tensor, device=None):  # noqa: F811
+@convert.register
+def _(obj: torch.Tensor, device=None):
     return obj.detach().cpu().numpy()


-# All other types, no change.
-@multimethod
-def convert(obj, device=None):  # noqa: F811
-    return obj
-
-
 class ArtRcnnModelWrapper(torch.nn.Module):
     """Modify the model so that it is convenient to attack.
diff --git a/examples/mart_armory/pyproject.toml b/examples/mart_armory/pyproject.toml index 7e20d3bd..550e3bb6 100644 --- a/examples/mart_armory/pyproject.toml +++ b/examples/mart_armory/pyproject.toml @@ -12,7 +12,6 @@ requires-python = ">=3.9" dependencies = [ "mart@git+https://github.com/IntelLabs/MART.git@example_armory_attack", - "multimethod ~= 1.9.1", ] [project.urls] From bae2345a0269245ebe7d266fd3c731b1e65faeec Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 28 Jul 2023 10:53:58 -0700 Subject: [PATCH 065/126] Fix monkey patch in adv_training callback. --- mart/callbacks/adversarial_training.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 3437c74b..0738467c 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -60,9 +60,9 @@ def model_forward(batch): elif hasattr(model, "training_step"): # Monkey-patch model.log to avoid spamming. - @MonkeyPatch(model, "log", lambda *args, **kwargs: None) def model_forward(batch): - output = model.training_step(batch, dataloader_idx) + with MonkeyPatch(model, "log", lambda *args, **kwargs: None): + output = model.training_step(batch, dataloader_idx) return output else: From 9f1d35da7c76a27fe14dc6f000f7ae9a28bde31f Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 28 Jul 2023 10:54:15 -0700 Subject: [PATCH 066/126] Fix DictBatchConverter. --- mart/attack/batch_converter.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py index 58cc784c..89e0648c 100644 --- a/mart/attack/batch_converter.py +++ b/mart/attack/batch_converter.py @@ -91,17 +91,20 @@ def __init__(self, input_key: str = "input", **kwargs): def _convert(self, batch): input = batch.pop(self.input_key) if "target" in batch: - target = batch.pop("target") + target = batch["target"] self.rest = batch else: target = batch return input, target def _revert(self, input, target): - if self.rest is {}: - batch = {self.input_key: input} | target + if self.rest == {}: + batch = target else: - batch = {self.input_key: input, "target": target} | self.rest + batch = self.rest + + # Input may have been changed. + batch[self.input_key] = input return batch From 2a8505ba740f42773d5f1f788f7139705ac77c54 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 28 Jul 2023 10:59:27 -0700 Subject: [PATCH 067/126] Remove recursive adversarial training callback in Adversary. --- mart/attack/adversary.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 8505cfd0..8a99384f 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -15,6 +15,7 @@ from mart.utils import silent +from ..callbacks.adversarial_training import AdversarialTraining from ..optim import OptimizerFactory if TYPE_CHECKING: @@ -221,6 +222,11 @@ def attacker(self): self._attacker = self._attacker(accelerator=accelerator, devices=devices) + # Remove recursive adversarial training callback from lightning.pytorch.callbacks_factory. + for callback in self._attacker.callbacks: + if isinstance(callback, AdversarialTraining): + self._attacker.callbacks.remove(callback) + return self._attacker def cpu(self): From 5d81324dcd9243d060fb8928a84b39bc902ffdfc Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 28 Jul 2023 11:45:43 -0700 Subject: [PATCH 068/126] Copy batch before transform(). 
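`DictBatchConverter._convert()` pops the input key, so handing it the caller's dict would silently mutate the original batch; copying first keeps the caller's view intact. The hazard in miniature (a self-contained toy, not MART code):

```python
batch = {"input": [1, 2, 3], "target": {"label": 0}}

def convert(b):
    # Mirrors DictBatchConverter._convert(), which pops the input key.
    return b.pop("input"), b.get("target")

convert(batch.copy())  # safe: the caller still sees "input"
assert "input" in batch

convert(batch)         # unsafe: the caller's batch is mutated
assert "input" not in batch
```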
---
 mart/attack/adversary.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py
index 8a99384f..f3ba5325 100644
--- a/mart/attack/adversary.py
+++ b/mart/attack/adversary.py
@@ -173,8 +173,9 @@ def configure_gradient_clipping(

     @silent()
     def forward(self, *, batch: torch.Tensor | list | dict, model: Callable):
+        # Copy to keep the original batch.
         # Extract and transform input so that it is convenient for Adversary.
-        input_transformed, target_transformed = self.batch_converter(batch)
+        input_transformed, target_transformed = self.batch_converter(batch.copy())

From fb79aa8fcb2c29831a09237df5459157214469a3 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 28 Jul 2023 16:24:36 -0700
Subject: [PATCH 069/126] Copy original batch in batch_converter(), because a tuple batch cannot be copied.

---
 mart/attack/adversary.py       | 2 +-
 mart/attack/batch_converter.py | 4 ++++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py
index f3ba5325..80e455f7 100644
--- a/mart/attack/adversary.py
+++ b/mart/attack/adversary.py
@@ -175,7 +175,7 @@ def forward(self, *, batch: torch.Tensor | list | dict, model: Callable):
         # Copy to keep the original batch.
         # Extract and transform input so that it is convenient for Adversary.
-        input_transformed, target_transformed = self.batch_converter(batch.copy())
+        input_transformed, target_transformed = self.batch_converter(batch)
diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py
index 89e0648c..1f3b7a95 100644
--- a/mart/attack/batch_converter.py
+++ b/mart/attack/batch_converter.py
@@ -89,6 +89,8 @@ def __init__(self, input_key: str = "input", **kwargs):
         self.rest = {}

     def _convert(self, batch):
+        # Make a copy because we don't want to break the original batch.
+        batch = batch.copy()
         input = batch.pop(self.input_key)
         if "target" in batch:
             target = batch["target"]
@@ -117,6 +119,8 @@ def __init__(self, input_key: int = 0, **kwargs):
         self.target_size = None

     def _convert(self, batch: list):
+        # Make a copy because we don't want to break the original batch.
+        batch = batch.copy()
         input = batch.pop(self.input_key)

         self.target_size = len(batch)

From 32f72a2b45761f5b0a58e109f057d840441fe1ac Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Mon, 31 Jul 2023 11:58:57 -0700
Subject: [PATCH 070/126] Explicit no_op function in transforms.

---
 mart/attack/batch_converter.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py
index 58cc784c..6c2d3cd0 100644
--- a/mart/attack/batch_converter.py
+++ b/mart/attack/batch_converter.py
@@ -34,14 +34,18 @@ def __init__(
             target_transform (Callable): Transform target.
             target_untransform (Callable): Untransform target.
""" - self.transform = transform if isinstance(transform, Callable) else lambda x: x - self.untransform = untransform if isinstance(untransform, Callable) else lambda x: x + + def no_op(x): + return x + + self.transform = transform if isinstance(transform, Callable) else no_op + self.untransform = untransform if isinstance(untransform, Callable) else no_op self.target_transform = ( - target_transform if isinstance(target_transform, Callable) else lambda x: x + target_transform if isinstance(target_transform, Callable) else no_op ) self.target_untransform = ( - target_untransform if isinstance(target_untransform, Callable) else lambda x: x + target_untransform if isinstance(target_untransform, Callable) else no_op ) def __call__(self, batch): From 840271b29ec25232ef0b31e70d078b64a095ace5 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 31 Jul 2023 12:14:47 -0700 Subject: [PATCH 071/126] Add batch_transform and batch_untransform in BatchConverter. --- mart/attack/batch_converter.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py index 6c2d3cd0..1bba163c 100644 --- a/mart/attack/batch_converter.py +++ b/mart/attack/batch_converter.py @@ -25,6 +25,8 @@ def __init__( untransform: Callable = None, target_transform: Callable = None, target_untransform: Callable = None, + batch_transform: Callable = None, + batch_untransform: Callable = None, ): """Convert batch into (input, target), and vice versa. @@ -35,7 +37,7 @@ def __init__( target_untransform (Callable): Untransform target. """ - def no_op(x): + def no_op(x, device=None): return x self.transform = transform if isinstance(transform, Callable) else no_op @@ -48,8 +50,14 @@ def no_op(x): target_untransform if isinstance(target_untransform, Callable) else no_op ) - def __call__(self, batch): - input, target = self._convert(batch) + self.batch_transform = batch_transform if isinstance(batch_transform, Callable) else no_op + self.batch_untransform = ( + batch_untransform if isinstance(batch_untransform, Callable) else no_op + ) + + def __call__(self, batch, device=None): + batch_transformed = self.batch_transform(batch, device=device) + input, target = self._convert(batch_transformed) input_transformed = self.transform(input) target_transformed = self.target_transform(target) return input_transformed, target_transformed @@ -57,7 +65,8 @@ def __call__(self, batch): def revert(self, input_transformed, target_transformed): input = self.untransform(input_transformed) target = self.target_untransform(target_transformed) - batch = self._revert(input, target) + batch_transformed = self._revert(input, target) + batch = self.batch_untransform(batch_transformed) return batch @abc.abstractclassmethod From 8572eef0ef51d1458570e52846e5c06595f862f6 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 31 Jul 2023 12:15:49 -0700 Subject: [PATCH 072/126] Make tensor<->array transform configurable as batch_transform. 
--- .../batch_converter/object_detection.yaml | 8 ++++ .../mart_armory/pytorch_wrapper.py | 41 +----------------- mart/transforms/tensor_array.py | 42 +++++++++++++++++++ 3 files changed, 52 insertions(+), 39 deletions(-) create mode 100644 mart/transforms/tensor_array.py diff --git a/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml b/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml index 632e7a30..693c5df8 100644 --- a/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml +++ b/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml @@ -62,3 +62,11 @@ target_untransform: - _target_: torch.permute _partial_: true dims: [1, 2, 0] + +batch_transform: + _target_: mart.transforms.tensor_array.convert + _partial_: true + +batch_untransform: + _target_: mart.transforms.tensor_array.convert + _partial_: true diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py index 61fdd1c0..c09a74f2 100644 --- a/examples/mart_armory/mart_armory/pytorch_wrapper.py +++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py @@ -6,48 +6,13 @@ from __future__ import annotations -from functools import singledispatch - import hydra -import numpy as np import torch from omegaconf import OmegaConf from mart.models.dual_mode import DualModeGeneralizedRCNN -# A recursive function to convert all np.ndarray in an object to torch.Tensor, or vice versa. -@singledispatch -def convert(obj, device=None): - """All other types, no change.""" - return obj - - -@convert.register -def _(obj: dict, device=None): - return {key: convert(value, device=device) for key, value in obj.items()} - - -@convert.register -def _(obj: list, device=None): - return [convert(item, device=device) for item in obj] - - -@convert.register -def _(obj: tuple, device=None): - return tuple(convert(obj, device=device)) - - -@convert.register -def _(obj: np.ndarray, device=None): - return torch.tensor(obj, device=device) - - -@convert.register -def _(obj: torch.Tensor, device=None): - return obj.detach().cpu().numpy() - - class ArtRcnnModelWrapper(torch.nn.Module): """Modify the model so that it is convenient to attack. @@ -97,17 +62,15 @@ def __init__(self, model, mart_adv_config_yaml): def convert_batch_armory_to_torchvision(self, batch_armory_np): # np.ndarray -> torch.Tensor, on a device. - batch_armory_pth = convert(batch_armory_np, device=self.device) # armory format -> torchvision format. - batch_tv_pth = self.batch_converter(batch_armory_pth) + batch_tv_pth = self.batch_converter(batch_armory_np, device=self.device) return batch_tv_pth def convert_batch_torchvision_to_armory(self, batch_tv_pth): # torchvision format -> armory format. # Note: revert(input, target) - batch_armory_pth = self.batch_converter.revert(*batch_tv_pth) # torch.Tensor -> np.ndarray - batch_armory_np = convert(batch_armory_pth) + batch_armory_np = self.batch_converter.revert(*batch_tv_pth) return batch_armory_np def generate(self, **batch_armory_np): diff --git a/mart/transforms/tensor_array.py b/mart/transforms/tensor_array.py new file mode 100644 index 00000000..ca88072f --- /dev/null +++ b/mart/transforms/tensor_array.py @@ -0,0 +1,42 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from functools import singledispatch + +import numpy as np +import torch + + +# A recursive function to convert all np.ndarray in an object to torch.Tensor, or vice versa. 
+@singledispatch
+def convert(obj, device=None):
+    """All other types, no change."""
+    return obj
+
+
+@convert.register
+def _(obj: dict, device=None):
+    return {key: convert(value, device=device) for key, value in obj.items()}
+
+
+@convert.register
+def _(obj: list, device=None):
+    return [convert(item, device=device) for item in obj]
+
+
+@convert.register
+def _(obj: tuple, device=None):
+    return tuple(convert(item, device=device) for item in obj)
+
+
+@convert.register
+def _(obj: np.ndarray, device=None):
+    return torch.tensor(obj, device=device)
+
+
+@convert.register
+def _(obj: torch.Tensor, device=None):
+    return obj.detach().cpu().numpy()

From bb0cecc5e34bb29285c34c83dcfc0899d8ac8e096 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Mon, 31 Jul 2023 12:19:01 -0700
Subject: [PATCH 073/126] Clean up.

---
 examples/mart_armory/mart_armory/pytorch_wrapper.py | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py
index c09a74f2..ff8098e3 100644
--- a/examples/mart_armory/mart_armory/pytorch_wrapper.py
+++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py
@@ -60,24 +60,16 @@ def __init__(self, model, mart_adv_config_yaml):
         # Move adversary to the same device.
         self.adversary.to(self.device)

-    def convert_batch_armory_to_torchvision(self, batch_armory_np):
+    def generate(self, **batch_armory_np):
         # np.ndarray -> torch.Tensor, on a device.
         # armory format -> torchvision format.
         batch_tv_pth = self.batch_converter(batch_armory_np, device=self.device)
-        return batch_tv_pth

-    def convert_batch_torchvision_to_armory(self, batch_tv_pth):
+        batch_adv_tv_pth = self.adversary(batch=batch_tv_pth, model=self.model)
+
         # torchvision format -> armory format.
-        # Note: revert(input, target)
         # torch.Tensor -> np.ndarray
-        batch_armory_np = self.batch_converter.revert(*batch_tv_pth)
-        return batch_armory_np
-
-    def generate(self, **batch_armory_np):
-        batch_tv_pth = self.convert_batch_armory_to_torchvision(batch_armory_np)
-
-        batch_adv_tv_pth = self.adversary(batch=batch_tv_pth, model=self.model)
-        batch_adv_armory_np = self.convert_batch_torchvision_to_armory(batch_adv_tv_pth)
+        batch_adv_armory_np = self.batch_converter.revert(*batch_adv_tv_pth)

         # Only return adversarial input in the original numpy format.
         input_key = self.batch_converter.input_key

From fd436c0cd1a9f01debf3a154d682305ce43e8ccf Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Mon, 31 Jul 2023 16:58:36 -0700
Subject: [PATCH 074/126] Add Adversary.model_transform.

---
 mart/attack/adversary.py           | 10 +++++++++-
 mart/configs/attack/adversary.yaml |  1 +
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py
index 8505cfd0..d2e640b9 100644
--- a/mart/attack/adversary.py
+++ b/mart/attack/adversary.py
@@ -43,6 +43,7 @@ def __init__(
         enforcer: Enforcer | None = None,
         attacker: pl.Trainer | None = None,
         batch_converter: Callable,
+        model_transform: Callable | None = None,
         **kwargs,
     ):
         """_summary_
@@ -57,6 +58,7 @@ def __init__(
             enforcer (Enforcer): A Callable that enforces constraints on the adversarial input.
             attacker (Trainer): A PyTorch-Lightning Trainer object used to fit the perturbation.
             batch_converter (Callable): Convert batch into convenient format and reverse.
+            model_transform (Callable): Change model so that it works for Adversary.
""" super().__init__() @@ -105,6 +107,10 @@ def __init__( self.batch_converter = batch_converter + self.model_transform = ( + model_transform if isinstance(model_transform, Callable) else lambda x: x + ) + @property def perturber(self) -> Perturber: # Hide the perturber module in a list, so that perturbation is not exported as a parameter in the model checkpoint, @@ -175,11 +181,13 @@ def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): # Extract and transform input so that is convenient for Adversary. input_transformed, target_transformed = self.batch_converter(batch) + model_transformed = self.model_transform(model) + # Optimization loop only sees the transformed input in batches. batch_transformed = { "input": input_transformed, "target": target_transformed, - "model": model, + "model": model_transformed, } # Configure and reset perturbation for current inputs diff --git a/mart/configs/attack/adversary.yaml b/mart/configs/attack/adversary.yaml index 480e3a5b..427534d5 100644 --- a/mart/configs/attack/adversary.yaml +++ b/mart/configs/attack/adversary.yaml @@ -13,3 +13,4 @@ objective: null enforcer: ??? attacker: null batch_converter: ??? +model_transform: null From 5fc3c4cbabfb3e256e537e9107b41f53a0884f9f Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 31 Jul 2023 17:00:09 -0700 Subject: [PATCH 075/126] Use Adversary.model_transform. --- .../mart_armory/configs/assemble_attack.yaml | 2 +- .../attack/model_transform/armory_objdet.yaml | 7 ++++ .../mart_armory/model_transform.py | 42 +++++++++++++++++++ .../mart_armory/pytorch_wrapper.py | 28 +------------ 4 files changed, 51 insertions(+), 28 deletions(-) create mode 100644 examples/mart_armory/mart_armory/configs/attack/model_transform/armory_objdet.yaml create mode 100644 examples/mart_armory/mart_armory/model_transform.py diff --git a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml index 4fca0e84..569d84a6 100644 --- a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml +++ b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml @@ -5,7 +5,7 @@ defaults: - _self_ - attack: ??? - batch_converter: ??? - - model_wrapper: ??? + - attack/model_transform: armory_objdet - override hydra/hydra_logging: disabled - override hydra/job_logging: disabled diff --git a/examples/mart_armory/mart_armory/configs/attack/model_transform/armory_objdet.yaml b/examples/mart_armory/mart_armory/configs/attack/model_transform/armory_objdet.yaml new file mode 100644 index 00000000..0670a5b8 --- /dev/null +++ b/examples/mart_armory/mart_armory/configs/attack/model_transform/armory_objdet.yaml @@ -0,0 +1,7 @@ +_target_: torchvision.transforms.Compose +transforms: + - _target_: mart_armory.model_transform.Extract + attrib: "_model" + - _target_: mart.models.dual_mode.DualModeGeneralizedRCNN + _partial_: true + - _target_: mart_armory.model_transform.ListInputAsArgs diff --git a/examples/mart_armory/mart_armory/model_transform.py b/examples/mart_armory/mart_armory/model_transform.py new file mode 100644 index 00000000..8c58064f --- /dev/null +++ b/examples/mart_armory/mart_armory/model_transform.py @@ -0,0 +1,42 @@ +import torch + +from mart.models.dual_mode import DualModeGeneralizedRCNN + + +class ArtRcnnModelWrapper(torch.nn.Module): + """Modify the model so that it is convenient to attack. + + Common issues: + 1. Make the model accept a single argument `output=model(batch)`; + 2. Make the model return loss in eval mode; + 3. 
Change non-differentiable operations. + """ + + def __init__(self, model): + super().__init__() + + # Extract PyTorch model from an ART Estimator. + # TODO: Automatically search for torch.nn.Module within model. + self.model = DualModeGeneralizedRCNN(model._model) + + def forward(self, batch): + # Make the model accept batch as an argument parameter. + output = self.model(*batch) + return output + + +class ListInputAsArgs: + def __call__(self, model): + def forward(batch): + return model(*batch) + + return forward + + +class Extract: + def __init__(self, attrib): + self.attrib = attrib + + def __call__(self, model): + model = getattr(model, self.attrib) + return model diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py index ff8098e3..1df84b08 100644 --- a/examples/mart_armory/mart_armory/pytorch_wrapper.py +++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py @@ -7,33 +7,8 @@ from __future__ import annotations import hydra -import torch from omegaconf import OmegaConf -from mart.models.dual_mode import DualModeGeneralizedRCNN - - -class ArtRcnnModelWrapper(torch.nn.Module): - """Modify the model so that it is convenient to attack. - - Common issues: - 1. Make the model accept a single argument `output=model(batch)`; - 2. Make the model return loss in eval mode; - 3. Change non-differentiable operations. - """ - - def __init__(self, model): - super().__init__() - - # Extract PyTorch model from an ART Estimator. - # TODO: Automatically search for torch.nn.Module within model. - self.model = DualModeGeneralizedRCNN(model._model) - - def forward(self, batch): - # Make the model accept batch as an argument parameter. - output = self.model(*batch) - return output - class MartAttack: """A minimal wrapper to run PyTorch-based MART adversary in Armory against PyTorch-based @@ -52,10 +27,9 @@ def __init__(self, model, mart_adv_config_yaml): self.batch_converter = adv.batch_converter self.adversary = adv.attack - self.model_wrapper = adv.model_wrapper + self.model = model self.device = model.device - self.model = self.model_wrapper(model) # Move adversary to the same device. self.adversary.to(self.device) From 5ebbbc03285b1268faef701321c34d348c9bcdbf Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 31 Jul 2023 17:09:27 -0700 Subject: [PATCH 076/126] Clean up. --- .../batch_converter/object_detection.yaml | 4 ++ .../mart_armory/model_transform.py | 38 +++++++------------ .../mart_armory/pytorch_wrapper.py | 9 ++--- 3 files changed, 21 insertions(+), 30 deletions(-) diff --git a/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml b/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml index 693c5df8..4fd83be8 100644 --- a/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml +++ b/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml @@ -8,6 +8,7 @@ target_keys: ["avg_patch_depth", "gs_coords", "mask", "max_depth_perturb_meters"] transform: + # armory format -> torchvision format. _target_: torchvision.transforms.Compose transforms: # NHWC -> NCHW, the PyTorch format. - _target_: torch.permute @@ -17,6 +18,7 @@ transform: _partial_: true untransform: + # torchvision format -> armory format. _target_: torchvision.transforms.Compose transforms: # NCHW -> NHWC, the TensorFlow format used in ART. 
- _target_: torch.stack @@ -64,9 +66,11 @@ target_untransform: dims: [1, 2, 0] batch_transform: + # np.ndarray -> torch.Tensor, on a device. _target_: mart.transforms.tensor_array.convert _partial_: true batch_untransform: + # torch.Tensor -> np.ndarray _target_: mart.transforms.tensor_array.convert _partial_: true diff --git a/examples/mart_armory/mart_armory/model_transform.py b/examples/mart_armory/mart_armory/model_transform.py index 8c58064f..09f94dfa 100644 --- a/examples/mart_armory/mart_armory/model_transform.py +++ b/examples/mart_armory/mart_armory/model_transform.py @@ -1,31 +1,19 @@ -import torch +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# -from mart.models.dual_mode import DualModeGeneralizedRCNN - - -class ArtRcnnModelWrapper(torch.nn.Module): - """Modify the model so that it is convenient to attack. - - Common issues: - 1. Make the model accept a single argument `output=model(batch)`; - 2. Make the model return loss in eval mode; - 3. Change non-differentiable operations. - """ - - def __init__(self, model): - super().__init__() - - # Extract PyTorch model from an ART Estimator. - # TODO: Automatically search for torch.nn.Module within model. - self.model = DualModeGeneralizedRCNN(model._model) - - def forward(self, batch): - # Make the model accept batch as an argument parameter. - output = self.model(*batch) - return output +# Modify a model so that it is convenient to attack. +# Common issues: +# 1. Make the model accept a single argument `output=model(batch)`; +# 2. Make the model return loss in eval mode; +# 3. Change non-differentiable operations. class ListInputAsArgs: + """Make a model expand input as non-keyword arguments.""" + def __call__(self, model): def forward(batch): return model(*batch) @@ -34,6 +22,8 @@ def forward(batch): class Extract: + """Example use case: extract the PyTorch model from an ART Estimator.""" + def __init__(self, attrib): self.attrib = attrib diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py index 1df84b08..5c1e563d 100644 --- a/examples/mart_armory/mart_armory/pytorch_wrapper.py +++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py @@ -35,14 +35,11 @@ def __init__(self, model, mart_adv_config_yaml): self.adversary.to(self.device) def generate(self, **batch_armory_np): - # np.ndarray -> torch.Tensor, on a device. - # armory format -> torchvision format. + # Armory format -> torchvision format batch_tv_pth = self.batch_converter(batch_armory_np, device=self.device) - + # Attack batch_adv_tv_pth = self.adversary(batch=batch_tv_pth, model=self.model) - - # torchvision format -> armory format. - # torch.Tensor -> np.ndarray + # torchvision format -> Armory format batch_adv_armory_np = self.batch_converter.revert(*batch_adv_tv_pth) # Only return adversarial input in the original numpy format. From 58d99834f07742beaf5cac260a573ba8fb9f1627 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 31 Jul 2023 17:10:10 -0700 Subject: [PATCH 077/126] Update doc. 
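
With the model_wrapper config group gone, the README example now selects the
model adaptation from within the attack config, i.e.
`attack.model_transform=armory_objdet`. That transform (added two commits ago)
composes three steps: extract the inner PyTorch module from the ART estimator
(`Extract(attrib="_model")`), wrap it in `DualModeGeneralizedRCNN`, and expand
the batch into positional arguments (`ListInputAsArgs`).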
--- examples/mart_armory/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index dfe5c10f..86b7ba8f 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -13,9 +13,9 @@ pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg ```shell python -m mart_armory.generate_attack_config \ batch_converter=object_detection \ -model_wrapper=art_rcnn \ attack=[object_detection_mask_adversary,data_coco] \ attack.objective=null \ +attack.model_transform=armory_objdet \ output=path/to/attack.yaml ``` From 2b91fe2c217761f2485e051a669640c99f77f79a Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 31 Jul 2023 17:10:43 -0700 Subject: [PATCH 078/126] Fix import path. --- examples/mart_armory/mart_armory/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/mart_armory/mart_armory/__init__.py b/examples/mart_armory/mart_armory/__init__.py index 926f7956..77272048 100644 --- a/examples/mart_armory/mart_armory/__init__.py +++ b/examples/mart_armory/mart_armory/__init__.py @@ -1,5 +1,5 @@ -import importlib +from importlib import metadata from mart_armory.pytorch_wrapper import MartAttack -__version__ = importlib.metadata.version(__package__ or __name__) +__version__ = metadata.version(__package__ or __name__) From 120a1273b622cbb52789e81596bbcabe6e07330d Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 31 Jul 2023 11:58:57 -0700 Subject: [PATCH 079/126] Explicit no_op function in transforms. --- mart/attack/batch_converter.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py index 1f3b7a95..d96e4c52 100644 --- a/mart/attack/batch_converter.py +++ b/mart/attack/batch_converter.py @@ -34,14 +34,18 @@ def __init__( target_transform (Callable): Transform target. target_untransform (Callable): Untransform target. """ - self.transform = transform if isinstance(transform, Callable) else lambda x: x - self.untransform = untransform if isinstance(untransform, Callable) else lambda x: x + + def no_op(x): + return x + + self.transform = transform if isinstance(transform, Callable) else no_op + self.untransform = untransform if isinstance(untransform, Callable) else no_op self.target_transform = ( - target_transform if isinstance(target_transform, Callable) else lambda x: x + target_transform if isinstance(target_transform, Callable) else no_op ) self.target_untransform = ( - target_untransform if isinstance(target_untransform, Callable) else lambda x: x + target_untransform if isinstance(target_untransform, Callable) else no_op ) def __call__(self, batch): From 238882e6491921b621541f6c76186baea1c1848c Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 31 Jul 2023 12:14:47 -0700 Subject: [PATCH 080/126] Add batch_transform and batch_untransform in BatchConverter. --- mart/attack/batch_converter.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py index d96e4c52..9cd7a94c 100644 --- a/mart/attack/batch_converter.py +++ b/mart/attack/batch_converter.py @@ -25,6 +25,8 @@ def __init__( untransform: Callable = None, target_transform: Callable = None, target_untransform: Callable = None, + batch_transform: Callable = None, + batch_untransform: Callable = None, ): """Convert batch into (input, target), and vice versa. 
@@ -35,7 +37,7 @@ def __init__( target_untransform (Callable): Untransform target. """ - def no_op(x): + def no_op(x, device=None): return x self.transform = transform if isinstance(transform, Callable) else no_op @@ -48,8 +50,14 @@ def no_op(x): target_untransform if isinstance(target_untransform, Callable) else no_op ) - def __call__(self, batch): - input, target = self._convert(batch) + self.batch_transform = batch_transform if isinstance(batch_transform, Callable) else no_op + self.batch_untransform = ( + batch_untransform if isinstance(batch_untransform, Callable) else no_op + ) + + def __call__(self, batch, device=None): + batch_transformed = self.batch_transform(batch, device=device) + input, target = self._convert(batch_transformed) input_transformed = self.transform(input) target_transformed = self.target_transform(target) return input_transformed, target_transformed @@ -57,7 +65,8 @@ def __call__(self, batch): def revert(self, input_transformed, target_transformed): input = self.untransform(input_transformed) target = self.target_untransform(target_transformed) - batch = self._revert(input, target) + batch_transformed = self._revert(input, target) + batch = self.batch_untransform(batch_transformed) return batch @abc.abstractclassmethod From 6570285c7dc489aea33680baf295cfc678d88f4e Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 31 Jul 2023 16:58:36 -0700 Subject: [PATCH 081/126] Add Adversary.model_transform. --- mart/attack/adversary.py | 10 +++++++++- mart/configs/attack/adversary.yaml | 1 + 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 80e455f7..7d52a176 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -44,6 +44,7 @@ def __init__( enforcer: Enforcer | None = None, attacker: pl.Trainer | None = None, batch_converter: Callable, + model_transform: Callable | None = None, **kwargs, ): """_summary_ @@ -58,6 +59,7 @@ def __init__( enforcer (Enforcer): A Callable that enforce constraints on the adversarial input. attacker (Trainer): A PyTorch-Lightning Trainer object used to fit the perturbation. batch_converter (Callable): Convert batch into convenient format and reverse. + model_transform (Callable): Change model so that it works for Adversary. """ super().__init__() @@ -106,6 +108,10 @@ def __init__( self.batch_converter = batch_converter + self.model_transform = ( + model_transform if isinstance(model_transform, Callable) else lambda x: x + ) + @property def perturber(self) -> Perturber: # Hide the perturber module in a list, so that perturbation is not exported as a parameter in the model checkpoint, @@ -177,11 +183,13 @@ def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): # Extract and transform input so that is convenient for Adversary. input_transformed, target_transformed = self.batch_converter(batch) + model_transformed = self.model_transform(model) + # Optimization loop only sees the transformed input in batches. batch_transformed = { "input": input_transformed, "target": target_transformed, - "model": model, + "model": model_transformed, } # Configure and reset perturbation for current inputs diff --git a/mart/configs/attack/adversary.yaml b/mart/configs/attack/adversary.yaml index 480e3a5b..427534d5 100644 --- a/mart/configs/attack/adversary.yaml +++ b/mart/configs/attack/adversary.yaml @@ -13,3 +13,4 @@ objective: null enforcer: ??? attacker: null batch_converter: ??? 
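+# Optionally adapt the model before the attack loop runs, e.g. unwrapping a
+# framework estimator down to its torch.nn.Module; null leaves the model as-is.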
+model_transform: null From 5a0c2647ae354b4f12faaf9b565434b1f5dd791c Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 31 Jul 2023 17:23:00 -0700 Subject: [PATCH 082/126] Simplify transforms in batch converter. --- mart/attack/batch_converter.py | 71 +++++++++++++++++++--------------- 1 file changed, 39 insertions(+), 32 deletions(-) diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py index 9cd7a94c..148c0585 100644 --- a/mart/attack/batch_converter.py +++ b/mart/attack/batch_converter.py @@ -4,6 +4,8 @@ # SPDX-License-Identifier: BSD-3-Clause # +from __future__ import annotations + import abc from typing import Callable @@ -21,12 +23,12 @@ class BatchConverter(abc.ABC): def __init__( self, *, - transform: Callable = None, - untransform: Callable = None, - target_transform: Callable = None, - target_untransform: Callable = None, - batch_transform: Callable = None, - batch_untransform: Callable = None, + transform: Callable | None = None, + untransform: Callable | None = None, + target_transform: Callable | None = None, + target_untransform: Callable | None = None, + batch_transform: Callable | None = None, + batch_untransform: Callable | None = None, ): """Convert batch into (input, target), and vice versa. @@ -35,38 +37,43 @@ def __init__( untransform (Callable): Transform adversarial input in the convenient format back into the original format of input, e.g. [0,255]->[0,1]. target_transform (Callable): Transform target. target_untransform (Callable): Untransform target. + batch_transform (Callable): Transform batch before converting the batch. + batch_untransform (callable): Untransform batch after reverting the batch. """ - def no_op(x, device=None): - return x - - self.transform = transform if isinstance(transform, Callable) else no_op - self.untransform = untransform if isinstance(untransform, Callable) else no_op + self.transform = transform + self.untransform = untransform - self.target_transform = ( - target_transform if isinstance(target_transform, Callable) else no_op - ) - self.target_untransform = ( - target_untransform if isinstance(target_untransform, Callable) else no_op - ) + self.target_transform = target_transform + self.target_untransform = target_untransform - self.batch_transform = batch_transform if isinstance(batch_transform, Callable) else no_op - self.batch_untransform = ( - batch_untransform if isinstance(batch_untransform, Callable) else no_op - ) + self.batch_transform = batch_transform + self.batch_untransform = batch_untransform def __call__(self, batch, device=None): - batch_transformed = self.batch_transform(batch, device=device) - input, target = self._convert(batch_transformed) - input_transformed = self.transform(input) - target_transformed = self.target_transform(target) - return input_transformed, target_transformed - - def revert(self, input_transformed, target_transformed): - input = self.untransform(input_transformed) - target = self.target_untransform(target_transformed) - batch_transformed = self._revert(input, target) - batch = self.batch_untransform(batch_transformed) + if self.batch_transform is not None: + batch = self.batch_transform(batch, device=device) + + input, target = self._convert(batch) + + if self.transform is not None: + input = self.transform(input) + if self.target_transform is not None: + target = self.target_transform(target) + + return input, target + + def revert(self, input, target): + if self.untransform is not None: + input = self.untransform(input) + if self.target_untransform is not None: + 
target = self.target_untransform(target) + + batch = self._revert(input, target) + + if self.batch_untransform is not None: + batch = self.batch_untransform(batch) + return batch @abc.abstractclassmethod From d410c607e1f8fb9a1c7afdd08e80f373bc068fd7 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 31 Jul 2023 17:24:38 -0700 Subject: [PATCH 083/126] Simplify model_transform. --- mart/attack/adversary.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 7d52a176..ab2b62c8 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -107,10 +107,7 @@ def __init__( assert self._attacker.limit_train_batches > 0 self.batch_converter = batch_converter - - self.model_transform = ( - model_transform if isinstance(model_transform, Callable) else lambda x: x - ) + self.model_transform = model_transform @property def perturber(self) -> Perturber: @@ -183,13 +180,14 @@ def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): # Extract and transform input so that is convenient for Adversary. input_transformed, target_transformed = self.batch_converter(batch) - model_transformed = self.model_transform(model) + if self.model_transform is not None: + model = self.model_transform(model) # Optimization loop only sees the transformed input in batches. batch_transformed = { "input": input_transformed, "target": target_transformed, - "model": model_transformed, + "model": model, } # Configure and reset perturbation for current inputs From 8dfbbf7c4f68c620b52fa1c9e88f29564bbaf386 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 31 Jul 2023 17:34:27 -0700 Subject: [PATCH 084/126] Clean up. --- mart/attack/adversary.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 0b9fe83d..ab2b62c8 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -109,10 +109,6 @@ def __init__( self.batch_converter = batch_converter self.model_transform = model_transform - self.model_transform = ( - model_transform if isinstance(model_transform, Callable) else lambda x: x - ) - @property def perturber(self) -> Perturber: # Hide the perturber module in a list, so that perturbation is not exported as a parameter in the model checkpoint, From 0f51d8081a11ed7397f00bc11070d624792066a1 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 20 Jul 2023 15:06:14 -0700 Subject: [PATCH 085/126] Upgrade dependency to torchmetrics == 1.0.1 (#205) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 6a80d751..a13938c5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,7 @@ dependencies = [ "torchvision ~= 0.15.2", "lightning[extra] ~= 2.0.5", # Full functionality including TensorboardX. "pydantic == 1.10.11", # https://github.com/Lightning-AI/lightning/pull/18022/files - "torchmetrics == 1.0.0", + "torchmetrics == 1.0.1", "numpy == 1.23.5", # https://github.com/pytorch/pytorch/issues/91516 # --------- hydra --------- # From d068902c867b36c5dfa1312381a2569f2184378a Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 1 Aug 2023 06:39:10 -0700 Subject: [PATCH 086/126] Move adversary out of the model sequence. 
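
The adversary no longer needs to live inside the model's module sequence; it
now wraps the whole batch/model exchange itself. A minimal sketch of the new
calling convention (the model is any callable that maps a batch to an output
dictionary; names mirror the diff below):

    # Before: the adversary sat in the sequence and returned only input_adv.
    # input_adv = adversary(input=input, target=target, model=model, sequence=sequence)

    # After: the adversary consumes and returns a batch in the original format.
    batch_adv = adversary(batch=(input, target), model=model)
    input_adv, target = batch_adv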
--- mart/attack/adversary.py | 68 +++++++++++++++++++----------- mart/callbacks/visualizer.py | 4 +- tests/test_adversary.py | 82 ++++++++++++------------------------ tests/test_visualizer.py | 2 +- 4 files changed, 73 insertions(+), 83 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 4a884c1c..20ffa794 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -15,6 +15,7 @@ from mart.utils import silent +from ..callbacks.adversarial_training import AdversarialTraining from ..optim import OptimizerFactory if TYPE_CHECKING: @@ -110,13 +111,28 @@ def perturber(self) -> Perturber: def configure_optimizers(self): return self.optimizer(self.perturber) + def get_input_adv(self, *, input, target): + perturbation = self.perturber(input=input, target=target) + input_adv = self.composer(perturbation, input=input, target=target) + return input_adv + def training_step(self, batch, batch_idx): + # TODO: We shouldn't need to copy because it is never changed? # copy batch since we modify it and it is used internally - batch = batch.copy() + # batch = batch.copy() + + input = batch["input"] + target = batch["target"] + # What we need is a frozen model that returns (a dictionary of) logits, or losses. + model = batch["model"] + + # Compose input_adv from input, then give to model for updated gain. + input_adv = self.get_input_adv(input=input, target=target) + # Target model expects input in the original format. + batch_adv = (input_adv, target) - # We need to evaluate the perturbation against the whole model, so call it normally to get a gain. - model = batch.pop("model") - outputs = model(**batch) + # A model that returns output dictionary. + outputs = model(batch_adv) # FIXME: This should really be just `return outputs`. But this might require a new sequence? # FIXME: Everything below here should live in the model as modules. @@ -150,27 +166,15 @@ def configure_gradient_clipping( self.gradient_modifier(group["params"]) @silent() - def forward(self, *, model=None, **batch): - batch["model"] = model + def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): + input, target = batch - # Adversary can live within a sequence of model. To signal the adversary should - # attack, one must pass a model to attack when calling the adversary. Since we - # do not know where the Adversary lives inside the model, we also need the - # remaining sequence to be able to get a loss. - if model: - self._attack(**batch) - - perturbation = self.perturber(**batch) - input_adv = self.composer(perturbation, **batch) - - # Enforce constraints after the attack optimization ends. - if model: - self.enforcer(input_adv, **batch) - - return input_adv - - def _attack(self, *, input, **batch): - batch["input"] = input + # Optimization loop only sees the transformed input in batches. + batch_transformed = { + "input": input, + "target": target, + "model": model, + } # Configure and reset perturbation for current inputs self.perturber.configure_perturbation(input) @@ -178,7 +182,16 @@ def _attack(self, *, input, **batch): # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. self.attacker.fit_loop.max_epochs += 1 - self.attacker.fit(self, train_dataloaders=cycle([batch])) + self.attacker.fit(self, train_dataloaders=cycle([batch_transformed])) + + # Get the input_adv for enforcer checking. 
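+        # (One extra perturber+composer forward pass; fit() above only updated the perturbation.)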
+ input_adv = self.get_input_adv(input=input, target=target) + self.enforcer(input_adv, input=input, target=target) + + # Revert to the original format of batch. + batch_adv = (input_adv, target) + + return batch_adv @property def attacker(self): @@ -200,6 +213,11 @@ def attacker(self): self._attacker = self._attacker(accelerator=accelerator, devices=devices) + # Remove recursive adversarial training callback from lightning.pytorch.callbacks_factory. + for callback in self._attacker.callbacks: + if isinstance(callback, AdversarialTraining): + self._attacker.callbacks.remove(callback) + return self._attacker def cpu(self): diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py index 39409143..34e7c5cc 100644 --- a/mart/callbacks/visualizer.py +++ b/mart/callbacks/visualizer.py @@ -6,6 +6,7 @@ import os +import torch from lightning.pytorch.callbacks import Callback from torchvision.transforms import ToPILImage @@ -32,7 +33,8 @@ def on_train_batch_end(self, trainer, model, outputs, batch, batch_idx): def on_train_end(self, trainer, model): # FIXME: We should really just save this to outputs instead of recomputing adv_input - adv_input = model(input=self.input, target=self.target) + with torch.no_grad(): + adv_input = model.get_input_adv(input=self.input, target=self.target) for img, tgt in zip(adv_input, self.target): fname = tgt["file_name"] diff --git a/tests/test_adversary.py b/tests/test_adversary.py index 448baa48..f39686b6 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -17,35 +17,6 @@ from mart.attack.gradient_modifier import Sign -def test_adversary(input_data, target_data, perturbation): - perturber = Mock(spec=Perturber, return_value=perturbation) - composer = mart.attack.composer.Additive() - gain = Mock() - enforcer = Mock() - attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) - - adversary = Adversary( - perturber=perturber, - composer=composer, - optimizer=None, - gain=gain, - enforcer=enforcer, - attacker=attacker, - ) - - output_data = adversary(input=input_data, target=target_data) - - # The enforcer and attacker should only be called when model is not None. - enforcer.assert_not_called() - attacker.fit.assert_not_called() - assert attacker.fit_loop.max_epochs == 0 - - perturber.assert_called_once() - gain.assert_not_called() - - torch.testing.assert_close(output_data, input_data + perturbation) - - def test_with_model(input_data, target_data, perturbation): perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() @@ -53,7 +24,7 @@ def test_with_model(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() - sequence = Mock() + batch = (input_data, target_data) adversary = Adversary( perturber=perturber, @@ -64,7 +35,8 @@ def test_with_model(input_data, target_data, perturbation): attacker=attacker, ) - output_data = adversary(input=input_data, target=target_data, model=model, sequence=sequence) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv[0] # The enforcer is only called when model is not None. 
enforcer.assert_called_once() @@ -78,7 +50,7 @@ def test_with_model(input_data, target_data, perturbation): torch.testing.assert_close(output_data, input_data + perturbation) -def test_hidden_params(input_data, target_data, perturbation): +def test_hidden_params(): initializer = Mock() composer = mart.attack.composer.Additive() projector = Mock() @@ -88,8 +60,6 @@ def test_hidden_params(input_data, target_data, perturbation): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) - model = Mock() - sequence = Mock() adversary = Adversary( perturber=perturber, @@ -100,8 +70,6 @@ def test_hidden_params(input_data, target_data, perturbation): attacker=attacker, ) - # output_data = adversary(input=input_data, target=target_data, model=model, sequence=sequence) - # Adversarial perturbation should not be updated by a regular training optimizer. params = [p for p in adversary.parameters()] assert len(params) == 0 @@ -122,7 +90,7 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() - sequence = Mock() + batch = (input_data, target_data) adversary = Adversary( perturber=perturber, @@ -133,7 +101,8 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): attacker=attacker, ) - output_data = adversary(input=input_data, target=target_data, model=model, sequence=sequence) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv[0] # Adversary will have no parameter even after forward is called, because we hide Perturber in a list. params = [p for p in adversary.parameters()] @@ -180,7 +149,7 @@ def test_perturbation(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() - sequence = Mock() + batch = (input_data, target_data) adversary = Adversary( perturber=perturber, @@ -191,15 +160,16 @@ def test_perturbation(input_data, target_data, perturbation): attacker=attacker, ) - _ = adversary(input=input_data, target=target_data, model=model, sequence=sequence) - output_data = adversary(input=input_data, target=target_data) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv[0] # The enforcer is only called when model is not None. enforcer.assert_called_once() attacker.fit.assert_called_once() - # Once with model and sequence and once without - assert perturber.call_count == 2 + # Perturber is called once for generating initial input_adv. + # The fit() doesn't run because max_epochs=0. 
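+    # (attacker is a Mock, so fit() merely records the call here.)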
+ assert perturber.call_count == 1 torch.testing.assert_close(output_data, input_data + perturbation) @@ -226,6 +196,8 @@ def initializer(x): projector=None, ) + batch = (input_data, target_data) + adversary = Adversary( perturber=perturber, composer=composer, @@ -236,20 +208,18 @@ def initializer(x): max_iters=1, ) - def model(input, target, model=None, **kwargs): - return {"logits": adversary(input=input, target=target)} - - sequence = Mock() + def model(batch): + return {"logits": batch[0]} - adversary(input=input_data, target=target_data, model=model, sequence=sequence) - input_adv = adversary(input=input_data, target=target_data) + batch_adv = adversary(batch=batch, model=model) + input_adv = batch_adv[0] perturbation = input_data - input_adv torch.testing.assert_close(perturbation.unique(), torch.Tensor([-1, 0, 1])) -def test_configure_optimizers(input_data, target_data): +def test_configure_optimizers(): perturber = Mock() composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) @@ -268,8 +238,8 @@ def test_configure_optimizers(input_data, target_data): gain.assert_not_called() -def test_training_step(input_data, target_data): - perturber = Mock() +def test_training_step(input_data, target_data, perturbation): + perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor(1337)) @@ -290,8 +260,8 @@ def test_training_step(input_data, target_data): assert output == 1337 -def test_training_step_with_many_gain(input_data, target_data): - perturber = Mock() +def test_training_step_with_many_gain(input_data, target_data, perturbation): + perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor([1234, 5678])) @@ -311,8 +281,8 @@ def test_training_step_with_many_gain(input_data, target_data): assert output == 1234 + 5678 -def test_training_step_with_objective(input_data, target_data): - perturber = Mock() +def test_training_step_with_objective(input_data, target_data, perturbation): + perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor([1234, 5678])) diff --git a/tests/test_visualizer.py b/tests/test_visualizer.py index 5c25e930..cb188591 100644 --- a/tests/test_visualizer.py +++ b/tests/test_visualizer.py @@ -24,7 +24,7 @@ def perturb(input): return result trainer = Mock() - model = Mock(return_value=perturb(input_list)) + model = Mock(get_input_adv=Mock(return_value=perturb(input_list))) outputs = Mock() batch = {"input": input_list, "target": target_list} adversary = Mock(spec=Adversary, side_effect=perturb) From e81a55ab0f96ee7c22b46c1788be29fed3fd4193 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 1 Aug 2023 06:40:16 -0700 Subject: [PATCH 087/126] Make an adversarial training/evaluation callback. 
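
The callback chains onto `on_after_batch_transfer`, so every batch is attacked
on the pl_module's device before the LightningModule consumes it. A rough
usage sketch with manual wiring (MART normally assembles this from the Hydra
configs below; `fgsm_adversary` and `pgd_adversary` are placeholders for
configured Adversary objects):

    from lightning.pytorch import Trainer
    from mart.callbacks import AdversarialTraining

    callback = AdversarialTraining(
        train_adversary=fgsm_adversary, test_adversary=pgd_adversary
    )
    trainer = Trainer(callbacks=[callback])
    # Batches are perturbed inside on_after_batch_transfer during fit/test.
    trainer.fit(lit_module, datamodule=datamodule)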
---
 mart/callbacks/adversarial_training.py       | 75 +++++++++++++------
 .../callbacks/adversarial_training.yaml      |  5 +-
 mart/configs/experiment/CIFAR10_CNN_Adv.yaml |  2 +-
 .../COCO_TorchvisionFasterRCNN_Adv.yaml      | 10 +--
 mart/models/modular.py                       |  6 ++
 tests/test_experiments.py                    |  2 +-
 6 files changed, 68 insertions(+), 32 deletions(-)

diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py
index 65b90b98..0738467c 100644
--- a/mart/callbacks/adversarial_training.py
+++ b/mart/callbacks/adversarial_training.py
@@ -4,11 +4,14 @@
 # SPDX-License-Identifier: BSD-3-Clause
 #
 
+from __future__ import annotations
+
 import types
+from typing import Callable
 
-from pytorch_lightning.callbacks import Callback
+from lightning.pytorch.callbacks import Callback
 
-from mart.models import LitModular
+from ..utils import MonkeyPatch
 
 __all__ = ["AdversarialTraining"]
 
@@ -16,11 +19,22 @@
 class AdversarialTraining(Callback):
     """Perturbs inputs to be adversarial."""
 
+    # TODO: training/validation/test or train/val/test
     def __init__(
-        self, adversary=None, train_adversary=None, validation_adversary=None, test_adversary=None
+        self,
+        adversary: Callable | None = None,
+        train_adversary: Callable | None = None,
+        validation_adversary: Callable | None = None,
+        test_adversary: Callable | None = None,
     ):
-        adversary = adversary or train_adversary
+        """Perturb batches with phase-specific adversaries.
 
+        Args:
+            adversary (Callable, optional): Default adversary for any phase without its own. Defaults to None.
+            train_adversary (Callable, optional): Adversary in the training phase, falling back to adversary. Defaults to None.
+            validation_adversary (Callable, optional): Adversary in the validation phase, falling back to adversary. Defaults to None.
+            test_adversary (Callable, optional): Adversary in the test phase, falling back to adversary. Defaults to None.
+        """
         self.train_adversary = train_adversary or adversary
         self.validation_adversary = validation_adversary or adversary
         self.test_adversary = test_adversary or adversary
@@ -34,35 +48,54 @@ def setup(self, trainer, pl_module, stage=None):
     def teardown(self, trainer, pl_module, stage=None):
         pl_module.on_after_batch_transfer = self._on_after_batch_transfer
 
+    def wrap_model(self, model, dataloader_idx):
+        """Make a model, such that `output = model(batch)`."""
+
+        # Consume dataloader_idx
+        if hasattr(model, "attack_step"):
+
+            def model_forward(batch):
+                output = model.attack_step(batch, dataloader_idx)
+                return output
+
+        elif hasattr(model, "training_step"):
+            # Monkey-patch model.log to avoid spamming.
+            def model_forward(batch):
+                with MonkeyPatch(model, "log", lambda *args, **kwargs: None):
+                    output = model.training_step(batch, dataloader_idx)
+                return output
+
+        else:
+            model_forward = model
+
+        return model_forward
+
     def on_after_batch_transfer(self, pl_module, batch, dataloader_idx):
         batch = self._on_after_batch_transfer(batch, dataloader_idx)
 
-        # FIXME: Remove use of step
+        adversary = None
+
         trainer = pl_module.trainer
         if trainer.training:
             adversary = self.train_adversary
-            step = "training"
         elif trainer.validating:
             adversary = self.validation_adversary
-            step = "validation"
         elif trainer.testing:
             adversary = self.test_adversary
-            step = "test"
-        else:
-            return batch
 
-        # Create attacked model where the adversary executes before the model
-        # FIXME: Should we just use pl_module.training_step? Ideally we would not decompose batch
-        #        and instead pass batch directly to the underlying pl_module since it knows how to
-        #        interpret batch.
-        def attacked_model(input, **batch):
-            input_adv = adversary(input=input, **batch)
-            return pl_module(input=input_adv, **batch)
+        # Skip if adversary is not defined for all phases train/validation/test.
+ if adversary is None: + return batch # Move adversary to same device as pl_module and run attack - # FIXME: Directly pass batch instead of assuming it has a structure? - input, target = batch adversary.to(pl_module.device) - input_adv = adversary(input=input, target=target, step=step, model=attacked_model) - return [input_adv, target] + # We assume Adversary is not aware of PyTorch Lightning, + # so wrap the model as `output=model(batch)`. + model = self.wrap_model(pl_module, dataloader_idx) + + # TODO: We may need to do model.eval() if there's BN-like layers in the model. + # Directly pass batch instead of assuming it has a structure. + batch_adv = adversary(batch=batch, model=model) + + return batch_adv diff --git a/mart/configs/callbacks/adversarial_training.yaml b/mart/configs/callbacks/adversarial_training.yaml index 0f6a7b47..eb74d0c1 100644 --- a/mart/configs/callbacks/adversarial_training.yaml +++ b/mart/configs/callbacks/adversarial_training.yaml @@ -1,3 +1,6 @@ adversarial_training: _target_: mart.callbacks.AdversarialTraining - adversary: ??? + adversary: null + train_adversary: null + validation_adversary: null + test_adversary: null diff --git a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml index ced39cd1..3d4b1c12 100644 --- a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml +++ b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml @@ -1,7 +1,7 @@ # @package _global_ defaults: - - /attack@callbacks.adversarial_training.adversary: classification_eps1.75_fgsm + - /attack@callbacks.adversarial_training.train_adversary: classification_eps1.75_fgsm - /attack@callbacks.adversarial_training.test_adversary: classification_eps2_pgd10_step1 - override /datamodule: cifar10 - override /model: classifier_cifar10_cnn diff --git a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml index 398394bf..a1860696 100644 --- a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml +++ b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml @@ -2,15 +2,9 @@ defaults: - COCO_TorchvisionFasterRCNN - - /attack@model.modules.input_adv_test: object_detection_mask_adversary + - /attack@callbacks.adversarial_training.test_adversary: object_detection_mask_adversary - override /datamodule: coco_perturbable_mask + - override /callbacks: [model_checkpoint, lr_monitor, adversarial_training] task_name: "COCO_TorchvisionFasterRCNN_Adv" tags: ["adv"] - -model: - test_sequence: - seq005: input_adv_test - - seq010: - preprocessor: ["input_adv_test"] diff --git a/mart/models/modular.py b/mart/models/modular.py index 192204a2..c63c9fd7 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -126,6 +126,12 @@ def configure_optimizers(self): def forward(self, **kwargs): return self.model(**kwargs) + def attack_step(self, batch, batch_idx): + # Use the training sequence in attack. 
+ input, target = batch + output = self(input=input, target=target, model=self.model, step="training") + return output + # # Training # diff --git a/tests/test_experiments.py b/tests/test_experiments.py index 65b27a5d..404a98ef 100644 --- a/tests/test_experiments.py +++ b/tests/test_experiments.py @@ -80,7 +80,7 @@ def test_cifar10_cnn_adv_experiment(classification_cfg, tmp_path): "-m", "experiment=CIFAR10_CNN_Adv", "hydra.sweep.dir=" + str(tmp_path), - "model.modules.input_adv_test.max_iters=10", + "callbacks.adversarial_training.test_adversary.max_iters=10", "optimized_metric=training_metrics/acc", "++datamodule.train_dataset.image_size=[3,32,32]", "++datamodule.train_dataset.num_classes=10", From 579d6531b2dd2a547f9f8cee26e6702b25ef3b40 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 3 Aug 2023 10:02:32 -0700 Subject: [PATCH 088/126] Remove stuff that is related to callback entry points. --- mart/attack/adversary.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 20ffa794..8a99dd39 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -213,11 +213,6 @@ def attacker(self): self._attacker = self._attacker(accelerator=accelerator, devices=devices) - # Remove recursive adversarial training callback from lightning.pytorch.callbacks_factory. - for callback in self._attacker.callbacks: - if isinstance(callback, AdversarialTraining): - self._attacker.callbacks.remove(callback) - return self._attacker def cpu(self): From 92f9fe23fcb41c876222ce12881e59f9133067cb Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 7 Aug 2023 10:20:27 -0700 Subject: [PATCH 089/126] Replace model wrapper with a configurable model_transform. --- mart/attack/adversary.py | 6 +++ mart/attack/model_transform.py | 38 +++++++++++++++++++ mart/callbacks/adversarial_training.py | 31 +-------------- mart/configs/attack/adversary.yaml | 1 + .../attack/model_transform/lightning.yaml | 1 + 5 files changed, 47 insertions(+), 30 deletions(-) create mode 100644 mart/attack/model_transform.py create mode 100644 mart/configs/attack/model_transform/lightning.yaml diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 8a99dd39..e942dc15 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -43,6 +43,7 @@ def __init__( objective: Objective | None = None, enforcer: Enforcer | None = None, attacker: pl.Trainer | None = None, + model_transform: Callable | None = None, **kwargs, ): """_summary_ @@ -102,6 +103,8 @@ def __init__( assert self._attacker.max_epochs == 0 assert self._attacker.limit_train_batches > 0 + self.model_transform = model_transform + @property def perturber(self) -> Perturber: # Hide the perturber module in a list, so that perturbation is not exported as a parameter in the model checkpoint, @@ -169,6 +172,9 @@ def configure_gradient_clipping( def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): input, target = batch + if self.model_transform is not None: + model = self.model_transform(model) + # Optimization loop only sees the transformed input in batches. 
batch_transformed = { "input": input, diff --git a/mart/attack/model_transform.py b/mart/attack/model_transform.py new file mode 100644 index 00000000..3cf4771b --- /dev/null +++ b/mart/attack/model_transform.py @@ -0,0 +1,38 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from ..utils import MonkeyPatch + + +class LightningModuleAsTarget: + """Prepare a LightningModule as a target model for Adversary, + such that `output = model(batch)`. + """ + + def __call__(self, model): + # Generate a pseudo dataloader_idx. + dataloader_idx = 1 + + if hasattr(model, "attack_step"): + + def model_forward(batch): + output = model.attack_step(batch, dataloader_idx) + return output + + elif hasattr(model, "training_step"): + # Monkey-patch model.log to avoid spamming. + def model_forward(batch): + with MonkeyPatch(model, "log", lambda *args, **kwargs: None): + output = model.training_step(batch, dataloader_idx) + return output + + else: + raise ValueError("Model does not have `attack_step()` or `training_step()`.") + + return model_forward + + +# TODO: We may need to do model.eval() if there's BN-like layers in the model. diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 0738467c..68c398fd 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -11,8 +11,6 @@ from lightning.pytorch.callbacks import Callback -from ..utils import MonkeyPatch - __all__ = ["AdversarialTraining"] @@ -48,28 +46,6 @@ def setup(self, trainer, pl_module, stage=None): def teardown(self, trainer, pl_module, stage=None): pl_module.on_after_batch_transfer = self._on_after_batch_transfer - def wrap_model(self, model, dataloader_idx): - """Make a model, such that `output = model(batch)`.""" - - # Consume dataloader_idx - if hasattr(model, "attack_step"): - - def model_forward(batch): - output = model.attack_step(batch, dataloader_idx) - return output - - elif hasattr(model, "training_step"): - # Monkey-patch model.log to avoid spamming. - def model_forward(batch): - with MonkeyPatch(model, "log", lambda *args, **kwargs: None): - output = model.training_step(batch, dataloader_idx) - return output - - else: - model_forward = model - - return model_forward - def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): batch = self._on_after_batch_transfer(batch, dataloader_idx) @@ -90,12 +66,7 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): # Move adversary to same device as pl_module and run attack adversary.to(pl_module.device) - # We assume Adversary is not aware of PyTorch Lightning, - # so wrap the model as `output=model(batch)`. - model = self.wrap_model(pl_module, dataloader_idx) - - # TODO: We may need to do model.eval() if there's BN-like layers in the model. # Directly pass batch instead of assuming it has a structure. 
- batch_adv = adversary(batch=batch, model=model) + batch_adv = adversary(batch=batch, model=pl_module) return batch_adv diff --git a/mart/configs/attack/adversary.yaml b/mart/configs/attack/adversary.yaml index bbf52433..aeb331c5 100644 --- a/mart/configs/attack/adversary.yaml +++ b/mart/configs/attack/adversary.yaml @@ -1,5 +1,6 @@ defaults: - /callbacks@callbacks: [progress_bar] + - model_transform: lightning _target_: mart.attack.Adversary _convert_: all diff --git a/mart/configs/attack/model_transform/lightning.yaml b/mart/configs/attack/model_transform/lightning.yaml new file mode 100644 index 00000000..b0db31d6 --- /dev/null +++ b/mart/configs/attack/model_transform/lightning.yaml @@ -0,0 +1 @@ +_target_: mart.attack.model_transform.LightningModuleAsTarget From 44c9c4bba4cb13f5c9a2d457843195baec1ac67e Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 7 Aug 2023 11:33:42 -0700 Subject: [PATCH 090/126] Add Adversary.batch_converter(). --- mart/attack/__init__.py | 1 + mart/attack/adversary.py | 35 ++-- mart/attack/batch_converter.py | 173 ++++++++++++++++++ mart/configs/attack/adversary.yaml | 1 + mart/configs/attack/batch_converter/dict.yaml | 2 + .../attack/batch_converter/input_only.yaml | 1 + mart/configs/attack/batch_converter/list.yaml | 2 + .../configs/attack/batch_converter/tuple.yaml | 2 + .../attack/classification_eps1.75_fgsm.yaml | 1 + .../classification_eps2_pgd10_step1.yaml | 1 + .../classification_eps8_pgd10_step1.yaml | 1 + .../object_detection_mask_adversary.yaml | 1 + tests/test_adversary.py | 28 +++ 13 files changed, 236 insertions(+), 13 deletions(-) create mode 100644 mart/attack/batch_converter.py create mode 100644 mart/configs/attack/batch_converter/dict.yaml create mode 100644 mart/configs/attack/batch_converter/input_only.yaml create mode 100644 mart/configs/attack/batch_converter/list.yaml create mode 100644 mart/configs/attack/batch_converter/tuple.yaml diff --git a/mart/attack/__init__.py b/mart/attack/__init__.py index 843ce9bd..2a55d648 100644 --- a/mart/attack/__init__.py +++ b/mart/attack/__init__.py @@ -1,5 +1,6 @@ from .adversary import * from .adversary_wrapper import * +from .batch_converter import * from .composer import * from .enforcer import * from .gain import * diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index e942dc15..74694203 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -43,6 +43,7 @@ def __init__( objective: Objective | None = None, enforcer: Enforcer | None = None, attacker: pl.Trainer | None = None, + batch_converter: Callable, model_transform: Callable | None = None, **kwargs, ): @@ -57,6 +58,7 @@ def __init__( objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. enforcer (Enforcer): A Callable that enforce constraints on the adversarial input. attacker (Trainer): A PyTorch-Lightning Trainer object used to fit the perturbation. + batch_converter (Callable): Convert batch into convenient format and reverse. 
""" super().__init__() @@ -103,6 +105,7 @@ def __init__( assert self._attacker.max_epochs == 0 assert self._attacker.limit_train_batches > 0 + self.batch_converter = batch_converter self.model_transform = model_transform @property @@ -124,15 +127,17 @@ def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally # batch = batch.copy() - input = batch["input"] - target = batch["target"] + input_transformed = batch["input"] + target_transformed = batch["target"] # What we need is a frozen model that returns (a dictionary of) logits, or losses. model = batch["model"] - # Compose input_adv from input, then give to model for updated gain. - input_adv = self.get_input_adv(input=input, target=target) + # Compose input_adv from input. + input_adv_transformed = self.get_input_adv( + input=input_transformed, target=target_transformed + ) # Target model expects input in the original format. - batch_adv = (input_adv, target) + batch_adv = self.batch_converter.revert(input_adv_transformed, target_transformed) # A model that returns output dictionary. outputs = model(batch_adv) @@ -170,32 +175,36 @@ def configure_gradient_clipping( @silent() def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): - input, target = batch + # Extract and transform input so that is convenient for Adversary. + input_transformed, target_transformed = self.batch_converter(batch) if self.model_transform is not None: model = self.model_transform(model) + # Canonical form of batch in the adversary's optimization loop. # Optimization loop only sees the transformed input in batches. batch_transformed = { - "input": input, - "target": target, + "input": input_transformed, + "target": target_transformed, "model": model, } # Configure and reset perturbation for current inputs - self.perturber.configure_perturbation(input) + self.perturber.configure_perturbation(input_transformed) # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. self.attacker.fit_loop.max_epochs += 1 self.attacker.fit(self, train_dataloaders=cycle([batch_transformed])) - # Get the input_adv for enforcer checking. - input_adv = self.get_input_adv(input=input, target=target) - self.enforcer(input_adv, input=input, target=target) + # Get the transformed input_adv for enforcer checking. + input_adv_transformed = self.get_input_adv( + input=input_transformed, target=target_transformed + ) + self.enforcer(input_adv_transformed, input=input_transformed, target=target_transformed) # Revert to the original format of batch. - batch_adv = (input_adv, target) + batch_adv = self.batch_converter.revert(input_adv_transformed, target_transformed) return batch_adv diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py new file mode 100644 index 00000000..2cf790ed --- /dev/null +++ b/mart/attack/batch_converter.py @@ -0,0 +1,173 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from __future__ import annotations + +import abc +from typing import Callable + +# TODO: Do we need to copy batch? 
+
+__all__ = [
+    "InputOnlyBatchConverter",
+    "DictBatchConverter",
+    "ListBatchConverter",
+    "TupleBatchConverter",
+]
+
+
+class BatchConverter(abc.ABC):
+    def __init__(
+        self,
+        *,
+        transform: Callable | None = None,
+        untransform: Callable | None = None,
+        target_transform: Callable | None = None,
+        target_untransform: Callable | None = None,
+        batch_transform: Callable | None = None,
+        batch_untransform: Callable | None = None,
+    ):
+        """Convert batch into (input, target), and vice versa.
+
+        Args:
+            transform (Callable): Transform input into a convenient format, e.g. [0,1]->[0,255].
+            untransform (Callable): Transform adversarial input in the convenient format back into the original format of input, e.g. [0,255]->[0,1].
+            target_transform (Callable): Transform target.
+            target_untransform (Callable): Untransform target.
+            batch_transform (Callable): Transform batch before converting the batch.
+            batch_untransform (Callable): Untransform batch after reverting the batch.
+        """
+
+        self.transform = transform
+        self.untransform = untransform
+
+        self.target_transform = target_transform
+        self.target_untransform = target_untransform
+
+        self.batch_transform = batch_transform
+        self.batch_untransform = batch_untransform
+
+    def __call__(self, batch, device=None):
+        if self.batch_transform is not None:
+            batch = self.batch_transform(batch, device=device)
+
+        input, target = self._convert(batch)
+
+        if self.transform is not None:
+            input = self.transform(input)
+        if self.target_transform is not None:
+            target = self.target_transform(target)
+
+        return input, target
+
+    def revert(self, input, target):
+        if self.untransform is not None:
+            input = self.untransform(input)
+        if self.target_untransform is not None:
+            target = self.target_untransform(target)
+
+        batch = self._revert(input, target)
+
+        if self.batch_untransform is not None:
+            batch = self.batch_untransform(batch)
+
+        return batch
+
+    @abc.abstractmethod
+    def _revert(self, input, target):
+        pass
+
+    @abc.abstractmethod
+    def _convert(self, batch):
+        pass
+
+
+class InputOnlyBatchConverter(BatchConverter):
+    def _convert(self, batch):
+        input = batch
+        target = None
+        return input, target
+
+    def _revert(self, input, target):
+        batch = input
+        return batch
+
+
+class DictBatchConverter(BatchConverter):
+    def __init__(self, input_key: str = "input", **kwargs):
+        """Convert a dictionary batch into (input, target), and back.
+
+        Args:
+            input_key (str): Input locator in a batch. Defaults to "input".
+        """
+        super().__init__(**kwargs)
+
+        self.input_key = input_key
+        self.rest = {}
+
+    def _convert(self, batch):
+        # Make a copy because we don't want to break the original batch.
+        batch = batch.copy()
+        input = batch.pop(self.input_key)
+        if "target" in batch:
+            target = batch["target"]
+            self.rest = batch
+        else:
+            target = batch
+        return input, target
+
+    def _revert(self, input, target):
+        if self.rest == {}:
+            batch = target
+        else:
+            batch = self.rest
+
+        # Input may have been changed.
+        batch[self.input_key] = input
+
+        return batch
+
+
+class ListBatchConverter(BatchConverter):
+    def __init__(self, input_key: int = 0, target_size: int | None = None, **kwargs):
+        super().__init__(**kwargs)
+
+        self.input_key = input_key
+        self.target_size = target_size
+
+    def _convert(self, batch: list):
+        # Make a copy because we don't want to break the original batch.
+ batch = batch.copy() + input = batch.pop(self.input_key) + self.target_size = len(batch) + + if self.target_size == 1: + target = batch[0] + else: + target = batch + + return input, target + + def _revert(self, input, target): + if self.target_size == 1: + batch = [target] + batch.insert(self.input_key, input) + else: + batch = target + batch.insert(self.input_key, input) + return batch + + +class TupleBatchConverter(ListBatchConverter): + def _convert(self, batch: tuple): + batch_list = list(batch) + input, target = super()._convert(batch_list) + return input, target + + def _revert(self, input, target): + batch_list = super()._revert(input, target) + batch = tuple(batch_list) + return batch diff --git a/mart/configs/attack/adversary.yaml b/mart/configs/attack/adversary.yaml index aeb331c5..40765807 100644 --- a/mart/configs/attack/adversary.yaml +++ b/mart/configs/attack/adversary.yaml @@ -13,3 +13,4 @@ gradient_modifier: null objective: null enforcer: ??? attacker: null +batch_converter: ??? diff --git a/mart/configs/attack/batch_converter/dict.yaml b/mart/configs/attack/batch_converter/dict.yaml new file mode 100644 index 00000000..db421039 --- /dev/null +++ b/mart/configs/attack/batch_converter/dict.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.batch_converter.DictBatchConverter +input_key: input diff --git a/mart/configs/attack/batch_converter/input_only.yaml b/mart/configs/attack/batch_converter/input_only.yaml new file mode 100644 index 00000000..b9bb9207 --- /dev/null +++ b/mart/configs/attack/batch_converter/input_only.yaml @@ -0,0 +1 @@ +_target_: mart.attack.batch_converter.InputOnlyBatchConverter diff --git a/mart/configs/attack/batch_converter/list.yaml b/mart/configs/attack/batch_converter/list.yaml new file mode 100644 index 00000000..53da9fae --- /dev/null +++ b/mart/configs/attack/batch_converter/list.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.batch_converter.ListBatchConverter +input_key: 0 diff --git a/mart/configs/attack/batch_converter/tuple.yaml b/mart/configs/attack/batch_converter/tuple.yaml new file mode 100644 index 00000000..25ff65b5 --- /dev/null +++ b/mart/configs/attack/batch_converter/tuple.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.batch_converter.TupleBatchConverter +input_key: 0 diff --git a/mart/configs/attack/classification_eps1.75_fgsm.yaml b/mart/configs/attack/classification_eps1.75_fgsm.yaml index 7c300e2d..c3c0ec46 100644 --- a/mart/configs/attack/classification_eps1.75_fgsm.yaml +++ b/mart/configs/attack/classification_eps1.75_fgsm.yaml @@ -10,6 +10,7 @@ defaults: - objective: misclassification - enforcer: default - enforcer/constraints: [lp, pixel_range] + - batch_converter: list enforcer: constraints: diff --git a/mart/configs/attack/classification_eps2_pgd10_step1.yaml b/mart/configs/attack/classification_eps2_pgd10_step1.yaml index b98cf407..7dd30548 100644 --- a/mart/configs/attack/classification_eps2_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps2_pgd10_step1.yaml @@ -10,6 +10,7 @@ defaults: - objective: misclassification - enforcer: default - enforcer/constraints: [lp, pixel_range] + - batch_converter: list enforcer: constraints: diff --git a/mart/configs/attack/classification_eps8_pgd10_step1.yaml b/mart/configs/attack/classification_eps8_pgd10_step1.yaml index f1b6242a..7b9577a7 100644 --- a/mart/configs/attack/classification_eps8_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps8_pgd10_step1.yaml @@ -10,6 +10,7 @@ defaults: - objective: misclassification - enforcer: default - enforcer/constraints: [lp, 
pixel_range] + - batch_converter: list enforcer: constraints: diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index ad99dda0..cedbd9eb 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -10,6 +10,7 @@ defaults: - objective: zero_ap - enforcer: default - enforcer/constraints: [mask, pixel_range] + - batch_converter: tuple # Make a 5-step attack for the demonstration purpose. max_iters: 5 diff --git a/tests/test_adversary.py b/tests/test_adversary.py index f39686b6..352aa654 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -25,6 +25,7 @@ def test_with_model(input_data, target_data, perturbation): attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() batch = (input_data, target_data) + batch_converter = mart.attack.TupleBatchConverter() adversary = Adversary( perturber=perturber, @@ -33,6 +34,7 @@ def test_with_model(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) batch_adv = adversary(batch=batch, model=model) @@ -60,6 +62,7 @@ def test_hidden_params(): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) + batch_converter = mart.attack.TupleBatchConverter() adversary = Adversary( perturber=perturber, @@ -68,6 +71,7 @@ def test_hidden_params(): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) # Adversarial perturbation should not be updated by a regular training optimizer. @@ -91,6 +95,7 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() batch = (input_data, target_data) + batch_converter = mart.attack.TupleBatchConverter() adversary = Adversary( perturber=perturber, @@ -99,6 +104,7 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) batch_adv = adversary(batch=batch, model=model) @@ -123,6 +129,7 @@ def test_loading_perturbation_from_state_dict(): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) + batch_converter = mart.attack.TupleBatchConverter() adversary = Adversary( perturber=perturber, @@ -131,6 +138,7 @@ def test_loading_perturbation_from_state_dict(): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) # We should be able to load arbitrary state_dict, because Adversary ignores state_dict. 
@@ -150,6 +158,7 @@ def test_perturbation(input_data, target_data, perturbation):
     attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0))
     model = Mock()
     batch = (input_data, target_data)
+    batch_converter = mart.attack.TupleBatchConverter()
 
     adversary = Adversary(
         perturber=perturber,
@@ -158,6 +167,7 @@
         gain=gain,
         enforcer=enforcer,
         attacker=attacker,
+        batch_converter=batch_converter,
     )
 
     batch_adv = adversary(batch=batch, model=model)
@@ -197,6 +207,7 @@ def initializer(x):
     )
 
     batch = (input_data, target_data)
+    batch_converter = mart.attack.TupleBatchConverter()
 
     adversary = Adversary(
         perturber=perturber,
@@ -206,6 +217,7 @@
         gradient_modifier=Sign(),
         enforcer=enforcer,
         max_iters=1,
+        batch_converter=batch_converter,
     )
 
     def model(batch):
@@ -224,12 +236,14 @@ def test_configure_optimizers():
     composer = mart.attack.composer.Additive()
     optimizer = Mock(spec=mart.optim.OptimizerFactory)
     gain = Mock()
+    batch_converter = mart.attack.TupleBatchConverter()
 
     adversary = Adversary(
         perturber=perturber,
         composer=composer,
         optimizer=optimizer,
         gain=gain,
+        batch_converter=batch_converter,
     )
 
     adversary.configure_optimizers()
@@ -244,14 +258,18 @@ def test_training_step(input_data, target_data, perturbation):
     optimizer = Mock(spec=mart.optim.OptimizerFactory)
     gain = Mock(return_value=torch.tensor(1337))
     model = Mock(return_value={})
+    # Set target_size manually because the test bypasses the convert() step that sets target_size.
+    batch_converter = mart.attack.TupleBatchConverter(target_size=1)
 
     adversary = Adversary(
         perturber=perturber,
         composer=composer,
         optimizer=optimizer,
         gain=gain,
+        batch_converter=batch_converter,
     )
 
+    # The batch is reverted to a tuple inside training_step() before invoking the model.
     output = adversary.training_step(
         {"input": input_data, "target": target_data, "model": model}, 0
     )
@@ -266,14 +284,18 @@ def test_training_step_with_many_gain(input_data, target_data, perturbation):
     optimizer = Mock(spec=mart.optim.OptimizerFactory)
     gain = Mock(return_value=torch.tensor([1234, 5678]))
     model = Mock(return_value={})
+    # Set target_size manually because the test bypasses the convert() step that sets target_size.
+    batch_converter = mart.attack.TupleBatchConverter(target_size=1)
 
     adversary = Adversary(
         perturber=perturber,
         composer=composer,
         optimizer=optimizer,
         gain=gain,
+        batch_converter=batch_converter,
    )
 
+    # The batch is reverted to a tuple inside training_step() before invoking the model.
     output = adversary.training_step(
         {"input": input_data, "target": target_data, "model": model}, 0
     )
@@ -288,6 +310,8 @@ def test_training_step_with_objective(input_data, target_data, perturbation):
     gain = Mock(return_value=torch.tensor([1234, 5678]))
     model = Mock(return_value={})
     objective = Mock(return_value=torch.tensor([True, False], dtype=torch.bool))
+    # Set target_size manually because the test bypasses the convert() step that sets target_size.
+    batch_converter = mart.attack.TupleBatchConverter(target_size=1)
 
     adversary = Adversary(
         perturber=perturber,
@@ -295,8 +319,10 @@
         composer=composer,
         optimizer=optimizer,
         objective=objective,
         gain=gain,
+        batch_converter=batch_converter,
     )
 
+    # The batch is reverted to a tuple inside training_step() before invoking the model.
output = adversary.training_step( {"input": input_data, "target": target_data, "model": model}, 0 ) @@ -314,6 +340,7 @@ def test_configure_gradient_clipping(): ) gradient_modifier = Mock() gain = Mock() + batch_converter = mart.attack.TupleBatchConverter() adversary = Adversary( perturber=perturber, @@ -321,6 +348,7 @@ def test_configure_gradient_clipping(): optimizer=optimizer, gradient_modifier=gradient_modifier, gain=gain, + batch_converter=batch_converter, ) # We need to mock a trainer since LightningModule does some checks adversary.trainer = Mock(gradient_clip_val=1.0, gradient_clip_algorithm="norm") From 87d41d641f4b8090424c84b0aa2699e9328239cc Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 7 Aug 2023 15:19:00 -0700 Subject: [PATCH 091/126] Comment. --- mart/attack/adversary.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index e942dc15..ebabab6e 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -57,6 +57,7 @@ def __init__( objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. enforcer (Enforcer): A Callable that enforce constraints on the adversarial input. attacker (Trainer): A PyTorch-Lightning Trainer object used to fit the perturbation. + model_transform (Callable): Transform a model before attack. """ super().__init__() From a0fb55928ec5851569576ebec9ab096e2cb8b486 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 7 Aug 2023 15:42:47 -0700 Subject: [PATCH 092/126] Update config. --- examples/mart_armory/mart_armory/configs/assemble_attack.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml index 569d84a6..8ca31dae 100644 --- a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml +++ b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml @@ -5,7 +5,7 @@ defaults: - _self_ - attack: ??? - batch_converter: ??? - - attack/model_transform: armory_objdet + - override attack/model_transform: armory_objdet - override hydra/hydra_logging: disabled - override hydra/job_logging: disabled From 87fca2730cdd9b0c00b8e02b7b660c0798224c4e Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 7 Aug 2023 15:43:08 -0700 Subject: [PATCH 093/126] Create folder automatically. --- examples/mart_armory/mart_armory/generate_attack_config.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/examples/mart_armory/mart_armory/generate_attack_config.py b/examples/mart_armory/mart_armory/generate_attack_config.py index 6376381f..9ce1c57a 100644 --- a/examples/mart_armory/mart_armory/generate_attack_config.py +++ b/examples/mart_armory/mart_armory/generate_attack_config.py @@ -41,6 +41,9 @@ def main(cfg: DictConfig) -> float: if "output" not in cfg: print("You can output config as a yaml file by `output=path/to/file.yaml`") else: + folder = os.path.dirname(cfg.output) + if folder != "" and not os.path.isdir(folder): + os.makedirs(folder) OmegaConf.save(config=cfg, f=cfg.output) print(f"Saved config to {cfg.output}") From 55bbda13105869d58f950607c49950d30c743a14 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 7 Aug 2023 15:57:58 -0700 Subject: [PATCH 094/126] Update example. 
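Pipe the composed scenario JSON straight into `armory run -` so no
intermediate file is needed, and export the attacked batches for inspection.

For reference, here is a rough Python sketch of how Armory resolves the
`attack` section of the scenario JSON. The loader details are simplified and
illustrative, not Armory's exact internals; `estimator` is the ART-wrapped
model that the scenario supplies.

```python
import importlib


def load_attack(attack_config: dict, estimator):
    # attack_config comes from the scenario JSON, e.g.
    # {"module": "mart_armory", "name": "MartAttack",
    #  "kwargs": {"mart_adv_config_yaml": "path/to/attack.yaml"}}
    module = importlib.import_module(attack_config["module"])
    attack_cls = getattr(module, attack_config["name"])
    return attack_cls(estimator, **attack_config["kwargs"])
```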
--- examples/mart_armory/README.md | 5 +++-- .../mart_armory/mart_armory/configs/assemble_attack.yaml | 1 - 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index 86b7ba8f..61ed5109 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -15,7 +15,7 @@ python -m mart_armory.generate_attack_config \ batch_converter=object_detection \ attack=[object_detection_mask_adversary,data_coco] \ attack.objective=null \ -attack.model_transform=armory_objdet \ +attack/model_transform=armory_objdet \ output=path/to/attack.yaml ``` @@ -45,5 +45,6 @@ cat scenario_configs/eval7/carla_overhead_object_detection/carla_obj_det_adversa | jq '.attack.module="mart_armory"' \ | jq '.attack.name="MartAttack"' \ | jq '.attack.kwargs.mart_adv_config_yaml="path/to/attack.yaml"' \ -> carla_obj_det_adversarialpatch_undefended_mart_attack.json +| jq '.scenario.export_batches=true' \ +| CUDA_VISIBLE_DEVICES=0 armory run - --no-docker --use-gpu --gpus=1 ``` diff --git a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml index 8ca31dae..e12bed90 100644 --- a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml +++ b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml @@ -5,7 +5,6 @@ defaults: - _self_ - attack: ??? - batch_converter: ??? - - override attack/model_transform: armory_objdet - override hydra/hydra_logging: disabled - override hydra/job_logging: disabled From ccdcf99a15cc0c64b39b24012c9a26ddbd93f854 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 7 Aug 2023 16:01:41 -0700 Subject: [PATCH 095/126] Clean up. --- .../mart_armory/configs/model_wrapper/art_rcnn.yaml | 2 -- mart/attack/adversary.py | 5 ----- mart/configs/attack/batch_converter/tensor.yaml | 1 - 3 files changed, 8 deletions(-) delete mode 100644 examples/mart_armory/mart_armory/configs/model_wrapper/art_rcnn.yaml delete mode 100644 mart/configs/attack/batch_converter/tensor.yaml diff --git a/examples/mart_armory/mart_armory/configs/model_wrapper/art_rcnn.yaml b/examples/mart_armory/mart_armory/configs/model_wrapper/art_rcnn.yaml deleted file mode 100644 index 8dcacd37..00000000 --- a/examples/mart_armory/mart_armory/configs/model_wrapper/art_rcnn.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: mart_armory.pytorch_wrapper.ArtRcnnModelWrapper -_partial_: true diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 5a410117..6e1ffa47 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -229,11 +229,6 @@ def attacker(self): self._attacker = self._attacker(accelerator=accelerator, devices=devices) - # Remove recursive adversarial training callback from lightning.pytorch.callbacks_factory. - for callback in self._attacker.callbacks: - if isinstance(callback, AdversarialTraining): - self._attacker.callbacks.remove(callback) - return self._attacker def cpu(self): diff --git a/mart/configs/attack/batch_converter/tensor.yaml b/mart/configs/attack/batch_converter/tensor.yaml deleted file mode 100644 index 47697bfd..00000000 --- a/mart/configs/attack/batch_converter/tensor.yaml +++ /dev/null @@ -1 +0,0 @@ -_target_: mart.attack.batch_converter.TensorBatchConverter From aa2b7f82a5110db49dc10adbb3066857bd883dd5 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 7 Aug 2023 16:16:51 -0700 Subject: [PATCH 096/126] Clean up. 
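Remove the commented-out format conversions from the batch converter (they now
live in the transform/untransform configs) and rewrite the wrapper docstring.

As a minimal sketch of the flow that the docstring describes (method names as
in this file; device handling and error checks omitted), `generate()` boils
down to:

```python
def generate(self, **batch_armory_np):
    # Armory's numpy batch -> a PyTorch batch in the target model's format.
    batch_tv_pth = self.batch_converter(batch_armory_np, device=self.device)
    # Run the MART adversary against the PyTorch model.
    batch_adv_tv_pth = self.adversary(batch=batch_tv_pth, model=self.model)
    # PyTorch tensors -> numpy, back in the Armory format.
    return self.batch_converter.revert(*batch_adv_tv_pth)
```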
---
 .../mart_armory/batch_converter.py | 18 -----------------
 .../batch_converter/object_detection.yaml | 20 +++++++++----------
 .../mart_armory/pytorch_wrapper.py | 18 +++++++++++------
 3 files changed, 22 insertions(+), 34 deletions(-)

diff --git a/examples/mart_armory/mart_armory/batch_converter.py b/examples/mart_armory/mart_armory/batch_converter.py
index cda0176f..f5d76085 100644
--- a/examples/mart_armory/mart_armory/batch_converter.py
+++ b/examples/mart_armory/mart_armory/batch_converter.py
@@ -43,29 +43,11 @@ def _convert(self, batch: dict):
 
         target = tuple(target)
 
-        # # TODO: Move to transform() that works on both input and target.
-        # # 1. input permute
-        # # 2. tuplize input
-        # # 3. permute and scale target["mask"]
-        # # NHWC -> NCHW, the PyTorch format.
-        # input = input.permute((0, 3, 1, 2))
-        # # NCHW -> tuple[CHW]
-        # input = tuple(input)
-
         return input, target
 
     def _revert(self, input: tuple[torch.Tensor], target: tuple[dict]) -> dict:
         batch = {}
 
-        # # TODO: Move to untransform().
-        # # 1. permute and scale target["mask"]
-        # # 2. input stack
-        # # 3. input permute
-        # # tuple[CHW] -> NCHW
-        # input = torch.stack(input)
-        # # NCHW -> NHWC, the TensorFlow format used in ART.
-        # input = input.permute((0, 2, 3, 1))
-
         batch[self.input_key] = input
 
         # Split target into several self.target_keys
diff --git a/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml b/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml
index 4fd83be8..4fc74be4 100644
--- a/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml
+++ b/examples/mart_armory/mart_armory/configs/batch_converter/object_detection.yaml
@@ -7,6 +7,16 @@ target_keys:
   y_patch_metadata: ["avg_patch_depth", "gs_coords", "mask", "max_depth_perturb_meters"]
 
+batch_transform:
+  # np.ndarray -> torch.Tensor, on a device.
+  _target_: mart.transforms.tensor_array.convert
+  _partial_: true
+
+batch_untransform:
+  # torch.Tensor -> np.ndarray
+  _target_: mart.transforms.tensor_array.convert
+  _partial_: true
+
 transform:
   # armory format -> torchvision format.
   _target_: torchvision.transforms.Compose
@@ -64,13 +74,3 @@ target_untransform:
       - _target_: torch.permute
         _partial_: true
         dims: [1, 2, 0]
-
-batch_transform:
-  # np.ndarray -> torch.Tensor, on a device.
-  _target_: mart.transforms.tensor_array.convert
-  _partial_: true
-
-batch_untransform:
-  # torch.Tensor -> np.ndarray
-  _target_: mart.transforms.tensor_array.convert
-  _partial_: true
diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py
index 5c1e563d..3261e82d 100644
--- a/examples/mart_armory/mart_armory/pytorch_wrapper.py
+++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py
@@ -11,16 +11,22 @@
 
 
 class MartAttack:
-    """A minimal wrapper to run PyTorch-based MART adversary in Armory against PyTorch-based
-    models.
+    """A minimal wrapper to run MART adversary in Armory against PyTorch-based models.
 
-    1. Extract the PyTorch model from an ART Estimator;
-    2. Convert np.ndarray to torch.Tensor;
-    3. Run PyTorch-based MART adversary and get result as torch.Tensor;
-    4. Convert torch.Tensor back to np.ndarray.
+    1. Instantiate an adversary that runs the attack in MART;
+    2. Instantiate a batch_converter that turns Armory's numpy batch into a PyTorch batch;
+    3. The adversary.model_transform() extracts the PyTorch model from an ART Estimator and makes other changes that ease the attack;
+    4. The adversary returns adversarial examples in the PyTorch format;
+    5. The batch_converter reverts the adversarial examples into the numpy format.
     """
 
     def __init__(self, model, mart_adv_config_yaml):
+        """Instantiate the MART adversary from a YAML configuration.
+
+        Args:
+            model (Callable): An ART Estimator that contains a PyTorch model.
+            mart_adv_config_yaml (str): File path to the adversary configuration.
+        """
         # Instantiate a MART adversary.
         adv_cfg = OmegaConf.load(mart_adv_config_yaml)
         adv = hydra.utils.instantiate(adv_cfg)

From fbff15a70dae41bb4285aaebd812040eb61a37fb Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Mon, 7 Aug 2023 16:28:37 -0700
Subject: [PATCH 097/126] Clean up.

---
 .../transform/{to_pixel_range_255.yaml => pixel_1to255.yaml} | 0
 .../transform/{to_pixel_range_1.yaml => pixel_255to1.yaml} | 0
 .../mart_armory/mart_armory/configs/attack/data_coco.yaml | 4 ++--
 3 files changed, 2 insertions(+), 2 deletions(-)
 rename examples/mart_armory/mart_armory/configs/attack/batch_converter/transform/{to_pixel_range_255.yaml => pixel_1to255.yaml} (100%)
 rename examples/mart_armory/mart_armory/configs/attack/batch_converter/transform/{to_pixel_range_1.yaml => pixel_255to1.yaml} (100%)

diff --git a/examples/mart_armory/mart_armory/configs/attack/data_coco.yaml b/examples/mart_armory/mart_armory/configs/attack/data_coco.yaml
index 52bae686..57ffa410 100644
--- a/examples/mart_armory/mart_armory/configs/attack/data_coco.yaml
+++ b/examples/mart_armory/mart_armory/configs/attack/data_coco.yaml
@@ -1,6 +1,6 @@
 defaults:
-  - batch_converter/transform@batch_converter.transform.transforms: to_pixel_range_255
-  - batch_converter/transform@batch_converter.untransform.transforms: to_pixel_range_1
+  - batch_converter/transform@batch_converter.transform.transforms: pixel_1to255
+  - batch_converter/transform@batch_converter.untransform.transforms: pixel_255to1
   - override batch_converter: tuple
 
 batch_converter:

From 5b15b6e950c8f4b6d2b19d2af0665b1f36887917 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 1 Sep 2023 15:58:05 -0700
Subject: [PATCH 098/126] Clean up.
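Remove the batch converters and model transforms from mart.attack; they are
superseded by `mart.transforms.batch_c15n` (the import in mart_armory is
fixed in the next commit).

For reference, the deleted converters followed a simple convert/revert
round-trip contract. A minimal sketch against the pre-deletion tree; the
string values below are stand-ins for real tensors and annotations:

```python
from mart.attack.batch_converter import TupleBatchConverter  # removed below

converter = TupleBatchConverter()

batch = ("input_tensor", {"label": 1})
input, target = converter(batch)  # convert: tuple batch -> (input, target)
# ... perturb `input` here ...
batch_adv = converter.revert(input + "_adv", target)  # revert: -> tuple again
assert batch_adv == ("input_tensor_adv", {"label": 1})
```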
---
 mart/attack/batch_converter.py | 173 ------------------
 mart/attack/model_transform.py | 38 ----
 mart/configs/attack/batch_converter/dict.yaml | 2 -
 .../attack/batch_converter/input_only.yaml | 1 -
 mart/configs/attack/batch_converter/list.yaml | 2 -
 .../configs/attack/batch_converter/tuple.yaml | 2 -
 .../attack/model_transform/lightning.yaml | 1 -
 7 files changed, 219 deletions(-)
 delete mode 100644 mart/attack/batch_converter.py
 delete mode 100644 mart/attack/model_transform.py
 delete mode 100644 mart/configs/attack/batch_converter/dict.yaml
 delete mode 100644 mart/configs/attack/batch_converter/input_only.yaml
 delete mode 100644 mart/configs/attack/batch_converter/list.yaml
 delete mode 100644 mart/configs/attack/batch_converter/tuple.yaml
 delete mode 100644 mart/configs/attack/model_transform/lightning.yaml

diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py
deleted file mode 100644
index 2cf790ed..00000000
--- a/mart/attack/batch_converter.py
+++ /dev/null
@@ -1,173 +0,0 @@
-#
-# Copyright (C) 2022 Intel Corporation
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-from __future__ import annotations
-
-import abc
-from typing import Callable
-
-# TODO: Do we need to copy batch?
-
-__all__ = [
-    "InputOnlyBatchConverter",
-    "DictBatchConverter",
-    "ListBatchConverter",
-    "TupleBatchConverter",
-]
-
-
-class BatchConverter(abc.ABC):
-    def __init__(
-        self,
-        *,
-        transform: Callable | None = None,
-        untransform: Callable | None = None,
-        target_transform: Callable | None = None,
-        target_untransform: Callable | None = None,
-        batch_transform: Callable | None = None,
-        batch_untransform: Callable | None = None,
-    ):
-        """Convert batch into (input, target), and vice versa.
-
-        Args:
-            transform (Callable): Transform input into a convenient format, e.g. [0,1]->[0,255].
-            untransform (Callable): Transform adversarial input in the convenient format back into the original format of input, e.g. [0,255]->[0,1].
-            target_transform (Callable): Transform target.
-            target_untransform (Callable): Untransform target.
-            batch_transform (Callable): Transform batch before converting the batch.
-            batch_untransform (Callable): Untransform batch after reverting the batch.
-        """
-
-        self.transform = transform
-        self.untransform = untransform
-
-        self.target_transform = target_transform
-        self.target_untransform = target_untransform
-
-        self.batch_transform = batch_transform
-        self.batch_untransform = batch_untransform
-
-    def __call__(self, batch, device=None):
-        if self.batch_transform is not None:
-            batch = self.batch_transform(batch, device=device)
-
-        input, target = self._convert(batch)
-
-        if self.transform is not None:
-            input = self.transform(input)
-        if self.target_transform is not None:
-            target = self.target_transform(target)
-
-        return input, target
-
-    def revert(self, input, target):
-        if self.untransform is not None:
-            input = self.untransform(input)
-        if self.target_untransform is not None:
-            target = self.target_untransform(target)
-
-        batch = self._revert(input, target)
-
-        if self.batch_untransform is not None:
-            batch = self.batch_untransform(batch)
-
-        return batch
-
-    @abc.abstractmethod
-    def _revert(self, input, target):
-        pass
-
-    @abc.abstractmethod
-    def _convert(self, batch):
-        pass
-
-
-class InputOnlyBatchConverter(BatchConverter):
-    def _convert(self, batch):
-        input = batch
-        target = None
-        return input, target
-
-    def _revert(self, input, target):
-        batch = input
-        return batch
-
-
-class DictBatchConverter(BatchConverter):
-    def __init__(self, input_key: str = "input", **kwargs):
-        """Convert a dict batch into (input, target), and vice versa.
-
-        Args:
-            input_key (str): Input locator in a batch. Defaults to "input".
-        """
-        super().__init__(**kwargs)
-
-        self.input_key = input_key
-        self.rest = {}
-
-    def _convert(self, batch):
-        # Make a copy because we don't want to break the original batch.
-        batch = batch.copy()
-        input = batch.pop(self.input_key)
-        if "target" in batch:
-            target = batch["target"]
-            self.rest = batch
-        else:
-            target = batch
-        return input, target
-
-    def _revert(self, input, target):
-        if self.rest == {}:
-            batch = target
-        else:
-            batch = self.rest
-
-        # Input may have been changed.
-        batch[self.input_key] = input
-
-        return batch
-
-
-class ListBatchConverter(BatchConverter):
-    def __init__(self, input_key: int = 0, target_size: int | None = None, **kwargs):
-        super().__init__(**kwargs)
-
-        self.input_key = input_key
-        self.target_size = target_size
-
-    def _convert(self, batch: list):
-        # Make a copy because we don't want to break the original batch.
-        batch = batch.copy()
-        input = batch.pop(self.input_key)
-        self.target_size = len(batch)
-
-        if self.target_size == 1:
-            target = batch[0]
-        else:
-            target = batch
-
-        return input, target
-
-    def _revert(self, input, target):
-        if self.target_size == 1:
-            batch = [target]
-            batch.insert(self.input_key, input)
-        else:
-            batch = target
-            batch.insert(self.input_key, input)
-        return batch
-
-
-class TupleBatchConverter(ListBatchConverter):
-    def _convert(self, batch: tuple):
-        batch_list = list(batch)
-        input, target = super()._convert(batch_list)
-        return input, target
-
-    def _revert(self, input, target):
-        batch_list = super()._revert(input, target)
-        batch = tuple(batch_list)
-        return batch
diff --git a/mart/attack/model_transform.py b/mart/attack/model_transform.py
deleted file mode 100644
index 3cf4771b..00000000
--- a/mart/attack/model_transform.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-# Copyright (C) 2022 Intel Corporation
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-from ..utils import MonkeyPatch
-
-
-class LightningModuleAsTarget:
-    """Prepare a LightningModule as a target model for Adversary,
-    such that `output = model(batch)`.
- """ - - def __call__(self, model): - # Generate a pseudo dataloader_idx. - dataloader_idx = 1 - - if hasattr(model, "attack_step"): - - def model_forward(batch): - output = model.attack_step(batch, dataloader_idx) - return output - - elif hasattr(model, "training_step"): - # Monkey-patch model.log to avoid spamming. - def model_forward(batch): - with MonkeyPatch(model, "log", lambda *args, **kwargs: None): - output = model.training_step(batch, dataloader_idx) - return output - - else: - raise ValueError("Model does not have `attack_step()` or `training_step()`.") - - return model_forward - - -# TODO: We may need to do model.eval() if there's BN-like layers in the model. diff --git a/mart/configs/attack/batch_converter/dict.yaml b/mart/configs/attack/batch_converter/dict.yaml deleted file mode 100644 index db421039..00000000 --- a/mart/configs/attack/batch_converter/dict.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: mart.attack.batch_converter.DictBatchConverter -input_key: input diff --git a/mart/configs/attack/batch_converter/input_only.yaml b/mart/configs/attack/batch_converter/input_only.yaml deleted file mode 100644 index b9bb9207..00000000 --- a/mart/configs/attack/batch_converter/input_only.yaml +++ /dev/null @@ -1 +0,0 @@ -_target_: mart.attack.batch_converter.InputOnlyBatchConverter diff --git a/mart/configs/attack/batch_converter/list.yaml b/mart/configs/attack/batch_converter/list.yaml deleted file mode 100644 index 53da9fae..00000000 --- a/mart/configs/attack/batch_converter/list.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: mart.attack.batch_converter.ListBatchConverter -input_key: 0 diff --git a/mart/configs/attack/batch_converter/tuple.yaml b/mart/configs/attack/batch_converter/tuple.yaml deleted file mode 100644 index 25ff65b5..00000000 --- a/mart/configs/attack/batch_converter/tuple.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: mart.attack.batch_converter.TupleBatchConverter -input_key: 0 diff --git a/mart/configs/attack/model_transform/lightning.yaml b/mart/configs/attack/model_transform/lightning.yaml deleted file mode 100644 index b0db31d6..00000000 --- a/mart/configs/attack/model_transform/lightning.yaml +++ /dev/null @@ -1 +0,0 @@ -_target_: mart.attack.model_transform.LightningModuleAsTarget From 5bcd8300588ac7f73846dd557e1556c8ed6b399e Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 1 Sep 2023 16:40:16 -0700 Subject: [PATCH 099/126] Fix parent class. --- examples/mart_armory/mart_armory/batch_converter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/mart_armory/mart_armory/batch_converter.py b/examples/mart_armory/mart_armory/batch_converter.py index f5d76085..cf760fcb 100644 --- a/examples/mart_armory/mart_armory/batch_converter.py +++ b/examples/mart_armory/mart_armory/batch_converter.py @@ -8,10 +8,10 @@ import torch -from mart.attack.batch_converter import BatchConverter +from mart.transforms.batch_c15n import BatchC15n -class ObjectDetectionBatchConverter(BatchConverter): +class ObjectDetectionBatchConverter(BatchC15n): def __init__( self, input_key: str = "x", From dceda3890b0770e9c8db6ed35f966b14aceb1bde Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 1 Sep 2023 16:55:20 -0700 Subject: [PATCH 100/126] Update to latest Adversary. 
--- examples/mart_armory/README.md | 5 +++-- .../mart_armory/configs/assemble_attack.yaml | 2 ++ .../mart_armory/configs/attack/data_coco.yaml | 11 ----------- .../mart_armory/configs/batch_c15n/data_coco.yaml | 10 ++++++++++ .../transform/pixel_1to255.yaml | 0 .../transform/pixel_255to1.yaml | 0 .../{attack => }/model_transform/armory_objdet.yaml | 0 examples/mart_armory/mart_armory/pytorch_wrapper.py | 13 +++++++++++-- 8 files changed, 26 insertions(+), 15 deletions(-) delete mode 100644 examples/mart_armory/mart_armory/configs/attack/data_coco.yaml create mode 100644 examples/mart_armory/mart_armory/configs/batch_c15n/data_coco.yaml rename examples/mart_armory/mart_armory/configs/{attack/batch_converter => batch_c15n}/transform/pixel_1to255.yaml (100%) rename examples/mart_armory/mart_armory/configs/{attack/batch_converter => batch_c15n}/transform/pixel_255to1.yaml (100%) rename examples/mart_armory/mart_armory/configs/{attack => }/model_transform/armory_objdet.yaml (100%) diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index 61ed5109..7ba55cf7 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -13,9 +13,10 @@ pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg ```shell python -m mart_armory.generate_attack_config \ batch_converter=object_detection \ -attack=[object_detection_mask_adversary,data_coco] \ +batch_c15n=data_coco \ +attack=[object_detection_mask_adversary] \ attack.objective=null \ -attack/model_transform=armory_objdet \ +model_transform=armory_objdet \ output=path/to/attack.yaml ``` diff --git a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml index e12bed90..d0ea452f 100644 --- a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml +++ b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml @@ -5,6 +5,8 @@ defaults: - _self_ - attack: ??? - batch_converter: ??? + - batch_c15n: ??? + - model_transform: ??? 
- override hydra/hydra_logging: disabled - override hydra/job_logging: disabled diff --git a/examples/mart_armory/mart_armory/configs/attack/data_coco.yaml b/examples/mart_armory/mart_armory/configs/attack/data_coco.yaml deleted file mode 100644 index 57ffa410..00000000 --- a/examples/mart_armory/mart_armory/configs/attack/data_coco.yaml +++ /dev/null @@ -1,11 +0,0 @@ -defaults: - - batch_converter/transform@batch_converter.transform.transforms: pixel_1to255 - - batch_converter/transform@batch_converter.untransform.transforms: pixel_255to1 - - override batch_converter: tuple - -batch_converter: - transform: - _target_: mart.transforms.TupleTransforms - - untransform: - _target_: mart.transforms.TupleTransforms diff --git a/examples/mart_armory/mart_armory/configs/batch_c15n/data_coco.yaml b/examples/mart_armory/mart_armory/configs/batch_c15n/data_coco.yaml new file mode 100644 index 00000000..945f827f --- /dev/null +++ b/examples/mart_armory/mart_armory/configs/batch_c15n/data_coco.yaml @@ -0,0 +1,10 @@ +defaults: + - tuple + - transform@transform.transforms: pixel_1to255 + - transform@untransform.transforms: pixel_255to1 + +transform: + _target_: mart.transforms.TupleTransforms + +untransform: + _target_: mart.transforms.TupleTransforms diff --git a/examples/mart_armory/mart_armory/configs/attack/batch_converter/transform/pixel_1to255.yaml b/examples/mart_armory/mart_armory/configs/batch_c15n/transform/pixel_1to255.yaml similarity index 100% rename from examples/mart_armory/mart_armory/configs/attack/batch_converter/transform/pixel_1to255.yaml rename to examples/mart_armory/mart_armory/configs/batch_c15n/transform/pixel_1to255.yaml diff --git a/examples/mart_armory/mart_armory/configs/attack/batch_converter/transform/pixel_255to1.yaml b/examples/mart_armory/mart_armory/configs/batch_c15n/transform/pixel_255to1.yaml similarity index 100% rename from examples/mart_armory/mart_armory/configs/attack/batch_converter/transform/pixel_255to1.yaml rename to examples/mart_armory/mart_armory/configs/batch_c15n/transform/pixel_255to1.yaml diff --git a/examples/mart_armory/mart_armory/configs/attack/model_transform/armory_objdet.yaml b/examples/mart_armory/mart_armory/configs/model_transform/armory_objdet.yaml similarity index 100% rename from examples/mart_armory/mart_armory/configs/attack/model_transform/armory_objdet.yaml rename to examples/mart_armory/mart_armory/configs/model_transform/armory_objdet.yaml diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py index 3261e82d..922403f4 100644 --- a/examples/mart_armory/mart_armory/pytorch_wrapper.py +++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py @@ -32,19 +32,28 @@ def __init__(self, model, mart_adv_config_yaml): adv = hydra.utils.instantiate(adv_cfg) self.batch_converter = adv.batch_converter + self.batch_c15n = adv.batch_c15n + self.model_transform = adv.model_transform self.adversary = adv.attack - self.model = model self.device = model.device # Move adversary to the same device. 
         self.adversary.to(self.device)
 
+        # model_transform
+        self.model = self.model_transform(model)
+
     def generate(self, **batch_armory_np):
         # Armory format -> torchvision format
         batch_tv_pth = self.batch_converter(batch_armory_np, device=self.device)
+
         # Attack
-        batch_adv_tv_pth = self.adversary(batch=batch_tv_pth, model=self.model)
+        input, target = self.batch_c15n(batch_tv_pth)
+        self.adversary.fit(input, target, model=self.model)
+        input_adv, target_adv = self.adversary(input, target)
+        batch_adv_tv_pth = self.batch_c15n(input_adv, target_adv)
+
         # torchvision format -> Armory format
         batch_adv_armory_np = self.batch_converter.revert(*batch_adv_tv_pth)

From c6c86458e7dae664f87167e124cebc0d86be8129 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 1 Sep 2023 16:59:52 -0700
Subject: [PATCH 101/126] Fix typo.

---
 examples/mart_armory/mart_armory/pytorch_wrapper.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py
index 922403f4..b57dcbe0 100644
--- a/examples/mart_armory/mart_armory/pytorch_wrapper.py
+++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py
@@ -52,7 +52,7 @@ def generate(self, **batch_armory_np):
         input, target = self.batch_c15n(batch_tv_pth)
         self.adversary.fit(input, target, model=self.model)
         input_adv, target_adv = self.adversary(input, target)
-        batch_adv_tv_pth = self.batch_c15n(input_adv, target_adv)
+        batch_adv_tv_pth = self.batch_c15n.revert(input_adv, target_adv)

From 32b3b57322d0224344fdcd0f9cce4793a7bc0067 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 1 Sep 2023 17:16:06 -0700
Subject: [PATCH 102/126] Fix example.

---
 examples/mart_armory/README.md | 2 ++
 .../configs/model_transform/armory_objdet.yaml | 1 -
 examples/mart_armory/mart_armory/model_transform.py | 12 +-----------
 3 files changed, 3 insertions(+), 12 deletions(-)

diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md
index 7ba55cf7..cb2facbd 100644
--- a/examples/mart_armory/README.md
+++ b/examples/mart_armory/README.md
@@ -16,6 +16,8 @@ batch_converter=object_detection \
 batch_c15n=data_coco \
 attack=[object_detection_mask_adversary] \
 attack.objective=null \
+attack.max_iters=10 \
+attack.lr=26 \
 model_transform=armory_objdet \
 output=path/to/attack.yaml

diff --git a/examples/mart_armory/mart_armory/configs/model_transform/armory_objdet.yaml b/examples/mart_armory/mart_armory/configs/model_transform/armory_objdet.yaml
index 0670a5b8..7d6dee35 100644
--- a/examples/mart_armory/mart_armory/configs/model_transform/armory_objdet.yaml
+++ b/examples/mart_armory/mart_armory/configs/model_transform/armory_objdet.yaml
@@ -4,4 +4,3 @@ transforms:
     attrib: "_model"
   - _target_: mart.models.dual_mode.DualModeGeneralizedRCNN
     _partial_: true
-  - _target_: mart_armory.model_transform.ListInputAsArgs

diff --git a/examples/mart_armory/mart_armory/model_transform.py b/examples/mart_armory/mart_armory/model_transform.py
index 09f94dfa..23aff6c3 100644
--- a/examples/mart_armory/mart_armory/model_transform.py
+++ b/examples/mart_armory/mart_armory/model_transform.py
@@ -6,21 +6,11 @@
 
 # Modify a model so that it is convenient to attack.
 # Common issues:
-# 1. Make the model accept a single argument `output=model(batch)`;
+# 1. Make the model accept non-keyword arguments, i.e. `output = model(input, target)`;
 # 2. Make the model return loss in eval mode;
 # 3.
Change non-differentiable operations. -class ListInputAsArgs: - """Make a model expand input as non-keyword arguments.""" - - def __call__(self, model): - def forward(batch): - return model(*batch) - - return forward - - class Extract: """Example use case: extract the PyTorch model from an ART Estimator.""" From 9a81279e879a81dda309325d9e49d0d3b6c579a3 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 14 Sep 2023 11:01:11 -0700 Subject: [PATCH 103/126] Add mart.generate_config. --- mart/generate_config.py | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 mart/generate_config.py diff --git a/mart/generate_config.py b/mart/generate_config.py new file mode 100644 index 00000000..ceaa45a7 --- /dev/null +++ b/mart/generate_config.py @@ -0,0 +1,38 @@ +import os + +import fire +from hydra import compose, initialize_config_dir +from omegaconf import OmegaConf + + +def generate( + *overrides, + version_base: str = "1.2", + config_dir: str = ".", + config_name: str, + output_node: str = None, + export_name: str = "output.yaml", + resolve: bool = False, +): + # An absolute path {config_dir} is added to the search path of configs, preceding those in mart.configs. + if not os.path.isabs(config_dir): + config_dir = os.path.abspath(config_dir) + + with initialize_config_dir(version_base=version_base, config_dir=config_dir): + cfg = compose(config_name=config_name, overrides=overrides) + + # Resolve all interpolation. + if resolve: + OmegaConf.resolve(cfg) + + # Don't output the whole tree. + if output_node is not None: + for key in output_node.split("."): + cfg = cfg[key] + + OmegaConf.save(config=cfg, f=export_name) + print(f"Config file saved to {export_name}") + + +if __name__ == "__main__": + fire.Fire(generate) From 961313e8a130560f7ba31c7f013054ef42f92ada Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 14 Sep 2023 11:03:10 -0700 Subject: [PATCH 104/126] Create output folder automatically. --- mart/generate_config.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/mart/generate_config.py b/mart/generate_config.py index ceaa45a7..5bd99292 100644 --- a/mart/generate_config.py +++ b/mart/generate_config.py @@ -30,6 +30,11 @@ def generate( for key in output_node.split("."): cfg = cfg[key] + # Create folders for output if necessary. + folder = os.path.dirname(export_name) + if folder != "" and not os.path.isdir(folder): + os.makedirs(folder) + OmegaConf.save(config=cfg, f=export_name) print(f"Config file saved to {export_name}") From d69f8cde5541c9aa71aecae7d09b142857210c2c Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 14 Sep 2023 11:04:48 -0700 Subject: [PATCH 105/126] Simplify config generation. --- examples/mart_armory/README.md | 8 ++- .../mart_armory/configs/assemble_attack.yaml | 9 --- .../mart_armory/generate_attack_config.py | 56 ------------------- 3 files changed, 5 insertions(+), 68 deletions(-) delete mode 100644 examples/mart_armory/mart_armory/generate_attack_config.py diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index cb2facbd..eb0ba259 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -11,15 +11,17 @@ pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg 1. Generate a YAML configuration of attack. 
```shell -python -m mart_armory.generate_attack_config \ +python -m mart.generate_config \ +--config_dir=mart_armory/configs \ +--config_name=assemble_attack.yaml \ +--export_name=path/to/attack.yaml \ batch_converter=object_detection \ batch_c15n=data_coco \ attack=[object_detection_mask_adversary] \ attack.objective=null \ attack.max_iters=10 \ attack.lr=26 \ -model_transform=armory_objdet \ -output=path/to/attack.yaml +model_transform=armory_objdet ``` 2. Update the attack section in the Armory scenario configuration. diff --git a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml index d0ea452f..7d49b6c0 100644 --- a/examples/mart_armory/mart_armory/configs/assemble_attack.yaml +++ b/examples/mart_armory/mart_armory/configs/assemble_attack.yaml @@ -7,12 +7,3 @@ defaults: - batch_converter: ??? - batch_c15n: ??? - model_transform: ??? - - override hydra/hydra_logging: disabled - - override hydra/job_logging: disabled - -output: ??? - -hydra: - output_subdir: null - run: - dir: . diff --git a/examples/mart_armory/mart_armory/generate_attack_config.py b/examples/mart_armory/mart_armory/generate_attack_config.py deleted file mode 100644 index 9ce1c57a..00000000 --- a/examples/mart_armory/mart_armory/generate_attack_config.py +++ /dev/null @@ -1,56 +0,0 @@ -# this file acts as a robust starting point for launching hydra runs and multiruns -# can be run from any place - -import os -import sys -from pathlib import Path - -import hydra -import pyrootutils -from omegaconf import DictConfig, OmegaConf - -from mart import utils - -log = utils.get_pylogger(__name__) - -# project root setup -# uses the current working directory as root. -# sets PROJECT_ROOT environment variable (used in `configs/paths/default.yaml`) -# loads environment variables from ".env" if exists -# adds root dir to the PYTHONPATH (so this file can be run from any place) -# https://github.com/ashleve/pyrootutils -# FIXME: Get rid of pyrootutils if we don't infer config.paths.root from PROJECT_ROOT. -root = Path(os.getcwd()) -pyrootutils.set_root(path=root, dotenv=True, pythonpath=True) - -config_path = root / "configs" -if not config_path.exists(): - log.warning(f"No config directory found at {config_path}!") - config_path = "configs" - - -@hydra.main(version_base="1.2", config_path=config_path, config_name="assemble_attack.yaml") -def main(cfg: DictConfig) -> float: - if "attack" not in cfg: - print( - "Please assemble an attack, e.g., `attack=[object_detection_mask_adversary,data_coco]`" - ) - else: - print(OmegaConf.to_yaml(cfg)) - - if "output" not in cfg: - print("You can output config as a yaml file by `output=path/to/file.yaml`") - else: - folder = os.path.dirname(cfg.output) - if folder != "" and not os.path.isdir(folder): - os.makedirs(folder) - OmegaConf.save(config=cfg, f=cfg.output) - print(f"Saved config to {cfg.output}") - - -if __name__ == "__main__": - ret = main() - if ret is not None and ret < 0: - sys.exit(ret) - else: - sys.exit(0) From d9b0a76ebc5f8af4eea124de7b94821cfa986360 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 14 Sep 2023 11:23:22 -0700 Subject: [PATCH 106/126] Comment. 
--- examples/mart_armory/mart_armory/pytorch_wrapper.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/pytorch_wrapper.py index b57dcbe0..8a258417 100644 --- a/examples/mart_armory/mart_armory/pytorch_wrapper.py +++ b/examples/mart_armory/mart_armory/pytorch_wrapper.py @@ -31,9 +31,15 @@ def __init__(self, model, mart_adv_config_yaml): adv_cfg = OmegaConf.load(mart_adv_config_yaml) adv = hydra.utils.instantiate(adv_cfg) + # Transform the ART estimator to an attackable PyTorch model. + self.model_transform = adv.model_transform + + # Convert the Armory batch to a form that is expected by the target PyTorch model. self.batch_converter = adv.batch_converter + + # Canonicalize batches for the Adversary. self.batch_c15n = adv.batch_c15n - self.model_transform = adv.model_transform + self.adversary = adv.attack self.device = model.device @@ -49,6 +55,7 @@ def generate(self, **batch_armory_np): batch_tv_pth = self.batch_converter(batch_armory_np, device=self.device) # Attack + # Canonicalize input and target for the adversary, and revert it at the end. input, target = self.batch_c15n(batch_tv_pth) self.adversary.fit(input, target, model=self.model) input_adv, target_adv = self.adversary(input, target) From be5afef0ad1483264b302b0ef41d23a13a76022d Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 18 Sep 2023 11:34:29 -0700 Subject: [PATCH 107/126] Improve mart.generate_config. --- mart/generate_config.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/mart/generate_config.py b/mart/generate_config.py index 5bd99292..db7cee61 100644 --- a/mart/generate_config.py +++ b/mart/generate_config.py @@ -8,9 +8,9 @@ def generate( *overrides, version_base: str = "1.2", - config_dir: str = ".", - config_name: str, - output_node: str = None, + config_dir: str = "configs", + config_name: str = "lightning.yaml", + export_node: str = None, export_name: str = "output.yaml", resolve: bool = False, ): @@ -21,15 +21,15 @@ def generate( with initialize_config_dir(version_base=version_base, config_dir=config_dir): cfg = compose(config_name=config_name, overrides=overrides) - # Resolve all interpolation. + # Export a sub-tree. + if export_node is not None: + for key in export_node.split("."): + cfg = cfg[key] + + # Resolve all interpolation in the sub-tree. if resolve: OmegaConf.resolve(cfg) - # Don't output the whole tree. - if output_node is not None: - for key in output_node.split("."): - cfg = cfg[key] - # Create folders for output if necessary. 
folder = os.path.dirname(export_name) if folder != "" and not os.path.isdir(folder): From 79366b978e7ede3e6e73168b22af7cac758194fa Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 21 Sep 2023 10:04:00 -0700 Subject: [PATCH 108/126] Add mart.generate_config --- mart/generate_config.py | 43 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 mart/generate_config.py diff --git a/mart/generate_config.py b/mart/generate_config.py new file mode 100644 index 00000000..db7cee61 --- /dev/null +++ b/mart/generate_config.py @@ -0,0 +1,43 @@ +import os + +import fire +from hydra import compose, initialize_config_dir +from omegaconf import OmegaConf + + +def generate( + *overrides, + version_base: str = "1.2", + config_dir: str = "configs", + config_name: str = "lightning.yaml", + export_node: str = None, + export_name: str = "output.yaml", + resolve: bool = False, +): + # An absolute path {config_dir} is added to the search path of configs, preceding those in mart.configs. + if not os.path.isabs(config_dir): + config_dir = os.path.abspath(config_dir) + + with initialize_config_dir(version_base=version_base, config_dir=config_dir): + cfg = compose(config_name=config_name, overrides=overrides) + + # Export a sub-tree. + if export_node is not None: + for key in export_node.split("."): + cfg = cfg[key] + + # Resolve all interpolation in the sub-tree. + if resolve: + OmegaConf.resolve(cfg) + + # Create folders for output if necessary. + folder = os.path.dirname(export_name) + if folder != "" and not os.path.isdir(folder): + os.makedirs(folder) + + OmegaConf.save(config=cfg, f=export_name) + print(f"Config file saved to {export_name}") + + +if __name__ == "__main__": + fire.Fire(generate) From ec13141947c8ba2b6b33d1986f6fa7995bcfeec7 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 21 Sep 2023 10:04:30 -0700 Subject: [PATCH 109/126] Add header. --- mart/generate_config.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mart/generate_config.py b/mart/generate_config.py index db7cee61..383000c6 100644 --- a/mart/generate_config.py +++ b/mart/generate_config.py @@ -1,3 +1,9 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + import os import fire From da814ba0b5f7480c08e4157b5590d724704a36f3 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 21 Sep 2023 10:14:21 -0700 Subject: [PATCH 110/126] Print yaml to stdout. --- mart/generate_config.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/mart/generate_config.py b/mart/generate_config.py index 383000c6..0ca4cdd7 100644 --- a/mart/generate_config.py +++ b/mart/generate_config.py @@ -17,7 +17,6 @@ def generate( config_dir: str = "configs", config_name: str = "lightning.yaml", export_node: str = None, - export_name: str = "output.yaml", resolve: bool = False, ): # An absolute path {config_dir} is added to the search path of configs, preceding those in mart.configs. @@ -36,13 +35,8 @@ def generate( if resolve: OmegaConf.resolve(cfg) - # Create folders for output if necessary. - folder = os.path.dirname(export_name) - if folder != "" and not os.path.isdir(folder): - os.makedirs(folder) - - OmegaConf.save(config=cfg, f=export_name) - print(f"Config file saved to {export_name}") + # OmegaConf.to_yaml() already ends with `\n`. 
+ print(OmegaConf.to_yaml(cfg), end="") if __name__ == "__main__": From 8886b5d5b28929acc8c27f308617abf31b50d8e0 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 21 Sep 2023 11:17:50 -0700 Subject: [PATCH 111/126] Update mart.generate_config usage. --- examples/mart_armory/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index eb0ba259..bc4ca73b 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -14,14 +14,14 @@ pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg python -m mart.generate_config \ --config_dir=mart_armory/configs \ --config_name=assemble_attack.yaml \ ---export_name=path/to/attack.yaml \ batch_converter=object_detection \ batch_c15n=data_coco \ attack=[object_detection_mask_adversary] \ attack.objective=null \ attack.max_iters=10 \ attack.lr=26 \ -model_transform=armory_objdet +model_transform=armory_objdet \ +> path/to/attack.yaml ``` 2. Update the attack section in the Armory scenario configuration. From 8acf1c657c0566d7eb44e7286d5d45bfdd0e8012 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 21 Sep 2023 11:21:09 -0700 Subject: [PATCH 112/126] Add conversion between PyTorch tensors and Numpy arrays. --- mart/transforms/tensor_array.py | 42 +++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 mart/transforms/tensor_array.py diff --git a/mart/transforms/tensor_array.py b/mart/transforms/tensor_array.py new file mode 100644 index 00000000..ca88072f --- /dev/null +++ b/mart/transforms/tensor_array.py @@ -0,0 +1,42 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from functools import singledispatch + +import numpy as np +import torch + + +# A recursive function to convert all np.ndarray in an object to torch.Tensor, or vice versa. +@singledispatch +def convert(obj, device=None): + """All other types, no change.""" + return obj + + +@convert.register +def _(obj: dict, device=None): + return {key: convert(value, device=device) for key, value in obj.items()} + + +@convert.register +def _(obj: list, device=None): + return [convert(item, device=device) for item in obj] + + +@convert.register +def _(obj: tuple, device=None): + return tuple(convert(obj, device=device)) + + +@convert.register +def _(obj: np.ndarray, device=None): + return torch.tensor(obj, device=device) + + +@convert.register +def _(obj: torch.Tensor, device=None): + return obj.detach().cpu().numpy() From 45b735bdf358853fe9e0a8e8a356349fd04d73c2 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 21 Sep 2023 11:36:53 -0700 Subject: [PATCH 113/126] Add test. 
--- tests/test_transforms.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 tests/test_transforms.py diff --git a/tests/test_transforms.py b/tests/test_transforms.py new file mode 100644 index 00000000..9af53288 --- /dev/null +++ b/tests/test_transforms.py @@ -0,0 +1,21 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +import numpy as np +import torch + +from mart.transforms.tensor_array import convert + + +def test_tensor_array_two_way_convert(): + tensor_expected = [{"key": (torch.tensor(1.0), 2)}] + array_expected = [{"key": (np.array(1.0), 2)}] + + array_result = convert(tensor_expected) + assert array_expected == array_result + + tensor_result = convert(array_expected) + assert tensor_expected == tensor_result From 1d44eed290bbb794eaeb7e60cb464624d57e17d6 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 21 Sep 2023 11:37:40 -0700 Subject: [PATCH 114/126] Fix a bug. --- mart/transforms/tensor_array.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/transforms/tensor_array.py b/mart/transforms/tensor_array.py index ca88072f..00a9345a 100644 --- a/mart/transforms/tensor_array.py +++ b/mart/transforms/tensor_array.py @@ -29,7 +29,7 @@ def _(obj: list, device=None): @convert.register def _(obj: tuple, device=None): - return tuple(convert(obj, device=device)) + return tuple(convert(item, device=device) for item in obj) @convert.register From 03f6dfad1513752f35edb379dfa88e42822b1ceb Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 21 Sep 2023 12:37:22 -0700 Subject: [PATCH 115/126] Rename. --- examples/mart_armory/mart_armory/__init__.py | 2 +- .../mart_armory/{pytorch_wrapper.py => attack_wrapper.py} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename examples/mart_armory/mart_armory/{pytorch_wrapper.py => attack_wrapper.py} (100%) diff --git a/examples/mart_armory/mart_armory/__init__.py b/examples/mart_armory/mart_armory/__init__.py index 77272048..2d857924 100644 --- a/examples/mart_armory/mart_armory/__init__.py +++ b/examples/mart_armory/mart_armory/__init__.py @@ -1,5 +1,5 @@ from importlib import metadata -from mart_armory.pytorch_wrapper import MartAttack +from mart_armory.attack_wrapper import MartAttack __version__ = metadata.version(__package__ or __name__) diff --git a/examples/mart_armory/mart_armory/pytorch_wrapper.py b/examples/mart_armory/mart_armory/attack_wrapper.py similarity index 100% rename from examples/mart_armory/mart_armory/pytorch_wrapper.py rename to examples/mart_armory/mart_armory/attack_wrapper.py From 6951a07c902909927de1cd6f0856a977fa593775 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 12 Oct 2023 20:56:45 -0700 Subject: [PATCH 116/126] Fix batch_c15n in the wrapper. --- examples/mart_armory/mart_armory/attack_wrapper.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/examples/mart_armory/mart_armory/attack_wrapper.py b/examples/mart_armory/mart_armory/attack_wrapper.py index 8a258417..be16c59c 100644 --- a/examples/mart_armory/mart_armory/attack_wrapper.py +++ b/examples/mart_armory/mart_armory/attack_wrapper.py @@ -48,7 +48,13 @@ def __init__(self, model, mart_adv_config_yaml): self.adversary.to(self.device) # model_transform - self.model = self.model_transform(model) + self.model_transformed = self.model_transform(model) + + def model(self, input, target): + # Wrap a model for the Adversary which works with the canonical (input, target) format. 
+        batch = self.batch_c15n.revert(input, target)
+        output = self.model_transformed(*batch)
+        return output
 
     def generate(self, **batch_armory_np):
         # Armory format -> torchvision format

From fbbadd0fbc55ab973c74296dfa35e912a2dbd324 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 13 Oct 2023 10:54:48 -0700
Subject: [PATCH 117/126] Use Adam as the attack optimizer.

---
 examples/mart_armory/README.md | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md
index bc4ca73b..f0e7016e 100644
--- a/examples/mart_armory/README.md
+++ b/examples/mart_armory/README.md
@@ -8,7 +8,7 @@ pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg
 
 ## Usage
 
-1. Generate a YAML configuration of attack.
+1. Generate a YAML configuration of attack, using Adam as the optimizer.
 
 ```shell
 python -m mart.generate_config \
@@ -17,9 +17,11 @@ python -m mart.generate_config \
 batch_converter=object_detection \
 batch_c15n=data_coco \
 attack=[object_detection_mask_adversary] \
+attack.optimizer.optimizer.path=torch.optim.Adam \
+~attack.optimizer.momentum \
 attack.objective=null \
-attack.max_iters=10 \
-attack.lr=26 \
+attack.max_iters=500 \
+attack.lr=13 \
 model_transform=armory_objdet \
 > path/to/attack.yaml

From 5c921d6b39aa3caf3525803763cba00d5ff887a1 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 13 Oct 2023 11:00:49 -0700
Subject: [PATCH 118/126] Allow trainer.precision options in Adversary.

---
 mart/attack/adversary.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py
index 161ef870..9fbdec75 100644
--- a/mart/attack/adversary.py
+++ b/mart/attack/adversary.py
@@ -92,6 +92,7 @@ def __init__(
                 # We should disable progress bar in the progress_bar callback config if needed.
                 enable_progress_bar=True,
                 # detect_anomaly=True,
+                precision=kwargs.pop("precision", "32-true"),
             )
 
         else:

From beb35e1721ebd232db3df7d8ab36aaf61aa8b434 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 13 Oct 2023 11:01:10 -0700
Subject: [PATCH 119/126] Use 16bit-mixed precision in attack.

---
 examples/mart_armory/README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md
index f0e7016e..663d4b68 100644
--- a/examples/mart_armory/README.md
+++ b/examples/mart_armory/README.md
@@ -17,6 +17,7 @@ python -m mart.generate_config \
 batch_converter=object_detection \
 batch_c15n=data_coco \
 attack=[object_detection_mask_adversary] \
++attack.precision=16 \
 attack.optimizer.optimizer.path=torch.optim.Adam \
 ~attack.optimizer.momentum \
 attack.objective=null \

From e4c55a7114a6991cd1d8b42b4c576956f3619e90 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 13 Oct 2023 11:54:07 -0700
Subject: [PATCH 120/126] Remove dependency upon MART during development.

---
 examples/mart_armory/pyproject.toml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/examples/mart_armory/pyproject.toml b/examples/mart_armory/pyproject.toml
index 550e3bb6..18762db5 100644
--- a/examples/mart_armory/pyproject.toml
+++ b/examples/mart_armory/pyproject.toml
@@ -11,7 +11,8 @@ authors = [
 requires-python = ">=3.9"
 
 dependencies = [
-  "mart@git+https://github.com/IntelLabs/MART.git@example_armory_attack",
+  # We recommend installing MART in editable mode during the development phase.
+    # "mart@git+https://github.com/IntelLabs/MART.git@example_armory_attack",
 ]
 
 [project.urls]

From 2ff9d82f6dd8c027c7c25247e1b6a7152add360f Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 13 Oct 2023 11:58:38 -0700
Subject: [PATCH 121/126] Add python-fire to MART's dependencies.

---
 pyproject.toml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pyproject.toml b/pyproject.toml
index 2839733d..aa56f4ce 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -36,6 +36,8 @@ dependencies = [
 
   # ----- object detection----- #
   "pycocotools ~= 2.0.5",
+
+  "fire == 0.5.0",
 ]
 
 [project.urls]

From d76fd1501ee39f326889d209ce0cd0b07c23d21b Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 13 Oct 2023 12:18:34 -0700
Subject: [PATCH 122/126] Update README.

---
 examples/mart_armory/README.md | 104 +++++++++++++++++++++++++--------
 1 file changed, 80 insertions(+), 24 deletions(-)

diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md
index 663d4b68..b75f484b 100644
--- a/examples/mart_armory/README.md
+++ b/examples/mart_armory/README.md
@@ -1,9 +1,63 @@
+## Introduction
+
+We demonstrate how to configure and run a MART attack against object detection models in ARMORY.
+
+The demo attack here is about 30% faster than the baseline attack implementation in ARMORY, because the MART attack configuration specifies 16-bit mixed precision.
+
+MART is designed to be modular and configurable. It should empower users to evaluate the adversarial robustness of deep learning models more effectively and efficiently.
+
+Please reach out to [Weilin Xu](mailto:weilin.xu@intel.com) if you have any questions.
+
 ## Installation
 
-Install the `mart_armory` package from a repo subdirectory.
+Download the code repositories.
+
+```shell
+# Any directory works in place of `~/coder/`, since the commands below only use relative paths.
+mkdir ~/coder; cd ~/coder
+
+git clone https://github.com/twosixlabs/armory.git
+# Make sure we are on the same page.
+cd armory; git checkout 4a5e808bfa02952618da9ca7d5ae06f793775a11; cd ..
+
+git clone https://github.com/IntelLabs/MART.git -b example_armory_attack
+```
+
+Create and activate a Python virtual environment.
 
 ```shell
-pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg=mart_armory&subdirectory=examples/mart_armory'
+cd armory
+python -m venv .venv
+source .venv/bin/activate
+```
+
+Install ARMORY, MART, and the glue package mart_armory in editable mode.
+
+```shell
+pip install -e .[engine]
+pip install tensorflow tensorflow-datasets
+# PyTorch 2.0+ is already among MART's dependencies.
+pip install -e ../MART
+pip install -e ../MART/examples/mart_armory
+```
+
+Make sure PyTorch works with CUDA.
+
+```console
+$ CUDA_VISIBLE_DEVICES=0 python -c "import torch; print(torch.cuda.is_available())"
+True
+```
+
+> You may need to install a different PyTorch distribution if your CUDA is not 12.0.
+
+> Here's my `nvidia-smi` output: `| NVIDIA-SMI 525.125.06 Driver Version: 525.125.06 CUDA Version: 12.0 |`
+
+*Optional*: Apply a patch so we can run the ART-based attack in PyTorch 2.0+ for comparison.
+ +```shell +sed -i \ +'s/x_tensor.requires_grad = True/if not x_tensor.requires_grad:\n x_tensor.requires_grad = True/g' \ +.venv/lib/python3.9/site-packages/art/estimators/object_detection/pytorch_object_detector.py ``` ## Usage @@ -12,7 +66,7 @@ pip install 'git+https://github.com/IntelLabs/MART.git@example_armory_attack#egg ```shell python -m mart.generate_config \ ---config_dir=mart_armory/configs \ +--config_dir=../MART/examples/mart_armory/mart_armory/configs \ --config_name=assemble_attack.yaml \ batch_converter=object_detection \ batch_c15n=data_coco \ @@ -24,35 +78,37 @@ attack.objective=null \ attack.max_iters=500 \ attack.lr=13 \ model_transform=armory_objdet \ -> path/to/attack.yaml -``` - -2. Update the attack section in the Armory scenario configuration. - -```json -"attack": { - "module": "mart_armory", - "name": "MartAttack", - "kwargs": { - "mart_adv_config_yaml": "path/to/attack.yaml" - }, - "knowledge": "white", - "use_label": true -}, +> mart_objdet_attack_adam500.yaml ``` -Note that Armory requires the argument `knowledge`. The statement `"use_label": true` gets `y` for the attack. - -Alternatively, we can use `jq` to update existing scenario json files, for example +2. Run the MART attack on one example. -```bash +```shell cat scenario_configs/eval7/carla_overhead_object_detection/carla_obj_det_adversarialpatch_undefended.json \ | jq 'del(.attack)' \ | jq '.attack.knowledge="white"' \ | jq '.attack.use_label=true' \ | jq '.attack.module="mart_armory"' \ | jq '.attack.name="MartAttack"' \ -| jq '.attack.kwargs.mart_adv_config_yaml="path/to/attack.yaml"' \ +| jq '.attack.kwargs.mart_adv_config_yaml="mart_objdet_attack_adam500.yaml"' \ +| jq '.scenario.export_batches=true' \ +| CUDA_VISIBLE_DEVICES=0 armory run - --no-docker --use-gpu --gpus=1 --num-eval-batches 1 +``` + +``` +2023-10-13 12:05:33 1m14s METRIC armory.instrument.instrument:_write:743 adversarial_object_detection_mAP_tide on adversarial examples w.r.t. ground truth labels: {'mAP': {0.5: 0.0, 0.55: 0.0, 0.6: 0.0, 0.65: 0.0, 0.7: 0.0, 0.75: 0.0, 0.8: 0.0, 0.85: 0.0, 0.9: 0.0, 0.95: 0.0}, 'errors': {'main': {'dAP': {'Cls': 0.0, 'Loc': 0.0, 'Both': 0.0, 'Dupe': 0.0, 'Bkg': 0.0, 'Miss': 0.0}, 'count': {'Cls': 0, 'Loc': 0, 'Both': 0, 'Dupe': 0, 'Bkg': 100, 'Miss': 21}}, 'special': {'dAP': {'FalsePos': 0.0, 'FalseNeg': 0.0}, 'count': {'FalseNeg': 21}}}} +``` + +## Comparison + +Run the baseline attack on the same example for comparison. The MART attack is ~30% faster due to the 16-bit mixed precision. + +```shell +cat scenario_configs/eval7/carla_overhead_object_detection/carla_obj_det_adversarialpatch_undefended.json \ | jq '.scenario.export_batches=true' \ -| CUDA_VISIBLE_DEVICES=0 armory run - --no-docker --use-gpu --gpus=1 +| CUDA_VISIBLE_DEVICES=0 armory run - --no-docker --use-gpu --gpus=1 --num-eval-batches 1 +``` + +```console +2023-10-13 12:11:50 1m33s METRIC armory.instrument.instrument:_write:743 adversarial_object_detection_mAP_tide on adversarial examples w.r.t. 
ground truth labels: {'mAP': {0.5: 0.0, 0.55: 0.0, 0.6: 0.0, 0.65: 0.0, 0.7: 0.0, 0.75: 0.0, 0.8: 0.0, 0.85: 0.0, 0.9: 0.0, 0.95: 0.0}, 'errors': {'main': {'dAP': {'Cls': 0.0, 'Loc': 0.0, 'Both': 0.0, 'Dupe': 0.0, 'Bkg': 0.0, 'Miss': 0.0}, 'count': {'Cls': 0, 'Loc': 0, 'Both': 0, 'Dupe': 0, 'Bkg': 100, 'Miss': 21}}, 'special': {'dAP': {'FalsePos': 0.0, 'FalseNeg': 0.0}, 'count': {'FalseNeg': 21}}}} ``` From d6fedcc48403f1b8cd306d866a8d666b38832b6c Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 2 Nov 2023 12:11:19 -0700 Subject: [PATCH 123/126] Update instructions for ARMORY v0.19.0 --- examples/mart_armory/README.md | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index b75f484b..5f31b399 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -18,7 +18,7 @@ mkdir ~/coder; cd ~/coder git clone https://github.com/twosixlabs/armory.git # Make sure we are on the same page. -cd armory; git checkout 4a5e808bfa02952618da9ca7d5ae06f793775a11; cd .. +cd armory; git checkout tags/v0.19.0 -b r0.19.0; cd .. git clone https://github.com/IntelLabs/MART.git -b example_armory_attack ``` @@ -52,14 +52,6 @@ True > Here's my `nvidia-smi` output: `| NVIDIA-SMI 525.125.06 Driver Version: 525.125.06 CUDA Version: 12.0 |` -*Optional*: Apply a patch so we can run the ART-based attack in PyTorch 2.0+ for comparison. - -```shell -sed -i \ -'s/x_tensor.requires_grad = True/if not x_tensor.requires_grad:\n x_tensor.requires_grad = True/g' \ -.venv/lib/python3.9/site-packages/art/estimators/object_detection/pytorch_object_detector.py -``` - ## Usage 1. Generate a YAML configuration of attack, using Adam as the optimizer. From 8abc8a01f8d2bfd9462098d398875a4a7e163202 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 6 Nov 2023 20:12:06 -0800 Subject: [PATCH 124/126] Update attack composition. --- examples/mart_armory/README.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index 5f31b399..8456429f 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -62,7 +62,13 @@ python -m mart.generate_config \ --config_name=assemble_attack.yaml \ batch_converter=object_detection \ batch_c15n=data_coco \ -attack=[object_detection_mask_adversary] \ +attack=[adversary,gradient_ascent,mask] \ ++attack/composer/perturber/initializer=uniform \ +attack.composer.perturber.initializer.max=255 \ +attack.composer.perturber.initializer.min=0 \ ++attack/composer/functions=overlay \ ++attack/gradient_modifier=sign \ ++attack/gain=rcnn_training_loss \ +attack.precision=16 \ attack.optimizer.optimizer.path=torch.optim.Adam \ ~attack.optimizer.momentum \ From ea8c60ff275ab2bf688a59455cd982e09f7d14bc Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 6 Nov 2023 20:20:00 -0800 Subject: [PATCH 125/126] Update mask config. 
--- examples/mart_armory/README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index 8456429f..6eab33f5 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -62,7 +62,10 @@ python -m mart.generate_config \ --config_name=assemble_attack.yaml \ batch_converter=object_detection \ batch_c15n=data_coco \ -attack=[adversary,gradient_ascent,mask] \ +attack=[adversary,gradient_ascent] \ ++attack/composer/perturber/projector=mask_range \ ++attack/enforcer=default \ ++attack/enforcer/constraints=[mask,pixel_range] \ +attack/composer/perturber/initializer=uniform \ attack.composer.perturber.initializer.max=255 \ attack.composer.perturber.initializer.min=0 \ From f14f61da142a897111e3a4e096d0d996d16d0125 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 6 Nov 2023 20:27:03 -0800 Subject: [PATCH 126/126] Update gradient ascent config. --- examples/mart_armory/README.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/examples/mart_armory/README.md b/examples/mart_armory/README.md index 6eab33f5..970f5944 100644 --- a/examples/mart_armory/README.md +++ b/examples/mart_armory/README.md @@ -62,7 +62,11 @@ python -m mart.generate_config \ --config_name=assemble_attack.yaml \ batch_converter=object_detection \ batch_c15n=data_coco \ -attack=[adversary,gradient_ascent] \ +attack=adversary \ ++optimizer@attack.optimizer=sgd \ +attack.optimizer.maximize=true \ ++attack.optimizer.lr=13 \ ++attack.max_iters=500 \ +attack/composer/perturber/projector=mask_range \ +attack/enforcer=default \ +attack/enforcer/constraints=[mask,pixel_range] \ @@ -76,8 +80,6 @@ attack.composer.perturber.initializer.min=0 \ attack.optimizer.optimizer.path=torch.optim.Adam \ ~attack.optimizer.momentum \ attack.objective=null \ -attack.max_iters=500 \ -attack.lr=13 \ model_transform=armory_objdet \ > mart_objdet_attack_adam500.yaml ```
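For readers tracing PATCH 114: `mart.transforms.tensor_array.convert` is a `functools.singledispatch` converter that recursively turns `torch.Tensor` leaves into NumPy arrays and vice versa; the two-way test in `tests/test_transforms.py` exercises exactly this. The removed line called `convert` on the whole tuple, which dispatched straight back to the same handler and recursed until `RecursionError`; the fix converts each element instead. Below is a minimal sketch of the pattern. Only the list and tuple handlers are visible in the hunk, so the remaining handlers here are assumptions:

```python
# Sketch of the dispatcher in mart/transforms/tensor_array.py.
# Handlers other than list/tuple are assumed, not taken from the diff.
from functools import singledispatch

import numpy as np
import torch


@singledispatch
def convert(obj, device=None):
    # Fallback: non-container, non-tensor leaves (ints, strings, ...) pass through.
    return obj


@convert.register
def _(obj: torch.Tensor, device=None):
    # torch.Tensor -> np.ndarray
    return obj.detach().cpu().numpy()


@convert.register
def _(obj: np.ndarray, device=None):
    # np.ndarray -> torch.Tensor, optionally placed on a target device.
    return torch.as_tensor(obj, device=device)


@convert.register
def _(obj: dict, device=None):
    return {key: convert(value, device=device) for key, value in obj.items()}


@convert.register
def _(obj: list, device=None):
    return [convert(item, device=device) for item in obj]


@convert.register
def _(obj: tuple, device=None):
    # Pre-fix, this was `tuple(convert(obj, device=device))`, which dispatched
    # right back to this handler and recursed forever on any tuple input.
    return tuple(convert(item, device=device) for item in obj)
```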