From 5b46ddfa003e738f02b3842c3b9a5a7270e0bbc1 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 28 Jun 2023 13:50:02 -0700 Subject: [PATCH 01/74] Initial implementation of adversarial training callback --- mart/callbacks/__init__.py | 1 + mart/callbacks/adversarial_training.py | 46 +++++++++++++++++++ .../callbacks/adversarial_training.yaml | 3 ++ mart/configs/experiment/CIFAR10_CNN_Adv.yaml | 39 ++++++++++------ 4 files changed, 76 insertions(+), 13 deletions(-) create mode 100644 mart/callbacks/adversarial_training.py create mode 100644 mart/configs/callbacks/adversarial_training.yaml diff --git a/mart/callbacks/__init__.py b/mart/callbacks/__init__.py index 8e117180..0dccb7f7 100644 --- a/mart/callbacks/__init__.py +++ b/mart/callbacks/__init__.py @@ -1,3 +1,4 @@ +from .adversarial_training import * from .eval_mode import * from .gradients import * from .no_grad_mode import * diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py new file mode 100644 index 00000000..7453e52e --- /dev/null +++ b/mart/callbacks/adversarial_training.py @@ -0,0 +1,46 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from pytorch_lightning.callbacks import Callback + +from mart.models import LitModular + +__all__ = ["AdversarialTraining"] + + +class AdversarialTraining(Callback): + """Perturbs inputs to be adversarial.""" + + def __init__( + self, adversary=None, train_adversary=None, validation_adversary=None, test_adversary=None + ): + adversary = adversary or train_adversary + + self.train_adversary = train_adversary or adversary + self.validation_adversary = validation_adversary or adversary + self.test_adversary = test_adversary or adversary + + def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): + input, target = batch + + # FIXME: We reach into LitModular here...how can we get rid of this? + assert isinstance(pl_module, LitModular) + model = pl_module.model + sequence = model._sequences["training"] + + # FIXME: This doesn't work because sequence does not include the Adversary module. How can we fix that? + # Because this a callback, we can safely assume the Adversary module should live before the model. + # We should be able to "manually" insert it into the sequence here. + out = self.train_adversary(input=input, target=target, model=model, sequence=sequence) + print("out =", out) + + def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): + # FIXME: Copy on_train_batch_start + pass + + def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): + # FIXME: Copy on_train_batch_start + pass diff --git a/mart/configs/callbacks/adversarial_training.yaml b/mart/configs/callbacks/adversarial_training.yaml new file mode 100644 index 00000000..0f6a7b47 --- /dev/null +++ b/mart/configs/callbacks/adversarial_training.yaml @@ -0,0 +1,3 @@ +adversarial_training: + _target_: mart.callbacks.AdversarialTraining + adversary: ??? 
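The new callback and its config are wired up through Hydra in the experiment file that follows. As a rough usage sketch (not part of the patch), the callback could also be attached to a Trainer directly; `fgsm_adversary` and `pgd_adversary` below are hypothetical, already-instantiated `mart.attack.Adversary` objects, not names defined in this series.

# Illustrative sketch only; `fgsm_adversary` and `pgd_adversary` are hypothetical
# Adversary instances and are not defined by this patch.
import pytorch_lightning as pl

from mart.callbacks import AdversarialTraining

callback = AdversarialTraining(
    train_adversary=fgsm_adversary,  # used for training batches
    test_adversary=pgd_adversary,    # used for test batches
    # validation_adversary is omitted, so it falls back to the train adversary
    # via the `adversary or train_adversary` logic in __init__ above.
)

# max_steps matches the CIFAR10_CNN_Adv experiment config shown below.
trainer = pl.Trainer(max_steps=5850, callbacks=[callback])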
diff --git a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml index c254669b..ced39cd1 100644 --- a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml +++ b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml @@ -1,23 +1,36 @@ # @package _global_ defaults: - - CIFAR10_CNN - - /attack@model.modules.input_adv_training: classification_eps1.75_fgsm - - /attack@model.modules.input_adv_test: classification_eps2_pgd10_step1 + - /attack@callbacks.adversarial_training.adversary: classification_eps1.75_fgsm + - /attack@callbacks.adversarial_training.test_adversary: classification_eps2_pgd10_step1 + - override /datamodule: cifar10 + - override /model: classifier_cifar10_cnn + - override /metric: accuracy + - override /optimization: super_convergence + - override /callbacks: [model_checkpoint, lr_monitor, adversarial_training] task_name: "CIFAR10_CNN_Adv" tags: ["adv", "fat"] -model: - training_sequence: - seq005: input_adv_training +optimized_metric: "test_metrics/acc" + +callbacks: + model_checkpoint: + monitor: "validation_metrics/acc" + mode: "max" - seq010: - preprocessor: - _call_with_args_: ["input_adv_training"] +trainer: + # 50K training images, batch_size=128, drop_last, 15 epochs. + max_steps: 5850 + precision: 32 - test_sequence: - seq005: input_adv_test +datamodule: + ims_per_batch: 128 + world_size: 1 + num_workers: 8 - seq010: - preprocessor: ["input_adv_test"] +model: + optimizer: + lr: 0.1 + momentum: 0.9 + weight_decay: 1e-4 From 849f07136e1f31986f6e3ffb60bdfa3a2f952695 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 29 Jun 2023 13:42:56 -0700 Subject: [PATCH 02/74] Make callback work in CPU --- mart/attack/adversary.py | 23 +++++++++++++++-------- mart/callbacks/adversarial_training.py | 25 ++++++++++--------------- mart/configs/attack/adversary.yaml | 1 + 3 files changed, 26 insertions(+), 23 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 8c5513d2..376f8429 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -152,22 +152,21 @@ def configure_gradient_clipping( self.gradient_modifier(group["params"]) @silent() - def forward(self, *, model=None, sequence=None, **batch): + def forward(self, *, model=None, **batch): batch["model"] = model - batch["sequence"] = sequence - # Adversary lives within a sequence of model. To signal the adversary should attack, one - # must pass a model to attack when calling the adversary. Since we do not know where the - # Adversary lives inside the model, we also need the remaining sequence to be able to - # get a loss. - if model and sequence: + # Adversary can live within a sequence of model. To signal the adversary should + # attack, one must pass a model to attack when calling the adversary. Since we + # do not know where the Adversary lives inside the model, we also need the + # remaining sequence to be able to get a loss. + if model: self._attack(**batch) perturbation = self.perturber(**batch) input_adv = self.composer(perturbation, **batch) # Enforce constraints after the attack optimization ends. - if model and sequence: + if model: self.enforcer(input_adv, **batch) return input_adv @@ -211,3 +210,11 @@ def cpu(self): # This is a problem when this LightningModule has parameters, so we stop this from # happening by ignoring the call to cpu(). 
pass + + def attack(adversary, model, **batch): + # Create attacked model where the adversary executes before the model + def attacked_model(*, input, **batch): + adv_input = adversary(input=input, **batch) + return model(input=adv_input, **batch) + + return adversary(**batch, model=attacked_model) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 7453e52e..875bea18 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -23,24 +23,19 @@ def __init__( self.validation_adversary = validation_adversary or adversary self.test_adversary = test_adversary or adversary + # FIXME: These are hacks. Ideally we would use on_after_batch_transfer but that isn't exposed to + # callbacks only to LightningModules. But maybe we can forward those to callbacks? def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): input, target = batch - - # FIXME: We reach into LitModular here...how can we get rid of this? - assert isinstance(pl_module, LitModular) - model = pl_module.model - sequence = model._sequences["training"] - - # FIXME: This doesn't work because sequence does not include the Adversary module. How can we fix that? - # Because this a callback, we can safely assume the Adversary module should live before the model. - # We should be able to "manually" insert it into the sequence here. - out = self.train_adversary(input=input, target=target, model=model, sequence=sequence) - print("out =", out) + input_adv = self.train_adversary.attack(pl_module, input=input, target=target, step="training") + input[:] = input_adv # XXX: hacke def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): - # FIXME: Copy on_train_batch_start - pass + input, target = batch + input_adv = self.validation_adversary.attack(pl_module, input=input, target=target, step="validation") + input[:] = input_adv # XXX: hacke def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): - # FIXME: Copy on_train_batch_start - pass + input, target = batch + input_adv = self.test_adversary.attack(pl_module, input=input, target=target, step="test") + input[:] = input_adv # XXX: hacke diff --git a/mart/configs/attack/adversary.yaml b/mart/configs/attack/adversary.yaml index 40188b5a..bbf52433 100644 --- a/mart/configs/attack/adversary.yaml +++ b/mart/configs/attack/adversary.yaml @@ -2,6 +2,7 @@ defaults: - /callbacks@callbacks: [progress_bar] _target_: mart.attack.Adversary +_convert_: all perturber: ??? composer: ??? optimizer: From feffbe3d5d9e384d22aca0a30d8866b683ac72c0 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 29 Jun 2023 13:45:30 -0700 Subject: [PATCH 03/74] style --- mart/callbacks/adversarial_training.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 875bea18..b633b3c1 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -27,12 +27,16 @@ def __init__( # callbacks only to LightningModules. But maybe we can forward those to callbacks? 
def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): input, target = batch - input_adv = self.train_adversary.attack(pl_module, input=input, target=target, step="training") + input_adv = self.train_adversary.attack( + pl_module, input=input, target=target, step="training" + ) input[:] = input_adv # XXX: hacke def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): input, target = batch - input_adv = self.validation_adversary.attack(pl_module, input=input, target=target, step="validation") + input_adv = self.validation_adversary.attack( + pl_module, input=input, target=target, step="validation" + ) input[:] = input_adv # XXX: hacke def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): From 5e6c272273c63bb7e1391e1ee2aaae03ab66c3ca Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 29 Jun 2023 16:58:04 -0700 Subject: [PATCH 04/74] Add and use on_after_batch_transfer hook --- mart/callbacks/adversarial_training.py | 38 ++++++++++++++------------ mart/models/modular.py | 8 ++++++ 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index b633b3c1..de4392fd 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -23,23 +23,27 @@ def __init__( self.validation_adversary = validation_adversary or adversary self.test_adversary = test_adversary or adversary - # FIXME: These are hacks. Ideally we would use on_after_batch_transfer but that isn't exposed to - # callbacks only to LightningModules. But maybe we can forward those to callbacks? - def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): + def on_after_batch_transfer(self, trainer, pl_module, batch, dataloader_idx): + # FIXME: Would be nice if batch was a structured object (or a dict) input, target = batch - input_adv = self.train_adversary.attack( - pl_module, input=input, target=target, step="training" - ) - input[:] = input_adv # XXX: hacke - def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): - input, target = batch - input_adv = self.validation_adversary.attack( - pl_module, input=input, target=target, step="validation" - ) - input[:] = input_adv # XXX: hacke + if trainer.training: + adversary = self.train_adversary + step = "training" # FIXME: Use pl_module.training_step? - def on_test_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): - input, target = batch - input_adv = self.test_adversary.attack(pl_module, input=input, target=target, step="test") - input[:] = input_adv # XXX: hacke + elif trainer.validating: + adversary = self.validation_adversary + step = "validation" # FIXME: Use pl_module.validation_step? + + elif trainer.testing: + adversary = self.test_adversary + step = "test" # FIXME: Use pl_module.test_step? + + else: + return batch + + # Move adversary to same device as pl_module + adversary.to(pl_module.device) + input = adversary.attack(pl_module, input=input, target=target, step=step) + + return [input, target] diff --git a/mart/models/modular.py b/mart/models/modular.py index a27c6867..461fd701 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -120,6 +120,14 @@ def configure_optimizers(self): return config + # FIXME: This is a hack to make callbacks have an on_after_batch_transfer hook. 
+ def on_after_batch_transfer(self, batch, dataloader_idx): + for callback in self.trainer.callbacks: + if not hasattr(callback, "on_after_batch_transfer"): + continue + + return callback.on_after_batch_transfer(self.trainer, self, batch, dataloader_idx) + def forward(self, **kwargs): return self.model(**kwargs) From 59003c25cc6b1ea95e6ec7a03710a17c76bfdf68 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 30 Jun 2023 08:17:34 -0700 Subject: [PATCH 05/74] Override on_after_batch_transfer in callback setup --- mart/callbacks/adversarial_training.py | 19 ++++++++++++++++--- mart/models/modular.py | 8 -------- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index de4392fd..c8ac22fd 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -4,6 +4,8 @@ # SPDX-License-Identifier: BSD-3-Clause # +import types + from pytorch_lightning.callbacks import Callback from mart.models import LitModular @@ -23,21 +25,32 @@ def __init__( self.validation_adversary = validation_adversary or adversary self.test_adversary = test_adversary or adversary - def on_after_batch_transfer(self, trainer, pl_module, batch, dataloader_idx): + def setup(self, trainer, pl_module, stage=None): + pl_module.on_after_batch_transfer = types.MethodType( + self.on_after_batch_transfer, pl_module + ) + + def teardown(self, trainer, pl_module, start=None): + # FIXME: remove on_after_batch_transfer + pass + + def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): # FIXME: Would be nice if batch was a structured object (or a dict) input, target = batch + trainer = pl_module.trainer + if trainer.training: adversary = self.train_adversary step = "training" # FIXME: Use pl_module.training_step? elif trainer.validating: adversary = self.validation_adversary - step = "validation" # FIXME: Use pl_module.validation_step? + step = "validation" # FIXME: Use pl_module.training_step? elif trainer.testing: adversary = self.test_adversary - step = "test" # FIXME: Use pl_module.test_step? + step = "test" # FIXME: Use pl_module.training_step? else: return batch diff --git a/mart/models/modular.py b/mart/models/modular.py index 461fd701..a27c6867 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -120,14 +120,6 @@ def configure_optimizers(self): return config - # FIXME: This is a hack to make callbacks have an on_after_batch_transfer hook. 
- def on_after_batch_transfer(self, batch, dataloader_idx): - for callback in self.trainer.callbacks: - if not hasattr(callback, "on_after_batch_transfer"): - continue - - return callback.on_after_batch_transfer(self.trainer, self, batch, dataloader_idx) - def forward(self, **kwargs): return self.model(**kwargs) From 8218f6516d5c8683a6b0c12d044d171b43bcfbbb Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 30 Jun 2023 09:12:58 -0700 Subject: [PATCH 06/74] Restore on_after_batch_transfer hook in teardown --- mart/callbacks/adversarial_training.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index c8ac22fd..1dfe741e 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -26,13 +26,13 @@ def __init__( self.test_adversary = test_adversary or adversary def setup(self, trainer, pl_module, stage=None): + self._on_after_batch_transfer = pl_module.on_after_batch_transfer pl_module.on_after_batch_transfer = types.MethodType( self.on_after_batch_transfer, pl_module ) - def teardown(self, trainer, pl_module, start=None): - # FIXME: remove on_after_batch_transfer - pass + def teardown(self, trainer, pl_module, stage=None): + pl_module.on_after_batch_transfer = self._on_after_batch_transfer def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): # FIXME: Would be nice if batch was a structured object (or a dict) From 226664d216ee608c29f9c7697a88ac4bf98ecb67 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 30 Jun 2023 09:13:26 -0700 Subject: [PATCH 07/74] cleanup --- mart/callbacks/adversarial_training.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 1dfe741e..f19cbb04 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -39,19 +39,15 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): input, target = batch trainer = pl_module.trainer - if trainer.training: adversary = self.train_adversary - step = "training" # FIXME: Use pl_module.training_step? - + step = "training" elif trainer.validating: adversary = self.validation_adversary - step = "validation" # FIXME: Use pl_module.training_step? - + step = "validation" elif trainer.testing: adversary = self.test_adversary - step = "test" # FIXME: Use pl_module.training_step? 
- + step = "test" else: return batch From d1187c90fc8a791d29d82057bad35c4f64a6030c Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 30 Jun 2023 09:14:04 -0700 Subject: [PATCH 08/74] Call original on_after_batch_transfer hook --- mart/callbacks/adversarial_training.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index f19cbb04..5b6fc430 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -35,6 +35,8 @@ def teardown(self, trainer, pl_module, stage=None): pl_module.on_after_batch_transfer = self._on_after_batch_transfer def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): + batch = self._on_after_batch_transfer(batch, dataloader_idx) + # FIXME: Would be nice if batch was a structured object (or a dict) input, target = batch From 9850a7862897944381d7c647537920b422674666 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 30 Jun 2023 09:18:21 -0700 Subject: [PATCH 09/74] Move Adversary.attack into callback --- mart/attack/adversary.py | 8 -------- mart/callbacks/adversarial_training.py | 20 ++++++++++++++------ 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 376f8429..ff822673 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -210,11 +210,3 @@ def cpu(self): # This is a problem when this LightningModule has parameters, so we stop this from # happening by ignoring the call to cpu(). pass - - def attack(adversary, model, **batch): - # Create attacked model where the adversary executes before the model - def attacked_model(*, input, **batch): - adv_input = adversary(input=input, **batch) - return model(input=adv_input, **batch) - - return adversary(**batch, model=attacked_model) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 5b6fc430..65b90b98 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -37,9 +37,7 @@ def teardown(self, trainer, pl_module, stage=None): def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): batch = self._on_after_batch_transfer(batch, dataloader_idx) - # FIXME: Would be nice if batch was a structured object (or a dict) - input, target = batch - + # FIXME: Remove use of step trainer = pl_module.trainer if trainer.training: adversary = self.train_adversary @@ -53,8 +51,18 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): else: return batch - # Move adversary to same device as pl_module + # Create attacked model where the adversary executes before the model + # FIXME: Should we just use pl_module.training_step? Ideally we would not decompose batch + # and instead pass batch directly to the underlying pl_module since it knows how to + # interpret batch. + def attacked_model(input, **batch): + input_adv = adversary(input=input, **batch) + return pl_module(input=input_adv, **batch) + + # Move adversary to same device as pl_module and run attack + # FIXME: Directly pass batch instead of assuming it has a structure? 
+ input, target = batch adversary.to(pl_module.device) - input = adversary.attack(pl_module, input=input, target=target, step=step) + input_adv = adversary(input=input, target=target, step=step, model=attacked_model) - return [input, target] + return [input_adv, target] From d71cb43591e813d687029c1432809b8ce182f4de Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 10:11:15 -0700 Subject: [PATCH 10/74] Use training sequence by default. --- mart/models/modular.py | 1 + mart/nn/nn.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/mart/models/modular.py b/mart/models/modular.py index 192204a2..e814fee9 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -62,6 +62,7 @@ def __init__( "training": training_sequence, "validation": validation_sequence, "test": test_sequence, + None: training_sequence, # use training sequence with losses by default. } self.model = SequentialDict(modules, sequences) diff --git a/mart/nn/nn.py b/mart/nn/nn.py index 02113899..67d82c1b 100644 --- a/mart/nn/nn.py +++ b/mart/nn/nn.py @@ -57,7 +57,8 @@ def __init__(self, modules, sequences=None): self._sequences = { name: self.parse_sequence(sequence) for name, sequence in sequences.items() } - self._sequences[None] = self + # We intend to make training sequence as the default sequence. + # self._sequences[None] = self def parse_sequence(self, sequence): if sequence is None: From 83f017e4c0fdf9c4acddd664214376d42837b7c8 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 10:11:54 -0700 Subject: [PATCH 11/74] Assume a simple model interface in adversary. --- mart/attack/adversary.py | 60 +++++++++++++++----------- mart/callbacks/adversarial_training.py | 25 ++++------- 2 files changed, 43 insertions(+), 42 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 4a884c1c..c6d05bb7 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -101,6 +101,9 @@ def __init__( assert self._attacker.max_epochs == 0 assert self._attacker.limit_train_batches > 0 + # TODO: Make this configurable. E.g. [0,1] <-> [0,255] + self.transform = self.untransform = lambda x: x + @property def perturber(self) -> Perturber: # Hide the perturber module in a list, so that perturbation is not exported as a parameter in the model checkpoint, @@ -110,13 +113,26 @@ def perturber(self) -> Perturber: def configure_optimizers(self): return self.optimizer(self.perturber) + def get_input_adv(self, *, input, target, untransform=True): + perturbation = self.perturber(input=input, target=target) + input_adv = self.composer(perturbation, input=input, target=target) + + if untransform: + input_adv = self.untransform(input_adv) + + return input_adv + def training_step(self, batch, batch_idx): + # TODO: We shouldn't need to copy because it is never changed? # copy batch since we modify it and it is used internally - batch = batch.copy() + # batch = batch.copy() + + # Compose un-transformed input_adv from batch["input"], then give to model for updated gain. + # Note: Only input and target are required by perturber.projector and composer. + input_adv = self.get_input_adv(**batch) - # We need to evaluate the perturbation against the whole model, so call it normally to get a gain. - model = batch.pop("model") - outputs = model(**batch) + # A model that returns output dictionary. + outputs = self.model(input=input_adv, target=batch["target"]) # FIXME: This should really be just `return outputs`. But this might require a new sequence? 
# FIXME: Everything below here should live in the model as modules. @@ -150,36 +166,30 @@ def configure_gradient_clipping( self.gradient_modifier(group["params"]) @silent() - def forward(self, *, model=None, **batch): - batch["model"] = model - - # Adversary can live within a sequence of model. To signal the adversary should - # attack, one must pass a model to attack when calling the adversary. Since we - # do not know where the Adversary lives inside the model, we also need the - # remaining sequence to be able to get a loss. - if model: - self._attack(**batch) + def forward(self, *, input, target, model): + # What we need is a frozen model that returns (a dictionary of) logits, or losses. + self.model = model - perturbation = self.perturber(**batch) - input_adv = self.composer(perturbation, **batch) - - # Enforce constraints after the attack optimization ends. - if model: - self.enforcer(input_adv, **batch) - - return input_adv - - def _attack(self, *, input, **batch): - batch["input"] = input + # Transform input so that it's easier to work with by adversary. + input_transformed = self.transform(input) + batch = {"input": input_transformed, "target": target} # Configure and reset perturbation for current inputs - self.perturber.configure_perturbation(input) + self.perturber.configure_perturbation(input_transformed) # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. self.attacker.fit_loop.max_epochs += 1 self.attacker.fit(self, train_dataloaders=cycle([batch])) + # Get the transformed input_adv for enforcer checking. + input_adv_transformed = self.get_input_adv(untransform=False, **batch) + self.enforcer(input_adv_transformed, **batch) + # Un-transform to the same format as input. + input_adv = self.untransform(input_adv_transformed) + + return input_adv + @property def attacker(self): if not isinstance(self._attacker, partial): diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 65b90b98..4ef44b16 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -6,9 +6,7 @@ import types -from pytorch_lightning.callbacks import Callback - -from mart.models import LitModular +from lightning.pytorch.callbacks import Callback __all__ = ["AdversarialTraining"] @@ -16,6 +14,7 @@ class AdversarialTraining(Callback): """Perturbs inputs to be adversarial.""" + # TODO: training/validation/test or train/val/test def __init__( self, adversary=None, train_adversary=None, validation_adversary=None, test_adversary=None ): @@ -37,32 +36,24 @@ def teardown(self, trainer, pl_module, stage=None): def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): batch = self._on_after_batch_transfer(batch, dataloader_idx) - # FIXME: Remove use of step trainer = pl_module.trainer if trainer.training: adversary = self.train_adversary - step = "training" elif trainer.validating: adversary = self.validation_adversary - step = "validation" elif trainer.testing: adversary = self.test_adversary - step = "test" else: return batch - # Create attacked model where the adversary executes before the model - # FIXME: Should we just use pl_module.training_step? Ideally we would not decompose batch - # and instead pass batch directly to the underlying pl_module since it knows how to - # interpret batch. 
- def attacked_model(input, **batch): - input_adv = adversary(input=input, **batch) - return pl_module(input=input_adv, **batch) - # Move adversary to same device as pl_module and run attack + adversary.to(pl_module.device) + # FIXME: Directly pass batch instead of assuming it has a structure? input, target = batch - adversary.to(pl_module.device) - input_adv = adversary(input=input, target=target, step=step, model=attacked_model) + input_adv = adversary(input=input, target=target, model=pl_module) + + # Replace the adversarial trainer with the original trainer. + pl_module.trainer = trainer return [input_adv, target] From 88a807be66bdb208b09ad9bea8cc5e7f5c756abb Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 10:12:13 -0700 Subject: [PATCH 12/74] Update configs. --- mart/configs/callbacks/adversarial_training.yaml | 2 +- mart/configs/experiment/CIFAR10_CNN_Adv.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mart/configs/callbacks/adversarial_training.yaml b/mart/configs/callbacks/adversarial_training.yaml index 0f6a7b47..232e2278 100644 --- a/mart/configs/callbacks/adversarial_training.yaml +++ b/mart/configs/callbacks/adversarial_training.yaml @@ -1,3 +1,3 @@ adversarial_training: _target_: mart.callbacks.AdversarialTraining - adversary: ??? + # adversary: ??? diff --git a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml index ced39cd1..3d4b1c12 100644 --- a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml +++ b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml @@ -1,7 +1,7 @@ # @package _global_ defaults: - - /attack@callbacks.adversarial_training.adversary: classification_eps1.75_fgsm + - /attack@callbacks.adversarial_training.train_adversary: classification_eps1.75_fgsm - /attack@callbacks.adversarial_training.test_adversary: classification_eps2_pgd10_step1 - override /datamodule: cifar10 - override /model: classifier_cifar10_cnn From 9dfd7f6c413a9c0aafac42c41e0d47599bd95b6e Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 10:44:02 -0700 Subject: [PATCH 13/74] Log original gain on progress bar. --- mart/attack/adversary.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index c6d05bb7..650af474 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -139,6 +139,9 @@ def training_step(self, batch, batch_idx): # Use CallWith to dispatch **outputs. gain = self.gain_fn(**outputs) + # Log original gain as a metric for LR scheduler to monitor, and show gain on progress bar. + self.log("gain", gain.sum(), prog_bar=True) + # objective_fn is optional, because adversaries may never reach their objective. if self.objective_fn is not None: found = self.objective_fn(**outputs) @@ -147,13 +150,7 @@ def training_step(self, batch, batch_idx): if len(gain.shape) > 0: gain = gain[~found] - if len(gain.shape) > 0: - gain = gain.sum() - - # Log gain as a metric for LR scheduler to monitor, and show gain on progress bar. - self.log("gain", gain, prog_bar=True) - - return gain + return gain.sum() def configure_gradient_clipping( self, optimizer, gradient_clip_val=None, gradient_clip_algorithm=None From 2cd7ca86bf1f8886b6f0e110347696d5fd908736 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 12:07:45 -0700 Subject: [PATCH 14/74] Hide model in Adversary so that it's not tampered. 
--- mart/attack/adversary.py | 7 ++++++- mart/callbacks/adversarial_training.py | 5 ++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 650af474..86740653 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -162,10 +162,15 @@ def configure_gradient_clipping( for group in optimizer.param_groups: self.gradient_modifier(group["params"]) + @property + def model(self): + # Hide model in a list, so that it won't be tampered by the inner Trainer. + return self._model[0] + @silent() def forward(self, *, input, target, model): # What we need is a frozen model that returns (a dictionary of) logits, or losses. - self.model = model + self._model = [model] # Transform input so that it's easier to work with by adversary. input_transformed = self.transform(input) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 4ef44b16..a68ca277 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -51,9 +51,8 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): # FIXME: Directly pass batch instead of assuming it has a structure? input, target = batch - input_adv = adversary(input=input, target=target, model=pl_module) - # Replace the adversarial trainer with the original trainer. - pl_module.trainer = trainer + # TODO: We may need to do model.eval() if there's BN-like layers in the model. + input_adv = adversary(input=input, target=target, model=pl_module) return [input_adv, target] From e64247428fb3ca7d24cff61e53db0196f2896509 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 14:58:27 -0700 Subject: [PATCH 15/74] Skip adversary if not defined. --- mart/callbacks/adversarial_training.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index a68ca277..253b930f 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -36,6 +36,8 @@ def teardown(self, trainer, pl_module, stage=None): def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): batch = self._on_after_batch_transfer(batch, dataloader_idx) + adversary = None + trainer = pl_module.trainer if trainer.training: adversary = self.train_adversary @@ -43,7 +45,9 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): adversary = self.validation_adversary elif trainer.testing: adversary = self.test_adversary - else: + + # Skip if adversary is not defined for the phase train/validation/test. + if adversary is None: return batch # Move adversary to same device as pl_module and run attack From f222aefe2f22ab94e170e1345fd4d019dfa82eca Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 14:58:47 -0700 Subject: [PATCH 16/74] Update adversarial experiment on COCO. 
--- .../experiment/COCO_TorchvisionFasterRCNN_Adv.yaml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml index 398394bf..a1860696 100644 --- a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml +++ b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml @@ -2,15 +2,9 @@ defaults: - COCO_TorchvisionFasterRCNN - - /attack@model.modules.input_adv_test: object_detection_mask_adversary + - /attack@callbacks.adversarial_training.test_adversary: object_detection_mask_adversary - override /datamodule: coco_perturbable_mask + - override /callbacks: [model_checkpoint, lr_monitor, adversarial_training] task_name: "COCO_TorchvisionFasterRCNN_Adv" tags: ["adv"] - -model: - test_sequence: - seq005: input_adv_test - - seq010: - preprocessor: ["input_adv_test"] From 1d9b18211e8a9906d8e43ae906d5d9e3cb6f9278 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 14:59:02 -0700 Subject: [PATCH 17/74] Fix test. --- tests/test_experiments.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_experiments.py b/tests/test_experiments.py index 65b27a5d..404a98ef 100644 --- a/tests/test_experiments.py +++ b/tests/test_experiments.py @@ -80,7 +80,7 @@ def test_cifar10_cnn_adv_experiment(classification_cfg, tmp_path): "-m", "experiment=CIFAR10_CNN_Adv", "hydra.sweep.dir=" + str(tmp_path), - "model.modules.input_adv_test.max_iters=10", + "callbacks.adversarial_training.test_adversary.max_iters=10", "optimized_metric=training_metrics/acc", "++datamodule.train_dataset.image_size=[3,32,32]", "++datamodule.train_dataset.num_classes=10", From 45b5bf0d52aff0b3b56038091637238b1de5ad9f Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 15:22:43 -0700 Subject: [PATCH 18/74] Hide model in adversar's batch. --- mart/attack/adversary.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 86740653..f5c34eeb 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -127,12 +127,16 @@ def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally # batch = batch.copy() + input = batch["input"] + target = batch["target"] + # What we need is a frozen model that returns (a dictionary of) logits, or losses. + model = batch["model"] + # Compose un-transformed input_adv from batch["input"], then give to model for updated gain. - # Note: Only input and target are required by perturber.projector and composer. - input_adv = self.get_input_adv(**batch) + input_adv = self.get_input_adv(input=input, target=target) # A model that returns output dictionary. - outputs = self.model(input=input_adv, target=batch["target"]) + outputs = model(input=input_adv, target=target) # FIXME: This should really be just `return outputs`. But this might require a new sequence? # FIXME: Everything below here should live in the model as modules. @@ -162,19 +166,12 @@ def configure_gradient_clipping( for group in optimizer.param_groups: self.gradient_modifier(group["params"]) - @property - def model(self): - # Hide model in a list, so that it won't be tampered by the inner Trainer. - return self._model[0] - @silent() def forward(self, *, input, target, model): - # What we need is a frozen model that returns (a dictionary of) logits, or losses. 
- self._model = [model] - # Transform input so that it's easier to work with by adversary. input_transformed = self.transform(input) - batch = {"input": input_transformed, "target": target} + + batch = {"input": input_transformed, "target": target, "model": model} # Configure and reset perturbation for current inputs self.perturber.configure_perturbation(input_transformed) @@ -185,8 +182,9 @@ def forward(self, *, input, target, model): self.attacker.fit(self, train_dataloaders=cycle([batch])) # Get the transformed input_adv for enforcer checking. - input_adv_transformed = self.get_input_adv(untransform=False, **batch) - self.enforcer(input_adv_transformed, **batch) + input_adv_transformed = self.get_input_adv(input=input, target=target, untransform=False) + self.enforcer(input_adv_transformed, input=input, target=target) + # Un-transform to the same format as input. input_adv = self.untransform(input_adv_transformed) From 9a2531f14a3a29c8dcb5ba953eb22480fab4f1da Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 15:52:23 -0700 Subject: [PATCH 19/74] Fix test. --- tests/test_adversary.py | 73 ++++++++++------------------------------- 1 file changed, 17 insertions(+), 56 deletions(-) diff --git a/tests/test_adversary.py b/tests/test_adversary.py index 448baa48..0c363902 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -17,35 +17,6 @@ from mart.attack.gradient_modifier import Sign -def test_adversary(input_data, target_data, perturbation): - perturber = Mock(spec=Perturber, return_value=perturbation) - composer = mart.attack.composer.Additive() - gain = Mock() - enforcer = Mock() - attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) - - adversary = Adversary( - perturber=perturber, - composer=composer, - optimizer=None, - gain=gain, - enforcer=enforcer, - attacker=attacker, - ) - - output_data = adversary(input=input_data, target=target_data) - - # The enforcer and attacker should only be called when model is not None. - enforcer.assert_not_called() - attacker.fit.assert_not_called() - assert attacker.fit_loop.max_epochs == 0 - - perturber.assert_called_once() - gain.assert_not_called() - - torch.testing.assert_close(output_data, input_data + perturbation) - - def test_with_model(input_data, target_data, perturbation): perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() @@ -53,7 +24,6 @@ def test_with_model(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() - sequence = Mock() adversary = Adversary( perturber=perturber, @@ -64,7 +34,7 @@ def test_with_model(input_data, target_data, perturbation): attacker=attacker, ) - output_data = adversary(input=input_data, target=target_data, model=model, sequence=sequence) + output_data = adversary(input=input_data, target=target_data, model=model) # The enforcer is only called when model is not None. 
enforcer.assert_called_once() @@ -78,7 +48,7 @@ def test_with_model(input_data, target_data, perturbation): torch.testing.assert_close(output_data, input_data + perturbation) -def test_hidden_params(input_data, target_data, perturbation): +def test_hidden_params(): initializer = Mock() composer = mart.attack.composer.Additive() projector = Mock() @@ -88,8 +58,6 @@ def test_hidden_params(input_data, target_data, perturbation): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) - model = Mock() - sequence = Mock() adversary = Adversary( perturber=perturber, @@ -100,8 +68,6 @@ def test_hidden_params(input_data, target_data, perturbation): attacker=attacker, ) - # output_data = adversary(input=input_data, target=target_data, model=model, sequence=sequence) - # Adversarial perturbation should not be updated by a regular training optimizer. params = [p for p in adversary.parameters()] assert len(params) == 0 @@ -122,7 +88,6 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() - sequence = Mock() adversary = Adversary( perturber=perturber, @@ -133,7 +98,7 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): attacker=attacker, ) - output_data = adversary(input=input_data, target=target_data, model=model, sequence=sequence) + output_data = adversary(input=input_data, target=target_data, model=model) # Adversary will have no parameter even after forward is called, because we hide Perturber in a list. params = [p for p in adversary.parameters()] @@ -180,7 +145,6 @@ def test_perturbation(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() - sequence = Mock() adversary = Adversary( perturber=perturber, @@ -191,15 +155,15 @@ def test_perturbation(input_data, target_data, perturbation): attacker=attacker, ) - _ = adversary(input=input_data, target=target_data, model=model, sequence=sequence) - output_data = adversary(input=input_data, target=target_data) + output_data = adversary(input=input_data, target=target_data, model=model) # The enforcer is only called when model is not None. enforcer.assert_called_once() attacker.fit.assert_called_once() - # Once with model and sequence and once without - assert perturber.call_count == 2 + # Perturber is called once for generating initial input_adv. + # The fit() doesn't run because max_epochs=0. 
+ assert perturber.call_count == 1 torch.testing.assert_close(output_data, input_data + perturbation) @@ -236,20 +200,17 @@ def initializer(x): max_iters=1, ) - def model(input, target, model=None, **kwargs): - return {"logits": adversary(input=input, target=target)} - - sequence = Mock() + def model(input, target): + return {"logits": input} - adversary(input=input_data, target=target_data, model=model, sequence=sequence) - input_adv = adversary(input=input_data, target=target_data) + input_adv = adversary(input=input_data, target=target_data, model=model) perturbation = input_data - input_adv torch.testing.assert_close(perturbation.unique(), torch.Tensor([-1, 0, 1])) -def test_configure_optimizers(input_data, target_data): +def test_configure_optimizers(): perturber = Mock() composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) @@ -268,8 +229,8 @@ def test_configure_optimizers(input_data, target_data): gain.assert_not_called() -def test_training_step(input_data, target_data): - perturber = Mock() +def test_training_step(input_data, target_data, perturbation): + perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor(1337)) @@ -290,8 +251,8 @@ def test_training_step(input_data, target_data): assert output == 1337 -def test_training_step_with_many_gain(input_data, target_data): - perturber = Mock() +def test_training_step_with_many_gain(input_data, target_data, perturbation): + perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor([1234, 5678])) @@ -311,8 +272,8 @@ def test_training_step_with_many_gain(input_data, target_data): assert output == 1234 + 5678 -def test_training_step_with_objective(input_data, target_data): - perturber = Mock() +def test_training_step_with_objective(input_data, target_data, perturbation): + perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor([1234, 5678])) From c5bf847f4725269c57db3a695d3d4704d73bfa4c Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 16:00:52 -0700 Subject: [PATCH 20/74] Revert "Make input a dictionary for multi-modal object detection (#95)" This reverts commit de77a9d1363f4bbfc94f5301263323c4ce291956. 
--- ...CarlaOverObjDet_TorchvisionFasterRCNN.yaml | 1 - .../model/modules/tuple_normalizer.yaml | 9 -------- .../modules/tuple_tensorizer_normalizer.yaml | 14 ------------ .../model/torchvision_object_detection.yaml | 8 ++++++- mart/datamodules/coco.py | 17 -------------- mart/transforms/transforms.py | 22 +------------------ 6 files changed, 8 insertions(+), 63 deletions(-) delete mode 100644 mart/configs/model/modules/tuple_normalizer.yaml delete mode 100644 mart/configs/model/modules/tuple_tensorizer_normalizer.yaml diff --git a/examples/carla_overhead_object_detection/configs/experiment/ArmoryCarlaOverObjDet_TorchvisionFasterRCNN.yaml b/examples/carla_overhead_object_detection/configs/experiment/ArmoryCarlaOverObjDet_TorchvisionFasterRCNN.yaml index 991a87e7..a4e5660f 100644 --- a/examples/carla_overhead_object_detection/configs/experiment/ArmoryCarlaOverObjDet_TorchvisionFasterRCNN.yaml +++ b/examples/carla_overhead_object_detection/configs/experiment/ArmoryCarlaOverObjDet_TorchvisionFasterRCNN.yaml @@ -2,7 +2,6 @@ defaults: - COCO_TorchvisionFasterRCNN - - override /model/modules@model.modules.preprocessor: tuple_tensorizer_normalizer - override /datamodule: armory_carla_over_objdet_perturbable_mask task_name: "ArmoryCarlaOverObjDet_TorchvisionFasterRCNN" diff --git a/mart/configs/model/modules/tuple_normalizer.yaml b/mart/configs/model/modules/tuple_normalizer.yaml deleted file mode 100644 index 13fdf97a..00000000 --- a/mart/configs/model/modules/tuple_normalizer.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# @package model.modules.preprocessor -_target_: mart.transforms.TupleTransforms -transforms: - _target_: torchvision.transforms.Compose - # Normalize to [0, 1]. - transforms: - - _target_: torchvision.transforms.Normalize - mean: 0 - std: 255 diff --git a/mart/configs/model/modules/tuple_tensorizer_normalizer.yaml b/mart/configs/model/modules/tuple_tensorizer_normalizer.yaml deleted file mode 100644 index 21ac6fb7..00000000 --- a/mart/configs/model/modules/tuple_tensorizer_normalizer.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# @package model.modules.preprocessor -defaults: - - tuple_normalizer - -# Convert dictionary input into tensor input, then normalize to [0, 1]. -transforms: - transforms: - - _target_: mart.transforms.GetItems - keys: ["rgb"] - - _target_: mart.transforms.Cat - dim: 0 - - _target_: torchvision.transforms.Normalize - mean: 0 - std: 255 diff --git a/mart/configs/model/torchvision_object_detection.yaml b/mart/configs/model/torchvision_object_detection.yaml index 1bbd678c..534f0fc9 100644 --- a/mart/configs/model/torchvision_object_detection.yaml +++ b/mart/configs/model/torchvision_object_detection.yaml @@ -1,7 +1,6 @@ # We simply wrap a torchvision object detection model for validation. defaults: - modular - - /model/modules@modules.preprocessor: tuple_normalizer training_step_log: loss: "loss" @@ -13,6 +12,13 @@ test_sequence: ??? output_preds_key: "losses_and_detections.eval" modules: + preprocessor: + _target_: mart.transforms.TupleTransforms + transforms: + _target_: torchvision.transforms.Normalize + mean: 0 + std: 255 + losses_and_detections: # Return losses in the training mode and predictions in the eval mode in one pass. 
_target_: mart.models.DualMode diff --git a/mart/datamodules/coco.py b/mart/datamodules/coco.py index 42ddcebb..2a05d806 100644 --- a/mart/datamodules/coco.py +++ b/mart/datamodules/coco.py @@ -68,23 +68,6 @@ def _load_target(self, id: int) -> List[Any]: return {"image_id": id, "file_name": file_name, "annotations": annotations} - def __getitem__(self, index: int): - """Override __getitem__() to dictionarize input for multi-modality datasets. - - This runs after _load_image() and transforms(), while transforms() typically converts - images to tensors. - """ - - image, target_dict = super().__getitem__(index) - - # Convert multi-modal input to a dictionary. - if self.modalities is not None: - # We assume image is a multi-channel tensor, with each modality including 3 channels. - assert image.shape[0] == len(self.modalities) * 3 - image = dict(zip(self.modalities, image.split(3))) - - return image, target_dict - # Source: https://github.com/pytorch/vision/blob/dc07ac2add8285e16a716564867d0b4b953f6735/references/detection/utils.py#L203 def collate_fn(batch): diff --git a/mart/transforms/transforms.py b/mart/transforms/transforms.py index 4c7f29f7..bc67d33a 100644 --- a/mart/transforms/transforms.py +++ b/mart/transforms/transforms.py @@ -7,16 +7,7 @@ import torch from torchvision.transforms import transforms as T -__all__ = [ - "Denormalize", - "Cat", - "Permute", - "Unsqueeze", - "Squeeze", - "Chunk", - "TupleTransforms", - "GetItems", -] +__all__ = ["Denormalize", "Cat", "Permute", "Unsqueeze", "Squeeze", "Chunk", "TupleTransforms"] class Denormalize(T.Normalize): @@ -90,14 +81,3 @@ def __init__(self, transforms): def forward(self, x_tuple): output_tuple = tuple(self.transforms(x) for x in x_tuple) return output_tuple - - -class GetItems: - """Get a list of values with a list of keys from a dictionary.""" - - def __init__(self, keys): - self.keys = keys - - def __call__(self, x): - x_list = [x[key] for key in self.keys] - return x_list From 51720da67bfd9d1c1e3b569c11f0ad6464a2f4f3 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 16:04:28 -0700 Subject: [PATCH 21/74] [0,255] -> [0,1] --- mart/configs/datamodule/cifar10.yaml | 20 ------------- mart/configs/datamodule/coco.yaml | 30 ------------------- .../datamodule/coco_perturbable_mask.yaml | 30 ------------------- mart/configs/datamodule/imagenet.yaml | 20 ------------- .../configs/model/classifier_cifar10_cnn.yaml | 6 ++-- mart/configs/model/classifier_timm.yaml | 4 +-- .../model/torchvision_object_detection.yaml | 3 +- 7 files changed, 7 insertions(+), 106 deletions(-) diff --git a/mart/configs/datamodule/cifar10.yaml b/mart/configs/datamodule/cifar10.yaml index 9392448b..6988096b 100644 --- a/mart/configs/datamodule/cifar10.yaml +++ b/mart/configs/datamodule/cifar10.yaml @@ -17,16 +17,6 @@ train_dataset: padding_mode: reflect - _target_: torchvision.transforms.RandomHorizontalFlip - _target_: torchvision.transforms.ToTensor - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 target_transform: null download: true @@ -38,16 +28,6 @@ val_dataset: _target_: torchvision.transforms.Compose transforms: - _target_: torchvision.transforms.ToTensor - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - 
quant_min: 0 - quant_max: 255 target_transform: null download: true diff --git a/mart/configs/datamodule/coco.yaml b/mart/configs/datamodule/coco.yaml index a4ec3403..0ea6bccd 100644 --- a/mart/configs/datamodule/coco.yaml +++ b/mart/configs/datamodule/coco.yaml @@ -12,16 +12,6 @@ train_dataset: - _target_: mart.transforms.ConvertCocoPolysToMask - _target_: mart.transforms.RandomHorizontalFlip p: 0.5 - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 val_dataset: _target_: mart.datamodules.coco.CocoDetection @@ -32,16 +22,6 @@ val_dataset: transforms: - _target_: torchvision.transforms.ToTensor - _target_: mart.transforms.ConvertCocoPolysToMask - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 test_dataset: _target_: mart.datamodules.coco.CocoDetection @@ -52,16 +32,6 @@ test_dataset: transforms: - _target_: torchvision.transforms.ToTensor - _target_: mart.transforms.ConvertCocoPolysToMask - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 num_workers: 2 collate_fn: diff --git a/mart/configs/datamodule/coco_perturbable_mask.yaml b/mart/configs/datamodule/coco_perturbable_mask.yaml index 6d7bdd77..461ac9b1 100644 --- a/mart/configs/datamodule/coco_perturbable_mask.yaml +++ b/mart/configs/datamodule/coco_perturbable_mask.yaml @@ -10,16 +10,6 @@ train_dataset: - _target_: mart.transforms.RandomHorizontalFlip p: 0.5 - _target_: mart.transforms.ConvertInstanceSegmentationToPerturbable - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 val_dataset: transforms: @@ -28,16 +18,6 @@ val_dataset: # ConvertCocoPolysToMask must be prior to ConvertInstanceSegmentationToPerturbable. - _target_: mart.transforms.ConvertCocoPolysToMask - _target_: mart.transforms.ConvertInstanceSegmentationToPerturbable - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 test_dataset: transforms: @@ -46,13 +26,3 @@ test_dataset: # ConvertCocoPolysToMask must be prior to ConvertInstanceSegmentationToPerturbable. 
- _target_: mart.transforms.ConvertCocoPolysToMask - _target_: mart.transforms.ConvertInstanceSegmentationToPerturbable - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 diff --git a/mart/configs/datamodule/imagenet.yaml b/mart/configs/datamodule/imagenet.yaml index 97417bbb..9894a8d7 100644 --- a/mart/configs/datamodule/imagenet.yaml +++ b/mart/configs/datamodule/imagenet.yaml @@ -13,16 +13,6 @@ train_dataset: size: 224 - _target_: torchvision.transforms.RandomHorizontalFlip - _target_: torchvision.transforms.ToTensor - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 val_dataset: _target_: torchvision.datasets.ImageNet @@ -34,16 +24,6 @@ val_dataset: - _target_: torchvision.transforms.CenterCrop size: 224 - _target_: torchvision.transforms.ToTensor - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 test_dataset: ${.val_dataset} diff --git a/mart/configs/model/classifier_cifar10_cnn.yaml b/mart/configs/model/classifier_cifar10_cnn.yaml index ba7455b1..f31ee799 100644 --- a/mart/configs/model/classifier_cifar10_cnn.yaml +++ b/mart/configs/model/classifier_cifar10_cnn.yaml @@ -4,7 +4,7 @@ defaults: modules: preprocessor: - # Normalize [0, 255] input. + # Normalize [0, 1] input. _target_: torchvision.transforms.Normalize - mean: [125.307, 122.961, 113.8575] - std: [51.5865, 50.847, 51.255] + mean: [0.4914, 0.4822, 0.4465] + std: [0.2023, 0.1994, 0.2010] diff --git a/mart/configs/model/classifier_timm.yaml b/mart/configs/model/classifier_timm.yaml index 0fa5ba1e..476a266d 100644 --- a/mart/configs/model/classifier_timm.yaml +++ b/mart/configs/model/classifier_timm.yaml @@ -3,10 +3,10 @@ defaults: modules: preprocessor: - # Convert [0, 255] input to [0, 1] + # no-op _target_: torchvision.transforms.Normalize mean: 0 - std: 255 + std: 1 logits: _target_: timm.models.convnext.convnext_tiny diff --git a/mart/configs/model/torchvision_object_detection.yaml b/mart/configs/model/torchvision_object_detection.yaml index 534f0fc9..f6a22898 100644 --- a/mart/configs/model/torchvision_object_detection.yaml +++ b/mart/configs/model/torchvision_object_detection.yaml @@ -15,9 +15,10 @@ modules: preprocessor: _target_: mart.transforms.TupleTransforms transforms: + # no-op _target_: torchvision.transforms.Normalize mean: 0 - std: 255 + std: 1 losses_and_detections: # Return losses in the training mode and predictions in the eval mode in one pass. From 756e0f88816fd8e5b896145e51d245a6446e18e7 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 16:37:19 -0700 Subject: [PATCH 22/74] Add transform in untransform in Adversary. 
--- mart/attack/adversary.py | 14 ++++++++++---- .../attack/classification_eps1.75_fgsm.yaml | 2 ++ .../attack/classification_eps2_pgd10_step1.yaml | 2 ++ .../attack/classification_eps8_pgd10_step1.yaml | 2 ++ .../configs/attack/transform/to_pixel_range_1.yaml | 3 +++ .../attack/transform/to_pixel_range_255.yaml | 13 +++++++++++++ 6 files changed, 32 insertions(+), 4 deletions(-) create mode 100644 mart/configs/attack/transform/to_pixel_range_1.yaml create mode 100644 mart/configs/attack/transform/to_pixel_range_255.yaml diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index f5c34eeb..26e87290 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -42,6 +42,8 @@ def __init__( objective: Objective | None = None, enforcer: Enforcer | None = None, attacker: pl.Trainer | None = None, + transform: Callable | None, + untransform: Callable | None, **kwargs, ): """_summary_ @@ -55,6 +57,8 @@ def __init__( objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. enforcer (Enforcer): A Callable that enforce constraints on the adversarial input. attacker (Trainer): A PyTorch-Lightning Trainer object used to fit the perturbation. + transform (Callable): Transform input into a convenient format, e.g. [0,1]->[0.255]. + untransform (Callable): Transform adversarial input in the convenient format back into the original format of input, e.g. [0,255]->[0,1]. """ super().__init__() @@ -101,8 +105,8 @@ def __init__( assert self._attacker.max_epochs == 0 assert self._attacker.limit_train_batches > 0 - # TODO: Make this configurable. E.g. [0,1] <-> [0,255] - self.transform = self.untransform = lambda x: x + self.transform = transform if transform is not None else lambda x: x + self.untransform = untransform if untransform is not None else lambda x: x @property def perturber(self) -> Perturber: @@ -182,8 +186,10 @@ def forward(self, *, input, target, model): self.attacker.fit(self, train_dataloaders=cycle([batch])) # Get the transformed input_adv for enforcer checking. - input_adv_transformed = self.get_input_adv(input=input, target=target, untransform=False) - self.enforcer(input_adv_transformed, input=input, target=target) + input_adv_transformed = self.get_input_adv( + input=input_transformed, target=target, untransform=False + ) + self.enforcer(input_adv_transformed, input=input_transformed, target=target) # Un-transform to the same format as input. 
input_adv = self.untransform(input_adv_transformed) diff --git a/mart/configs/attack/classification_eps1.75_fgsm.yaml b/mart/configs/attack/classification_eps1.75_fgsm.yaml index 7c300e2d..d940472b 100644 --- a/mart/configs/attack/classification_eps1.75_fgsm.yaml +++ b/mart/configs/attack/classification_eps1.75_fgsm.yaml @@ -1,5 +1,7 @@ defaults: - adversary + - transform: to_pixel_range_255 + - transform@untransform: to_pixel_range_1 - perturber: default - perturber/initializer: constant - perturber/projector: linf_additive_range diff --git a/mart/configs/attack/classification_eps2_pgd10_step1.yaml b/mart/configs/attack/classification_eps2_pgd10_step1.yaml index b98cf407..7fa1448e 100644 --- a/mart/configs/attack/classification_eps2_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps2_pgd10_step1.yaml @@ -1,5 +1,7 @@ defaults: - adversary + - transform: to_pixel_range_255 + - transform@untransform: to_pixel_range_1 - perturber: default - perturber/initializer: uniform_lp - perturber/projector: linf_additive_range diff --git a/mart/configs/attack/classification_eps8_pgd10_step1.yaml b/mart/configs/attack/classification_eps8_pgd10_step1.yaml index f1b6242a..206e339d 100644 --- a/mart/configs/attack/classification_eps8_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps8_pgd10_step1.yaml @@ -1,5 +1,7 @@ defaults: - adversary + - transform: to_pixel_range_255 + - transform@untransform: to_pixel_range_1 - perturber: default - perturber/initializer: uniform_lp - perturber/projector: linf_additive_range diff --git a/mart/configs/attack/transform/to_pixel_range_1.yaml b/mart/configs/attack/transform/to_pixel_range_1.yaml new file mode 100644 index 00000000..92a63b7c --- /dev/null +++ b/mart/configs/attack/transform/to_pixel_range_1.yaml @@ -0,0 +1,3 @@ +_target_: torchvision.transforms.Normalize +mean: 0 +std: 255 diff --git a/mart/configs/attack/transform/to_pixel_range_255.yaml b/mart/configs/attack/transform/to_pixel_range_255.yaml new file mode 100644 index 00000000..dbeff64d --- /dev/null +++ b/mart/configs/attack/transform/to_pixel_range_255.yaml @@ -0,0 +1,13 @@ +_target_: torchvision.transforms.Compose +transforms: + - _target_: mart.transforms.Denormalize + center: 0 + scale: 255 + # Fix potential numeric error. + - _target_: torch.fake_quantize_per_tensor_affine + _partial_: true + # (x/1+0).round().clamp(0, 255) * 1 + scale: 1 + zero_point: 0 + quant_min: 0 + quant_max: 255 From 33cbe72aa47c34b5dc6ead964cc3b63d5d8541f9 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 16:48:54 -0700 Subject: [PATCH 23/74] Make transform/untransform optional arguments. --- mart/attack/adversary.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 26e87290..28e9f5a2 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -42,8 +42,8 @@ def __init__( objective: Objective | None = None, enforcer: Enforcer | None = None, attacker: pl.Trainer | None = None, - transform: Callable | None, - untransform: Callable | None, + transform: Callable | None = None, + untransform: Callable | None = None, **kwargs, ): """_summary_ From d49f41174dfbd4d799b5be4f7574e4427c68f81b Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 16:53:12 -0700 Subject: [PATCH 24/74] Add tuple transform/untransform. 
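Object-detection pipelines hand the adversary a tuple of differently sized image tensors rather than one stacked tensor, so the pixel-range conversion has to be applied per image. The tuple_to_pixel_range_* configs below wrap the same transforms in mart.transforms.TupleTransforms, whose assumed behavior is roughly this sketch (the real class lives in mart.transforms):

    class TupleTransforms:
        """Rough stand-in: apply a single-tensor transform to every element of a tuple."""

        def __init__(self, transforms):
            self.transforms = transforms

        def __call__(self, inputs):
            # same per-tensor transform, image by image
            return tuple(self.transforms(x) for x in inputs)

    # e.g. converting every image of a COCO-style batch into [0,255]:
    # to_255 = TupleTransforms(to_pixel_range_255)
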
--- .../attack/object_detection_mask_adversary.yaml | 3 +++ .../attack/transform/tuple_to_pixel_range_1.yaml | 6 ++++++ .../transform/tuple_to_pixel_range_255.yaml | 16 ++++++++++++++++ 3 files changed, 25 insertions(+) create mode 100644 mart/configs/attack/transform/tuple_to_pixel_range_1.yaml create mode 100644 mart/configs/attack/transform/tuple_to_pixel_range_255.yaml diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index ad99dda0..efda6ff9 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -1,5 +1,7 @@ defaults: - adversary + - transform: tuple_to_pixel_range_255 + - transform@untransform: tuple_to_pixel_range_1 - perturber: default - perturber/initializer: constant - perturber/projector: mask_range @@ -10,6 +12,7 @@ defaults: - objective: zero_ap - enforcer: default - enforcer/constraints: [mask, pixel_range] + # - /callbacks@callbacks: [progress_bar, image_visualizer] # Make a 5-step attack for the demonstration purpose. max_iters: 5 diff --git a/mart/configs/attack/transform/tuple_to_pixel_range_1.yaml b/mart/configs/attack/transform/tuple_to_pixel_range_1.yaml new file mode 100644 index 00000000..93f70015 --- /dev/null +++ b/mart/configs/attack/transform/tuple_to_pixel_range_1.yaml @@ -0,0 +1,6 @@ +# FIXME: Merge tuple and tensor transforms to simplify configs. +_target_: mart.transforms.TupleTransforms +transforms: + _target_: torchvision.transforms.Normalize + mean: 0 + std: 255 diff --git a/mart/configs/attack/transform/tuple_to_pixel_range_255.yaml b/mart/configs/attack/transform/tuple_to_pixel_range_255.yaml new file mode 100644 index 00000000..db80e815 --- /dev/null +++ b/mart/configs/attack/transform/tuple_to_pixel_range_255.yaml @@ -0,0 +1,16 @@ +# FIXME: Merge tuple and tensor transforms to simplify configs. +_target_: mart.transforms.TupleTransforms +transforms: + _target_: torchvision.transforms.Compose + transforms: + - _target_: mart.transforms.Denormalize + center: 0 + scale: 255 + # Fix potential numeric error. + - _target_: torch.fake_quantize_per_tensor_affine + _partial_: true + # (x/1+0).round().clamp(0, 255) * 1 + scale: 1 + zero_point: 0 + quant_min: 0 + quant_max: 255 From 9bd4c1fc56f18f024d08061f07b1af0ab0b1e8e0 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 17:04:19 -0700 Subject: [PATCH 25/74] Comment. --- mart/attack/adversary.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 28e9f5a2..d76411a9 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -175,6 +175,7 @@ def forward(self, *, input, target, model): # Transform input so that it's easier to work with by adversary. input_transformed = self.transform(input) + # Optimization loop only sees the transformed input in batches. batch = {"input": input_transformed, "target": target, "model": model} # Configure and reset perturbation for current inputs From 6d0327174e0633ac66b19e11272e4b123f04263f Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 17:05:42 -0700 Subject: [PATCH 26/74] Fix image visualizer for object detection. 
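The diff below recomputes the adversarial input via get_input_adv under torch.no_grad() and drops the division by 255, because adversarial images are now already in [0,1]. For reference, torchvision's ToPILImage expects float tensors in [0,1]; a small made-up example:

    import torch
    from torchvision.transforms import ToPILImage

    adv_image = torch.rand(3, 64, 64)          # adversarial image already in [0,1]
    ToPILImage()(adv_image).save("adv.png")    # no `img / 255` rescaling needed anymore
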
--- mart/callbacks/visualizer.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py index 39409143..6bb532e7 100644 --- a/mart/callbacks/visualizer.py +++ b/mart/callbacks/visualizer.py @@ -6,6 +6,7 @@ import os +import torch from lightning.pytorch.callbacks import Callback from torchvision.transforms import ToPILImage @@ -32,10 +33,12 @@ def on_train_batch_end(self, trainer, model, outputs, batch, batch_idx): def on_train_end(self, trainer, model): # FIXME: We should really just save this to outputs instead of recomputing adv_input - adv_input = model(input=self.input, target=self.target) + with torch.no_grad(): + # Get [0,1] input as untransform=True by default. + adv_input = model.get_input_adv(input=self.input, target=self.target) for img, tgt in zip(adv_input, self.target): fname = tgt["file_name"] fpath = os.path.join(self.folder, fname) - im = self.convert(img / 255) + im = self.convert(img) im.save(fpath) From b30a34fb35f6a4d5d9461a67b9b85830114108f5 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 17:45:49 -0700 Subject: [PATCH 27/74] Switch to [0,1] data in tests. --- tests/conftest.py | 4 ++-- tests/test_adversary.py | 7 ++++++- tests/test_visualizer.py | 4 ++-- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 220f8734..0eaf4812 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -47,7 +47,7 @@ def cfg_experiment(request) -> DictConfig: @pytest.fixture(scope="function") def input_data(): image_size = (3, 32, 32) - return torch.randint(0, 256, image_size, dtype=torch.float) + return torch.rand(image_size, dtype=torch.float) @pytest.fixture(scope="function") @@ -59,5 +59,5 @@ def target_data(): @pytest.fixture(scope="function") def perturbation(): torch.manual_seed(0) - perturbation = torch.randint(0, 256, (3, 32, 32), dtype=torch.float) + perturbation = torch.rand((3, 32, 32), dtype=torch.float) return perturbation diff --git a/tests/test_adversary.py b/tests/test_adversary.py index 0c363902..dab1d2ee 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -207,7 +207,12 @@ def model(input, target): perturbation = input_data - input_adv - torch.testing.assert_close(perturbation.unique(), torch.Tensor([-1, 0, 1])) + # torch.unique() does not approximate floating number with threshold. So do it manually with 1e-6. 
+ expansion = torch.tensor([19.9316]) # 1 / (2 ** 19.9316) = 1e-6 + unique_int = perturbation.ldexp(expansion).round().unique() + unique = unique_int.ldexp(-expansion) + + torch.testing.assert_close(unique, torch.Tensor([-1, 0, 1])) def test_configure_optimizers(): diff --git a/tests/test_visualizer.py b/tests/test_visualizer.py index 5c25e930..3d105e39 100644 --- a/tests/test_visualizer.py +++ b/tests/test_visualizer.py @@ -24,7 +24,7 @@ def perturb(input): return result trainer = Mock() - model = Mock(return_value=perturb(input_list)) + model = Mock(get_input_adv=Mock(return_value=perturb(input_list))) outputs = Mock() batch = {"input": input_list, "target": target_list} adversary = Mock(spec=Adversary, side_effect=perturb) @@ -40,7 +40,7 @@ def perturb(input): # verify image file content perturbed_img = input_data + perturbation converter = ToPILImage() - expected_img = converter(perturbed_img / 255) + expected_img = converter(perturbed_img) expected_img.save(folder / "test_expected.jpg") stored_img = Image.open(expected_output_path) From 704fc018a144b9c207ec985acb0d54204b08d8fe Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 18:28:36 -0700 Subject: [PATCH 28/74] Hide transform in untransform in this PR. --- mart/attack/adversary.py | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index f5c34eeb..e0106689 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -101,9 +101,6 @@ def __init__( assert self._attacker.max_epochs == 0 assert self._attacker.limit_train_batches > 0 - # TODO: Make this configurable. E.g. [0,1] <-> [0,255] - self.transform = self.untransform = lambda x: x - @property def perturber(self) -> Perturber: # Hide the perturber module in a list, so that perturbation is not exported as a parameter in the model checkpoint, @@ -113,13 +110,9 @@ def perturber(self) -> Perturber: def configure_optimizers(self): return self.optimizer(self.perturber) - def get_input_adv(self, *, input, target, untransform=True): + def get_input_adv(self, *, input, target): perturbation = self.perturber(input=input, target=target) input_adv = self.composer(perturbation, input=input, target=target) - - if untransform: - input_adv = self.untransform(input_adv) - return input_adv def training_step(self, batch, batch_idx): @@ -132,7 +125,7 @@ def training_step(self, batch, batch_idx): # What we need is a frozen model that returns (a dictionary of) logits, or losses. model = batch["model"] - # Compose un-transformed input_adv from batch["input"], then give to model for updated gain. + # Compose input_adv from input, then give to model for updated gain. input_adv = self.get_input_adv(input=input, target=target) # A model that returns output dictionary. @@ -168,13 +161,10 @@ def configure_gradient_clipping( @silent() def forward(self, *, input, target, model): - # Transform input so that it's easier to work with by adversary. - input_transformed = self.transform(input) - - batch = {"input": input_transformed, "target": target, "model": model} + batch = {"input": input, "target": target, "model": model} # Configure and reset perturbation for current inputs - self.perturber.configure_perturbation(input_transformed) + self.perturber.configure_perturbation(input) # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. 
@@ -182,11 +172,8 @@ def forward(self, *, input, target, model): self.attacker.fit(self, train_dataloaders=cycle([batch])) # Get the transformed input_adv for enforcer checking. - input_adv_transformed = self.get_input_adv(input=input, target=target, untransform=False) - self.enforcer(input_adv_transformed, input=input, target=target) - - # Un-transform to the same format as input. - input_adv = self.untransform(input_adv_transformed) + input_adv = self.get_input_adv(input=input, target=target) + self.enforcer(input_adv, input=input, target=target) return input_adv From 5bb36e70ea85b12502413bdd342193c58cc7f387 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 14 Jul 2023 18:39:59 -0700 Subject: [PATCH 29/74] Revert changes in gain logging. --- mart/attack/adversary.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index e0106689..0669d95e 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -136,9 +136,6 @@ def training_step(self, batch, batch_idx): # Use CallWith to dispatch **outputs. gain = self.gain_fn(**outputs) - # Log original gain as a metric for LR scheduler to monitor, and show gain on progress bar. - self.log("gain", gain.sum(), prog_bar=True) - # objective_fn is optional, because adversaries may never reach their objective. if self.objective_fn is not None: found = self.objective_fn(**outputs) @@ -147,7 +144,13 @@ def training_step(self, batch, batch_idx): if len(gain.shape) > 0: gain = gain[~found] - return gain.sum() + if len(gain.shape) > 0: + gain = gain.sum() + + # Log gain as a metric for LR scheduler to monitor, and show gain on progress bar. + self.log("gain", gain, prog_bar=True) + + return gain def configure_gradient_clipping( self, optimizer, gradient_clip_val=None, gradient_clip_algorithm=None From 7f561402dfdf8683e8b01a1092df60e2ec4ddab5 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 17 Jul 2023 16:43:17 -0700 Subject: [PATCH 30/74] Fix normalization params for timm. --- mart/configs/model/classifier_timm.yaml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mart/configs/model/classifier_timm.yaml b/mart/configs/model/classifier_timm.yaml index 476a266d..85f51e64 100644 --- a/mart/configs/model/classifier_timm.yaml +++ b/mart/configs/model/classifier_timm.yaml @@ -3,10 +3,11 @@ defaults: modules: preprocessor: - # no-op + # Use normalization parameters from timm + # https://github.com/huggingface/pytorch-image-models/blob/3d05c0e86f2f4c57bf495468aa3f8a7d3487c986/timm/data/constants.py#L3 _target_: torchvision.transforms.Normalize - mean: 0 - std: 1 + mean: [0.485, 0.456, 0.406] + std: [0.229, 0.224, 0.225] logits: _target_: timm.models.convnext.convnext_tiny From c85182dda0b6b77461efe1c8d6e37c2095e1db76 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 17 Jul 2023 22:07:44 -0700 Subject: [PATCH 31/74] Add LitModular.attack_step() for adversary to run model.forward() to get gradients. 
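During an attack the adversary needs gradients from the target model's training graph, but it should stay unaware of PyTorch Lightning. The callback below therefore wraps the LightningModule so the adversary only ever calls output = model(batch): it prefers a dedicated attack_step, falls back to training_step with self.log silenced, and otherwise uses the module as a plain callable. A hedged sketch of how a LightningModule outside MART could opt in (the class and the returned keys are made up; which keys the configured gain/objective actually read depends on the attack config):

    import torch.nn.functional as F
    from lightning.pytorch import LightningModule

    class MyClassifier(LightningModule):
        def __init__(self, backbone):
            super().__init__()
            self.backbone = backbone

        def forward(self, x):
            return self.backbone(x)

        def attack_step(self, batch, batch_idx):
            # Run the training graph and return a dictionary of outputs for the
            # adversary's gain/objective functions to consume.
            input, target = batch
            logits = self(input)
            return {"logits": logits, "loss": F.cross_entropy(logits, target)}
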
--- mart/callbacks/adversarial_training.py | 105 ++++++++++++++++++++++++- mart/models/modular.py | 7 +- mart/nn/nn.py | 3 +- 3 files changed, 109 insertions(+), 6 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 253b930f..3947ccdc 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -4,10 +4,15 @@ # SPDX-License-Identifier: BSD-3-Clause # +from __future__ import annotations + import types +from typing import Callable from lightning.pytorch.callbacks import Callback +from ..utils import MonkeyPatch + __all__ = ["AdversarialTraining"] @@ -16,14 +21,30 @@ class AdversarialTraining(Callback): # TODO: training/validation/test or train/val/test def __init__( - self, adversary=None, train_adversary=None, validation_adversary=None, test_adversary=None + self, + adversary: Callable = None, + train_adversary: Callable = None, + validation_adversary: Callable = None, + test_adversary: Callable = None, + batch_input_key: str | int = 0, ): + """AdversaryConnector. + + Args: + adversary (Callable, optional): _description_. Defaults to None. + train_adversary (Callable, optional): _description_. Defaults to None. + validation_adversary (Callable, optional): _description_. Defaults to None. + test_adversary (Callable, optional): _description_. Defaults to None. + batch_input_key (str | int, optional): Input locator in a batch. Defaults to 0. + """ adversary = adversary or train_adversary self.train_adversary = train_adversary or adversary self.validation_adversary = validation_adversary or adversary self.test_adversary = test_adversary or adversary + self.batch_input_key = batch_input_key + def setup(self, trainer, pl_module, stage=None): self._on_after_batch_transfer = pl_module.on_after_batch_transfer pl_module.on_after_batch_transfer = types.MethodType( @@ -33,6 +54,81 @@ def setup(self, trainer, pl_module, stage=None): def teardown(self, trainer, pl_module, stage=None): pl_module.on_after_batch_transfer = self._on_after_batch_transfer + def get_input_target_batcher(self, batch_orig): + if isinstance(batch_orig, tuple): + # Convert tuple to list + batch = list(batch_orig).copy() + else: + batch = batch_orig.copy() + + batch_input_key = self.batch_input_key + + # pop() works for both list and dict. + input = batch.pop(batch_input_key) + + if isinstance(batch, list) and len(batch) == 1: + target = batch[0] + + def batch_constructor(input, target): + batch = [target] + batch.insert(batch_input_key, input) + return batch + + elif isinstance(batch, list) and len(batch) > 2: + target = batch.copy() + + def batch_constructor(input, target): + batch = target.copy() + batch.insert(batch_input_key, input) + return batch + + elif isinstance(batch, dict) and "target" in dict: + target = batch["target"] + + def batch_constructor(input, target): + return {batch_input_key: input, "target": target} + + elif isinstance(batch, dict) and "target" not in dict: + # Example in anomalib: dict_keys(['image_path', 'label', 'image', 'mask_path', 'mask']) + # image: NCHW; label: N, + target = batch + + def batch_constructor(input, target): + # Besides input and target, add others back to batch. 
+ return target | {batch_input_key: input} + + else: + raise NotImplementedError() + + return input, target, batch_constructor + + def wrap_model(self, model, batch_constructor, dataloader_idx): + """Make a model, such that output = model(input, target).""" + + # Consume dataloader_idx + if hasattr(model, "attack_step"): + + def model_forward(batch): + output = model.attack_step(batch, dataloader_idx) + return output + + elif hasattr(model, "training_step"): + # Monkey-patch model.log to avoid spamming. + @MonkeyPatch(model, "log", lambda *args, **kwargs: None) + def model_forward(batch): + output = model.training_step(batch, dataloader_idx) + return output + + else: + model_forward = model + + def wrapped_model(*, input, target): + batch = batch_constructor(input, target) + output = model_forward(batch) + return output + + return wrapped_model + def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): batch = self._on_after_batch_transfer(batch, dataloader_idx) @@ -54,9 +150,12 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): adversary.to(pl_module.device) # FIXME: Directly pass batch instead of assuming it has a structure? - input, target = batch + input, target, batch_constructor = self.get_input_target_batcher(batch) + + # We also need to construct a batch for model during attack iterations. + model = self.wrap_model(pl_module, batch_constructor, dataloader_idx) # TODO: We may need to do model.eval() if there's BN-like layers in the model. - input_adv = adversary(input=input, target=target, model=pl_module) + input_adv = adversary(input=input, target=target, model=model) return [input_adv, target] diff --git a/mart/models/modular.py b/mart/models/modular.py index e814fee9..c63c9fd7 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -62,7 +62,6 @@ def __init__( "training": training_sequence, "validation": validation_sequence, "test": test_sequence, - None: training_sequence, # use training sequence with losses by default. } self.model = SequentialDict(modules, sequences) @@ -127,6 +126,12 @@ def configure_optimizers(self): def forward(self, **kwargs): return self.model(**kwargs) + def attack_step(self, batch, batch_idx): + # Use the training sequence in attack. + input, target = batch + output = self(input=input, target=target, model=self.model, step="training") + return output + # # Training # diff --git a/mart/nn/nn.py b/mart/nn/nn.py index 67d82c1b..02113899 100644 --- a/mart/nn/nn.py +++ b/mart/nn/nn.py @@ -57,8 +57,7 @@ def __init__(self, modules, sequences=None): self._sequences = { name: self.parse_sequence(sequence) for name, sequence in sequences.items() } - # We intend to make training sequence as the default sequence. - # self._sequences[None] = self + self._sequences[None] = self def parse_sequence(self, sequence): if sequence is None: From c3aeb8dcaf9ce831f9bdc861302058a37dc9979a Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 18 Jul 2023 10:42:43 -0700 Subject: [PATCH 32/74] Decouple untransfrom() from get_input_adv(). 
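get_input_adv now stays entirely in the attack's working range; whoever needs the result in the model's original format applies untransform explicitly (training_step before calling the target model, forward before returning). A toy, runnable illustration of that division of labor, with simple stand-ins for the configured transforms and composer:

    import torch

    def transform(x):          # stand-in for the configured transform, [0,1] -> [0,255]
        return (x * 255).round()

    def untransform(x):        # stand-in for the configured untransform, [0,255] -> [0,1]
        return x / 255

    def get_input_adv(input, perturbation):
        return input + perturbation       # simplified additive composer, attack range only

    input_01 = torch.rand(3, 32, 32)
    input_255 = transform(input_01)
    input_adv_255 = get_input_adv(input_255, torch.ones_like(input_255))  # perturb in [0,255]
    input_adv_01 = untransform(input_adv_255)                             # caller converts back
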
--- mart/attack/adversary.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index ef5fcc67..3942d961 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -117,13 +117,9 @@ def perturber(self) -> Perturber: def configure_optimizers(self): return self.optimizer(self.perturber) - def get_input_adv(self, *, input, target, untransform=True): + def get_input_adv(self, *, input, target): perturbation = self.perturber(input=input, target=target) input_adv = self.composer(perturbation, input=input, target=target) - - if untransform: - input_adv = self.untransform(input_adv) - return input_adv def training_step(self, batch, batch_idx): @@ -131,13 +127,15 @@ def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally # batch = batch.copy() - input = batch["input"] + input_transformed = batch["input"] target = batch["target"] # What we need is a frozen model that returns (a dictionary of) logits, or losses. model = batch["model"] # Compose un-transformed input_adv from batch["input"], then give to model for updated gain. - input_adv = self.get_input_adv(input=input, target=target) + input_adv_transformed = self.get_input_adv(input=input_transformed, target=target) + # Target model expects input in the original format. + input_adv = self.untransform(input_adv_transformed) # A model that returns output dictionary. outputs = model(input=input_adv, target=target) @@ -190,9 +188,7 @@ def forward(self, *, input, target, model): self.attacker.fit(self, train_dataloaders=cycle([batch])) # Get the transformed input_adv for enforcer checking. - input_adv_transformed = self.get_input_adv( - input=input_transformed, target=target, untransform=False - ) + input_adv_transformed = self.get_input_adv(input=input_transformed, target=target) self.enforcer(input_adv_transformed, input=input_transformed, target=target) # Un-transform to the same format as input. From ad74a3531efbe50d44329184bab5600608411d6d Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 18 Jul 2023 16:56:55 -0700 Subject: [PATCH 33/74] Move BatchConverter to Adversary; move transform/untransform to BatchConverter. 
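BatchConverter gives the Adversary one uniform view of heterogeneous batches: it extracts the input from a tensor, tuple, list, or dict batch, applies the optional transform, and can rebuild a batch with the original structure around an adversarial input. A usage sketch under the class names added in this patch (the tensors and targets are made up, and the perturbation step is faked):

    import torch
    from mart.attack import TupleBatchConverter

    # COCO-style batch: (tuple of images, tuple of target dicts)
    images = (torch.rand(3, 16, 16), torch.rand(3, 24, 24))
    targets = ({"boxes": torch.zeros(0, 4)}, {"boxes": torch.zeros(0, 4)})
    batch = (images, targets)

    converter = TupleBatchConverter(input_key=0)
    input, target = converter(batch)                    # images (optionally transformed), targets
    input_adv = tuple(x + 0.1 for x in input)           # stand-in for the optimized perturbation
    batch_adv = converter.revert(input_adv, target)     # back to the (images, targets) structure
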
--- mart/attack/__init__.py | 1 + mart/attack/adversary.py | 29 ++-- mart/attack/batch_converter.py | 128 ++++++++++++++++++ mart/callbacks/adversarial_training.py | 72 ++-------- mart/configs/attack/adversary.yaml | 1 + mart/configs/attack/batch_converter/dict.yaml | 2 + mart/configs/attack/batch_converter/list.yaml | 2 + .../attack/batch_converter/tensor.yaml | 1 + .../transform/to_pixel_range_1.yaml | 0 .../transform/to_pixel_range_255.yaml | 0 .../transform/tuple_to_pixel_range_1.yaml | 0 .../transform/tuple_to_pixel_range_255.yaml | 0 .../configs/attack/batch_converter/tuple.yaml | 2 + .../attack/classification_eps1.75_fgsm.yaml | 2 - .../classification_eps2_pgd10_step1.yaml | 2 - .../classification_eps8_pgd10_step1.yaml | 2 - mart/configs/attack/data_coco.yaml | 11 ++ mart/configs/attack/data_list_pixel01.yaml | 4 + .../object_detection_mask_adversary.yaml | 2 - mart/configs/experiment/CIFAR10_CNN_Adv.yaml | 6 +- .../COCO_TorchvisionFasterRCNN_Adv.yaml | 3 +- tests/test_adversary.py | 43 +++++- 22 files changed, 217 insertions(+), 96 deletions(-) create mode 100644 mart/attack/batch_converter.py create mode 100644 mart/configs/attack/batch_converter/dict.yaml create mode 100644 mart/configs/attack/batch_converter/list.yaml create mode 100644 mart/configs/attack/batch_converter/tensor.yaml rename mart/configs/attack/{ => batch_converter}/transform/to_pixel_range_1.yaml (100%) rename mart/configs/attack/{ => batch_converter}/transform/to_pixel_range_255.yaml (100%) rename mart/configs/attack/{ => batch_converter}/transform/tuple_to_pixel_range_1.yaml (100%) rename mart/configs/attack/{ => batch_converter}/transform/tuple_to_pixel_range_255.yaml (100%) create mode 100644 mart/configs/attack/batch_converter/tuple.yaml create mode 100644 mart/configs/attack/data_coco.yaml create mode 100644 mart/configs/attack/data_list_pixel01.yaml diff --git a/mart/attack/__init__.py b/mart/attack/__init__.py index 843ce9bd..2a55d648 100644 --- a/mart/attack/__init__.py +++ b/mart/attack/__init__.py @@ -1,5 +1,6 @@ from .adversary import * from .adversary_wrapper import * +from .batch_converter import * from .composer import * from .enforcer import * from .gain import * diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 3942d961..d24caf5b 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -42,8 +42,7 @@ def __init__( objective: Objective | None = None, enforcer: Enforcer | None = None, attacker: pl.Trainer | None = None, - transform: Callable | None = None, - untransform: Callable | None = None, + batch_converter: Callable, **kwargs, ): """_summary_ @@ -57,8 +56,7 @@ def __init__( objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. enforcer (Enforcer): A Callable that enforce constraints on the adversarial input. attacker (Trainer): A PyTorch-Lightning Trainer object used to fit the perturbation. - transform (Callable): Transform input into a convenient format, e.g. [0,1]->[0.255]. - untransform (Callable): Transform adversarial input in the convenient format back into the original format of input, e.g. [0,255]->[0,1]. + batch_converter (Callable): Convert batch into convenient format and reverse. 
""" super().__init__() @@ -105,8 +103,7 @@ def __init__( assert self._attacker.max_epochs == 0 assert self._attacker.limit_train_batches > 0 - self.transform = transform if transform is not None else lambda x: x - self.untransform = untransform if untransform is not None else lambda x: x + self.batch_converter = batch_converter @property def perturber(self) -> Perturber: @@ -135,10 +132,10 @@ def training_step(self, batch, batch_idx): # Compose un-transformed input_adv from batch["input"], then give to model for updated gain. input_adv_transformed = self.get_input_adv(input=input_transformed, target=target) # Target model expects input in the original format. - input_adv = self.untransform(input_adv_transformed) + batch_adv = self.batch_converter.revert(input_adv_transformed, target) # A model that returns output dictionary. - outputs = model(input=input_adv, target=target) + outputs = model(batch_adv) # FIXME: This should really be just `return outputs`. But this might require a new sequence? # FIXME: Everything below here should live in the model as modules. @@ -172,12 +169,12 @@ def configure_gradient_clipping( self.gradient_modifier(group["params"]) @silent() - def forward(self, *, input, target, model): - # Transform input so that it's easier to work with by adversary. - input_transformed = self.transform(input) + def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): + # Extract and transform input so that is convenient for Adversary. + input_transformed, target = self.batch_converter(batch) # Optimization loop only sees the transformed input in batches. - batch = {"input": input_transformed, "target": target, "model": model} + batch_transformed = {"input": input_transformed, "target": target, "model": model} # Configure and reset perturbation for current inputs self.perturber.configure_perturbation(input_transformed) @@ -185,16 +182,16 @@ def forward(self, *, input, target, model): # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. self.attacker.fit_loop.max_epochs += 1 - self.attacker.fit(self, train_dataloaders=cycle([batch])) + self.attacker.fit(self, train_dataloaders=cycle([batch_transformed])) # Get the transformed input_adv for enforcer checking. input_adv_transformed = self.get_input_adv(input=input_transformed, target=target) self.enforcer(input_adv_transformed, input=input_transformed, target=target) - # Un-transform to the same format as input. - input_adv = self.untransform(input_adv_transformed) + # Revert to the original format of batch. + batch_adv = self.batch_converter.revert(input_adv_transformed, target) - return input_adv + return batch_adv @property def attacker(self): diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py new file mode 100644 index 00000000..0eb19e01 --- /dev/null +++ b/mart/attack/batch_converter.py @@ -0,0 +1,128 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +import abc +from typing import Callable + +# TODO: Do we need to copy batch? + +__all__ = [ + "TensorBatchConverter", + "DictBatchConverter", + "ListBatchConverter", + "TupleBatchConverter", +] + + +class BatchConverter(abc.ABC): + def __init__(self, *, transform: Callable = None, untransform: Callable = None): + """_summary_ + + Args: + transform (Callable): Transform input into a convenient format, e.g. [0,1]->[0.255]. 
+ untransform (Callable): Transform adversarial input in the convenient format back into the original format of input, e.g. [0,255]->[0,1]. + """ + self.transform = transform if transform is not None else lambda x: x + self.untransform = untransform if untransform is not None else lambda x: x + + def __call__(self, batch): + input, target = self._convert(batch) + input_transformed = self.transform(input) + return input_transformed, target + + def revert(self, input_transformed, target): + input = self.untransform(input_transformed) + batch = self._revert(input, target) + return batch + + @abc.abstractclassmethod + def _revert(self, input, target): + pass + + @abc.abstractclassmethod + def _convert(self, batch): + pass + + +class TensorBatchConverter(BatchConverter): + def _convert(self, batch): + input = batch + target = None + return input, target + + def _revert(self, input, target): + batch = input + return batch + + +class DictBatchConverter(BatchConverter): + def __init__(self, input_key: str = "input", **kwargs): + """_summary_ + + Args: + input_key (str): Input locator in a batch. Defaults to "input". + """ + super().__init__(**kwargs) + + self.input_key = input_key + self.rest = {} + + def _convert(self, batch): + input = batch.pop(self.input_key) + if "target" in batch: + target = batch.pop("target") + self.rest = batch + else: + target = batch + return input, target + + def _revert(self, input, target): + if self.rest is {}: + batch = {self.input_key: input} | target + else: + batch = {self.input_key: input, "target": target} | self.rest + + return batch + + +class ListBatchConverter(BatchConverter): + def __init__(self, input_key: int = 0, **kwargs): + super().__init__(**kwargs) + + self.input_key = input_key + self.target_size = None + + def _convert(self, batch: list): + input = batch.pop(self.input_key) + self.target_size = len(batch) + + if self.target_size == 1: + target = batch[0] + else: + target = batch + + return input, target + + def _revert(self, input, target): + if self.target_size == 1: + batch = [target] + batch.insert(self.input_key, input) + else: + batch = target + batch.insert(self.input_key, input) + return batch + + +class TupleBatchConverter(ListBatchConverter): + def _convert(self, batch: tuple): + batch_list = list(batch) + input, target = super()._convert(batch_list) + return input, target + + def _revert(self, input, target): + batch_list = super()._revert(input, target) + batch = tuple(batch_list) + return batch diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 3947ccdc..22b7a6a4 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -54,56 +54,8 @@ def setup(self, trainer, pl_module, stage=None): def teardown(self, trainer, pl_module, stage=None): pl_module.on_after_batch_transfer = self._on_after_batch_transfer - def get_input_target_batcher(self, batch_orig): - if isinstance(batch_orig, tuple): - # Convert tuple to list - batch = list(batch_orig).copy() - else: - batch = batch_orig.copy() - - batch_input_key = self.batch_input_key - - # pop() works for both list and dict. 
- input = batch.pop(batch_input_key) - - if isinstance(batch, list) and len(batch) == 1: - target = batch[0] - - def batch_constructor(input, target): - batch = [target] - batch.insert(batch_input_key, input) - return batch - - elif isinstance(batch, list) and len(batch) > 2: - target = batch.copy() - - def batch_constructor(input, target): - batch = target.copy() - batch.insert(batch_input_key, input) - return batch - - elif isinstance(batch, dict) and "target" in dict: - target = batch["target"] - - def batch_constructor(input, target): - return {batch_input_key: input, "target": target} - - elif isinstance(batch, dict) and "target" not in dict: - # Example in anomalib: dict_keys(['image_path', 'label', 'image', 'mask_path', 'mask']) - # image: NCHW; label: N, - target = batch - - def batch_constructor(input, target): - # Besides input and target, add others back to batch. - return target | {batch_input_key: input} - - else: - raise NotImplementedError() - - return input, target, batch_constructor - - def wrap_model(self, model, batch_constructor, dataloader_idx): - """Make a model, such that output = model(input, target).""" + def wrap_model(self, model, dataloader_idx): + """Make a model, such that `output = model(batch)`.""" # Consume dataloader_idx if hasattr(model, "attack_step"): @@ -122,12 +74,7 @@ def model_forward(batch): else: model_forward = model - def wrapped_model(*, input, target): - batch = batch_constructor(input, target) - output = model_forward(batch) - return output - - return wrapped_model + return model_forward def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): batch = self._on_after_batch_transfer(batch, dataloader_idx) @@ -149,13 +96,12 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): # Move adversary to same device as pl_module and run attack adversary.to(pl_module.device) - # FIXME: Directly pass batch instead of assuming it has a structure? - input, target, batch_constructor = self.get_input_target_batcher(batch) - - # We also need to construct a batch for model during attack iterations. - model = self.wrap_model(pl_module, batch_constructor, dataloader_idx) + # We assume Adversary is not aware of PyTorch Lightning, + # so wrap the model as `output=model(batch)`. + model = self.wrap_model(pl_module, dataloader_idx) # TODO: We may need to do model.eval() if there's BN-like layers in the model. - input_adv = adversary(input=input, target=target, model=model) + # Directly pass batch instead of assuming it has a structure. + batch_adv = adversary(batch=batch, model=model) - return [input_adv, target] + return batch_adv diff --git a/mart/configs/attack/adversary.yaml b/mart/configs/attack/adversary.yaml index bbf52433..480e3a5b 100644 --- a/mart/configs/attack/adversary.yaml +++ b/mart/configs/attack/adversary.yaml @@ -12,3 +12,4 @@ gradient_modifier: null objective: null enforcer: ??? attacker: null +batch_converter: ??? 
diff --git a/mart/configs/attack/batch_converter/dict.yaml b/mart/configs/attack/batch_converter/dict.yaml new file mode 100644 index 00000000..db421039 --- /dev/null +++ b/mart/configs/attack/batch_converter/dict.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.batch_converter.DictBatchConverter +input_key: input diff --git a/mart/configs/attack/batch_converter/list.yaml b/mart/configs/attack/batch_converter/list.yaml new file mode 100644 index 00000000..53da9fae --- /dev/null +++ b/mart/configs/attack/batch_converter/list.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.batch_converter.ListBatchConverter +input_key: 0 diff --git a/mart/configs/attack/batch_converter/tensor.yaml b/mart/configs/attack/batch_converter/tensor.yaml new file mode 100644 index 00000000..47697bfd --- /dev/null +++ b/mart/configs/attack/batch_converter/tensor.yaml @@ -0,0 +1 @@ +_target_: mart.attack.batch_converter.TensorBatchConverter diff --git a/mart/configs/attack/transform/to_pixel_range_1.yaml b/mart/configs/attack/batch_converter/transform/to_pixel_range_1.yaml similarity index 100% rename from mart/configs/attack/transform/to_pixel_range_1.yaml rename to mart/configs/attack/batch_converter/transform/to_pixel_range_1.yaml diff --git a/mart/configs/attack/transform/to_pixel_range_255.yaml b/mart/configs/attack/batch_converter/transform/to_pixel_range_255.yaml similarity index 100% rename from mart/configs/attack/transform/to_pixel_range_255.yaml rename to mart/configs/attack/batch_converter/transform/to_pixel_range_255.yaml diff --git a/mart/configs/attack/transform/tuple_to_pixel_range_1.yaml b/mart/configs/attack/batch_converter/transform/tuple_to_pixel_range_1.yaml similarity index 100% rename from mart/configs/attack/transform/tuple_to_pixel_range_1.yaml rename to mart/configs/attack/batch_converter/transform/tuple_to_pixel_range_1.yaml diff --git a/mart/configs/attack/transform/tuple_to_pixel_range_255.yaml b/mart/configs/attack/batch_converter/transform/tuple_to_pixel_range_255.yaml similarity index 100% rename from mart/configs/attack/transform/tuple_to_pixel_range_255.yaml rename to mart/configs/attack/batch_converter/transform/tuple_to_pixel_range_255.yaml diff --git a/mart/configs/attack/batch_converter/tuple.yaml b/mart/configs/attack/batch_converter/tuple.yaml new file mode 100644 index 00000000..25ff65b5 --- /dev/null +++ b/mart/configs/attack/batch_converter/tuple.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.batch_converter.TupleBatchConverter +input_key: 0 diff --git a/mart/configs/attack/classification_eps1.75_fgsm.yaml b/mart/configs/attack/classification_eps1.75_fgsm.yaml index d940472b..7c300e2d 100644 --- a/mart/configs/attack/classification_eps1.75_fgsm.yaml +++ b/mart/configs/attack/classification_eps1.75_fgsm.yaml @@ -1,7 +1,5 @@ defaults: - adversary - - transform: to_pixel_range_255 - - transform@untransform: to_pixel_range_1 - perturber: default - perturber/initializer: constant - perturber/projector: linf_additive_range diff --git a/mart/configs/attack/classification_eps2_pgd10_step1.yaml b/mart/configs/attack/classification_eps2_pgd10_step1.yaml index 7fa1448e..b98cf407 100644 --- a/mart/configs/attack/classification_eps2_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps2_pgd10_step1.yaml @@ -1,7 +1,5 @@ defaults: - adversary - - transform: to_pixel_range_255 - - transform@untransform: to_pixel_range_1 - perturber: default - perturber/initializer: uniform_lp - perturber/projector: linf_additive_range diff --git a/mart/configs/attack/classification_eps8_pgd10_step1.yaml 
b/mart/configs/attack/classification_eps8_pgd10_step1.yaml index 206e339d..f1b6242a 100644 --- a/mart/configs/attack/classification_eps8_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps8_pgd10_step1.yaml @@ -1,7 +1,5 @@ defaults: - adversary - - transform: to_pixel_range_255 - - transform@untransform: to_pixel_range_1 - perturber: default - perturber/initializer: uniform_lp - perturber/projector: linf_additive_range diff --git a/mart/configs/attack/data_coco.yaml b/mart/configs/attack/data_coco.yaml new file mode 100644 index 00000000..45c97355 --- /dev/null +++ b/mart/configs/attack/data_coco.yaml @@ -0,0 +1,11 @@ +defaults: + - batch_converter: tuple + - batch_converter/transform@batch_converter.transform.transforms: to_pixel_range_255 + - batch_converter/transform@batch_converter.untransform.transforms: to_pixel_range_1 + +batch_converter: + transform: + _target_: mart.transforms.TupleTransforms + + untransform: + _target_: mart.transforms.TupleTransforms diff --git a/mart/configs/attack/data_list_pixel01.yaml b/mart/configs/attack/data_list_pixel01.yaml new file mode 100644 index 00000000..d159ba2b --- /dev/null +++ b/mart/configs/attack/data_list_pixel01.yaml @@ -0,0 +1,4 @@ +defaults: + - batch_converter: list + - batch_converter/transform: to_pixel_range_255 + - batch_converter/transform@batch_converter.untransform: to_pixel_range_1 diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index efda6ff9..9cc3a28f 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -1,7 +1,5 @@ defaults: - adversary - - transform: tuple_to_pixel_range_255 - - transform@untransform: tuple_to_pixel_range_1 - perturber: default - perturber/initializer: constant - perturber/projector: mask_range diff --git a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml index 3d4b1c12..95534794 100644 --- a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml +++ b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml @@ -1,8 +1,10 @@ # @package _global_ defaults: - - /attack@callbacks.adversarial_training.train_adversary: classification_eps1.75_fgsm - - /attack@callbacks.adversarial_training.test_adversary: classification_eps2_pgd10_step1 + - /attack@callbacks.adversarial_training.train_adversary: + [data_list_pixel01, classification_eps1.75_fgsm] + - /attack@callbacks.adversarial_training.test_adversary: + [data_list_pixel01, classification_eps2_pgd10_step1] - override /datamodule: cifar10 - override /model: classifier_cifar10_cnn - override /metric: accuracy diff --git a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml index a1860696..a27a363f 100644 --- a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml +++ b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml @@ -2,7 +2,8 @@ defaults: - COCO_TorchvisionFasterRCNN - - /attack@callbacks.adversarial_training.test_adversary: object_detection_mask_adversary + - /attack@callbacks.adversarial_training.test_adversary: + [data_coco, object_detection_mask_adversary] - override /datamodule: coco_perturbable_mask - override /callbacks: [model_checkpoint, lr_monitor, adversarial_training] diff --git a/tests/test_adversary.py b/tests/test_adversary.py index dab1d2ee..c72edc6d 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -24,6 +24,8 @@ def test_with_model(input_data, 
target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() + batch = {"input": input_data, "target": target_data} + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -32,9 +34,11 @@ def test_with_model(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) - output_data = adversary(input=input_data, target=target_data, model=model) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv["input"] # The enforcer is only called when model is not None. enforcer.assert_called_once() @@ -58,6 +62,7 @@ def test_hidden_params(): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -66,6 +71,7 @@ def test_hidden_params(): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) # Adversarial perturbation should not be updated by a regular training optimizer. @@ -88,6 +94,8 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() + batch = {"input": input_data, "target": target_data} + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -96,9 +104,11 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) - output_data = adversary(input=input_data, target=target_data, model=model) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv["input"] # Adversary will have no parameter even after forward is called, because we hide Perturber in a list. params = [p for p in adversary.parameters()] @@ -119,6 +129,7 @@ def test_loading_perturbation_from_state_dict(): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -127,6 +138,7 @@ def test_loading_perturbation_from_state_dict(): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) # We should be able to load arbitrary state_dict, because Adversary ignores state_dict. @@ -145,6 +157,8 @@ def test_perturbation(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() + batch = {"input": input_data, "target": target_data} + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -153,9 +167,11 @@ def test_perturbation(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) - output_data = adversary(input=input_data, target=target_data, model=model) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv["input"] # The enforcer is only called when model is not None. 
enforcer.assert_called_once() @@ -190,6 +206,9 @@ def initializer(x): projector=None, ) + batch = {"input": input_data, "target": target_data} + batch_converter = mart.attack.DictBatchConverter() + adversary = Adversary( perturber=perturber, composer=composer, @@ -198,12 +217,14 @@ def initializer(x): gradient_modifier=Sign(), enforcer=enforcer, max_iters=1, + batch_converter=batch_converter, ) - def model(input, target): - return {"logits": input} + def model(batch): + return {"logits": batch["input"]} - input_adv = adversary(input=input_data, target=target_data, model=model) + batch_adv = adversary(batch=batch, model=model) + input_adv = batch_adv["input"] perturbation = input_data - input_adv @@ -220,12 +241,14 @@ def test_configure_optimizers(): composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock() + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, + batch_converter=batch_converter, ) adversary.configure_optimizers() @@ -240,12 +263,14 @@ def test_training_step(input_data, target_data, perturbation): optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor(1337)) model = Mock(return_value={}) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, + batch_converter=batch_converter, ) output = adversary.training_step( @@ -262,12 +287,14 @@ def test_training_step_with_many_gain(input_data, target_data, perturbation): optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor([1234, 5678])) model = Mock(return_value={}) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, + batch_converter=batch_converter, ) output = adversary.training_step( @@ -284,6 +311,7 @@ def test_training_step_with_objective(input_data, target_data, perturbation): gain = Mock(return_value=torch.tensor([1234, 5678])) model = Mock(return_value={}) objective = Mock(return_value=torch.tensor([True, False], dtype=torch.bool)) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -291,6 +319,7 @@ def test_training_step_with_objective(input_data, target_data, perturbation): optimizer=optimizer, objective=objective, gain=gain, + batch_converter=batch_converter, ) output = adversary.training_step( @@ -310,6 +339,7 @@ def test_configure_gradient_clipping(): ) gradient_modifier = Mock() gain = Mock() + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -317,6 +347,7 @@ def test_configure_gradient_clipping(): optimizer=optimizer, gradient_modifier=gradient_modifier, gain=gain, + batch_converter=batch_converter, ) # We need to mock a trainer since LightningModule does some checks adversary.trainer = Mock(gradient_clip_val=1.0, gradient_clip_algorithm="norm") From 558fc7b7b0782f6224df1d5cfb183561fb133137 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 18 Jul 2023 17:15:58 -0700 Subject: [PATCH 34/74] Add batch_converter in Adversary that supports transform/untransform. 
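With batch_converter as a constructor argument, the Adversary's entry point becomes forward(batch=..., model=...), where model is any callable that maps a whole batch to an output dictionary. Condensed from the updated tests below, a sketch of the new interface (all collaborators except the composer and batch converter are mocked, since only the calling convention matters here):

    import torch
    from unittest.mock import Mock

    import mart
    from mart.attack import Adversary, DictBatchConverter
    from mart.attack.composer import Additive

    adversary = Adversary(
        perturber=Mock(return_value=torch.zeros(3, 32, 32)),
        composer=Additive(),
        optimizer=Mock(spec=mart.optim.OptimizerFactory),
        gain=Mock(),
        enforcer=Mock(),
        attacker=Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)),
        batch_converter=DictBatchConverter(),
    )

    batch = {"input": torch.rand(3, 32, 32), "target": torch.tensor(5)}
    batch_adv = adversary(batch=batch, model=Mock())    # adversarial batch, same dict structure
    input_adv = batch_adv["input"]
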
--- mart/attack/__init__.py | 1 + mart/attack/adversary.py | 35 +++-- mart/attack/batch_converter.py | 128 ++++++++++++++++++ mart/callbacks/adversarial_training.py | 72 ++-------- mart/configs/attack/adversary.yaml | 1 + mart/configs/attack/batch_converter/dict.yaml | 2 + mart/configs/attack/batch_converter/list.yaml | 2 + .../attack/batch_converter/tensor.yaml | 1 + .../configs/attack/batch_converter/tuple.yaml | 2 + .../attack/classification_eps1.75_fgsm.yaml | 1 + .../classification_eps2_pgd10_step1.yaml | 1 + .../classification_eps8_pgd10_step1.yaml | 1 + .../object_detection_mask_adversary.yaml | 1 + tests/test_adversary.py | 43 +++++- 14 files changed, 211 insertions(+), 80 deletions(-) create mode 100644 mart/attack/batch_converter.py create mode 100644 mart/configs/attack/batch_converter/dict.yaml create mode 100644 mart/configs/attack/batch_converter/list.yaml create mode 100644 mart/configs/attack/batch_converter/tensor.yaml create mode 100644 mart/configs/attack/batch_converter/tuple.yaml diff --git a/mart/attack/__init__.py b/mart/attack/__init__.py index 843ce9bd..2a55d648 100644 --- a/mart/attack/__init__.py +++ b/mart/attack/__init__.py @@ -1,5 +1,6 @@ from .adversary import * from .adversary_wrapper import * +from .batch_converter import * from .composer import * from .enforcer import * from .gain import * diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 0669d95e..d24caf5b 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -42,6 +42,7 @@ def __init__( objective: Objective | None = None, enforcer: Enforcer | None = None, attacker: pl.Trainer | None = None, + batch_converter: Callable, **kwargs, ): """_summary_ @@ -55,6 +56,7 @@ def __init__( objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. enforcer (Enforcer): A Callable that enforce constraints on the adversarial input. attacker (Trainer): A PyTorch-Lightning Trainer object used to fit the perturbation. + batch_converter (Callable): Convert batch into convenient format and reverse. """ super().__init__() @@ -101,6 +103,8 @@ def __init__( assert self._attacker.max_epochs == 0 assert self._attacker.limit_train_batches > 0 + self.batch_converter = batch_converter + @property def perturber(self) -> Perturber: # Hide the perturber module in a list, so that perturbation is not exported as a parameter in the model checkpoint, @@ -120,16 +124,18 @@ def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally # batch = batch.copy() - input = batch["input"] + input_transformed = batch["input"] target = batch["target"] # What we need is a frozen model that returns (a dictionary of) logits, or losses. model = batch["model"] - # Compose input_adv from input, then give to model for updated gain. - input_adv = self.get_input_adv(input=input, target=target) + # Compose un-transformed input_adv from batch["input"], then give to model for updated gain. + input_adv_transformed = self.get_input_adv(input=input_transformed, target=target) + # Target model expects input in the original format. + batch_adv = self.batch_converter.revert(input_adv_transformed, target) # A model that returns output dictionary. - outputs = model(input=input_adv, target=target) + outputs = model(batch_adv) # FIXME: This should really be just `return outputs`. But this might require a new sequence? # FIXME: Everything below here should live in the model as modules. 
@@ -163,22 +169,29 @@ def configure_gradient_clipping( self.gradient_modifier(group["params"]) @silent() - def forward(self, *, input, target, model): - batch = {"input": input, "target": target, "model": model} + def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): + # Extract and transform input so that is convenient for Adversary. + input_transformed, target = self.batch_converter(batch) + + # Optimization loop only sees the transformed input in batches. + batch_transformed = {"input": input_transformed, "target": target, "model": model} # Configure and reset perturbation for current inputs - self.perturber.configure_perturbation(input) + self.perturber.configure_perturbation(input_transformed) # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. self.attacker.fit_loop.max_epochs += 1 - self.attacker.fit(self, train_dataloaders=cycle([batch])) + self.attacker.fit(self, train_dataloaders=cycle([batch_transformed])) # Get the transformed input_adv for enforcer checking. - input_adv = self.get_input_adv(input=input, target=target) - self.enforcer(input_adv, input=input, target=target) + input_adv_transformed = self.get_input_adv(input=input_transformed, target=target) + self.enforcer(input_adv_transformed, input=input_transformed, target=target) - return input_adv + # Revert to the original format of batch. + batch_adv = self.batch_converter.revert(input_adv_transformed, target) + + return batch_adv @property def attacker(self): diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py new file mode 100644 index 00000000..0eb19e01 --- /dev/null +++ b/mart/attack/batch_converter.py @@ -0,0 +1,128 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +import abc +from typing import Callable + +# TODO: Do we need to copy batch? + +__all__ = [ + "TensorBatchConverter", + "DictBatchConverter", + "ListBatchConverter", + "TupleBatchConverter", +] + + +class BatchConverter(abc.ABC): + def __init__(self, *, transform: Callable = None, untransform: Callable = None): + """_summary_ + + Args: + transform (Callable): Transform input into a convenient format, e.g. [0,1]->[0.255]. + untransform (Callable): Transform adversarial input in the convenient format back into the original format of input, e.g. [0,255]->[0,1]. + """ + self.transform = transform if transform is not None else lambda x: x + self.untransform = untransform if untransform is not None else lambda x: x + + def __call__(self, batch): + input, target = self._convert(batch) + input_transformed = self.transform(input) + return input_transformed, target + + def revert(self, input_transformed, target): + input = self.untransform(input_transformed) + batch = self._revert(input, target) + return batch + + @abc.abstractclassmethod + def _revert(self, input, target): + pass + + @abc.abstractclassmethod + def _convert(self, batch): + pass + + +class TensorBatchConverter(BatchConverter): + def _convert(self, batch): + input = batch + target = None + return input, target + + def _revert(self, input, target): + batch = input + return batch + + +class DictBatchConverter(BatchConverter): + def __init__(self, input_key: str = "input", **kwargs): + """_summary_ + + Args: + input_key (str): Input locator in a batch. Defaults to "input". 
+ """ + super().__init__(**kwargs) + + self.input_key = input_key + self.rest = {} + + def _convert(self, batch): + input = batch.pop(self.input_key) + if "target" in batch: + target = batch.pop("target") + self.rest = batch + else: + target = batch + return input, target + + def _revert(self, input, target): + if self.rest is {}: + batch = {self.input_key: input} | target + else: + batch = {self.input_key: input, "target": target} | self.rest + + return batch + + +class ListBatchConverter(BatchConverter): + def __init__(self, input_key: int = 0, **kwargs): + super().__init__(**kwargs) + + self.input_key = input_key + self.target_size = None + + def _convert(self, batch: list): + input = batch.pop(self.input_key) + self.target_size = len(batch) + + if self.target_size == 1: + target = batch[0] + else: + target = batch + + return input, target + + def _revert(self, input, target): + if self.target_size == 1: + batch = [target] + batch.insert(self.input_key, input) + else: + batch = target + batch.insert(self.input_key, input) + return batch + + +class TupleBatchConverter(ListBatchConverter): + def _convert(self, batch: tuple): + batch_list = list(batch) + input, target = super()._convert(batch_list) + return input, target + + def _revert(self, input, target): + batch_list = super()._revert(input, target) + batch = tuple(batch_list) + return batch diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 3947ccdc..22b7a6a4 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -54,56 +54,8 @@ def setup(self, trainer, pl_module, stage=None): def teardown(self, trainer, pl_module, stage=None): pl_module.on_after_batch_transfer = self._on_after_batch_transfer - def get_input_target_batcher(self, batch_orig): - if isinstance(batch_orig, tuple): - # Convert tuple to list - batch = list(batch_orig).copy() - else: - batch = batch_orig.copy() - - batch_input_key = self.batch_input_key - - # pop() works for both list and dict. - input = batch.pop(batch_input_key) - - if isinstance(batch, list) and len(batch) == 1: - target = batch[0] - - def batch_constructor(input, target): - batch = [target] - batch.insert(batch_input_key, input) - return batch - - elif isinstance(batch, list) and len(batch) > 2: - target = batch.copy() - - def batch_constructor(input, target): - batch = target.copy() - batch.insert(batch_input_key, input) - return batch - - elif isinstance(batch, dict) and "target" in dict: - target = batch["target"] - - def batch_constructor(input, target): - return {batch_input_key: input, "target": target} - - elif isinstance(batch, dict) and "target" not in dict: - # Example in anomalib: dict_keys(['image_path', 'label', 'image', 'mask_path', 'mask']) - # image: NCHW; label: N, - target = batch - - def batch_constructor(input, target): - # Besides input and target, add others back to batch. 
- return target | {batch_input_key: input} - - else: - raise NotImplementedError() - - return input, target, batch_constructor - - def wrap_model(self, model, batch_constructor, dataloader_idx): - """Make a model, such that output = model(input, target).""" + def wrap_model(self, model, dataloader_idx): + """Make a model, such that `output = model(batch)`.""" # Consume dataloader_idx if hasattr(model, "attack_step"): @@ -122,12 +74,7 @@ def model_forward(batch): else: model_forward = model - def wrapped_model(*, input, target): - batch = batch_constructor(input, target) - output = model_forward(batch) - return output - - return wrapped_model + return model_forward def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): batch = self._on_after_batch_transfer(batch, dataloader_idx) @@ -149,13 +96,12 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): # Move adversary to same device as pl_module and run attack adversary.to(pl_module.device) - # FIXME: Directly pass batch instead of assuming it has a structure? - input, target, batch_constructor = self.get_input_target_batcher(batch) - - # We also need to construct a batch for model during attack iterations. - model = self.wrap_model(pl_module, batch_constructor, dataloader_idx) + # We assume Adversary is not aware of PyTorch Lightning, + # so wrap the model as `output=model(batch)`. + model = self.wrap_model(pl_module, dataloader_idx) # TODO: We may need to do model.eval() if there's BN-like layers in the model. - input_adv = adversary(input=input, target=target, model=model) + # Directly pass batch instead of assuming it has a structure. + batch_adv = adversary(batch=batch, model=model) - return [input_adv, target] + return batch_adv diff --git a/mart/configs/attack/adversary.yaml b/mart/configs/attack/adversary.yaml index bbf52433..480e3a5b 100644 --- a/mart/configs/attack/adversary.yaml +++ b/mart/configs/attack/adversary.yaml @@ -12,3 +12,4 @@ gradient_modifier: null objective: null enforcer: ??? attacker: null +batch_converter: ??? 
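For orientation, a minimal sketch of the convert/revert contract shared by the converter classes added above (illustrative only: the tensors are arbitrary, the default identity transforms are assumed, and none of this code is part of the patch):

import torch

from mart.attack import DictBatchConverter, TupleBatchConverter

# Dict-style batch: calling the converter pops the input out (optionally
# transforming it) and hands (input, target) to the attack optimization loop.
converter = DictBatchConverter(input_key="input")
batch = {"input": torch.rand(2, 3, 32, 32), "target": torch.tensor([3, 7])}
input, target = converter(batch)

# revert() rebuilds a batch in the original format, so the target model keeps
# receiving batches it understands while only the input has been perturbed.
batch_adv = converter.revert(input + 0.01, target)

# Tuple-style batches, e.g. (input, target) from a torchvision dataloader,
# make the same round trip through TupleBatchConverter/ListBatchConverter.
tuple_converter = TupleBatchConverter()
input, target = tuple_converter((torch.rand(2, 3, 32, 32), torch.tensor([3, 7])))
batch_adv = tuple_converter.revert(input + 0.01, target)

Either way the Adversary only ever optimizes over a plain input tensor, while the wrapped model continues to see batches in their native format.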
diff --git a/mart/configs/attack/batch_converter/dict.yaml b/mart/configs/attack/batch_converter/dict.yaml
new file mode 100644
index 00000000..db421039
--- /dev/null
+++ b/mart/configs/attack/batch_converter/dict.yaml
@@ -0,0 +1,2 @@
+_target_: mart.attack.batch_converter.DictBatchConverter
+input_key: input
diff --git a/mart/configs/attack/batch_converter/list.yaml b/mart/configs/attack/batch_converter/list.yaml
new file mode 100644
index 00000000..53da9fae
--- /dev/null
+++ b/mart/configs/attack/batch_converter/list.yaml
@@ -0,0 +1,2 @@
+_target_: mart.attack.batch_converter.ListBatchConverter
+input_key: 0
diff --git a/mart/configs/attack/batch_converter/tensor.yaml b/mart/configs/attack/batch_converter/tensor.yaml
new file mode 100644
index 00000000..47697bfd
--- /dev/null
+++ b/mart/configs/attack/batch_converter/tensor.yaml
@@ -0,0 +1 @@
+_target_: mart.attack.batch_converter.TensorBatchConverter
diff --git a/mart/configs/attack/batch_converter/tuple.yaml b/mart/configs/attack/batch_converter/tuple.yaml
new file mode 100644
index 00000000..25ff65b5
--- /dev/null
+++ b/mart/configs/attack/batch_converter/tuple.yaml
@@ -0,0 +1,2 @@
+_target_: mart.attack.batch_converter.TupleBatchConverter
+input_key: 0
diff --git a/mart/configs/attack/classification_eps1.75_fgsm.yaml b/mart/configs/attack/classification_eps1.75_fgsm.yaml
index 7c300e2d..c3c0ec46 100644
--- a/mart/configs/attack/classification_eps1.75_fgsm.yaml
+++ b/mart/configs/attack/classification_eps1.75_fgsm.yaml
@@ -10,6 +10,7 @@ defaults:
   - objective: misclassification
   - enforcer: default
   - enforcer/constraints: [lp, pixel_range]
+  - batch_converter: list

 enforcer:
   constraints:
diff --git a/mart/configs/attack/classification_eps2_pgd10_step1.yaml b/mart/configs/attack/classification_eps2_pgd10_step1.yaml
index b98cf407..7dd30548 100644
--- a/mart/configs/attack/classification_eps2_pgd10_step1.yaml
+++ b/mart/configs/attack/classification_eps2_pgd10_step1.yaml
@@ -10,6 +10,7 @@ defaults:
   - objective: misclassification
   - enforcer: default
   - enforcer/constraints: [lp, pixel_range]
+  - batch_converter: list

 enforcer:
   constraints:
diff --git a/mart/configs/attack/classification_eps8_pgd10_step1.yaml b/mart/configs/attack/classification_eps8_pgd10_step1.yaml
index f1b6242a..7b9577a7 100644
--- a/mart/configs/attack/classification_eps8_pgd10_step1.yaml
+++ b/mart/configs/attack/classification_eps8_pgd10_step1.yaml
@@ -10,6 +10,7 @@ defaults:
   - objective: misclassification
   - enforcer: default
   - enforcer/constraints: [lp, pixel_range]
+  - batch_converter: list

 enforcer:
   constraints:
diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml
index ad99dda0..cedbd9eb 100644
--- a/mart/configs/attack/object_detection_mask_adversary.yaml
+++ b/mart/configs/attack/object_detection_mask_adversary.yaml
@@ -10,6 +10,7 @@ defaults:
   - objective: zero_ap
   - enforcer: default
   - enforcer/constraints: [mask, pixel_range]
+  - batch_converter: tuple

 # Make a 5-step attack for the demonstration purpose.
max_iters: 5 diff --git a/tests/test_adversary.py b/tests/test_adversary.py index 0c363902..2986f48d 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -24,6 +24,8 @@ def test_with_model(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() + batch = {"input": input_data, "target": target_data} + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -32,9 +34,11 @@ def test_with_model(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) - output_data = adversary(input=input_data, target=target_data, model=model) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv["input"] # The enforcer is only called when model is not None. enforcer.assert_called_once() @@ -58,6 +62,7 @@ def test_hidden_params(): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -66,6 +71,7 @@ def test_hidden_params(): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) # Adversarial perturbation should not be updated by a regular training optimizer. @@ -88,6 +94,8 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() + batch = {"input": input_data, "target": target_data} + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -96,9 +104,11 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) - output_data = adversary(input=input_data, target=target_data, model=model) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv["input"] # Adversary will have no parameter even after forward is called, because we hide Perturber in a list. params = [p for p in adversary.parameters()] @@ -119,6 +129,7 @@ def test_loading_perturbation_from_state_dict(): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -127,6 +138,7 @@ def test_loading_perturbation_from_state_dict(): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) # We should be able to load arbitrary state_dict, because Adversary ignores state_dict. @@ -145,6 +157,8 @@ def test_perturbation(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() + batch = {"input": input_data, "target": target_data} + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -153,9 +167,11 @@ def test_perturbation(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) - output_data = adversary(input=input_data, target=target_data, model=model) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv["input"] # The enforcer is only called when model is not None. 
enforcer.assert_called_once() @@ -190,6 +206,9 @@ def initializer(x): projector=None, ) + batch = {"input": input_data, "target": target_data} + batch_converter = mart.attack.DictBatchConverter() + adversary = Adversary( perturber=perturber, composer=composer, @@ -198,12 +217,14 @@ def initializer(x): gradient_modifier=Sign(), enforcer=enforcer, max_iters=1, + batch_converter=batch_converter, ) - def model(input, target): - return {"logits": input} + def model(batch): + return {"logits": batch["input"]} - input_adv = adversary(input=input_data, target=target_data, model=model) + batch_adv = adversary(batch=batch, model=model) + input_adv = batch_adv["input"] perturbation = input_data - input_adv @@ -215,12 +236,14 @@ def test_configure_optimizers(): composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock() + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, + batch_converter=batch_converter, ) adversary.configure_optimizers() @@ -235,12 +258,14 @@ def test_training_step(input_data, target_data, perturbation): optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor(1337)) model = Mock(return_value={}) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, + batch_converter=batch_converter, ) output = adversary.training_step( @@ -257,12 +282,14 @@ def test_training_step_with_many_gain(input_data, target_data, perturbation): optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor([1234, 5678])) model = Mock(return_value={}) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, + batch_converter=batch_converter, ) output = adversary.training_step( @@ -279,6 +306,7 @@ def test_training_step_with_objective(input_data, target_data, perturbation): gain = Mock(return_value=torch.tensor([1234, 5678])) model = Mock(return_value={}) objective = Mock(return_value=torch.tensor([True, False], dtype=torch.bool)) + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -286,6 +314,7 @@ def test_training_step_with_objective(input_data, target_data, perturbation): optimizer=optimizer, objective=objective, gain=gain, + batch_converter=batch_converter, ) output = adversary.training_step( @@ -305,6 +334,7 @@ def test_configure_gradient_clipping(): ) gradient_modifier = Mock() gain = Mock() + batch_converter = mart.attack.DictBatchConverter() adversary = Adversary( perturber=perturber, @@ -312,6 +342,7 @@ def test_configure_gradient_clipping(): optimizer=optimizer, gradient_modifier=gradient_modifier, gain=gain, + batch_converter=batch_converter, ) # We need to mock a trainer since LightningModule does some checks adversary.trainer = Mock(gradient_clip_val=1.0, gradient_clip_algorithm="norm") From 7ed32acfcd4a8a9a12e25abfd3713fc1d409816f Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 18 Jul 2023 17:17:36 -0700 Subject: [PATCH 35/74] Clean up. 
---
 mart/callbacks/adversarial_training.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py
index 22b7a6a4..31879c59 100644
--- a/mart/callbacks/adversarial_training.py
+++ b/mart/callbacks/adversarial_training.py
@@ -26,7 +26,6 @@ def __init__(
         train_adversary: Callable = None,
         validation_adversary: Callable = None,
         test_adversary: Callable = None,
-        batch_input_key: str | int = 0,
     ):
         """AdversaryConnector.

@@ -35,7 +34,6 @@ def __init__(
             train_adversary (Callable, optional): _description_. Defaults to None.
             validation_adversary (Callable, optional): _description_. Defaults to None.
             test_adversary (Callable, optional): _description_. Defaults to None.
-            batch_input_key (str | int, optional): Input locator in a batch. Defaults to 0.
         """
         adversary = adversary or train_adversary

@@ -43,8 +41,6 @@ def __init__(
         self.validation_adversary = validation_adversary or adversary
         self.test_adversary = test_adversary or adversary

-        self.batch_input_key = batch_input_key
-
     def setup(self, trainer, pl_module, stage=None):
         self._on_after_batch_transfer = pl_module.on_after_batch_transfer
         pl_module.on_after_batch_transfer = types.MethodType(

From 744ebfdad21516c1dfb3c58f8675af25ab4d8dd5 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Tue, 18 Jul 2023 17:30:42 -0700
Subject: [PATCH 36/74] Fix adv. visualizer.

---
 mart/callbacks/visualizer.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py
index 6bb532e7..34e7c5cc 100644
--- a/mart/callbacks/visualizer.py
+++ b/mart/callbacks/visualizer.py
@@ -34,11 +34,10 @@ def on_train_batch_end(self, trainer, model, outputs, batch, batch_idx):
     def on_train_end(self, trainer, model):
         # FIXME: We should really just save this to outputs instead of recomputing adv_input
         with torch.no_grad():
-            # Get [0,1] input as untransform=True by default.
             adv_input = model.get_input_adv(input=self.input, target=self.target)

         for img, tgt in zip(adv_input, self.target):
             fname = tgt["file_name"]
             fpath = os.path.join(self.folder, fname)
-            im = self.convert(img)
+            im = self.convert(img / 255)
             im.save(fpath)

From 6e434964d5c78173ee285adad601a3f28d0b82a7 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Tue, 18 Jul 2023 17:31:56 -0700
Subject: [PATCH 37/74] Fix adv. visualizer.

---
 mart/callbacks/visualizer.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py
index 39409143..34e7c5cc 100644
--- a/mart/callbacks/visualizer.py
+++ b/mart/callbacks/visualizer.py
@@ -6,6 +6,7 @@

 import os

+import torch
 from lightning.pytorch.callbacks import Callback
 from torchvision.transforms import ToPILImage

@@ -32,7 +33,8 @@ def on_train_batch_end(self, trainer, model, outputs, batch, batch_idx):

     def on_train_end(self, trainer, model):
         # FIXME: We should really just save this to outputs instead of recomputing adv_input
-        adv_input = model(input=self.input, target=self.target)
+        with torch.no_grad():
+            adv_input = model.get_input_adv(input=self.input, target=self.target)

         for img, tgt in zip(adv_input, self.target):
             fname = tgt["file_name"]

From 254e1680ee6c9898b6caa33ee70b3a726667a975 Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Tue, 18 Jul 2023 17:41:02 -0700
Subject: [PATCH 38/74] Fix visualizer test.
--- tests/test_visualizer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_visualizer.py b/tests/test_visualizer.py index 5c25e930..cb188591 100644 --- a/tests/test_visualizer.py +++ b/tests/test_visualizer.py @@ -24,7 +24,7 @@ def perturb(input): return result trainer = Mock() - model = Mock(return_value=perturb(input_list)) + model = Mock(get_input_adv=Mock(return_value=perturb(input_list))) outputs = Mock() batch = {"input": input_list, "target": target_list} adversary = Mock(spec=Adversary, side_effect=perturb) From f4f7b16b3d48d22c7701f461670a02327277a947 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 18 Jul 2023 17:54:15 -0700 Subject: [PATCH 39/74] Clean up. --- .../transform/tuple_to_pixel_range_1.yaml | 6 ------ .../transform/tuple_to_pixel_range_255.yaml | 16 ---------------- 2 files changed, 22 deletions(-) delete mode 100644 mart/configs/attack/batch_converter/transform/tuple_to_pixel_range_1.yaml delete mode 100644 mart/configs/attack/batch_converter/transform/tuple_to_pixel_range_255.yaml diff --git a/mart/configs/attack/batch_converter/transform/tuple_to_pixel_range_1.yaml b/mart/configs/attack/batch_converter/transform/tuple_to_pixel_range_1.yaml deleted file mode 100644 index 93f70015..00000000 --- a/mart/configs/attack/batch_converter/transform/tuple_to_pixel_range_1.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# FIXME: Merge tuple and tensor transforms to simplify configs. -_target_: mart.transforms.TupleTransforms -transforms: - _target_: torchvision.transforms.Normalize - mean: 0 - std: 255 diff --git a/mart/configs/attack/batch_converter/transform/tuple_to_pixel_range_255.yaml b/mart/configs/attack/batch_converter/transform/tuple_to_pixel_range_255.yaml deleted file mode 100644 index db80e815..00000000 --- a/mart/configs/attack/batch_converter/transform/tuple_to_pixel_range_255.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# FIXME: Merge tuple and tensor transforms to simplify configs. -_target_: mart.transforms.TupleTransforms -transforms: - _target_: torchvision.transforms.Compose - transforms: - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - # Fix potential numeric error. - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 From 265886f7099349d5aa1baf3a445fb9f45d1e2a3f Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 18 Jul 2023 17:56:14 -0700 Subject: [PATCH 40/74] Fix test. --- tests/test_visualizer.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/test_visualizer.py b/tests/test_visualizer.py index 3d105e39..6754b082 100644 --- a/tests/test_visualizer.py +++ b/tests/test_visualizer.py @@ -15,12 +15,15 @@ def test_visualizer_run_end(input_data, target_data, perturbation, tmp_path): folder = tmp_path / "test" - input_list = [input_data] + + # Convert to [0,255] space for adversary. + perturbation_255 = perturbation * 255 + input_list = [input_data * 255] target_list = [target_data] # simulate an addition perturbation def perturb(input): - result = [sample + perturbation for sample in input] + result = [sample + perturbation_255 for sample in input] return result trainer = Mock() From 46b5cb7e75258b27702a606ea22e8eccda8327a6 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Wed, 19 Jul 2023 13:43:08 -0700 Subject: [PATCH 41/74] Make adversary optional in some phases. 
--- mart/callbacks/adversarial_training.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 31879c59..3437c74b 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -35,8 +35,6 @@ def __init__( validation_adversary (Callable, optional): _description_. Defaults to None. test_adversary (Callable, optional): _description_. Defaults to None. """ - adversary = adversary or train_adversary - self.train_adversary = train_adversary or adversary self.validation_adversary = validation_adversary or adversary self.test_adversary = test_adversary or adversary @@ -85,7 +83,7 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): elif trainer.testing: adversary = self.test_adversary - # Skip if adversary is not defined for the phase train/validation/test. + # Skip if adversary is not defined for all phases train/validation/test. if adversary is None: return batch From 342fef29f00aac2bf298d4a7f7816bc0d8c43773 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 24 Jul 2023 11:28:09 -0700 Subject: [PATCH 42/74] Add target_transform in target_untransform. --- mart/attack/batch_converter.py | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py index 0eb19e01..58cc784c 100644 --- a/mart/attack/batch_converter.py +++ b/mart/attack/batch_converter.py @@ -18,23 +18,41 @@ class BatchConverter(abc.ABC): - def __init__(self, *, transform: Callable = None, untransform: Callable = None): - """_summary_ + def __init__( + self, + *, + transform: Callable = None, + untransform: Callable = None, + target_transform: Callable = None, + target_untransform: Callable = None, + ): + """Convert batch into (input, target), and vice versa. Args: transform (Callable): Transform input into a convenient format, e.g. [0,1]->[0.255]. untransform (Callable): Transform adversarial input in the convenient format back into the original format of input, e.g. [0,255]->[0,1]. + target_transform (Callable): Transform target. + target_untransform (Callable): Untransform target. 
""" - self.transform = transform if transform is not None else lambda x: x - self.untransform = untransform if untransform is not None else lambda x: x + self.transform = transform if isinstance(transform, Callable) else lambda x: x + self.untransform = untransform if isinstance(untransform, Callable) else lambda x: x + + self.target_transform = ( + target_transform if isinstance(target_transform, Callable) else lambda x: x + ) + self.target_untransform = ( + target_untransform if isinstance(target_untransform, Callable) else lambda x: x + ) def __call__(self, batch): input, target = self._convert(batch) input_transformed = self.transform(input) - return input_transformed, target + target_transformed = self.target_transform(target) + return input_transformed, target_transformed - def revert(self, input_transformed, target): + def revert(self, input_transformed, target_transformed): input = self.untransform(input_transformed) + target = self.target_untransform(target_transformed) batch = self._revert(input, target) return batch From ea7cfcef39e241d49b7006e76abeeda0a6a44867 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Wed, 26 Jul 2023 17:16:22 -0700 Subject: [PATCH 43/74] Rename variable: target -> target_transformed --- mart/attack/adversary.py | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index d24caf5b..8505cfd0 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -125,14 +125,16 @@ def training_step(self, batch, batch_idx): # batch = batch.copy() input_transformed = batch["input"] - target = batch["target"] + target_transformed = batch["target"] # What we need is a frozen model that returns (a dictionary of) logits, or losses. model = batch["model"] # Compose un-transformed input_adv from batch["input"], then give to model for updated gain. - input_adv_transformed = self.get_input_adv(input=input_transformed, target=target) + input_adv_transformed = self.get_input_adv( + input=input_transformed, target=target_transformed + ) # Target model expects input in the original format. - batch_adv = self.batch_converter.revert(input_adv_transformed, target) + batch_adv = self.batch_converter.revert(input_adv_transformed, target_transformed) # A model that returns output dictionary. outputs = model(batch_adv) @@ -171,10 +173,14 @@ def configure_gradient_clipping( @silent() def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): # Extract and transform input so that is convenient for Adversary. - input_transformed, target = self.batch_converter(batch) + input_transformed, target_transformed = self.batch_converter(batch) # Optimization loop only sees the transformed input in batches. - batch_transformed = {"input": input_transformed, "target": target, "model": model} + batch_transformed = { + "input": input_transformed, + "target": target_transformed, + "model": model, + } # Configure and reset perturbation for current inputs self.perturber.configure_perturbation(input_transformed) @@ -185,11 +191,13 @@ def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): self.attacker.fit(self, train_dataloaders=cycle([batch_transformed])) # Get the transformed input_adv for enforcer checking. 
- input_adv_transformed = self.get_input_adv(input=input_transformed, target=target) - self.enforcer(input_adv_transformed, input=input_transformed, target=target) + input_adv_transformed = self.get_input_adv( + input=input_transformed, target=target_transformed + ) + self.enforcer(input_adv_transformed, input=input_transformed, target=target_transformed) # Revert to the original format of batch. - batch_adv = self.batch_converter.revert(input_adv_transformed, target) + batch_adv = self.batch_converter.revert(input_adv_transformed, target_transformed) return batch_adv From bae2345a0269245ebe7d266fd3c731b1e65faeec Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 28 Jul 2023 10:53:58 -0700 Subject: [PATCH 44/74] Fix monkey patch in adv_training callback. --- mart/callbacks/adversarial_training.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 3437c74b..0738467c 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -60,9 +60,9 @@ def model_forward(batch): elif hasattr(model, "training_step"): # Monkey-patch model.log to avoid spamming. - @MonkeyPatch(model, "log", lambda *args, **kwargs: None) def model_forward(batch): - output = model.training_step(batch, dataloader_idx) + with MonkeyPatch(model, "log", lambda *args, **kwargs: None): + output = model.training_step(batch, dataloader_idx) return output else: From 9f1d35da7c76a27fe14dc6f000f7ae9a28bde31f Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 28 Jul 2023 10:54:15 -0700 Subject: [PATCH 45/74] Fix DictBatchConverter. --- mart/attack/batch_converter.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py index 58cc784c..89e0648c 100644 --- a/mart/attack/batch_converter.py +++ b/mart/attack/batch_converter.py @@ -91,17 +91,20 @@ def __init__(self, input_key: str = "input", **kwargs): def _convert(self, batch): input = batch.pop(self.input_key) if "target" in batch: - target = batch.pop("target") + target = batch["target"] self.rest = batch else: target = batch return input, target def _revert(self, input, target): - if self.rest is {}: - batch = {self.input_key: input} | target + if self.rest == {}: + batch = target else: - batch = {self.input_key: input, "target": target} | self.rest + batch = self.rest + + # Input may have been changed. + batch[self.input_key] = input return batch From 2a8505ba740f42773d5f1f788f7139705ac77c54 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 28 Jul 2023 10:59:27 -0700 Subject: [PATCH 46/74] Remove recursive adversarial training callback in Adversary. --- mart/attack/adversary.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 8505cfd0..8a99384f 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -15,6 +15,7 @@ from mart.utils import silent +from ..callbacks.adversarial_training import AdversarialTraining from ..optim import OptimizerFactory if TYPE_CHECKING: @@ -221,6 +222,11 @@ def attacker(self): self._attacker = self._attacker(accelerator=accelerator, devices=devices) + # Remove recursive adversarial training callback from lightning.pytorch.callbacks_factory. 
+ for callback in self._attacker.callbacks: + if isinstance(callback, AdversarialTraining): + self._attacker.callbacks.remove(callback) + return self._attacker def cpu(self): From 5d81324dcd9243d060fb8928a84b39bc902ffdfc Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 28 Jul 2023 11:45:43 -0700 Subject: [PATCH 47/74] Copy batch before transform(). --- mart/attack/adversary.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 8a99384f..f3ba5325 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -173,8 +173,9 @@ def configure_gradient_clipping( @silent() def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): + # Copy to keep the original batch. # Extract and transform input so that is convenient for Adversary. - input_transformed, target_transformed = self.batch_converter(batch) + input_transformed, target_transformed = self.batch_converter(batch.copy()) # Optimization loop only sees the transformed input in batches. batch_transformed = { From 77de175d75ae11a8c48e23b67870ce8f924fe892 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 28 Jul 2023 16:24:36 -0700 Subject: [PATCH 48/74] Copy original batch in batch_converter(), because tuple batch cannot be copied. --- mart/attack/adversary.py | 2 +- mart/attack/batch_converter.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index f3ba5325..80e455f7 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -175,7 +175,7 @@ def configure_gradient_clipping( def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): # Copy to keep the original batch. # Extract and transform input so that is convenient for Adversary. - input_transformed, target_transformed = self.batch_converter(batch.copy()) + input_transformed, target_transformed = self.batch_converter(batch) # Optimization loop only sees the transformed input in batches. batch_transformed = { diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py index 89e0648c..1f3b7a95 100644 --- a/mart/attack/batch_converter.py +++ b/mart/attack/batch_converter.py @@ -89,6 +89,8 @@ def __init__(self, input_key: str = "input", **kwargs): self.rest = {} def _convert(self, batch): + # Make a copy because we don't want to break the original batch. + batch = batch.copy() input = batch.pop(self.input_key) if "target" in batch: target = batch["target"] @@ -117,6 +119,8 @@ def __init__(self, input_key: int = 0, **kwargs): self.target_size = None def _convert(self, batch: list): + # Make a copy because we don't want to break the original batch. + batch = batch.copy() input = batch.pop(self.input_key) self.target_size = len(batch) From fb79aa8fcb2c29831a09237df5459157214469a3 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 28 Jul 2023 16:24:36 -0700 Subject: [PATCH 49/74] Copy original batch in batch_converter(), because tuple batch cannot be copied. --- mart/attack/adversary.py | 2 +- mart/attack/batch_converter.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index f3ba5325..80e455f7 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -175,7 +175,7 @@ def configure_gradient_clipping( def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): # Copy to keep the original batch. # Extract and transform input so that is convenient for Adversary. 
- input_transformed, target_transformed = self.batch_converter(batch.copy()) + input_transformed, target_transformed = self.batch_converter(batch) # Optimization loop only sees the transformed input in batches. batch_transformed = { diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py index 89e0648c..1f3b7a95 100644 --- a/mart/attack/batch_converter.py +++ b/mart/attack/batch_converter.py @@ -89,6 +89,8 @@ def __init__(self, input_key: str = "input", **kwargs): self.rest = {} def _convert(self, batch): + # Make a copy because we don't want to break the original batch. + batch = batch.copy() input = batch.pop(self.input_key) if "target" in batch: target = batch["target"] @@ -117,6 +119,8 @@ def __init__(self, input_key: int = 0, **kwargs): self.target_size = None def _convert(self, batch: list): + # Make a copy because we don't want to break the original batch. + batch = batch.copy() input = batch.pop(self.input_key) self.target_size = len(batch) From 0f51d8081a11ed7397f00bc11070d624792066a1 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 20 Jul 2023 15:06:14 -0700 Subject: [PATCH 50/74] Upgrade dependency to torchmetrics == 1.0.1 (#205) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 6a80d751..a13938c5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,7 @@ dependencies = [ "torchvision ~= 0.15.2", "lightning[extra] ~= 2.0.5", # Full functionality including TensorboardX. "pydantic == 1.10.11", # https://github.com/Lightning-AI/lightning/pull/18022/files - "torchmetrics == 1.0.0", + "torchmetrics == 1.0.1", "numpy == 1.23.5", # https://github.com/pytorch/pytorch/issues/91516 # --------- hydra --------- # From d068902c867b36c5dfa1312381a2569f2184378a Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 1 Aug 2023 06:39:10 -0700 Subject: [PATCH 51/74] Move adversary out of the model sequence. --- mart/attack/adversary.py | 68 +++++++++++++++++++----------- mart/callbacks/visualizer.py | 4 +- tests/test_adversary.py | 82 ++++++++++++------------------------ tests/test_visualizer.py | 2 +- 4 files changed, 73 insertions(+), 83 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 4a884c1c..20ffa794 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -15,6 +15,7 @@ from mart.utils import silent +from ..callbacks.adversarial_training import AdversarialTraining from ..optim import OptimizerFactory if TYPE_CHECKING: @@ -110,13 +111,28 @@ def perturber(self) -> Perturber: def configure_optimizers(self): return self.optimizer(self.perturber) + def get_input_adv(self, *, input, target): + perturbation = self.perturber(input=input, target=target) + input_adv = self.composer(perturbation, input=input, target=target) + return input_adv + def training_step(self, batch, batch_idx): + # TODO: We shouldn't need to copy because it is never changed? # copy batch since we modify it and it is used internally - batch = batch.copy() + # batch = batch.copy() + + input = batch["input"] + target = batch["target"] + # What we need is a frozen model that returns (a dictionary of) logits, or losses. + model = batch["model"] + + # Compose input_adv from input, then give to model for updated gain. + input_adv = self.get_input_adv(input=input, target=target) + # Target model expects input in the original format. + batch_adv = (input_adv, target) - # We need to evaluate the perturbation against the whole model, so call it normally to get a gain. 
- model = batch.pop("model") - outputs = model(**batch) + # A model that returns output dictionary. + outputs = model(batch_adv) # FIXME: This should really be just `return outputs`. But this might require a new sequence? # FIXME: Everything below here should live in the model as modules. @@ -150,27 +166,15 @@ def configure_gradient_clipping( self.gradient_modifier(group["params"]) @silent() - def forward(self, *, model=None, **batch): - batch["model"] = model + def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): + input, target = batch - # Adversary can live within a sequence of model. To signal the adversary should - # attack, one must pass a model to attack when calling the adversary. Since we - # do not know where the Adversary lives inside the model, we also need the - # remaining sequence to be able to get a loss. - if model: - self._attack(**batch) - - perturbation = self.perturber(**batch) - input_adv = self.composer(perturbation, **batch) - - # Enforce constraints after the attack optimization ends. - if model: - self.enforcer(input_adv, **batch) - - return input_adv - - def _attack(self, *, input, **batch): - batch["input"] = input + # Optimization loop only sees the transformed input in batches. + batch_transformed = { + "input": input, + "target": target, + "model": model, + } # Configure and reset perturbation for current inputs self.perturber.configure_perturbation(input) @@ -178,7 +182,16 @@ def _attack(self, *, input, **batch): # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. self.attacker.fit_loop.max_epochs += 1 - self.attacker.fit(self, train_dataloaders=cycle([batch])) + self.attacker.fit(self, train_dataloaders=cycle([batch_transformed])) + + # Get the input_adv for enforcer checking. + input_adv = self.get_input_adv(input=input, target=target) + self.enforcer(input_adv, input=input, target=target) + + # Revert to the original format of batch. + batch_adv = (input_adv, target) + + return batch_adv @property def attacker(self): @@ -200,6 +213,11 @@ def attacker(self): self._attacker = self._attacker(accelerator=accelerator, devices=devices) + # Remove recursive adversarial training callback from lightning.pytorch.callbacks_factory. 
+ for callback in self._attacker.callbacks: + if isinstance(callback, AdversarialTraining): + self._attacker.callbacks.remove(callback) + return self._attacker def cpu(self): diff --git a/mart/callbacks/visualizer.py b/mart/callbacks/visualizer.py index 39409143..34e7c5cc 100644 --- a/mart/callbacks/visualizer.py +++ b/mart/callbacks/visualizer.py @@ -6,6 +6,7 @@ import os +import torch from lightning.pytorch.callbacks import Callback from torchvision.transforms import ToPILImage @@ -32,7 +33,8 @@ def on_train_batch_end(self, trainer, model, outputs, batch, batch_idx): def on_train_end(self, trainer, model): # FIXME: We should really just save this to outputs instead of recomputing adv_input - adv_input = model(input=self.input, target=self.target) + with torch.no_grad(): + adv_input = model.get_input_adv(input=self.input, target=self.target) for img, tgt in zip(adv_input, self.target): fname = tgt["file_name"] diff --git a/tests/test_adversary.py b/tests/test_adversary.py index 448baa48..f39686b6 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -17,35 +17,6 @@ from mart.attack.gradient_modifier import Sign -def test_adversary(input_data, target_data, perturbation): - perturber = Mock(spec=Perturber, return_value=perturbation) - composer = mart.attack.composer.Additive() - gain = Mock() - enforcer = Mock() - attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) - - adversary = Adversary( - perturber=perturber, - composer=composer, - optimizer=None, - gain=gain, - enforcer=enforcer, - attacker=attacker, - ) - - output_data = adversary(input=input_data, target=target_data) - - # The enforcer and attacker should only be called when model is not None. - enforcer.assert_not_called() - attacker.fit.assert_not_called() - assert attacker.fit_loop.max_epochs == 0 - - perturber.assert_called_once() - gain.assert_not_called() - - torch.testing.assert_close(output_data, input_data + perturbation) - - def test_with_model(input_data, target_data, perturbation): perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() @@ -53,7 +24,7 @@ def test_with_model(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() - sequence = Mock() + batch = (input_data, target_data) adversary = Adversary( perturber=perturber, @@ -64,7 +35,8 @@ def test_with_model(input_data, target_data, perturbation): attacker=attacker, ) - output_data = adversary(input=input_data, target=target_data, model=model, sequence=sequence) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv[0] # The enforcer is only called when model is not None. 
enforcer.assert_called_once() @@ -78,7 +50,7 @@ def test_with_model(input_data, target_data, perturbation): torch.testing.assert_close(output_data, input_data + perturbation) -def test_hidden_params(input_data, target_data, perturbation): +def test_hidden_params(): initializer = Mock() composer = mart.attack.composer.Additive() projector = Mock() @@ -88,8 +60,6 @@ def test_hidden_params(input_data, target_data, perturbation): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) - model = Mock() - sequence = Mock() adversary = Adversary( perturber=perturber, @@ -100,8 +70,6 @@ def test_hidden_params(input_data, target_data, perturbation): attacker=attacker, ) - # output_data = adversary(input=input_data, target=target_data, model=model, sequence=sequence) - # Adversarial perturbation should not be updated by a regular training optimizer. params = [p for p in adversary.parameters()] assert len(params) == 0 @@ -122,7 +90,7 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() - sequence = Mock() + batch = (input_data, target_data) adversary = Adversary( perturber=perturber, @@ -133,7 +101,8 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): attacker=attacker, ) - output_data = adversary(input=input_data, target=target_data, model=model, sequence=sequence) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv[0] # Adversary will have no parameter even after forward is called, because we hide Perturber in a list. params = [p for p in adversary.parameters()] @@ -180,7 +149,7 @@ def test_perturbation(input_data, target_data, perturbation): enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() - sequence = Mock() + batch = (input_data, target_data) adversary = Adversary( perturber=perturber, @@ -191,15 +160,16 @@ def test_perturbation(input_data, target_data, perturbation): attacker=attacker, ) - _ = adversary(input=input_data, target=target_data, model=model, sequence=sequence) - output_data = adversary(input=input_data, target=target_data) + batch_adv = adversary(batch=batch, model=model) + output_data = batch_adv[0] # The enforcer is only called when model is not None. enforcer.assert_called_once() attacker.fit.assert_called_once() - # Once with model and sequence and once without - assert perturber.call_count == 2 + # Perturber is called once for generating initial input_adv. + # The fit() doesn't run because max_epochs=0. 
+ assert perturber.call_count == 1 torch.testing.assert_close(output_data, input_data + perturbation) @@ -226,6 +196,8 @@ def initializer(x): projector=None, ) + batch = (input_data, target_data) + adversary = Adversary( perturber=perturber, composer=composer, @@ -236,20 +208,18 @@ def initializer(x): max_iters=1, ) - def model(input, target, model=None, **kwargs): - return {"logits": adversary(input=input, target=target)} - - sequence = Mock() + def model(batch): + return {"logits": batch[0]} - adversary(input=input_data, target=target_data, model=model, sequence=sequence) - input_adv = adversary(input=input_data, target=target_data) + batch_adv = adversary(batch=batch, model=model) + input_adv = batch_adv[0] perturbation = input_data - input_adv torch.testing.assert_close(perturbation.unique(), torch.Tensor([-1, 0, 1])) -def test_configure_optimizers(input_data, target_data): +def test_configure_optimizers(): perturber = Mock() composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) @@ -268,8 +238,8 @@ def test_configure_optimizers(input_data, target_data): gain.assert_not_called() -def test_training_step(input_data, target_data): - perturber = Mock() +def test_training_step(input_data, target_data, perturbation): + perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor(1337)) @@ -290,8 +260,8 @@ def test_training_step(input_data, target_data): assert output == 1337 -def test_training_step_with_many_gain(input_data, target_data): - perturber = Mock() +def test_training_step_with_many_gain(input_data, target_data, perturbation): + perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor([1234, 5678])) @@ -311,8 +281,8 @@ def test_training_step_with_many_gain(input_data, target_data): assert output == 1234 + 5678 -def test_training_step_with_objective(input_data, target_data): - perturber = Mock() +def test_training_step_with_objective(input_data, target_data, perturbation): + perturber = Mock(spec=Perturber, return_value=perturbation) composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor([1234, 5678])) diff --git a/tests/test_visualizer.py b/tests/test_visualizer.py index 5c25e930..cb188591 100644 --- a/tests/test_visualizer.py +++ b/tests/test_visualizer.py @@ -24,7 +24,7 @@ def perturb(input): return result trainer = Mock() - model = Mock(return_value=perturb(input_list)) + model = Mock(get_input_adv=Mock(return_value=perturb(input_list))) outputs = Mock() batch = {"input": input_list, "target": target_list} adversary = Mock(spec=Adversary, side_effect=perturb) From e81a55ab0f96ee7c22b46c1788be29fed3fd4193 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 1 Aug 2023 06:40:16 -0700 Subject: [PATCH 52/74] Make an adversarial training/evaluation callback. 
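For orientation, a minimal sketch of what the callback expects from the wrapped module, assuming a plain Lightning classifier (the module, backbone, and loss below are hypothetical, not part of this patch): exposing an attack_step with the signature that wrap_model() looks for gives the adversary a loss to maximize without spamming self.log().

import torch
import torch.nn.functional as F
from lightning.pytorch import LightningModule


class MyClassifier(LightningModule):
    def __init__(self):
        super().__init__()
        # Toy backbone, just enough to make the sketch self-contained.
        self.backbone = torch.nn.Sequential(
            torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10)
        )

    def forward(self, input):
        return self.backbone(input)

    def attack_step(self, batch, dataloader_idx):
        # Same signature the callback's wrap_model() probes for.
        input, target = batch
        logits = self(input)
        # Return a dictionary of outputs; the adversary's gain is computed
        # from these keyword arguments.
        return {"logits": logits, "loss": F.cross_entropy(logits, target)}

Modules without attack_step fall back to training_step with logging monkey-patched away, as the diff below shows.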
--- mart/callbacks/adversarial_training.py | 75 +++++++++++++------ .../callbacks/adversarial_training.yaml | 5 +- mart/configs/experiment/CIFAR10_CNN_Adv.yaml | 2 +- .../COCO_TorchvisionFasterRCNN_Adv.yaml | 10 +-- mart/models/modular.py | 6 ++ tests/test_experiments.py | 2 +- 6 files changed, 68 insertions(+), 32 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 65b90b98..0738467c 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -4,11 +4,14 @@ # SPDX-License-Identifier: BSD-3-Clause # +from __future__ import annotations + import types +from typing import Callable -from pytorch_lightning.callbacks import Callback +from lightning.pytorch.callbacks import Callback -from mart.models import LitModular +from ..utils import MonkeyPatch __all__ = ["AdversarialTraining"] @@ -16,11 +19,22 @@ class AdversarialTraining(Callback): """Perturbs inputs to be adversarial.""" + # TODO: training/validation/test or train/val/test def __init__( - self, adversary=None, train_adversary=None, validation_adversary=None, test_adversary=None + self, + adversary: Callable = None, + train_adversary: Callable = None, + validation_adversary: Callable = None, + test_adversary: Callable = None, ): - adversary = adversary or train_adversary + """AdversaryConnector. + Args: + adversary (Callable, optional): _description_. Defaults to None. + train_adversary (Callable, optional): _description_. Defaults to None. + validation_adversary (Callable, optional): _description_. Defaults to None. + test_adversary (Callable, optional): _description_. Defaults to None. + """ self.train_adversary = train_adversary or adversary self.validation_adversary = validation_adversary or adversary self.test_adversary = test_adversary or adversary @@ -34,35 +48,54 @@ def setup(self, trainer, pl_module, stage=None): def teardown(self, trainer, pl_module, stage=None): pl_module.on_after_batch_transfer = self._on_after_batch_transfer + def wrap_model(self, model, dataloader_idx): + """Make a model, such that `output = model(batch)`.""" + + # Consume dataloader_idx + if hasattr(model, "attack_step"): + + def model_forward(batch): + output = model.attack_step(batch, dataloader_idx) + return output + + elif hasattr(model, "training_step"): + # Monkey-patch model.log to avoid spamming. + def model_forward(batch): + with MonkeyPatch(model, "log", lambda *args, **kwargs: None): + output = model.training_step(batch, dataloader_idx) + return output + + else: + model_forward = model + + return model_forward + def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): batch = self._on_after_batch_transfer(batch, dataloader_idx) - # FIXME: Remove use of step + adversary = None + trainer = pl_module.trainer if trainer.training: adversary = self.train_adversary - step = "training" elif trainer.validating: adversary = self.validation_adversary - step = "validation" elif trainer.testing: adversary = self.test_adversary - step = "test" - else: - return batch - # Create attacked model where the adversary executes before the model - # FIXME: Should we just use pl_module.training_step? Ideally we would not decompose batch - # and instead pass batch directly to the underlying pl_module since it knows how to - # interpret batch. - def attacked_model(input, **batch): - input_adv = adversary(input=input, **batch) - return pl_module(input=input_adv, **batch) + # Skip if adversary is not defined for all phases train/validation/test. 
+ if adversary is None: + return batch # Move adversary to same device as pl_module and run attack - # FIXME: Directly pass batch instead of assuming it has a structure? - input, target = batch adversary.to(pl_module.device) - input_adv = adversary(input=input, target=target, step=step, model=attacked_model) - return [input_adv, target] + # We assume Adversary is not aware of PyTorch Lightning, + # so wrap the model as `output=model(batch)`. + model = self.wrap_model(pl_module, dataloader_idx) + + # TODO: We may need to do model.eval() if there's BN-like layers in the model. + # Directly pass batch instead of assuming it has a structure. + batch_adv = adversary(batch=batch, model=model) + + return batch_adv diff --git a/mart/configs/callbacks/adversarial_training.yaml b/mart/configs/callbacks/adversarial_training.yaml index 0f6a7b47..eb74d0c1 100644 --- a/mart/configs/callbacks/adversarial_training.yaml +++ b/mart/configs/callbacks/adversarial_training.yaml @@ -1,3 +1,6 @@ adversarial_training: _target_: mart.callbacks.AdversarialTraining - adversary: ??? + adversary: null + train_adversary: null + validation_adversary: null + test_adversary: null diff --git a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml index ced39cd1..3d4b1c12 100644 --- a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml +++ b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml @@ -1,7 +1,7 @@ # @package _global_ defaults: - - /attack@callbacks.adversarial_training.adversary: classification_eps1.75_fgsm + - /attack@callbacks.adversarial_training.train_adversary: classification_eps1.75_fgsm - /attack@callbacks.adversarial_training.test_adversary: classification_eps2_pgd10_step1 - override /datamodule: cifar10 - override /model: classifier_cifar10_cnn diff --git a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml index 398394bf..a1860696 100644 --- a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml +++ b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml @@ -2,15 +2,9 @@ defaults: - COCO_TorchvisionFasterRCNN - - /attack@model.modules.input_adv_test: object_detection_mask_adversary + - /attack@callbacks.adversarial_training.test_adversary: object_detection_mask_adversary - override /datamodule: coco_perturbable_mask + - override /callbacks: [model_checkpoint, lr_monitor, adversarial_training] task_name: "COCO_TorchvisionFasterRCNN_Adv" tags: ["adv"] - -model: - test_sequence: - seq005: input_adv_test - - seq010: - preprocessor: ["input_adv_test"] diff --git a/mart/models/modular.py b/mart/models/modular.py index 192204a2..c63c9fd7 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -126,6 +126,12 @@ def configure_optimizers(self): def forward(self, **kwargs): return self.model(**kwargs) + def attack_step(self, batch, batch_idx): + # Use the training sequence in attack. 
+ input, target = batch + output = self(input=input, target=target, model=self.model, step="training") + return output + # # Training # diff --git a/tests/test_experiments.py b/tests/test_experiments.py index 65b27a5d..404a98ef 100644 --- a/tests/test_experiments.py +++ b/tests/test_experiments.py @@ -80,7 +80,7 @@ def test_cifar10_cnn_adv_experiment(classification_cfg, tmp_path): "-m", "experiment=CIFAR10_CNN_Adv", "hydra.sweep.dir=" + str(tmp_path), - "model.modules.input_adv_test.max_iters=10", + "callbacks.adversarial_training.test_adversary.max_iters=10", "optimized_metric=training_metrics/acc", "++datamodule.train_dataset.image_size=[3,32,32]", "++datamodule.train_dataset.num_classes=10", From 579d6531b2dd2a547f9f8cee26e6702b25ef3b40 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 3 Aug 2023 10:02:32 -0700 Subject: [PATCH 53/74] Remove stuff that is related to callback entry points. --- mart/attack/adversary.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 20ffa794..8a99dd39 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -213,11 +213,6 @@ def attacker(self): self._attacker = self._attacker(accelerator=accelerator, devices=devices) - # Remove recursive adversarial training callback from lightning.pytorch.callbacks_factory. - for callback in self._attacker.callbacks: - if isinstance(callback, AdversarialTraining): - self._attacker.callbacks.remove(callback) - return self._attacker def cpu(self): From 92f9fe23fcb41c876222ce12881e59f9133067cb Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 7 Aug 2023 10:20:27 -0700 Subject: [PATCH 54/74] Replace model wrapper with a configurable model_transform. --- mart/attack/adversary.py | 6 +++ mart/attack/model_transform.py | 38 +++++++++++++++++++ mart/callbacks/adversarial_training.py | 31 +-------------- mart/configs/attack/adversary.yaml | 1 + .../attack/model_transform/lightning.yaml | 1 + 5 files changed, 47 insertions(+), 30 deletions(-) create mode 100644 mart/attack/model_transform.py create mode 100644 mart/configs/attack/model_transform/lightning.yaml diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 8a99dd39..e942dc15 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -43,6 +43,7 @@ def __init__( objective: Objective | None = None, enforcer: Enforcer | None = None, attacker: pl.Trainer | None = None, + model_transform: Callable | None = None, **kwargs, ): """_summary_ @@ -102,6 +103,8 @@ def __init__( assert self._attacker.max_epochs == 0 assert self._attacker.limit_train_batches > 0 + self.model_transform = model_transform + @property def perturber(self) -> Perturber: # Hide the perturber module in a list, so that perturbation is not exported as a parameter in the model checkpoint, @@ -169,6 +172,9 @@ def configure_gradient_clipping( def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): input, target = batch + if self.model_transform is not None: + model = self.model_transform(model) + # Optimization loop only sees the transformed input in batches. 
batch_transformed = { "input": input, diff --git a/mart/attack/model_transform.py b/mart/attack/model_transform.py new file mode 100644 index 00000000..3cf4771b --- /dev/null +++ b/mart/attack/model_transform.py @@ -0,0 +1,38 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from ..utils import MonkeyPatch + + +class LightningModuleAsTarget: + """Prepare a LightningModule as a target model for Adversary, + such that `output = model(batch)`. + """ + + def __call__(self, model): + # Generate a pseudo dataloader_idx. + dataloader_idx = 1 + + if hasattr(model, "attack_step"): + + def model_forward(batch): + output = model.attack_step(batch, dataloader_idx) + return output + + elif hasattr(model, "training_step"): + # Monkey-patch model.log to avoid spamming. + def model_forward(batch): + with MonkeyPatch(model, "log", lambda *args, **kwargs: None): + output = model.training_step(batch, dataloader_idx) + return output + + else: + raise ValueError("Model does not have `attack_step()` or `training_step()`.") + + return model_forward + + +# TODO: We may need to do model.eval() if there's BN-like layers in the model. diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 0738467c..68c398fd 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -11,8 +11,6 @@ from lightning.pytorch.callbacks import Callback -from ..utils import MonkeyPatch - __all__ = ["AdversarialTraining"] @@ -48,28 +46,6 @@ def setup(self, trainer, pl_module, stage=None): def teardown(self, trainer, pl_module, stage=None): pl_module.on_after_batch_transfer = self._on_after_batch_transfer - def wrap_model(self, model, dataloader_idx): - """Make a model, such that `output = model(batch)`.""" - - # Consume dataloader_idx - if hasattr(model, "attack_step"): - - def model_forward(batch): - output = model.attack_step(batch, dataloader_idx) - return output - - elif hasattr(model, "training_step"): - # Monkey-patch model.log to avoid spamming. - def model_forward(batch): - with MonkeyPatch(model, "log", lambda *args, **kwargs: None): - output = model.training_step(batch, dataloader_idx) - return output - - else: - model_forward = model - - return model_forward - def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): batch = self._on_after_batch_transfer(batch, dataloader_idx) @@ -90,12 +66,7 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): # Move adversary to same device as pl_module and run attack adversary.to(pl_module.device) - # We assume Adversary is not aware of PyTorch Lightning, - # so wrap the model as `output=model(batch)`. - model = self.wrap_model(pl_module, dataloader_idx) - - # TODO: We may need to do model.eval() if there's BN-like layers in the model. # Directly pass batch instead of assuming it has a structure. 
- batch_adv = adversary(batch=batch, model=model) + batch_adv = adversary(batch=batch, model=pl_module) return batch_adv diff --git a/mart/configs/attack/adversary.yaml b/mart/configs/attack/adversary.yaml index bbf52433..aeb331c5 100644 --- a/mart/configs/attack/adversary.yaml +++ b/mart/configs/attack/adversary.yaml @@ -1,5 +1,6 @@ defaults: - /callbacks@callbacks: [progress_bar] + - model_transform: lightning _target_: mart.attack.Adversary _convert_: all diff --git a/mart/configs/attack/model_transform/lightning.yaml b/mart/configs/attack/model_transform/lightning.yaml new file mode 100644 index 00000000..b0db31d6 --- /dev/null +++ b/mart/configs/attack/model_transform/lightning.yaml @@ -0,0 +1 @@ +_target_: mart.attack.model_transform.LightningModuleAsTarget From 44c9c4bba4cb13f5c9a2d457843195baec1ac67e Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 7 Aug 2023 11:33:42 -0700 Subject: [PATCH 55/74] Add Adversary.batch_converter(). --- mart/attack/__init__.py | 1 + mart/attack/adversary.py | 35 ++-- mart/attack/batch_converter.py | 173 ++++++++++++++++++ mart/configs/attack/adversary.yaml | 1 + mart/configs/attack/batch_converter/dict.yaml | 2 + .../attack/batch_converter/input_only.yaml | 1 + mart/configs/attack/batch_converter/list.yaml | 2 + .../configs/attack/batch_converter/tuple.yaml | 2 + .../attack/classification_eps1.75_fgsm.yaml | 1 + .../classification_eps2_pgd10_step1.yaml | 1 + .../classification_eps8_pgd10_step1.yaml | 1 + .../object_detection_mask_adversary.yaml | 1 + tests/test_adversary.py | 28 +++ 13 files changed, 236 insertions(+), 13 deletions(-) create mode 100644 mart/attack/batch_converter.py create mode 100644 mart/configs/attack/batch_converter/dict.yaml create mode 100644 mart/configs/attack/batch_converter/input_only.yaml create mode 100644 mart/configs/attack/batch_converter/list.yaml create mode 100644 mart/configs/attack/batch_converter/tuple.yaml diff --git a/mart/attack/__init__.py b/mart/attack/__init__.py index 843ce9bd..2a55d648 100644 --- a/mart/attack/__init__.py +++ b/mart/attack/__init__.py @@ -1,5 +1,6 @@ from .adversary import * from .adversary_wrapper import * +from .batch_converter import * from .composer import * from .enforcer import * from .gain import * diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index e942dc15..74694203 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -43,6 +43,7 @@ def __init__( objective: Objective | None = None, enforcer: Enforcer | None = None, attacker: pl.Trainer | None = None, + batch_converter: Callable, model_transform: Callable | None = None, **kwargs, ): @@ -57,6 +58,7 @@ def __init__( objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. enforcer (Enforcer): A Callable that enforce constraints on the adversarial input. attacker (Trainer): A PyTorch-Lightning Trainer object used to fit the perturbation. + batch_converter (Callable): Convert batch into convenient format and reverse. 
""" super().__init__() @@ -103,6 +105,7 @@ def __init__( assert self._attacker.max_epochs == 0 assert self._attacker.limit_train_batches > 0 + self.batch_converter = batch_converter self.model_transform = model_transform @property @@ -124,15 +127,17 @@ def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally # batch = batch.copy() - input = batch["input"] - target = batch["target"] + input_transformed = batch["input"] + target_transformed = batch["target"] # What we need is a frozen model that returns (a dictionary of) logits, or losses. model = batch["model"] - # Compose input_adv from input, then give to model for updated gain. - input_adv = self.get_input_adv(input=input, target=target) + # Compose input_adv from input. + input_adv_transformed = self.get_input_adv( + input=input_transformed, target=target_transformed + ) # Target model expects input in the original format. - batch_adv = (input_adv, target) + batch_adv = self.batch_converter.revert(input_adv_transformed, target_transformed) # A model that returns output dictionary. outputs = model(batch_adv) @@ -170,32 +175,36 @@ def configure_gradient_clipping( @silent() def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): - input, target = batch + # Extract and transform input so that is convenient for Adversary. + input_transformed, target_transformed = self.batch_converter(batch) if self.model_transform is not None: model = self.model_transform(model) + # Canonical form of batch in the adversary's optimization loop. # Optimization loop only sees the transformed input in batches. batch_transformed = { - "input": input, - "target": target, + "input": input_transformed, + "target": target_transformed, "model": model, } # Configure and reset perturbation for current inputs - self.perturber.configure_perturbation(input) + self.perturber.configure_perturbation(input_transformed) # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. self.attacker.fit_loop.max_epochs += 1 self.attacker.fit(self, train_dataloaders=cycle([batch_transformed])) - # Get the input_adv for enforcer checking. - input_adv = self.get_input_adv(input=input, target=target) - self.enforcer(input_adv, input=input, target=target) + # Get the transformed input_adv for enforcer checking. + input_adv_transformed = self.get_input_adv( + input=input_transformed, target=target_transformed + ) + self.enforcer(input_adv_transformed, input=input_transformed, target=target_transformed) # Revert to the original format of batch. - batch_adv = (input_adv, target) + batch_adv = self.batch_converter.revert(input_adv_transformed, target_transformed) return batch_adv diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py new file mode 100644 index 00000000..2cf790ed --- /dev/null +++ b/mart/attack/batch_converter.py @@ -0,0 +1,173 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from __future__ import annotations + +import abc +from typing import Callable + +# TODO: Do we need to copy batch? 
+ +__all__ = [ + "InputOnlyBatchConverter", + "DictBatchConverter", + "ListBatchConverter", + "TupleBatchConverter", +] + + +class BatchConverter(abc.ABC): + def __init__( + self, + *, + transform: Callable | None = None, + untransform: Callable | None = None, + target_transform: Callable | None = None, + target_untransform: Callable | None = None, + batch_transform: Callable | None = None, + batch_untransform: Callable | None = None, + ): + """Convert batch into (input, target), and vice versa. + + Args: + transform (Callable): Transform input into a convenient format, e.g. [0,1]->[0.255]. + untransform (Callable): Transform adversarial input in the convenient format back into the original format of input, e.g. [0,255]->[0,1]. + target_transform (Callable): Transform target. + target_untransform (Callable): Untransform target. + batch_transform (Callable): Transform batch before converting the batch. + batch_untransform (callable): Untransform batch after reverting the batch. + """ + + self.transform = transform + self.untransform = untransform + + self.target_transform = target_transform + self.target_untransform = target_untransform + + self.batch_transform = batch_transform + self.batch_untransform = batch_untransform + + def __call__(self, batch, device=None): + if self.batch_transform is not None: + batch = self.batch_transform(batch, device=device) + + input, target = self._convert(batch) + + if self.transform is not None: + input = self.transform(input) + if self.target_transform is not None: + target = self.target_transform(target) + + return input, target + + def revert(self, input, target): + if self.untransform is not None: + input = self.untransform(input) + if self.target_untransform is not None: + target = self.target_untransform(target) + + batch = self._revert(input, target) + + if self.batch_untransform is not None: + batch = self.batch_untransform(batch) + + return batch + + @abc.abstractclassmethod + def _revert(self, input, target): + pass + + @abc.abstractclassmethod + def _convert(self, batch): + pass + + +class InputOnlyBatchConverter(BatchConverter): + def _convert(self, batch): + input = batch + target = None + return input, target + + def _revert(self, input, target): + batch = input + return batch + + +class DictBatchConverter(BatchConverter): + def __init__(self, input_key: str = "input", **kwargs): + """_summary_ + + Args: + input_key (str): Input locator in a batch. Defaults to "input". + """ + super().__init__(**kwargs) + + self.input_key = input_key + self.rest = {} + + def _convert(self, batch): + # Make a copy because we don't want to break the original batch. + batch = batch.copy() + input = batch.pop(self.input_key) + if "target" in batch: + target = batch["target"] + self.rest = batch + else: + target = batch + return input, target + + def _revert(self, input, target): + if self.rest == {}: + batch = target + else: + batch = self.rest + + # Input may have been changed. + batch[self.input_key] = input + + return batch + + +class ListBatchConverter(BatchConverter): + def __init__(self, input_key: int = 0, target_size: int | None = None, **kwargs): + super().__init__(**kwargs) + + self.input_key = input_key + self.target_size = target_size + + def _convert(self, batch: list): + # Make a copy because we don't want to break the original batch. 
+ batch = batch.copy() + input = batch.pop(self.input_key) + self.target_size = len(batch) + + if self.target_size == 1: + target = batch[0] + else: + target = batch + + return input, target + + def _revert(self, input, target): + if self.target_size == 1: + batch = [target] + batch.insert(self.input_key, input) + else: + batch = target + batch.insert(self.input_key, input) + return batch + + +class TupleBatchConverter(ListBatchConverter): + def _convert(self, batch: tuple): + batch_list = list(batch) + input, target = super()._convert(batch_list) + return input, target + + def _revert(self, input, target): + batch_list = super()._revert(input, target) + batch = tuple(batch_list) + return batch diff --git a/mart/configs/attack/adversary.yaml b/mart/configs/attack/adversary.yaml index aeb331c5..40765807 100644 --- a/mart/configs/attack/adversary.yaml +++ b/mart/configs/attack/adversary.yaml @@ -13,3 +13,4 @@ gradient_modifier: null objective: null enforcer: ??? attacker: null +batch_converter: ??? diff --git a/mart/configs/attack/batch_converter/dict.yaml b/mart/configs/attack/batch_converter/dict.yaml new file mode 100644 index 00000000..db421039 --- /dev/null +++ b/mart/configs/attack/batch_converter/dict.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.batch_converter.DictBatchConverter +input_key: input diff --git a/mart/configs/attack/batch_converter/input_only.yaml b/mart/configs/attack/batch_converter/input_only.yaml new file mode 100644 index 00000000..b9bb9207 --- /dev/null +++ b/mart/configs/attack/batch_converter/input_only.yaml @@ -0,0 +1 @@ +_target_: mart.attack.batch_converter.InputOnlyBatchConverter diff --git a/mart/configs/attack/batch_converter/list.yaml b/mart/configs/attack/batch_converter/list.yaml new file mode 100644 index 00000000..53da9fae --- /dev/null +++ b/mart/configs/attack/batch_converter/list.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.batch_converter.ListBatchConverter +input_key: 0 diff --git a/mart/configs/attack/batch_converter/tuple.yaml b/mart/configs/attack/batch_converter/tuple.yaml new file mode 100644 index 00000000..25ff65b5 --- /dev/null +++ b/mart/configs/attack/batch_converter/tuple.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.batch_converter.TupleBatchConverter +input_key: 0 diff --git a/mart/configs/attack/classification_eps1.75_fgsm.yaml b/mart/configs/attack/classification_eps1.75_fgsm.yaml index 7c300e2d..c3c0ec46 100644 --- a/mart/configs/attack/classification_eps1.75_fgsm.yaml +++ b/mart/configs/attack/classification_eps1.75_fgsm.yaml @@ -10,6 +10,7 @@ defaults: - objective: misclassification - enforcer: default - enforcer/constraints: [lp, pixel_range] + - batch_converter: list enforcer: constraints: diff --git a/mart/configs/attack/classification_eps2_pgd10_step1.yaml b/mart/configs/attack/classification_eps2_pgd10_step1.yaml index b98cf407..7dd30548 100644 --- a/mart/configs/attack/classification_eps2_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps2_pgd10_step1.yaml @@ -10,6 +10,7 @@ defaults: - objective: misclassification - enforcer: default - enforcer/constraints: [lp, pixel_range] + - batch_converter: list enforcer: constraints: diff --git a/mart/configs/attack/classification_eps8_pgd10_step1.yaml b/mart/configs/attack/classification_eps8_pgd10_step1.yaml index f1b6242a..7b9577a7 100644 --- a/mart/configs/attack/classification_eps8_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps8_pgd10_step1.yaml @@ -10,6 +10,7 @@ defaults: - objective: misclassification - enforcer: default - enforcer/constraints: [lp, 
pixel_range] + - batch_converter: list enforcer: constraints: diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index ad99dda0..cedbd9eb 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -10,6 +10,7 @@ defaults: - objective: zero_ap - enforcer: default - enforcer/constraints: [mask, pixel_range] + - batch_converter: tuple # Make a 5-step attack for the demonstration purpose. max_iters: 5 diff --git a/tests/test_adversary.py b/tests/test_adversary.py index f39686b6..352aa654 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -25,6 +25,7 @@ def test_with_model(input_data, target_data, perturbation): attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() batch = (input_data, target_data) + batch_converter = mart.attack.TupleBatchConverter() adversary = Adversary( perturber=perturber, @@ -33,6 +34,7 @@ def test_with_model(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) batch_adv = adversary(batch=batch, model=model) @@ -60,6 +62,7 @@ def test_hidden_params(): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) + batch_converter = mart.attack.TupleBatchConverter() adversary = Adversary( perturber=perturber, @@ -68,6 +71,7 @@ def test_hidden_params(): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) # Adversarial perturbation should not be updated by a regular training optimizer. @@ -91,6 +95,7 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() batch = (input_data, target_data) + batch_converter = mart.attack.TupleBatchConverter() adversary = Adversary( perturber=perturber, @@ -99,6 +104,7 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) batch_adv = adversary(batch=batch, model=model) @@ -123,6 +129,7 @@ def test_loading_perturbation_from_state_dict(): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) + batch_converter = mart.attack.TupleBatchConverter() adversary = Adversary( perturber=perturber, @@ -131,6 +138,7 @@ def test_loading_perturbation_from_state_dict(): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) # We should be able to load arbitrary state_dict, because Adversary ignores state_dict. 
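For reference, a minimal sketch of the round trip that the new TupleBatchConverter is meant to provide (illustrative only, not part of the patch; the tensors and the no-op "perturbation" are placeholders):

import torch

from mart.attack import TupleBatchConverter

batch_converter = TupleBatchConverter()

# A made-up classification batch in the original (input, target) format.
batch = (torch.rand(2, 3, 32, 32), torch.tensor([3, 7]))

# Canonical view seen by the attack optimization loop.
input, target = batch_converter(batch)

# ... the Adversary would perturb `input` here ...
input_adv = input

# Revert to the original tuple format expected by the target model.
batch_adv = batch_converter.revert(input_adv, target)
assert isinstance(batch_adv, tuple)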
@@ -150,6 +158,7 @@ def test_perturbation(input_data, target_data, perturbation): attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() batch = (input_data, target_data) + batch_converter = mart.attack.TupleBatchConverter() adversary = Adversary( perturber=perturber, @@ -158,6 +167,7 @@ def test_perturbation(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, + batch_converter=batch_converter, ) batch_adv = adversary(batch=batch, model=model) @@ -197,6 +207,7 @@ def initializer(x): ) batch = (input_data, target_data) + batch_converter = mart.attack.TupleBatchConverter() adversary = Adversary( perturber=perturber, @@ -206,6 +217,7 @@ def initializer(x): gradient_modifier=Sign(), enforcer=enforcer, max_iters=1, + batch_converter=batch_converter, ) def model(batch): @@ -224,12 +236,14 @@ def test_configure_optimizers(): composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock() + batch_converter = mart.attack.TupleBatchConverter() adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, + batch_converter=batch_converter, ) adversary.configure_optimizers() @@ -244,14 +258,18 @@ def test_training_step(input_data, target_data, perturbation): optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor(1337)) model = Mock(return_value={}) + # Set target_size manually because the test bypasses the convert() step that reads target_size. + batch_converter = mart.attack.TupleBatchConverter(target_size=1) adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, + batch_converter=batch_converter, ) + # The batch is reverted to a tuple inside training_step() before invoking the model. output = adversary.training_step( {"input": input_data, "target": target_data, "model": model}, 0 ) @@ -266,14 +284,18 @@ def test_training_step_with_many_gain(input_data, target_data, perturbation): optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock(return_value=torch.tensor([1234, 5678])) model = Mock(return_value={}) + # Set target_size manually because the test bypasses the convert() step that reads target_size. + batch_converter = mart.attack.TupleBatchConverter(target_size=1) adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, + batch_converter=batch_converter, ) + # The batch is reverted to a tuple inside training_step() before invoking the model. output = adversary.training_step( {"input": input_data, "target": target_data, "model": model}, 0 ) @@ -288,6 +310,8 @@ def test_training_step_with_objective(input_data, target_data, perturbation): gain = Mock(return_value=torch.tensor([1234, 5678])) model = Mock(return_value={}) objective = Mock(return_value=torch.tensor([True, False], dtype=torch.bool)) + # Set target_size manually because the test bypasses the convert() step that reads target_size. + batch_converter = mart.attack.TupleBatchConverter(target_size=1) adversary = Adversary( perturber=perturber, @@ -295,8 +319,10 @@ def test_training_step_with_objective(input_data, target_data, perturbation): optimizer=optimizer, objective=objective, gain=gain, + batch_converter=batch_converter, ) + # The batch is reverted to a tuple inside training_step() before invoking the model. 
output = adversary.training_step( {"input": input_data, "target": target_data, "model": model}, 0 ) @@ -314,6 +340,7 @@ def test_configure_gradient_clipping(): ) gradient_modifier = Mock() gain = Mock() + batch_converter = mart.attack.TupleBatchConverter() adversary = Adversary( perturber=perturber, @@ -321,6 +348,7 @@ def test_configure_gradient_clipping(): optimizer=optimizer, gradient_modifier=gradient_modifier, gain=gain, + batch_converter=batch_converter, ) # We need to mock a trainer since LightningModule does some checks adversary.trainer = Mock(gradient_clip_val=1.0, gradient_clip_algorithm="norm") From 87d41d641f4b8090424c84b0aa2699e9328239cc Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 7 Aug 2023 15:19:00 -0700 Subject: [PATCH 56/74] Comment. --- mart/attack/adversary.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index e942dc15..ebabab6e 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -57,6 +57,7 @@ def __init__( objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. enforcer (Enforcer): A Callable that enforce constraints on the adversarial input. attacker (Trainer): A PyTorch-Lightning Trainer object used to fit the perturbation. + model_transform (Callable): Transform a model before attack. """ super().__init__() From fe78536ff7396bdd0e81cb3cfd45113b17506317 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 24 Aug 2023 15:28:27 -0700 Subject: [PATCH 57/74] Rename as train/val/test. --- mart/callbacks/adversarial_training.py | 18 +++++++++--------- .../callbacks/adversarial_training.yaml | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversarial_training.py index 68c398fd..4d689b39 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversarial_training.py @@ -17,24 +17,24 @@ class AdversarialTraining(Callback): """Perturbs inputs to be adversarial.""" - # TODO: training/validation/test or train/val/test def __init__( self, adversary: Callable = None, train_adversary: Callable = None, - validation_adversary: Callable = None, + val_adversary: Callable = None, test_adversary: Callable = None, ): - """AdversaryConnector. + """A pl.Trainer callback which perturbs input to be adversarial in training/validation/test + phase. Args: - adversary (Callable, optional): _description_. Defaults to None. - train_adversary (Callable, optional): _description_. Defaults to None. - validation_adversary (Callable, optional): _description_. Defaults to None. - test_adversary (Callable, optional): _description_. Defaults to None. + adversary (Callable, optional): Adversary in the training/validation/test phase if not defined explicitly. Defaults to None. + train_adversary (Callable, optional): Adversary in the training phase. Defaults to None. + val_adversary (Callable, optional): Adversary in the validation phase. Defaults to None. + test_adversary (Callable, optional): Adversary in the test phase. Defaults to None. 
""" self.train_adversary = train_adversary or adversary - self.validation_adversary = validation_adversary or adversary + self.val_adversary = val_adversary or adversary self.test_adversary = test_adversary or adversary def setup(self, trainer, pl_module, stage=None): @@ -55,7 +55,7 @@ def on_after_batch_transfer(self, pl_module, batch, dataloader_idx): if trainer.training: adversary = self.train_adversary elif trainer.validating: - adversary = self.validation_adversary + adversary = self.val_adversary elif trainer.testing: adversary = self.test_adversary diff --git a/mart/configs/callbacks/adversarial_training.yaml b/mart/configs/callbacks/adversarial_training.yaml index eb74d0c1..13f528d2 100644 --- a/mart/configs/callbacks/adversarial_training.yaml +++ b/mart/configs/callbacks/adversarial_training.yaml @@ -2,5 +2,5 @@ adversarial_training: _target_: mart.callbacks.AdversarialTraining adversary: null train_adversary: null - validation_adversary: null + val_adversary: null test_adversary: null From c9a990056ace5cd07bd7f350fe3fb49078c08791 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 24 Aug 2023 15:33:01 -0700 Subject: [PATCH 58/74] Rename the callback to AdversaryConnector because we may not perform adversarial training by allowing train_adversary=None. --- mart/attack/adversary.py | 1 - mart/callbacks/__init__.py | 2 +- .../{adversarial_training.py => adversary_connector.py} | 4 ++-- .../{adversarial_training.yaml => adversary_connector.yaml} | 4 ++-- mart/configs/experiment/CIFAR10_CNN_Adv.yaml | 6 +++--- mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml | 4 ++-- 6 files changed, 10 insertions(+), 11 deletions(-) rename mart/callbacks/{adversarial_training.py => adversary_connector.py} (97%) rename mart/configs/callbacks/{adversarial_training.yaml => adversary_connector.yaml} (55%) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index ebabab6e..01bb47f5 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -15,7 +15,6 @@ from mart.utils import silent -from ..callbacks.adversarial_training import AdversarialTraining from ..optim import OptimizerFactory if TYPE_CHECKING: diff --git a/mart/callbacks/__init__.py b/mart/callbacks/__init__.py index 0dccb7f7..7d16d846 100644 --- a/mart/callbacks/__init__.py +++ b/mart/callbacks/__init__.py @@ -1,4 +1,4 @@ -from .adversarial_training import * +from .adversary_connector import * from .eval_mode import * from .gradients import * from .no_grad_mode import * diff --git a/mart/callbacks/adversarial_training.py b/mart/callbacks/adversary_connector.py similarity index 97% rename from mart/callbacks/adversarial_training.py rename to mart/callbacks/adversary_connector.py index 4d689b39..5d70847b 100644 --- a/mart/callbacks/adversarial_training.py +++ b/mart/callbacks/adversary_connector.py @@ -11,10 +11,10 @@ from lightning.pytorch.callbacks import Callback -__all__ = ["AdversarialTraining"] +__all__ = ["AdversaryConnector"] -class AdversarialTraining(Callback): +class AdversaryConnector(Callback): """Perturbs inputs to be adversarial.""" def __init__( diff --git a/mart/configs/callbacks/adversarial_training.yaml b/mart/configs/callbacks/adversary_connector.yaml similarity index 55% rename from mart/configs/callbacks/adversarial_training.yaml rename to mart/configs/callbacks/adversary_connector.yaml index 13f528d2..5e86c4e7 100644 --- a/mart/configs/callbacks/adversarial_training.yaml +++ b/mart/configs/callbacks/adversary_connector.yaml @@ -1,5 +1,5 @@ -adversarial_training: - _target_: 
mart.callbacks.AdversarialTraining +adversary_connector: + _target_: mart.callbacks.AdversaryConnector adversary: null train_adversary: null val_adversary: null diff --git a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml index 3d4b1c12..cc55eb27 100644 --- a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml +++ b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml @@ -1,13 +1,13 @@ # @package _global_ defaults: - - /attack@callbacks.adversarial_training.train_adversary: classification_eps1.75_fgsm - - /attack@callbacks.adversarial_training.test_adversary: classification_eps2_pgd10_step1 + - /attack@callbacks.adversary_connector.train_adversary: classification_eps1.75_fgsm + - /attack@callbacks.adversary_connector.test_adversary: classification_eps2_pgd10_step1 - override /datamodule: cifar10 - override /model: classifier_cifar10_cnn - override /metric: accuracy - override /optimization: super_convergence - - override /callbacks: [model_checkpoint, lr_monitor, adversarial_training] + - override /callbacks: [model_checkpoint, lr_monitor, adversary_connector] task_name: "CIFAR10_CNN_Adv" tags: ["adv", "fat"] diff --git a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml index a1860696..8c2737bd 100644 --- a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml +++ b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml @@ -2,9 +2,9 @@ defaults: - COCO_TorchvisionFasterRCNN - - /attack@callbacks.adversarial_training.test_adversary: object_detection_mask_adversary + - /attack@callbacks.adversary_connector.test_adversary: object_detection_mask_adversary - override /datamodule: coco_perturbable_mask - - override /callbacks: [model_checkpoint, lr_monitor, adversarial_training] + - override /callbacks: [model_checkpoint, lr_monitor, adversary_connector] task_name: "COCO_TorchvisionFasterRCNN_Adv" tags: ["adv"] From 79c127aeb8cce44aac1ef5e2a182c939c1afc416 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 24 Aug 2023 15:55:14 -0700 Subject: [PATCH 59/74] Rename config: adversarial_training -> adversary_connector. --- tests/test_experiments.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_experiments.py b/tests/test_experiments.py index 404a98ef..737ab12c 100644 --- a/tests/test_experiments.py +++ b/tests/test_experiments.py @@ -80,7 +80,7 @@ def test_cifar10_cnn_adv_experiment(classification_cfg, tmp_path): "-m", "experiment=CIFAR10_CNN_Adv", "hydra.sweep.dir=" + str(tmp_path), - "callbacks.adversarial_training.test_adversary.max_iters=10", + "callbacks.adversary_connector.test_adversary.max_iters=10", "optimized_metric=training_metrics/acc", "++datamodule.train_dataset.image_size=[3,32,32]", "++datamodule.train_dataset.num_classes=10", From 59c3d66b816b8f670202e4436e1e9d079d31dacc Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 24 Aug 2023 16:13:59 -0700 Subject: [PATCH 60/74] Update comments. --- mart/attack/batch_converter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_converter.py index 2cf790ed..0e9397dd 100644 --- a/mart/attack/batch_converter.py +++ b/mart/attack/batch_converter.py @@ -33,8 +33,8 @@ def __init__( """Convert batch into (input, target), and vice versa. Args: - transform (Callable): Transform input into a convenient format, e.g. [0,1]->[0.255]. 
- untransform (Callable): Transform adversarial input in the convenient format back into the original format of input, e.g. [0,255]->[0,1]. + transform (Callable): Transform input into a convenient format, e.g. normalized_input->[0, 255]. + untransform (Callable): Transform adversarial input in the convenient format back into the original format of input, e.g. [0,255]->normalized_input. target_transform (Callable): Transform target. target_untransform (Callable): Untransform target. batch_transform (Callable): Transform batch before converting the batch. From 08e3df120a18d2d98d27fd9ce96b68be3ff21ea1 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 25 Aug 2023 11:47:48 -0700 Subject: [PATCH 61/74] Remove model_transform, again. --- mart/attack/adversary.py | 3 -- mart/attack/model_transform.py | 38 ------------------- mart/configs/attack/adversary.yaml | 1 - .../attack/model_transform/lightning.yaml | 1 - 4 files changed, 43 deletions(-) delete mode 100644 mart/attack/model_transform.py delete mode 100644 mart/configs/attack/model_transform/lightning.yaml diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 90d2c6af..ae8cfab1 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -44,7 +44,6 @@ def __init__( enforcer: Enforcer | None = None, attacker: pl.Trainer | None = None, batch_converter: Callable, - model_transform: Callable | None = None, **kwargs, ): """_summary_ @@ -59,7 +58,6 @@ def __init__( enforcer (Enforcer): A Callable that enforce constraints on the adversarial input. attacker (Trainer): A PyTorch-Lightning Trainer object used to fit the perturbation. batch_converter (Callable): Convert batch into convenient format and reverse. - model_transform (Callable): Transform a model before attack. """ super().__init__() @@ -107,7 +105,6 @@ def __init__( assert self._attacker.limit_train_batches > 0 self.batch_converter = batch_converter - self.model_transform = model_transform @property def perturber(self) -> Perturber: diff --git a/mart/attack/model_transform.py b/mart/attack/model_transform.py deleted file mode 100644 index 3cf4771b..00000000 --- a/mart/attack/model_transform.py +++ /dev/null @@ -1,38 +0,0 @@ -# -# Copyright (C) 2022 Intel Corporation -# -# SPDX-License-Identifier: BSD-3-Clause -# - -from ..utils import MonkeyPatch - - -class LightningModuleAsTarget: - """Prepare a LightningModule as a target model for Adversary, - such that `output = model(batch)`. - """ - - def __call__(self, model): - # Generate a pseudo dataloader_idx. - dataloader_idx = 1 - - if hasattr(model, "attack_step"): - - def model_forward(batch): - output = model.attack_step(batch, dataloader_idx) - return output - - elif hasattr(model, "training_step"): - # Monkey-patch model.log to avoid spamming. - def model_forward(batch): - with MonkeyPatch(model, "log", lambda *args, **kwargs: None): - output = model.training_step(batch, dataloader_idx) - return output - - else: - raise ValueError("Model does not have `attack_step()` or `training_step()`.") - - return model_forward - - -# TODO: We may need to do model.eval() if there's BN-like layers in the model. 
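With the LightningModuleAsTarget wrapper removed, the target LightningModule is handed to the Adversary as-is, and the Adversary dispatches to its `attack_step()` (falling back to `training_step()`), matching the `attack_step()` added to LitModular earlier in this series. A minimal sketch of what such a hook can look like on a custom module (the class, network and loss below are hypothetical stand-ins, not MART code):

import torch
from lightning.pytorch import LightningModule


class ToyClassifier(LightningModule):
    def __init__(self):
        super().__init__()
        self.net = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.LazyLinear(10))

    def attack_step(self, batch, batch_idx):
        # Same shape of contract as training_step, but without logging side effects.
        # Return whatever the configured attack gain expects; a loss dict is used
        # here purely for illustration.
        input, target = batch
        loss = torch.nn.functional.cross_entropy(self.net(input), target)
        return {"loss": loss}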
diff --git a/mart/configs/attack/adversary.yaml b/mart/configs/attack/adversary.yaml index 40765807..480e3a5b 100644 --- a/mart/configs/attack/adversary.yaml +++ b/mart/configs/attack/adversary.yaml @@ -1,6 +1,5 @@ defaults: - /callbacks@callbacks: [progress_bar] - - model_transform: lightning _target_: mart.attack.Adversary _convert_: all diff --git a/mart/configs/attack/model_transform/lightning.yaml b/mart/configs/attack/model_transform/lightning.yaml deleted file mode 100644 index b0db31d6..00000000 --- a/mart/configs/attack/model_transform/lightning.yaml +++ /dev/null @@ -1 +0,0 @@ -_target_: mart.attack.model_transform.LightningModuleAsTarget From 597a08e136a1aef13586cf668571f97e7c17bfe5 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 25 Aug 2023 11:53:02 -0700 Subject: [PATCH 62/74] Comment. --- mart/attack/adversary.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index ae8cfab1..c58b30c3 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -115,8 +115,8 @@ def perturber(self) -> Perturber: def configure_optimizers(self): return self.optimizer(self.perturber) - def training_step(self, batch_and_model, batch_idx): - input_transformed, target_transformed, model = batch_and_model + def training_step(self, batch_transformed_and_model, batch_idx): + input_transformed, target_transformed, model = batch_transformed_and_model # Compose input_adv from input, then give to model for updated gain. perturbation = self.perturber(input=input_transformed, target=target_transformed) @@ -170,12 +170,12 @@ def configure_gradient_clipping( @silent() def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): - # Extract and transform input so that is convenient for Adversary. + # Extract and transform input/target so that is convenient for Adversary. input_transformed, target_transformed = self.batch_converter(batch) - # The attack needs access to the model at every iteration. # Canonical form of batch in the adversary's optimization loop. - # Optimization loop only sees the transformed input in batches. + # We only see the transformed input/target in the attack optimization loop. + # The attack also needs access to the model at every iteration. batch_transformed_and_model = (input_transformed, target_transformed, model) # Configure and reset perturbation for current inputs From 68ec00fb0c236a423f8c3ed828978bb336b50356 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 25 Aug 2023 12:01:19 -0700 Subject: [PATCH 63/74] Comments. --- mart/configs/attack/batch_converter/dict.yaml | 1 + mart/configs/attack/batch_converter/list.yaml | 2 ++ mart/configs/attack/batch_converter/tuple.yaml | 2 ++ 3 files changed, 5 insertions(+) diff --git a/mart/configs/attack/batch_converter/dict.yaml b/mart/configs/attack/batch_converter/dict.yaml index db421039..899b42be 100644 --- a/mart/configs/attack/batch_converter/dict.yaml +++ b/mart/configs/attack/batch_converter/dict.yaml @@ -1,2 +1,3 @@ +# We expect the original batch looks like `{"input": tensor, ...}` with the default parameters. 
_target_: mart.attack.batch_converter.DictBatchConverter input_key: input diff --git a/mart/configs/attack/batch_converter/list.yaml b/mart/configs/attack/batch_converter/list.yaml index 53da9fae..8ce952e7 100644 --- a/mart/configs/attack/batch_converter/list.yaml +++ b/mart/configs/attack/batch_converter/list.yaml @@ -1,2 +1,4 @@ +# We expect the original batch looks like `[input, target]` with the default parameters. _target_: mart.attack.batch_converter.ListBatchConverter input_key: 0 +target_size: 1 diff --git a/mart/configs/attack/batch_converter/tuple.yaml b/mart/configs/attack/batch_converter/tuple.yaml index 25ff65b5..fb7f88e6 100644 --- a/mart/configs/attack/batch_converter/tuple.yaml +++ b/mart/configs/attack/batch_converter/tuple.yaml @@ -1,2 +1,4 @@ +# We expect the original batch looks like `(input, target)` with the default parameters. _target_: mart.attack.batch_converter.TupleBatchConverter input_key: 0 +target_size: 1 From 8b1f953b555fb2b2951e51d3b81665a481038fce Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 28 Aug 2023 10:48:01 -0700 Subject: [PATCH 64/74] Rename batch_converter as batch_c15n. --- mart/attack/__init__.py | 2 +- mart/attack/adversary.py | 12 ++--- .../{batch_converter.py => batch_c15n.py} | 22 +++++----- mart/configs/attack/adversary.yaml | 2 +- .../{batch_converter => batch_c15n}/dict.yaml | 2 +- .../configs/attack/batch_c15n/input_only.yaml | 1 + .../{batch_converter => batch_c15n}/list.yaml | 2 +- .../tuple.yaml | 2 +- .../attack/batch_converter/input_only.yaml | 1 - .../attack/classification_fgsm_linf.yaml | 2 +- .../attack/classification_pgd_linf.yaml | 2 +- .../object_detection_mask_adversary.yaml | 2 +- ...bject_detection_mask_adversary_missed.yaml | 1 + tests/test_adversary.py | 44 +++++++++---------- 14 files changed, 48 insertions(+), 49 deletions(-) rename mart/attack/{batch_converter.py => batch_c15n.py} (91%) rename mart/configs/attack/{batch_converter => batch_c15n}/dict.yaml (66%) create mode 100644 mart/configs/attack/batch_c15n/input_only.yaml rename mart/configs/attack/{batch_converter => batch_c15n}/list.yaml (67%) rename mart/configs/attack/{batch_converter => batch_c15n}/tuple.yaml (66%) delete mode 100644 mart/configs/attack/batch_converter/input_only.yaml diff --git a/mart/attack/__init__.py b/mart/attack/__init__.py index 2a55d648..3c110143 100644 --- a/mart/attack/__init__.py +++ b/mart/attack/__init__.py @@ -1,6 +1,6 @@ from .adversary import * from .adversary_wrapper import * -from .batch_converter import * +from .batch_c15n import * from .composer import * from .enforcer import * from .gain import * diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index c58b30c3..59917b86 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -43,7 +43,7 @@ def __init__( objective: Objective | None = None, enforcer: Enforcer | None = None, attacker: pl.Trainer | None = None, - batch_converter: Callable, + batch_c15n: Callable, **kwargs, ): """_summary_ @@ -57,7 +57,7 @@ def __init__( objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. enforcer (Enforcer): A Callable that enforce constraints on the adversarial input. attacker (Trainer): A PyTorch-Lightning Trainer object used to fit the perturbation. - batch_converter (Callable): Convert batch into convenient format and reverse. + batch_c15n (Callable): Canonicalize batch into convenient format and revert to the original format. 
""" super().__init__() @@ -104,7 +104,7 @@ def __init__( assert self._attacker.max_epochs == 0 assert self._attacker.limit_train_batches > 0 - self.batch_converter = batch_converter + self.batch_c15n = batch_c15n @property def perturber(self) -> Perturber: @@ -125,7 +125,7 @@ def training_step(self, batch_transformed_and_model, batch_idx): ) # Target model expects input in the original format. - batch_adv = self.batch_converter.revert(input_adv_transformed, target_transformed) + batch_adv = self.batch_c15n.revert(input_adv_transformed, target_transformed) # A model that returns output dictionary. if hasattr(model, "attack_step"): @@ -171,7 +171,7 @@ def configure_gradient_clipping( @silent() def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): # Extract and transform input/target so that is convenient for Adversary. - input_transformed, target_transformed = self.batch_converter(batch) + input_transformed, target_transformed = self.batch_c15n(batch) # Canonical form of batch in the adversary's optimization loop. # We only see the transformed input/target in the attack optimization loop. @@ -194,7 +194,7 @@ def forward(self, *, batch: torch.Tensor | list | dict, model: Callable): self.enforcer(input_adv_transformed, input=input_transformed, target=target_transformed) # Revert to the original format of batch. - batch_adv = self.batch_converter.revert(input_adv_transformed, target_transformed) + batch_adv = self.batch_c15n.revert(input_adv_transformed, target_transformed) return batch_adv diff --git a/mart/attack/batch_converter.py b/mart/attack/batch_c15n.py similarity index 91% rename from mart/attack/batch_converter.py rename to mart/attack/batch_c15n.py index 0e9397dd..40256de2 100644 --- a/mart/attack/batch_converter.py +++ b/mart/attack/batch_c15n.py @@ -9,17 +9,15 @@ import abc from typing import Callable -# TODO: Do we need to copy batch? - __all__ = [ - "InputOnlyBatchConverter", - "DictBatchConverter", - "ListBatchConverter", - "TupleBatchConverter", + "InputOnlyBatchC15n", + "DictBatchC15n", + "ListBatchC15n", + "TupleBatchC15n", ] -class BatchConverter(abc.ABC): +class BatchC15n(abc.ABC): def __init__( self, *, @@ -30,7 +28,7 @@ def __init__( batch_transform: Callable | None = None, batch_untransform: Callable | None = None, ): - """Convert batch into (input, target), and vice versa. + """Convert original batch into (input, target), and vice versa. Args: transform (Callable): Transform input into a convenient format, e.g. normalized_input->[0, 255]. 
@@ -85,7 +83,7 @@ def _convert(self, batch): pass -class InputOnlyBatchConverter(BatchConverter): +class InputOnlyBatchC15n(BatchC15n): def _convert(self, batch): input = batch target = None @@ -96,7 +94,7 @@ def _revert(self, input, target): return batch -class DictBatchConverter(BatchConverter): +class DictBatchC15n(BatchC15n): def __init__(self, input_key: str = "input", **kwargs): """_summary_ @@ -131,7 +129,7 @@ def _revert(self, input, target): return batch -class ListBatchConverter(BatchConverter): +class ListBatchC15n(BatchC15n): def __init__(self, input_key: int = 0, target_size: int | None = None, **kwargs): super().__init__(**kwargs) @@ -161,7 +159,7 @@ def _revert(self, input, target): return batch -class TupleBatchConverter(ListBatchConverter): +class TupleBatchC15n(ListBatchC15n): def _convert(self, batch: tuple): batch_list = list(batch) input, target = super()._convert(batch_list) diff --git a/mart/configs/attack/adversary.yaml b/mart/configs/attack/adversary.yaml index 480e3a5b..ef7bee13 100644 --- a/mart/configs/attack/adversary.yaml +++ b/mart/configs/attack/adversary.yaml @@ -12,4 +12,4 @@ gradient_modifier: null objective: null enforcer: ??? attacker: null -batch_converter: ??? +batch_c15n: ??? diff --git a/mart/configs/attack/batch_converter/dict.yaml b/mart/configs/attack/batch_c15n/dict.yaml similarity index 66% rename from mart/configs/attack/batch_converter/dict.yaml rename to mart/configs/attack/batch_c15n/dict.yaml index 899b42be..27770710 100644 --- a/mart/configs/attack/batch_converter/dict.yaml +++ b/mart/configs/attack/batch_c15n/dict.yaml @@ -1,3 +1,3 @@ # We expect the original batch looks like `{"input": tensor, ...}` with the default parameters. -_target_: mart.attack.batch_converter.DictBatchConverter +_target_: mart.attack.batch_c15n.DictBatchC15n input_key: input diff --git a/mart/configs/attack/batch_c15n/input_only.yaml b/mart/configs/attack/batch_c15n/input_only.yaml new file mode 100644 index 00000000..382a7909 --- /dev/null +++ b/mart/configs/attack/batch_c15n/input_only.yaml @@ -0,0 +1 @@ +_target_: mart.attack.batch_c15n.InputOnlyBatchC15n diff --git a/mart/configs/attack/batch_converter/list.yaml b/mart/configs/attack/batch_c15n/list.yaml similarity index 67% rename from mart/configs/attack/batch_converter/list.yaml rename to mart/configs/attack/batch_c15n/list.yaml index 8ce952e7..813ed87b 100644 --- a/mart/configs/attack/batch_converter/list.yaml +++ b/mart/configs/attack/batch_c15n/list.yaml @@ -1,4 +1,4 @@ # We expect the original batch looks like `[input, target]` with the default parameters. -_target_: mart.attack.batch_converter.ListBatchConverter +_target_: mart.attack.batch_c15n.ListBatchC15n input_key: 0 target_size: 1 diff --git a/mart/configs/attack/batch_converter/tuple.yaml b/mart/configs/attack/batch_c15n/tuple.yaml similarity index 66% rename from mart/configs/attack/batch_converter/tuple.yaml rename to mart/configs/attack/batch_c15n/tuple.yaml index fb7f88e6..00b31694 100644 --- a/mart/configs/attack/batch_converter/tuple.yaml +++ b/mart/configs/attack/batch_c15n/tuple.yaml @@ -1,4 +1,4 @@ # We expect the original batch looks like `(input, target)` with the default parameters. 
-_target_: mart.attack.batch_converter.TupleBatchConverter +_target_: mart.attack.batch_c15n.TupleBatchC15n input_key: 0 target_size: 1 diff --git a/mart/configs/attack/batch_converter/input_only.yaml b/mart/configs/attack/batch_converter/input_only.yaml deleted file mode 100644 index b9bb9207..00000000 --- a/mart/configs/attack/batch_converter/input_only.yaml +++ /dev/null @@ -1 +0,0 @@ -_target_: mart.attack.batch_converter.InputOnlyBatchConverter diff --git a/mart/configs/attack/classification_fgsm_linf.yaml b/mart/configs/attack/classification_fgsm_linf.yaml index dc2d25c3..852c6d6a 100644 --- a/mart/configs/attack/classification_fgsm_linf.yaml +++ b/mart/configs/attack/classification_fgsm_linf.yaml @@ -6,7 +6,7 @@ defaults: - gradient_modifier: sign - gain: cross_entropy - objective: misclassification - - batch_converter: list + - batch_c15n: list eps: ??? max_iters: 1 diff --git a/mart/configs/attack/classification_pgd_linf.yaml b/mart/configs/attack/classification_pgd_linf.yaml index 879ba8d1..83bdb5d1 100644 --- a/mart/configs/attack/classification_pgd_linf.yaml +++ b/mart/configs/attack/classification_pgd_linf.yaml @@ -6,7 +6,7 @@ defaults: - gradient_modifier: sign - gain: cross_entropy - objective: misclassification - - batch_converter: list + - batch_c15n: list eps: ??? lr: ??? diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index 085afe0a..ec28cea0 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -7,7 +7,7 @@ defaults: - gradient_modifier: sign - gain: rcnn_training_loss - objective: zero_ap - - batch_converter: tuple + - batch_c15n: tuple max_iters: ??? lr: ??? diff --git a/mart/configs/attack/object_detection_mask_adversary_missed.yaml b/mart/configs/attack/object_detection_mask_adversary_missed.yaml index 4f5fc039..54a44d01 100644 --- a/mart/configs/attack/object_detection_mask_adversary_missed.yaml +++ b/mart/configs/attack/object_detection_mask_adversary_missed.yaml @@ -7,6 +7,7 @@ defaults: - gradient_modifier: sign - gain: rcnn_class_background - objective: object_detection_missed + - batch_c15n: tuple max_iters: ??? lr: ??? diff --git a/tests/test_adversary.py b/tests/test_adversary.py index 6917c3e6..fbe11997 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -25,7 +25,7 @@ def test_with_model(input_data, target_data, perturbation): attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() batch = (input_data, target_data) - batch_converter = mart.attack.TupleBatchConverter() + batch_c15n = mart.attack.TupleBatchC15n() adversary = Adversary( perturber=perturber, @@ -34,7 +34,7 @@ def test_with_model(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, - batch_converter=batch_converter, + batch_c15n=batch_c15n, ) batch_adv = adversary(batch=batch, model=model) @@ -62,7 +62,7 @@ def test_hidden_params(): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) - batch_converter = mart.attack.TupleBatchConverter() + batch_c15n = mart.attack.TupleBatchC15n() adversary = Adversary( perturber=perturber, @@ -71,7 +71,7 @@ def test_hidden_params(): gain=gain, enforcer=enforcer, attacker=attacker, - batch_converter=batch_converter, + batch_c15n=batch_c15n, ) # Adversarial perturbation should not be updated by a regular training optimizer. 
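The tests above exercise the tuple flavor; for pipelines whose batches are dictionaries, DictBatchC15n pops the entry at `input_key` and carries the remaining keys along so that revert() can rebuild the original structure. A minimal sketch with a made-up batch (the "mask" key is illustrative, not required by MART):

import torch

from mart.attack import DictBatchC15n

batch_c15n = DictBatchC15n(input_key="input")

batch = {
    "input": torch.rand(2, 3, 8, 8),
    "target": torch.tensor([1, 0]),
    "mask": torch.ones(2, 3, 8, 8),
}

input, target = batch_c15n(batch)  # target comes from batch["target"]
batch_adv = batch_c15n.revert(input, target)  # other keys are carried over
assert set(batch_adv) == {"input", "target", "mask"}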
@@ -95,7 +95,7 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() batch = (input_data, target_data) - batch_converter = mart.attack.TupleBatchConverter() + batch_c15n = mart.attack.TupleBatchC15n() adversary = Adversary( perturber=perturber, @@ -104,7 +104,7 @@ def test_hidden_params_after_forward(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, - batch_converter=batch_converter, + batch_c15n=batch_c15n, ) batch_adv = adversary(batch=batch, model=model) @@ -129,7 +129,7 @@ def test_loading_perturbation_from_state_dict(): gain = Mock() enforcer = Mock() attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) - batch_converter = mart.attack.TupleBatchConverter() + batch_c15n = mart.attack.TupleBatchC15n() adversary = Adversary( perturber=perturber, @@ -138,7 +138,7 @@ def test_loading_perturbation_from_state_dict(): gain=gain, enforcer=enforcer, attacker=attacker, - batch_converter=batch_converter, + batch_c15n=batch_c15n, ) # We should be able to load arbitrary state_dict, because Adversary ignores state_dict. @@ -158,7 +158,7 @@ def test_perturbation(input_data, target_data, perturbation): attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) model = Mock() batch = (input_data, target_data) - batch_converter = mart.attack.TupleBatchConverter() + batch_c15n = mart.attack.TupleBatchC15n() adversary = Adversary( perturber=perturber, @@ -167,7 +167,7 @@ def test_perturbation(input_data, target_data, perturbation): gain=gain, enforcer=enforcer, attacker=attacker, - batch_converter=batch_converter, + batch_c15n=batch_c15n, ) batch_adv = adversary(batch=batch, model=model) @@ -207,7 +207,7 @@ def initializer(x): ) batch = (input_data, target_data) - batch_converter = mart.attack.TupleBatchConverter() + batch_c15n = mart.attack.TupleBatchC15n() adversary = Adversary( perturber=perturber, @@ -217,7 +217,7 @@ def initializer(x): gradient_modifier=Sign(), enforcer=enforcer, max_iters=1, - batch_converter=batch_converter, + batch_c15n=batch_c15n, ) def model(batch): @@ -236,14 +236,14 @@ def test_configure_optimizers(): composer = mart.attack.composer.Additive() optimizer = Mock(spec=mart.optim.OptimizerFactory) gain = Mock() - batch_converter = mart.attack.TupleBatchConverter() + batch_c15n = mart.attack.TupleBatchC15n() adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, - batch_converter=batch_converter, + batch_c15n=batch_c15n, ) adversary.configure_optimizers() @@ -259,14 +259,14 @@ def test_training_step(input_data, target_data, perturbation): gain = Mock(return_value=torch.tensor(1337)) model = Mock(spec="__call__", return_value={}) # Set target_size manually because the test bypasses the convert() step that reads target_size. 
- batch_converter = mart.attack.TupleBatchConverter(target_size=1) + batch_c15n = mart.attack.TupleBatchC15n(target_size=1) adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, - batch_converter=batch_converter, + batch_c15n=batch_c15n, ) output = adversary.training_step((input_data, target_data, model), 0) @@ -282,14 +282,14 @@ def test_training_step_with_many_gain(input_data, target_data, perturbation): gain = Mock(return_value=torch.tensor([1234, 5678])) model = Mock(spec="__call__", return_value={}) # Set target_size manually because the test bypasses the convert() step that reads target_size. - batch_converter = mart.attack.TupleBatchConverter(target_size=1) + batch_c15n = mart.attack.TupleBatchC15n(target_size=1) adversary = Adversary( perturber=perturber, composer=composer, optimizer=optimizer, gain=gain, - batch_converter=batch_converter, + batch_c15n=batch_c15n, ) output = adversary.training_step((input_data, target_data, model), 0) @@ -306,7 +306,7 @@ def test_training_step_with_objective(input_data, target_data, perturbation): model = Mock(spec="__call__", return_value={}) objective = Mock(return_value=torch.tensor([True, False], dtype=torch.bool)) # Set target_size manually because the test bypasses the convert() step that reads target_size. - batch_converter = mart.attack.TupleBatchConverter(target_size=1) + batch_c15n = mart.attack.TupleBatchC15n(target_size=1) adversary = Adversary( perturber=perturber, @@ -314,7 +314,7 @@ def test_training_step_with_objective(input_data, target_data, perturbation): optimizer=optimizer, objective=objective, gain=gain, - batch_converter=batch_converter, + batch_c15n=batch_c15n, ) output = adversary.training_step((input_data, target_data, model), 0) @@ -332,7 +332,7 @@ def test_configure_gradient_clipping(): ) gradient_modifier = Mock() gain = Mock() - batch_converter = mart.attack.TupleBatchConverter() + batch_c15n = mart.attack.TupleBatchC15n() adversary = Adversary( perturber=perturber, @@ -340,7 +340,7 @@ def test_configure_gradient_clipping(): optimizer=optimizer, gradient_modifier=gradient_modifier, gain=gain, - batch_converter=batch_converter, + batch_c15n=batch_c15n, ) # We need to mock a trainer since LightningModule does some checks adversary.trainer = Mock(gradient_clip_val=1.0, gradient_clip_algorithm="norm") From df1734fdbcec9c318575d45c3396f44fbba38959 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 28 Aug 2023 10:55:11 -0700 Subject: [PATCH 65/74] Replace the _transformed suffix with the _orig suffix, because _orig is rare. --- mart/attack/adversary.py | 44 ++++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 24 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 59917b86..018e594e 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -115,27 +115,25 @@ def perturber(self) -> Perturber: def configure_optimizers(self): return self.optimizer(self.perturber) - def training_step(self, batch_transformed_and_model, batch_idx): - input_transformed, target_transformed, model = batch_transformed_and_model + def training_step(self, batch_and_model, batch_idx): + input, target, model = batch_and_model # Compose input_adv from input, then give to model for updated gain. 
-        perturbation = self.perturber(input=input_transformed, target=target_transformed)
-        input_adv_transformed = self.composer(
-            perturbation, input=input_transformed, target=target_transformed
-        )
+        perturbation = self.perturber(input=input, target=target)
+        input_adv = self.composer(perturbation, input=input, target=target)

         # Target model expects input in the original format.
-        batch_adv = self.batch_c15n.revert(input_adv_transformed, target_transformed)
+        batch_adv_orig = self.batch_c15n.revert(input_adv, target)

         # A model that returns output dictionary.
         if hasattr(model, "attack_step"):
-            outputs = model.attack_step(batch_adv, batch_idx)
+            outputs = model.attack_step(batch_adv_orig, batch_idx)
         elif hasattr(model, "training_step"):
             # Disable logging if we have to reuse training_step() of the target model.
             with MonkeyPatch(model, "log", lambda *args, **kwargs: None):
-                outputs = model.training_step(batch_adv, batch_idx)
+                outputs = model.training_step(batch_adv_orig, batch_idx)
         else:
-            outputs = model(batch_adv)
+            outputs = model(batch_adv_orig)

         # FIXME: This should really be just `return outputs`. But this might require a new sequence?
         # FIXME: Everything below here should live in the model as modules.
@@ -170,33 +168,31 @@ def configure_gradient_clipping(

     @silent()
     def forward(self, *, batch: torch.Tensor | list | dict, model: Callable):
-        # Extract and transform input/target so that is convenient for Adversary.
-        input_transformed, target_transformed = self.batch_c15n(batch)
+        # Extract and canonicalize input/target so that it is convenient for Adversary.
+        input, target = self.batch_c15n(batch)

         # Canonical form of batch in the adversary's optimization loop.
-        # We only see the transformed input/target in the attack optimization loop.
+        # We only see the canonicalized input/target in the attack optimization loop.
         # The attack also needs access to the model at every iteration.
-        batch_transformed_and_model = (input_transformed, target_transformed, model)
+        batch_and_model = (input, target, model)

         # Configure and reset perturbation for current inputs
-        self.perturber.configure_perturbation(input_transformed)
+        self.perturber.configure_perturbation(input)

         # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch.
         # We use Trainer.limit_train_batches to control the number of attack iterations.
         self.attacker.fit_loop.max_epochs += 1
-        self.attacker.fit(self, train_dataloaders=cycle([batch_transformed_and_model]))
+        self.attacker.fit(self, train_dataloaders=cycle([batch_and_model]))

-        # Get the transformed input_adv for enforcer checking.
-        perturbation = self.perturber(input=input_transformed, target=target_transformed)
-        input_adv_transformed = self.composer(
-            perturbation, input=input_transformed, target=target_transformed
-        )
-        self.enforcer(input_adv_transformed, input=input_transformed, target=target_transformed)
+        # Get the canonicalized input_adv for enforcer checking.
+        perturbation = self.perturber(input=input, target=target)
+        input_adv = self.composer(perturbation, input=input, target=target)
+        self.enforcer(input_adv, input=input, target=target)

         # Revert to the original format of batch.
- batch_adv = self.batch_c15n.revert(input_adv_transformed, target_transformed) + batch_adv_orig = self.batch_c15n.revert(input_adv, target) - return batch_adv + return batch_adv_orig @property def attacker(self): From 2559665bbaa343f8d86184f7405726a0066d5e41 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 28 Aug 2023 12:28:30 -0700 Subject: [PATCH 66/74] Rename transforms in batch_c15n/transform. --- .../transform/{to_pixel_range_255.yaml => range_1to255.yaml} | 0 .../transform/{to_pixel_range_1.yaml => range_255to1.yaml} | 0 mart/configs/attack/data_coco.yaml | 4 ++-- mart/configs/attack/data_list_pixel01.yaml | 4 ++-- 4 files changed, 4 insertions(+), 4 deletions(-) rename mart/configs/attack/batch_c15n/transform/{to_pixel_range_255.yaml => range_1to255.yaml} (100%) rename mart/configs/attack/batch_c15n/transform/{to_pixel_range_1.yaml => range_255to1.yaml} (100%) diff --git a/mart/configs/attack/batch_c15n/transform/to_pixel_range_255.yaml b/mart/configs/attack/batch_c15n/transform/range_1to255.yaml similarity index 100% rename from mart/configs/attack/batch_c15n/transform/to_pixel_range_255.yaml rename to mart/configs/attack/batch_c15n/transform/range_1to255.yaml diff --git a/mart/configs/attack/batch_c15n/transform/to_pixel_range_1.yaml b/mart/configs/attack/batch_c15n/transform/range_255to1.yaml similarity index 100% rename from mart/configs/attack/batch_c15n/transform/to_pixel_range_1.yaml rename to mart/configs/attack/batch_c15n/transform/range_255to1.yaml diff --git a/mart/configs/attack/data_coco.yaml b/mart/configs/attack/data_coco.yaml index 0fa5f396..5b0a5655 100644 --- a/mart/configs/attack/data_coco.yaml +++ b/mart/configs/attack/data_coco.yaml @@ -1,7 +1,7 @@ defaults: - batch_c15n: tuple - - batch_c15n/transform@batch_c15n.transform.transforms: to_pixel_range_255 - - batch_c15n/transform@batch_c15n.untransform.transforms: to_pixel_range_1 + - batch_c15n/transform@batch_c15n.transform.transforms: range_1to255 + - batch_c15n/transform@batch_c15n.untransform.transforms: range_255to1 batch_c15n: transform: diff --git a/mart/configs/attack/data_list_pixel01.yaml b/mart/configs/attack/data_list_pixel01.yaml index f1f53c51..77ad76e5 100644 --- a/mart/configs/attack/data_list_pixel01.yaml +++ b/mart/configs/attack/data_list_pixel01.yaml @@ -1,4 +1,4 @@ defaults: - batch_c15n: list - - batch_c15n/transform: to_pixel_range_255 - - batch_c15n/transform@batch_c15n.untransform: to_pixel_range_1 + - batch_c15n/transform: range_1to255 + - batch_c15n/transform@batch_c15n.untransform: range_255to1 From fb095759d01a158d7570263eeea5d4e33464f4dc Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 28 Aug 2023 15:13:21 -0700 Subject: [PATCH 67/74] Comment. --- mart/configs/attack/data_coco.yaml | 1 + mart/configs/attack/data_list_pixel01.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/mart/configs/attack/data_coco.yaml b/mart/configs/attack/data_coco.yaml index 5b0a5655..63e21b56 100644 --- a/mart/configs/attack/data_coco.yaml +++ b/mart/configs/attack/data_coco.yaml @@ -1,3 +1,4 @@ +# Scale pixel input in the range of [0,1] to [0,255] for Adversary. defaults: - batch_c15n: tuple - batch_c15n/transform@batch_c15n.transform.transforms: range_1to255 diff --git a/mart/configs/attack/data_list_pixel01.yaml b/mart/configs/attack/data_list_pixel01.yaml index 77ad76e5..ed3e5be4 100644 --- a/mart/configs/attack/data_list_pixel01.yaml +++ b/mart/configs/attack/data_list_pixel01.yaml @@ -1,3 +1,4 @@ +# Scale pixel input in the range of [0,1] to [0,255] for Adversary. 
defaults: - batch_c15n: list - batch_c15n/transform: range_1to255 From ad4b2d59ec74e7b43bd6c476fd21b8198e901053 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 31 Aug 2023 13:56:19 -0700 Subject: [PATCH 68/74] Fix configs. --- mart/configs/attack/classification_fgsm_linf.yaml | 1 - mart/configs/attack/classification_pgd_linf.yaml | 1 - mart/configs/attack/object_detection_mask_adversary.yaml | 1 - mart/configs/attack/object_detection_mask_adversary_missed.yaml | 1 - 4 files changed, 4 deletions(-) diff --git a/mart/configs/attack/classification_fgsm_linf.yaml b/mart/configs/attack/classification_fgsm_linf.yaml index 0b51df00..d22a1fb1 100644 --- a/mart/configs/attack/classification_fgsm_linf.yaml +++ b/mart/configs/attack/classification_fgsm_linf.yaml @@ -7,7 +7,6 @@ defaults: - gradient_modifier: sign - gain: cross_entropy - objective: misclassification - - batch_c15n: list eps: ??? max_iters: 1 diff --git a/mart/configs/attack/classification_pgd_linf.yaml b/mart/configs/attack/classification_pgd_linf.yaml index 02569e77..730ff556 100644 --- a/mart/configs/attack/classification_pgd_linf.yaml +++ b/mart/configs/attack/classification_pgd_linf.yaml @@ -7,7 +7,6 @@ defaults: - gradient_modifier: sign - gain: cross_entropy - objective: misclassification - - batch_c15n: list eps: ??? lr: ??? diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index feb1e31e..427157b8 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -8,7 +8,6 @@ defaults: - gradient_modifier: sign - gain: rcnn_training_loss - objective: zero_ap - - batch_c15n: tuple max_iters: ??? lr: ??? diff --git a/mart/configs/attack/object_detection_mask_adversary_missed.yaml b/mart/configs/attack/object_detection_mask_adversary_missed.yaml index 92030076..fcdd829d 100644 --- a/mart/configs/attack/object_detection_mask_adversary_missed.yaml +++ b/mart/configs/attack/object_detection_mask_adversary_missed.yaml @@ -8,7 +8,6 @@ defaults: - gradient_modifier: sign - gain: rcnn_class_background - objective: object_detection_missed - - batch_c15n: tuple max_iters: ??? lr: ??? From 79608ff6de083987eeee8140f05ef90e4cc6892a Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 31 Aug 2023 14:02:45 -0700 Subject: [PATCH 69/74] Remove preprocessor in torchvision_object_detection. --- mart/configs/model/torchvision_faster_rcnn.yaml | 15 +++------------ .../model/torchvision_object_detection.yaml | 8 -------- mart/configs/model/torchvision_retinanet.yaml | 9 +++------ 3 files changed, 6 insertions(+), 26 deletions(-) diff --git a/mart/configs/model/torchvision_faster_rcnn.yaml b/mart/configs/model/torchvision_faster_rcnn.yaml index 65200579..7e488183 100644 --- a/mart/configs/model/torchvision_faster_rcnn.yaml +++ b/mart/configs/model/torchvision_faster_rcnn.yaml @@ -11,12 +11,9 @@ training_step_log: training_sequence: seq010: - preprocessor: ["input"] + losses_and_detections: ["input", "target"] seq020: - losses_and_detections: ["preprocessor", "target"] - - seq030: loss: # Sum up the losses. 
[ @@ -28,17 +25,11 @@ training_sequence: validation_sequence: seq010: - preprocessor: ["input"] - - seq020: - losses_and_detections: ["preprocessor", "target"] + losses_and_detections: ["input", "target"] test_sequence: seq010: - preprocessor: ["input"] - - seq020: - losses_and_detections: ["preprocessor", "target"] + losses_and_detections: ["input", "target"] modules: losses_and_detections: diff --git a/mart/configs/model/torchvision_object_detection.yaml b/mart/configs/model/torchvision_object_detection.yaml index f6a22898..eb04b648 100644 --- a/mart/configs/model/torchvision_object_detection.yaml +++ b/mart/configs/model/torchvision_object_detection.yaml @@ -12,14 +12,6 @@ test_sequence: ??? output_preds_key: "losses_and_detections.eval" modules: - preprocessor: - _target_: mart.transforms.TupleTransforms - transforms: - # no-op - _target_: torchvision.transforms.Normalize - mean: 0 - std: 1 - losses_and_detections: # Return losses in the training mode and predictions in the eval mode in one pass. _target_: mart.models.DualMode diff --git a/mart/configs/model/torchvision_retinanet.yaml b/mart/configs/model/torchvision_retinanet.yaml index 34b66945..a261a0e2 100644 --- a/mart/configs/model/torchvision_retinanet.yaml +++ b/mart/configs/model/torchvision_retinanet.yaml @@ -8,8 +8,7 @@ training_step_log: loss_box_reg: "losses_and_detections.training.bbox_regression" training_sequence: - - preprocessor: ["input"] - - losses_and_detections: ["preprocessor", "target"] + - losses_and_detections: ["input", "target"] - loss: # Sum up the losses. [ @@ -18,12 +17,10 @@ training_sequence: ] validation_sequence: - - preprocessor: ["input"] - - losses_and_detections: ["preprocessor", "target"] + - losses_and_detections: ["input", "target"] test_sequence: - - preprocessor: ["input"] - - losses_and_detections: ["preprocessor", "target"] + - losses_and_detections: ["input", "target"] modules: losses_and_detections: From d09524a0320c430c3041297fd24b644556981f35 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 1 Sep 2023 15:30:07 -0700 Subject: [PATCH 70/74] Move configs/attack/batch_c15n to configs/batch_c15n --- mart/configs/attack/classification_fgsm_linf.yaml | 1 - mart/configs/attack/classification_pgd_linf.yaml | 1 - mart/configs/attack/data_coco.yaml | 12 ------------ mart/configs/attack/data_list_pixel01.yaml | 5 ----- .../attack/object_detection_mask_adversary.yaml | 1 - .../object_detection_mask_adversary_missed.yaml | 1 - mart/configs/batch_c15n/data_coco.yaml | 11 +++++++++++ mart/configs/batch_c15n/data_list_pixel01.yaml | 5 +++++ .../batch_c15n/transform/range_1to255.yaml | 0 .../batch_c15n/transform/range_255to1.yaml | 0 mart/configs/experiment/CIFAR10_CNN_Adv.yaml | 1 + .../experiment/COCO_TorchvisionFasterRCNN_Adv.yaml | 1 + 12 files changed, 18 insertions(+), 21 deletions(-) delete mode 100644 mart/configs/attack/data_coco.yaml delete mode 100644 mart/configs/attack/data_list_pixel01.yaml create mode 100644 mart/configs/batch_c15n/data_coco.yaml create mode 100644 mart/configs/batch_c15n/data_list_pixel01.yaml rename mart/configs/{attack => }/batch_c15n/transform/range_1to255.yaml (100%) rename mart/configs/{attack => }/batch_c15n/transform/range_255to1.yaml (100%) diff --git a/mart/configs/attack/classification_fgsm_linf.yaml b/mart/configs/attack/classification_fgsm_linf.yaml index d22a1fb1..45465429 100644 --- a/mart/configs/attack/classification_fgsm_linf.yaml +++ b/mart/configs/attack/classification_fgsm_linf.yaml @@ -2,7 +2,6 @@ defaults: - adversary - fgm - linf - - 
data_list_pixel01 - composer: additive - gradient_modifier: sign - gain: cross_entropy diff --git a/mart/configs/attack/classification_pgd_linf.yaml b/mart/configs/attack/classification_pgd_linf.yaml index 730ff556..b2e8ddfd 100644 --- a/mart/configs/attack/classification_pgd_linf.yaml +++ b/mart/configs/attack/classification_pgd_linf.yaml @@ -2,7 +2,6 @@ defaults: - adversary - pgd - linf - - data_list_pixel01 - composer: additive - gradient_modifier: sign - gain: cross_entropy diff --git a/mart/configs/attack/data_coco.yaml b/mart/configs/attack/data_coco.yaml deleted file mode 100644 index 63e21b56..00000000 --- a/mart/configs/attack/data_coco.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# Scale pixel input in the range of [0,1] to [0,255] for Adversary. -defaults: - - batch_c15n: tuple - - batch_c15n/transform@batch_c15n.transform.transforms: range_1to255 - - batch_c15n/transform@batch_c15n.untransform.transforms: range_255to1 - -batch_c15n: - transform: - _target_: mart.transforms.TupleTransforms - - untransform: - _target_: mart.transforms.TupleTransforms diff --git a/mart/configs/attack/data_list_pixel01.yaml b/mart/configs/attack/data_list_pixel01.yaml deleted file mode 100644 index ed3e5be4..00000000 --- a/mart/configs/attack/data_list_pixel01.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# Scale pixel input in the range of [0,1] to [0,255] for Adversary. -defaults: - - batch_c15n: list - - batch_c15n/transform: range_1to255 - - batch_c15n/transform@batch_c15n.untransform: range_255to1 diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index 427157b8..0e42cb61 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -2,7 +2,6 @@ defaults: - adversary - gradient_ascent - mask - - data_coco - perturber/initializer: constant - composer: overlay - gradient_modifier: sign diff --git a/mart/configs/attack/object_detection_mask_adversary_missed.yaml b/mart/configs/attack/object_detection_mask_adversary_missed.yaml index fcdd829d..4f5fc039 100644 --- a/mart/configs/attack/object_detection_mask_adversary_missed.yaml +++ b/mart/configs/attack/object_detection_mask_adversary_missed.yaml @@ -2,7 +2,6 @@ defaults: - adversary - gradient_ascent - mask - - data_coco - perturber/initializer: constant - composer: overlay - gradient_modifier: sign diff --git a/mart/configs/batch_c15n/data_coco.yaml b/mart/configs/batch_c15n/data_coco.yaml new file mode 100644 index 00000000..2bb865ef --- /dev/null +++ b/mart/configs/batch_c15n/data_coco.yaml @@ -0,0 +1,11 @@ +# Scale pixel input in the range of [0,1] to [0,255] for Adversary. +defaults: + - tuple + - transform@transform.transforms: range_1to255 + - transform@untransform.transforms: range_255to1 + +transform: + _target_: mart.transforms.TupleTransforms + +untransform: + _target_: mart.transforms.TupleTransforms diff --git a/mart/configs/batch_c15n/data_list_pixel01.yaml b/mart/configs/batch_c15n/data_list_pixel01.yaml new file mode 100644 index 00000000..db7ce0fd --- /dev/null +++ b/mart/configs/batch_c15n/data_list_pixel01.yaml @@ -0,0 +1,5 @@ +# Scale pixel input in the range of [0,1] to [0,255] for Adversary. 
+defaults: + - list + - transform: range_1to255 + - transform@untransform: range_255to1 diff --git a/mart/configs/attack/batch_c15n/transform/range_1to255.yaml b/mart/configs/batch_c15n/transform/range_1to255.yaml similarity index 100% rename from mart/configs/attack/batch_c15n/transform/range_1to255.yaml rename to mart/configs/batch_c15n/transform/range_1to255.yaml diff --git a/mart/configs/attack/batch_c15n/transform/range_255to1.yaml b/mart/configs/batch_c15n/transform/range_255to1.yaml similarity index 100% rename from mart/configs/attack/batch_c15n/transform/range_255to1.yaml rename to mart/configs/batch_c15n/transform/range_255to1.yaml diff --git a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml index 72e119da..9aa80563 100644 --- a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml +++ b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml @@ -3,6 +3,7 @@ defaults: - /attack@callbacks.adversary_connector.train_adversary: classification_fgsm_linf - /attack@callbacks.adversary_connector.test_adversary: classification_pgd_linf + - override /batch_c15n@callbacks.adversary_connector.batch_c15n: data_list_pixel01 - override /datamodule: cifar10 - override /model: classifier_cifar10_cnn - override /metric: accuracy diff --git a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml index 7ff99caf..647a4897 100644 --- a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml +++ b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml @@ -3,6 +3,7 @@ defaults: - COCO_TorchvisionFasterRCNN - /attack@callbacks.adversary_connector.test_adversary: object_detection_mask_adversary + - override /batch_c15n@callbacks.adversary_connector.batch_c15n: data_coco - override /datamodule: coco_perturbable_mask - override /callbacks: [model_checkpoint, lr_monitor, adversary_connector] From 2b0ef2b7754875a690fe3ffe7d509238c1c3a5c7 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 1 Sep 2023 15:45:54 -0700 Subject: [PATCH 71/74] Rename config files. --- mart/configs/batch_c15n/data_list_pixel01.yaml | 5 ----- mart/configs/batch_c15n/input_tensor_float01.yaml | 5 +++++ .../batch_c15n/{data_coco.yaml => input_tuple_float01.yaml} | 4 ++-- .../transform/{range_255to1.yaml => divided_by_255.yaml} | 0 .../{range_1to255.yaml => times_255_and_round.yaml} | 0 mart/configs/experiment/CIFAR10_CNN_Adv.yaml | 2 +- mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) delete mode 100644 mart/configs/batch_c15n/data_list_pixel01.yaml create mode 100644 mart/configs/batch_c15n/input_tensor_float01.yaml rename mart/configs/batch_c15n/{data_coco.yaml => input_tuple_float01.yaml} (64%) rename mart/configs/batch_c15n/transform/{range_255to1.yaml => divided_by_255.yaml} (100%) rename mart/configs/batch_c15n/transform/{range_1to255.yaml => times_255_and_round.yaml} (100%) diff --git a/mart/configs/batch_c15n/data_list_pixel01.yaml b/mart/configs/batch_c15n/data_list_pixel01.yaml deleted file mode 100644 index db7ce0fd..00000000 --- a/mart/configs/batch_c15n/data_list_pixel01.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# Scale pixel input in the range of [0,1] to [0,255] for Adversary. 
-defaults: - - list - - transform: range_1to255 - - transform@untransform: range_255to1 diff --git a/mart/configs/batch_c15n/input_tensor_float01.yaml b/mart/configs/batch_c15n/input_tensor_float01.yaml new file mode 100644 index 00000000..972a1e4f --- /dev/null +++ b/mart/configs/batch_c15n/input_tensor_float01.yaml @@ -0,0 +1,5 @@ +# Scale pixel input in the range of [0,1] to [0,255] for Adversary. +defaults: + - tuple + - transform: times_255_and_round + - transform@untransform: divided_by_255 diff --git a/mart/configs/batch_c15n/data_coco.yaml b/mart/configs/batch_c15n/input_tuple_float01.yaml similarity index 64% rename from mart/configs/batch_c15n/data_coco.yaml rename to mart/configs/batch_c15n/input_tuple_float01.yaml index 2bb865ef..49dbe4c2 100644 --- a/mart/configs/batch_c15n/data_coco.yaml +++ b/mart/configs/batch_c15n/input_tuple_float01.yaml @@ -1,8 +1,8 @@ # Scale pixel input in the range of [0,1] to [0,255] for Adversary. defaults: - tuple - - transform@transform.transforms: range_1to255 - - transform@untransform.transforms: range_255to1 + - transform@transform.transforms: times_255_and_round + - transform@untransform.transforms: divided_by_255 transform: _target_: mart.transforms.TupleTransforms diff --git a/mart/configs/batch_c15n/transform/range_255to1.yaml b/mart/configs/batch_c15n/transform/divided_by_255.yaml similarity index 100% rename from mart/configs/batch_c15n/transform/range_255to1.yaml rename to mart/configs/batch_c15n/transform/divided_by_255.yaml diff --git a/mart/configs/batch_c15n/transform/range_1to255.yaml b/mart/configs/batch_c15n/transform/times_255_and_round.yaml similarity index 100% rename from mart/configs/batch_c15n/transform/range_1to255.yaml rename to mart/configs/batch_c15n/transform/times_255_and_round.yaml diff --git a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml index 9aa80563..a867507e 100644 --- a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml +++ b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml @@ -3,7 +3,7 @@ defaults: - /attack@callbacks.adversary_connector.train_adversary: classification_fgsm_linf - /attack@callbacks.adversary_connector.test_adversary: classification_pgd_linf - - override /batch_c15n@callbacks.adversary_connector.batch_c15n: data_list_pixel01 + - override /batch_c15n@callbacks.adversary_connector.batch_c15n: input_tensor_float01 - override /datamodule: cifar10 - override /model: classifier_cifar10_cnn - override /metric: accuracy diff --git a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml index 647a4897..cd6a3894 100644 --- a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml +++ b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml @@ -3,7 +3,7 @@ defaults: - COCO_TorchvisionFasterRCNN - /attack@callbacks.adversary_connector.test_adversary: object_detection_mask_adversary - - override /batch_c15n@callbacks.adversary_connector.batch_c15n: data_coco + - override /batch_c15n@callbacks.adversary_connector.batch_c15n: input_tuple_float01 - override /datamodule: coco_perturbable_mask - override /callbacks: [model_checkpoint, lr_monitor, adversary_connector] From d37a41454cb4d2ccca7506e3b79f86762850fbf0 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 1 Sep 2023 16:01:52 -0700 Subject: [PATCH 72/74] Update fiftyone configs. 
--- mart/configs/datamodule/fiftyone.yaml | 30 ------------------- .../datamodule/fiftyone_perturbable_mask.yaml | 30 ------------------- 2 files changed, 60 deletions(-) diff --git a/mart/configs/datamodule/fiftyone.yaml b/mart/configs/datamodule/fiftyone.yaml index 1fbdd9a6..7c45687c 100644 --- a/mart/configs/datamodule/fiftyone.yaml +++ b/mart/configs/datamodule/fiftyone.yaml @@ -14,16 +14,6 @@ train_dataset: - _target_: mart.transforms.ConvertCocoPolysToMask - _target_: mart.transforms.RandomHorizontalFlip p: 0.5 - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 val_dataset: _target_: mart.datamodules.fiftyone.FiftyOneDataset @@ -36,16 +26,6 @@ val_dataset: transforms: - _target_: torchvision.transforms.ToTensor - _target_: mart.transforms.ConvertCocoPolysToMask - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 test_dataset: _target_: mart.datamodules.fiftyone.FiftyOneDataset @@ -58,16 +38,6 @@ test_dataset: transforms: - _target_: torchvision.transforms.ToTensor - _target_: mart.transforms.ConvertCocoPolysToMask - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 num_workers: 2 collate_fn: diff --git a/mart/configs/datamodule/fiftyone_perturbable_mask.yaml b/mart/configs/datamodule/fiftyone_perturbable_mask.yaml index 69a7f622..1c6889bd 100644 --- a/mart/configs/datamodule/fiftyone_perturbable_mask.yaml +++ b/mart/configs/datamodule/fiftyone_perturbable_mask.yaml @@ -16,16 +16,6 @@ train_dataset: - _target_: mart.transforms.RandomHorizontalFlip p: 0.5 - _target_: mart.transforms.ConvertInstanceSegmentationToPerturbable - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 val_dataset: _target_: mart.datamodules.fiftyone.FiftyOneDataset @@ -40,16 +30,6 @@ val_dataset: # ConvertCocoPolysToMask must be prior to ConvertInstanceSegmentationToPerturbable. - _target_: mart.transforms.ConvertCocoPolysToMask - _target_: mart.transforms.ConvertInstanceSegmentationToPerturbable - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 test_dataset: _target_: mart.datamodules.fiftyone.FiftyOneDataset @@ -64,13 +44,3 @@ test_dataset: # ConvertCocoPolysToMask must be prior to ConvertInstanceSegmentationToPerturbable. 
- _target_: mart.transforms.ConvertCocoPolysToMask - _target_: mart.transforms.ConvertInstanceSegmentationToPerturbable - - _target_: mart.transforms.Denormalize - center: 0 - scale: 255 - - _target_: torch.fake_quantize_per_tensor_affine - _partial_: true - # (x/1+0).round().clamp(0, 255) * 1 - scale: 1 - zero_point: 0 - quant_min: 0 - quant_max: 255 From b67b0716b59f653130de2b32b00efc7557f4601e Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 8 Jan 2024 10:31:31 -0800 Subject: [PATCH 73/74] Mock Adversary.forward(). --- tests/test_visualizer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_visualizer.py b/tests/test_visualizer.py index 1b6d91bf..a0cf6bce 100644 --- a/tests/test_visualizer.py +++ b/tests/test_visualizer.py @@ -22,9 +22,9 @@ def test_visualizer_run_end(input_data, target_data, perturbation, tmp_path): target_list = [target_data] # simulate an addition perturbation - def perturb(input): + def perturb(input, target): result = [sample + perturbation_255 for sample in input] - return result + return result, target adversary = Mock(spec=Adversary, side_effect=perturb) trainer = Mock() From 6a1eca057a480e65b45246d2499925f0d4fedcfa Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 8 Jan 2024 10:45:34 -0800 Subject: [PATCH 74/74] Upgrade pre-commit flake8 to 6.0.0 to work with newer importlib_metadata. --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ac184d67..2fef7a75 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -47,7 +47,7 @@ repos: # python check (PEP8), programming errors and code complexity - repo: https://github.com/PyCQA/flake8 - rev: 4.0.1 + rev: 6.0.0 hooks: - id: flake8 # ignore E203 because black is used for formatting.
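
For readers following the batch canonicalization (c15n) changes in the patches above, the round trip they converge on can be summarized outside of MART: the attack optimizes in a canonical [0, 255] space (the `times_255_and_round` transform), while the target model keeps receiving batches in their original [0, 1] format (`divided_by_255`, applied by `batch_c15n.revert`). The sketch below is a minimal, self-contained approximation in plain PyTorch, not MART code; `RoundTripC15n`, `fgsm_like_step`, and `toy_model` are illustrative names that do not exist in the repository, and the real `TupleBatchC15n` handles targets and dict batches that this toy version ignores.

import torch


class RoundTripC15n:
    """Canonicalize an (input, target) tuple: [0, 1] floats in, [0, 255] floats out, and back."""

    def __call__(self, batch):
        input, target = batch
        # transform: roughly what the times_255_and_round config selects.
        return (input * 255).round(), target

    def revert(self, input_c15n, target):
        # untransform: roughly what the divided_by_255 config selects.
        return input_c15n / 255, target


def fgsm_like_step(input_c15n, target, model, eps=2.0):
    # One signed-gradient step taken in the canonical [0, 255] space.
    perturbation = torch.zeros_like(input_c15n, requires_grad=True)
    logits = model((input_c15n + perturbation) / 255)  # the toy model still expects [0, 1]
    loss = torch.nn.functional.cross_entropy(logits, target)
    loss.backward()
    with torch.no_grad():
        return (input_c15n + eps * perturbation.grad.sign()).clamp(0, 255)


if __name__ == "__main__":
    toy_model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 8 * 8, 10))
    batch = (torch.rand(4, 3, 8, 8), torch.randint(0, 10, (4,)))

    c15n = RoundTripC15n()
    input_c15n, target = c15n(batch)  # original -> canonical
    input_adv = fgsm_like_step(input_c15n, target, toy_model)
    input_orig, target_orig = c15n.revert(input_adv, target)  # canonical -> original
    print(input_orig.min().item(), input_orig.max().item())  # perturbed input stays within [0, 1]

Because the enforcer and the perturbation both live in the canonical space, keeping the revert step at the very end (as the adversary's forward() does after patch 65) is what lets the same attack config serve models with different input ranges.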