Upload 6 files
Browse files- README.md +81 -3
- adv_train.py +240 -0
- attacks_per_round.py +107 -0
- gitattributes +34 -0
- main.py +196 -0
- utils.py +239 -0
README.md
CHANGED
|
@@ -1,3 +1,81 @@
|
|
| 1 |
-
---
|
| 2 |
-
license:
|
| 3 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: mit
|
| 3 |
+
datasets:
|
| 4 |
+
- imagenet-1k
|
| 5 |
+
metrics:
|
| 6 |
+
- accuracy
|
| 7 |
+
tags:
|
| 8 |
+
- adversarial machine learning
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
## RobArch: Designing Robust Architectures against Adversarial Attacks
|
| 14 |
+
*ShengYun Peng, Weilin Xu, Cory Cornelius, Kevin Li, Rahul Duggal, Duen Horng Chau, Jason Martin*
|
| 15 |
+
|
| 16 |
+
Check https://github.com/ShengYun-Peng/RobArch for the complete code.
|
| 17 |
+
|
| 18 |
+
### Abstract
|
| 19 |
+
Adversarial Training is the most effective approach for improving the robustness of Deep Neural Networks (DNNs). However, compared to the large body of research in optimizing the adversarial training process, there are few investigations into how architecture components affect robustness, and they rarely constrain model capacity. Thus, it is unclear where robustness precisely comes from. In this work, we present the first large-scale systematic study on the robustness of DNN architecture components under fixed parameter budgets. Through our investigation, we distill 18 actionable robust network design guidelines that empower model developers to gain deep insights. We demonstrate these guidelines' effectiveness by introducing the novel Robust Architecture (RobArch) model that instantiates the guidelines to build a family of top-performing models across parameter capacities against strong adversarial attacks. RobArch achieves the new state-of-the-art AutoAttack accuracy on the RobustBench ImageNet leaderboard.
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
### Prerequisites
|
| 23 |
+
1. Register Weights & Biases [account](https://wandb.ai/site)
|
| 24 |
+
2. Prepare ImageNet via [Fast AT - Installation step 3 & 4](https://github.com/locuslab/fast_adversarial/tree/master/ImageNet)
|
| 25 |
+
> Run step 4 only if you want to use Fast-AT.
|
| 26 |
+
3. Set up venv:
|
| 27 |
+
```bash
|
| 28 |
+
make .venv_done
|
| 29 |
+
```
|
| 30 |
+
|
| 31 |
+
### Training
|
| 32 |
+
Fast-AT is much faster than standard PGD AT. For RobArch-S, Fast-AT takes ~1.5 days on 2 Nvidia A100s, whereas standard PGD AT takes ~5 days on 4 Nvidia A100s.
|
| 33 |
+
#### Torchvision models - Fast AT (e.g., ResNet-50)
|
| 34 |
+
```bash
|
| 35 |
+
make BASE=<imagenet root dir> WANDB_ACCOUNT=<name> experiments/Torch_ResNet50/.done_test_pgd
|
| 36 |
+
```
|
| 37 |
+
If you want to test other off-the-shelf models in [torchvision](https://pytorch.org/vision/stable/models.html#classification), add the model name in [MODEL.mk](MODEL.mk), and create a new make target by following other ResNets/WideResNets in [Makefile](Makefile).
|
| 38 |
+
#### RobArch - Fast AT (e.g., RobArch-S)
|
| 39 |
+
```bash
|
| 40 |
+
make BASE=<imagenet root dir> WANDB_ACCOUNT=<name> experiments/RobArch_S/.done_test_pgd
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
#### RobArch - Standard PGD AT (e.g., RobArch-S)
|
| 44 |
+
```bash
|
| 45 |
+
# Training
|
| 46 |
+
make BASE=<imagenet root dir> WANDB_ACCOUNT=<name> experiments/PGDAT_RobArch_S/.done_train
|
| 47 |
+
|
| 48 |
+
# Evaluation on PGD
|
| 49 |
+
make BASE=<imagenet root dir> WANDB_ACCOUNT=<name> experiments/PGDAT_RobArch_S/.done_test_pgd
|
| 50 |
+
|
| 51 |
+
# Evaluation on AutoAttack
|
| 52 |
+
make BASE=<imagenet root dir> WANDB_ACCOUNT=<name> experiments/PGDAT_RobArch_S/.done_test_aa
|
| 53 |
+
|
| 54 |
+
# Pretrained models evaluated on AutoAttack
|
| 55 |
+
make BASE=<imagenet root dir> WANDB_ACCOUNT=<name> experiments/PGDAT_RobArch_S/.done_test_pretrained
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
### Pretrained models
|
| 60 |
+
- ImageNet $\ell_\infty$-norm
|
| 61 |
+
|
| 62 |
+
| Architecture | #Param | Natural | AutoAttack | PGD10-4 | PGD50-4 | PGD100-4 | PGD100-2 | PGD100-8 |
|
| 63 |
+
| :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: |
|
| 64 |
+
| [RobArch-S](https://huggingface.co/poloclub/RobArch/resolve/main/pretrained/robarch_s.pt) | 26M | 70.17% | 44.14% | 48.19% | 47.78% | 47.77% | 60.06% | 21.77% |
|
| 65 |
+
| [RobArch-M](https://huggingface.co/poloclub/RobArch/resolve/main/pretrained/robarch_m.pt) | 46M | 71.88% | 46.26% | 49.84% | 49.32% | 49.30% | 61.89% | 23.01% |
|
| 66 |
+
| [RobArch-L](https://huggingface.co/poloclub/RobArch/resolve/main/pretrained/robarch_l.pt) | 104M | 73.44% | 48.94% | 51.72% | 51.04% | 51.03% | 63.49% | 25.31% |
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
### Citation
|
| 71 |
+
|
| 72 |
+
```bibtex
|
| 73 |
+
@misc{peng2023robarch,
|
| 74 |
+
title={RobArch: Designing Robust Architectures against Adversarial Attacks},
|
| 75 |
+
author={ShengYun Peng and Weilin Xu and Cory Cornelius and Kevin Li and Rahul Duggal and Duen Horng Chau and Jason Martin},
|
| 76 |
+
year={2023},
|
| 77 |
+
eprint={2301.03110},
|
| 78 |
+
archivePrefix={arXiv},
|
| 79 |
+
primaryClass={cs.CV}
|
| 80 |
+
}
|
| 81 |
+
```
|
adv_train.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Callable
|
| 2 |
+
import torch
|
| 3 |
+
from torch.distributions import Uniform
|
| 4 |
+
import time
|
| 5 |
+
from torch.utils.data import DataLoader
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
import numpy as np
|
| 8 |
+
from omegaconf import DictConfig
|
| 9 |
+
import logging
|
| 10 |
+
import wandb
|
| 11 |
+
from apex import amp
|
| 12 |
+
from advertorch.attacks import LinfPGDAttack
|
| 13 |
+
|
| 14 |
+
from robustarch.utils import AverageMeter, accuracy, pad_str
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def train(model: nn.Module, train_loader: DataLoader, optimizer: torch.optim.Optimizer, criterion: nn.Module, lr_schedule: Any, epoch: int, device: torch.device, cfg: DictConfig, log: logging.Logger, eps_schedule: Callable = None):
    """Run one epoch of adversarial training.

    Normalization belongs to the model (it should be the first step of the
    model's forward pass), so all inputs here stay in [0, 1].

    Args:
        model: network under training; inputs are expected in [0, 1].
        train_loader: training batches of (image, label).
        optimizer: optimizer; wrapped by apex amp when cfg.train_test.half.
        lr_schedule: callable t -> lr in FAT mode, a scheduler object with
            .step(epoch) otherwise.
        epoch: current training epoch.
        device: device batches are moved to.
        cfg: hydra config (attack, train_test, dataset, visualization keys).
        log: logger for per-batch progress lines.
        eps_schedule: optional callable epoch -> eps that overrides the fixed
            training epsilon (non-FAT mode only).
    """
    # initialize all meters
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    model.train()  # train mode
    mark_time = time.time()
    # epsilons are configured in pixel units; rescale to [0, 1] inputs
    train_eps = 1.0 * cfg.attack.train.eps / cfg.dataset.max_color_value
    test_eps = 1.0 * cfg.attack.test.eps / cfg.dataset.max_color_value
    assert train_eps <= 1.0
    assert test_eps <= 1.0

    if cfg.train_test.mode.lower() == "fat":
        # Fast AT: random start drawn uniformly from the (test) eps ball
        sampler = Uniform(low=-test_eps, high=test_eps)
    else:
        if eps_schedule:
            log.info(pad_str(f" Train with eps={eps_schedule(epoch)} "))
        else:
            log.info(pad_str(f" Train with eps={train_eps} "))
        if cfg.attack.train.norm == "linf":
            train_gamma = 1.0 * cfg.attack.train.gamma / cfg.dataset.max_color_value
            assert train_gamma <= 1.0
            adversary = LinfPGDAttack(
                predict=model,
                loss_fn=criterion,
                eps=eps_schedule(epoch) if eps_schedule else train_eps,
                nb_iter=cfg.attack.train.step,
                eps_iter=train_gamma,
                rand_init=cfg.attack.train.random_init,
                clip_min=0.0, clip_max=1.0, targeted=False
            )
        else:
            raise NotImplementedError

    total_batch = len(train_loader)  # hoisted: invariant across batches
    for i, (input, target) in enumerate(train_loader):
        input = input.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)

        N, C, W, H = input.shape
        assert C == 3

        ## inner maximization (move normalization into the model)
        if cfg.train_test.mode.lower() == "fat":
            batch_shape = (N, C, W, H)
            if cfg.attack.train.random_init:
                init_noise = sampler.sample(batch_shape).to(device)
            else:
                init_noise = torch.zeros(batch_shape).to(device)

            # update lr only for FAT (cyclic schedule evaluated per batch)
            lr = lr_schedule(epoch + (i + 1) / total_batch)
            for param_group in optimizer.param_groups:
                param_group["lr"] = lr

            # fgsm for FAT
            batch_noise = init_noise.clone().detach().requires_grad_(True)
            input_adv = input + batch_noise
            input_adv.clamp_(0., 1.)
            output = model(input_adv)

            loss = criterion(output, target)
            if cfg.train_test.half:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            # 1-iter pgd attack
            fgsm_noise = torch.sign(batch_noise.grad) * train_eps
            init_noise += fgsm_noise.data
            init_noise.clamp_(-test_eps, test_eps)

            batch_noise = init_noise.clone().detach().requires_grad_(False)
            input_adv = input + batch_noise
            # FIX: clamp the adversarial batch (the original clamped the clean
            # `input`, leaving `input_adv` possibly outside [0, 1]).
            input_adv.clamp_(0., 1.)
        else:
            input_adv = adversary.perturb(input, target)
            lr = optimizer.param_groups[0]["lr"]

        ## outer minimization
        output = model(input_adv)
        loss = criterion(output, target)

        # optimizer
        optimizer.zero_grad()
        if cfg.train_test.half:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()

        optimizer.step()

        ## record results and elapsed time
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), N)
        top1.update(prec1, N)
        top5.update(prec5, N)
        batch_time.update(time.time() - mark_time)

        mark_time = time.time()

        # log results
        if i % cfg.train_test.print_freq == 0:
            log.info(f"Train Epoch: [{epoch}][{i}/{total_batch}] Time {batch_time.val:.3f} ({batch_time.avg:.3f}) Loss {losses.val:.4f} ({losses.avg:.4f}) Prec@1 {top1.val:.3f} ({top1.avg:.3f}) Prec@5 {top5.val:.3f} ({top5.avg:.3f}) LR {lr:.3f}")

    # update lr schedule for non-FAT
    if cfg.train_test.mode.lower() != "fat":
        lr_schedule.step(epoch + 1)

    if cfg.visualization.tool == "wandb":
        wandb.log({"Train top1 accuracy": top1.avg}, step=epoch)
        wandb.log({"Train top5 accuracy": top5.avg}, step=epoch)
        wandb.log({"Train loss": losses.avg}, step=epoch)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def test_natural(model: nn.Module, val_loader: DataLoader, criterion: nn.Module, device: torch.device, cfg: DictConfig, log: logging.Logger) -> float:
    """Evaluate clean (non-adversarial) accuracy over `val_loader`.

    Madry-style models are invoked with `with_image=False`; all others use the
    plain forward signature. Returns the average top-1 accuracy.
    """
    # meters for timing, loss, and top-1/top-5 accuracy
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    model.eval()  # eval mode
    tic = time.time()
    for i, (images, labels) in enumerate(val_loader):
        with torch.no_grad():
            images = images.to(device, non_blocking=True)
            labels = labels.to(device, non_blocking=True)

            batch_size = images.shape[0]
            logits = model(images, with_image=False) if cfg.model.model_source == "madry" else model(images)
            batch_loss = criterion(logits, labels)

            ## bookkeeping: accuracy, loss, wall-clock per batch
            prec1, prec5 = accuracy(logits, labels, topk=(1, 5))
            losses.update(batch_loss.item(), batch_size)
            top1.update(prec1, batch_size)
            top5.update(prec5, batch_size)
            batch_time.update(time.time() - tic)

            tic = time.time()

            # periodic progress line
            if i % cfg.train_test.print_freq == 0:
                log.info(f"Test: [{i}/{len(val_loader)}] Time {batch_time.val:.3f} ({batch_time.avg:.3f}) Loss {losses.val:.4f} ({losses.avg:.4f}) Prec@1 {top1.val:.3f} ({top1.avg:.3f}) Prec@5 {top5.val:.3f} ({top5.avg:.3f})")

    log.info(f"Final Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}")
    return top1.avg
|
| 177 |
+
|
| 178 |
+
def test_pgd(model: nn.Module, val_loader: DataLoader, criterion: nn.Module, device: torch.device, cfg: DictConfig, log: logging.Logger):
    """Evaluate robust accuracy under a multi-restart Linf PGD attack.

    Only the "linf" test norm is implemented. Accuracy and loss are logged;
    nothing is returned.
    """
    ## attack
    if cfg.attack.test.norm == "linf":
        adversary = LinfPGDAttack(
            model, loss_fn=criterion,
            eps=1.0 * cfg.attack.test.eps / cfg.dataset.max_color_value,
            nb_iter=cfg.attack.test.step,
            eps_iter=1.0 * cfg.attack.test.gamma / cfg.dataset.max_color_value,
            rand_init=cfg.attack.test.random_init,
            clip_min=0.0, clip_max=1.0, targeted=False
        )
    else:
        raise NotImplementedError

    ## initialize all meters
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    log.info(pad_str(f" PGD eps: {cfg.attack.test.eps}, step: {cfg.attack.test.step}, gamma: {cfg.attack.test.gamma}, restarts: {cfg.attack.test.restart} "))

    model.eval() # eval mode
    mark_time = time.time()
    for i, (input, target) in enumerate(val_loader):
        input = input.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)

        ## adversarial inputs
        # NOTE(review): this restart bookkeeping looks broken — confirm intent.
        # With restart > 1, on the first batch `output` is referenced below
        # before any assignment (NameError); on later batches it holds the
        # PREVIOUS batch's logits. Also `final_input_adv` is built here but the
        # forward pass below uses `input_adv` (last restart) instead.
        for j in range(cfg.attack.test.restart):
            input_adv = adversary.perturb(input, target)
            with torch.no_grad():
                if j == 0:
                    final_input_adv = input_adv
                else:
                    # record misclassified images
                    # NOTE(review): this overwrites already-fooled positions
                    # with the CLEAN input — presumably the adversarial example
                    # was meant to be kept; verify against the restart logic.
                    I = output.max(1)[1] != target
                    final_input_adv[I] = input[I]

        with torch.no_grad():
            N = input.shape[0]
            # madry-sourced models take an extra flag on forward
            if cfg.model.model_source == "madry":
                output = model(input_adv, with_image=False)
            else:
                output = model(input_adv)
            loss = criterion(output, target)

            ## record results and elapsed time
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), N)
            top1.update(prec1, N)
            top5.update(prec5, N)
            batch_time.update(time.time() - mark_time)
            mark_time = time.time()

        # log results
        if i % cfg.train_test.print_freq == 0:
            log.info(f"PGD Test: [{i}/{len(val_loader)}] Time {batch_time.val:.3f} ({batch_time.avg:.3f}) Loss {losses.val:.4f} ({losses.avg:.4f}) Prec@1 {top1.val:.3f} ({top1.avg:.3f}) Prec@5 {top5.val:.3f} ({top5.avg:.3f})")

    log.info(f" PGD Final Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}")
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
|
attacks_per_round.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import networkx as nx
|
| 2 |
+
import random
|
| 3 |
+
import matplotlib.pyplot as plt
|
| 4 |
+
from matplotlib.colors import Normalize, LinearSegmentedColormap
|
| 5 |
+
|
| 6 |
+
# --- network construction ---
n = 20000000  # NOTE(review): 20M nodes makes the spring layout/drawing below infeasible — confirm intended scale
m = 3  # FIX: `m` (edges attached per new node) was undefined, crashing the next line — TODO confirm intended value
G = nx.barabasi_albert_graph(n, m)

# per-node trust score; clamped to [-1, 1] by update_trust
trust = {node: 0 for node in G.nodes}
num_mainstream = 500
num_fake = 200

mainstream_nodes = random.sample(list(G.nodes), num_mainstream)
fake_nodes = random.sample(list(G.nodes), num_fake)


pos = nx.spring_layout(G, k=0.2)  # larger k spreads the layout out
|
| 18 |
+
|
| 19 |
+
# Update a node's trust score.
def update_trust(node, is_mainstream, delta_x):
    """Shift `node`'s trust by delta_x — up for mainstream information, down
    (and amplified 3.7x) for fake information — then clamp to [-1, 1]."""
    shift = delta_x if is_mainstream else -delta_x * 3.7  # fake info pushes harder
    trust[node] = max(-1, min(1, trust[node] + shift))
|
| 26 |
+
|
| 27 |
+
# Propagation: per-neighbor influence shrinks with the source's degree, and
# each transmission only succeeds with the given probability.
def propagate_info(source_node, is_mainstream, success_rate=0.8):
    """Attempt to influence every neighbor of `source_node`.

    Each neighbor is reached with probability `success_rate`; the trust shift
    per neighbor is 0.8 / (degree + 1), so hubs exert weaker per-edge influence.
    """
    nbrs = list(G.neighbors(source_node))
    delta_x = 0.8 / (len(nbrs) + 1)  # dampen high-degree sources
    for nbr in nbrs:
        if random.random() < success_rate:  # transmission succeeds?
            update_trust(nbr, is_mainstream, delta_x)
|
| 36 |
+
|
| 37 |
+
# Targeted attack: hit mainstream-media nodes and their neighbors with fake info.
def attack_targeted_mainstream(num_attack, success_rate=0.75):
    """Attack up to `num_attack` nodes drawn from the mainstream outlets and
    their immediate neighborhoods, propagating fake information from each.

    Returns the list of attacked nodes.
    """
    candidates = set()
    for outlet in mainstream_nodes:
        candidates.add(outlet)
        candidates.update(G.neighbors(outlet))

    targeted_nodes = list(candidates)
    if len(targeted_nodes) > num_attack:
        targeted_nodes = random.sample(targeted_nodes, num_attack)

    for node in targeted_nodes:
        propagate_info(node, False, success_rate)

    return targeted_nodes
|
| 53 |
+
|
| 54 |
+
def repair_network(success_rate=0.4):
    """Each mainstream outlet attempts, with probability `success_rate`, to
    push trust-restoring (mainstream) information to its neighbors."""
    for outlet in mainstream_nodes:
        if random.random() >= success_rate:
            continue  # this outlet's repair attempt failed
        propagate_info(outlet, True, success_rate)
|
| 58 |
+
|
| 59 |
+
def check_cascade_failure(threshold=0.15, accelerate_threshold=0.25):
    """Classify the network state by the share of nodes with trust < -0.5.

    Returns (is_cascading, fake_ratio, next_success_rate): the transmission
    rate escalates to 0.95 past `accelerate_threshold`, 0.85 past `threshold`,
    and stays at the 0.8 baseline otherwise.
    """
    distrusting = sum(1 for v in trust.values() if v < -0.5)
    fake_ratio = distrusting / n

    if fake_ratio > accelerate_threshold:
        return True, fake_ratio, 0.95
    elif fake_ratio > threshold:
        return True, fake_ratio, 0.85
    else:
        return False, fake_ratio, 0.8
|
| 68 |
+
|
| 69 |
+
def record_data(round_num, fake_ratio, trust_values):
    """Print the fake-information ratio for one round.

    `trust_values` is currently unused; it is kept for interface stability.
    """
    summary = f"Round {round_num}: Fake info ratio = {fake_ratio:.2f}"
    print(summary)
|
| 71 |
+
|
| 72 |
+
def visualize_trust_custom(ax, round_num):
    """Draw the network on `ax`, coloring nodes by their current trust score.

    Trust values in [-1, 1] are normalized and mapped through a custom
    purple-to-yellow colormap; node positions come from the module-level
    `pos` layout.
    """
    norm = Normalize(vmin=-1, vmax=1)
    # FIX: removed `node_colors`, an unused duplicate list of raw trust values.
    node_colors_normalized = [norm(trust[node]) for node in G.nodes]

    cmap_custom = LinearSegmentedColormap.from_list("custom_heatmap", ["#440154", "#3b0f70", "#8c2981", "#de4968", "#fba238", "#fcffa4"])
    nx.draw(G, pos, node_color=node_colors_normalized, node_size=5, cmap=cmap_custom, with_labels=False, ax=ax)

    ax.set_title(f"Round {round_num}", fontsize=10)
    ax.axis('off')
|
| 82 |
+
|
| 83 |
+
# Simulate the game between mainstream media and fake information.
def simulate_rounds_custom(num_rounds, num_attacks_per_round, initial_success_rate=0.8, collapse_threshold=0.2, accelerate_threshold=0.4):
    """Run up to `num_rounds` attack/repair rounds and plot each round's trust
    heatmap into a 4x5 grid of subplots.

    Per round: fake-info attack, repair pass, cascade check (which may raise
    the transmission success rate), visualization, and logging. Stops early
    when the fake-info ratio exceeds 0.4 (collapse).
    """
    success_rate = initial_success_rate
    fig, axes = plt.subplots(4, 5, figsize=(14, 15))
    axes = axes.flatten()

    for i in range(num_rounds):
        # FIX: the attacked-node list was bound to a local but never used.
        attack_targeted_mainstream(num_attacks_per_round, success_rate)
        repair_network()
        is_cascading, fake_ratio, new_success_rate = check_cascade_failure(collapse_threshold, accelerate_threshold)
        success_rate = new_success_rate

        visualize_trust_custom(axes[i], i + 1)
        record_data(i + 1, fake_ratio, trust)

        if is_cascading:
            print(f"Network is cascading in round {i + 1} with fake info ratio {fake_ratio:.2f}. Transmission rate increased to {success_rate}.")
        if fake_ratio > 0.4:
            print(f"Network collapsed in round {i + 1} with fake info ratio {fake_ratio:.2f}")
            break
    plt.tight_layout()
    plt.show()
|
| 105 |
+
|
| 106 |
+
# Entry point: run the 20-round simulation (matches the 4x5 subplot grid).
simulate_rounds_custom(num_rounds=20, num_attacks_per_round=30, initial_success_rate=0.8, collapse_threshold=0.2, accelerate_threshold=0.4)
|
| 107 |
+
|
gitattributes
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
main.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import time
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
import torch.backends.cudnn as cudnn
|
| 6 |
+
import torch.optim
|
| 7 |
+
import torchvision.transforms as transforms
|
| 8 |
+
import torchvision.datasets as datasets
|
| 9 |
+
import torchvision.models as models
|
| 10 |
+
from torch.utils.data import DataLoader
|
| 11 |
+
import wandb
|
| 12 |
+
import numpy as np
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
from ptflops import get_model_complexity_info
|
| 15 |
+
from timm.scheduler.cosine_lr import CosineLRScheduler
|
| 16 |
+
from timm.scheduler.step_lr import StepLRScheduler
|
| 17 |
+
from robustbench import benchmark
|
| 18 |
+
|
| 19 |
+
from omegaconf import DictConfig, OmegaConf
|
| 20 |
+
import hydra
|
| 21 |
+
from hydra.utils import get_original_cwd
|
| 22 |
+
import logging
|
| 23 |
+
from apex import amp
|
| 24 |
+
from robustarch.utils import configure_optimizers, get_datasets, pad_str, save_checkpoint, make_linear_schedule
|
| 25 |
+
from robustarch.adv_train import train, test_natural, test_pgd
|
| 26 |
+
|
| 27 |
+
from robustarch.models.model_torch import TorchModel
|
| 28 |
+
from robustarch.models.model import NormalizedConfigurableModel
|
| 29 |
+
|
| 30 |
+
log = logging.getLogger(__name__)
|
| 31 |
+
|
| 32 |
+
@hydra.main(config_path="../configs", config_name="main", version_base="1.2")
def main(cfg: DictConfig):
    """Entry point: build the model, then either evaluate (PGD / AutoAttack)
    or run adversarial training with checkpointing and optional wandb logging.

    All behavior is driven by the hydra config `cfg`.
    """
    ### initialize parameters and folders
    device = torch.device(cfg.train_test.device)
    cwd = get_original_cwd()
    cudnn.benchmark = True

    # log all configurations
    log.info(f"\n{pad_str(' ARGUMENTS ')}\n{OmegaConf.to_yaml(cfg)}\n{pad_str('')}")

    ### Create models (configurable models not implemented)
    log.info(f"=> creating model:")
    assert cfg.model.model_source in ["torch", "local", "madry"], f"{cfg.model.model_source} not supported"
    if cfg.model.model_source == "torch":
        model = TorchModel(cfg.model.arch, cfg.dataset.mean, cfg.dataset.std, **cfg.model.kwargs)
    elif cfg.model.model_source == "local":
        model = NormalizedConfigurableModel(cfg.dataset.mean, cfg.dataset.std, **hydra.utils.instantiate(cfg.model.kwargs))
    else:
        raise NotImplementedError
    model.to(device)
    log.info(model)

    # compute model complexity
    log.info(pad_str(" Model Complexity Info "))
    macs, params = get_model_complexity_info(model, (3, cfg.train_test.crop_size, cfg.train_test.crop_size), as_strings=True,
                                             print_per_layer_stat=False, verbose=False)
    log.info(f"{'Computational complexity: ':<30} {macs:<8}")
    log.info(f"{'Number of parameters: ':<30} {params:<8}")

    ### criterion, optimizer and lr scheduler
    criterion = nn.CrossEntropyLoss().to(device)

    param_groups = configure_optimizers(model)
    optimizer_name = cfg.train_test.optim.lower()
    if optimizer_name == "sgd":
        optimizer = torch.optim.SGD(params=param_groups, lr=cfg.train_test.lr, momentum=cfg.train_test.momentum, weight_decay=cfg.train_test.weight_decay)
    elif optimizer_name == "adamw":
        raise NotImplementedError
    else:
        # FIX: an unrecognized optimizer previously fell through silently and
        # crashed later with NameError on `optimizer`.
        raise ValueError(f"{cfg.train_test.optim} not supported")

    ### amp half precision
    if cfg.train_test.half and not cfg.train_test.evaluate:
        model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
    model = nn.DataParallel(model)

    ### Resume if training <accidentally stops/enters next phase>
    # FIX: defaults are set BEFORE the resume block so checkpoint values are
    # not clobbered (previously `start_epoch`/`best_prec1` were reset after
    # loading, discarding the resumed state).
    start_epoch = cfg.train_test.start_epoch
    best_prec1 = 0.
    if cfg.train_test.resume:
        prev_model_dir = os.path.normpath(Path(cwd) / cfg.train_test.resume)
        if os.path.isfile(prev_model_dir):
            log.info(f"=> loading checkpoint '{prev_model_dir}'")
            ckpt = torch.load(prev_model_dir)
            start_epoch = ckpt["epoch"]
            best_prec1 = ckpt["best_prec1"]
            model.load_state_dict(ckpt["state_dict"])
            optimizer.load_state_dict(ckpt["optimizer"])
        else:
            log.info(f"=> no checkpoint found at '{cfg.train_test.resume}'")

    ### Load dataset
    train_loader, test_loader = get_datasets(cfg)

    ### Evaluate/Test
    if cfg.train_test.evaluate:
        if cfg.attack.test.name == "pgd":
            log.info(pad_str(" Performing PGD Attacks "))
            test_pgd(model, test_loader, criterion, device, cfg, log)
            test_natural(model, test_loader, criterion, device, cfg, log)
        elif cfg.attack.test.name == "aa":
            # AutoAttack via robustbench.benchmark with dataset-appropriate
            # preprocessing (robustbench loads raw images itself)
            model.eval()
            test_aug = list()
            if cfg.train_test.mode == "at_pgd":
                if cfg.dataset.dataset == "imagenet":
                    test_aug.append(transforms.Resize(256))
                elif cfg.dataset.dataset == "cifar10":
                    test_aug.append(transforms.Resize(cfg.train_test.crop_size))
            test_aug.extend([
                transforms.CenterCrop(cfg.train_test.crop_size),
                transforms.ToTensor(),
            ])
            test_transform = transforms.Compose(test_aug)
            log.info(benchmark(
                model,
                dataset=cfg.dataset.dataset,
                data_dir=cfg.dataset.data_dir,
                device=device,
                batch_size=cfg.attack.test.batch_size,
                eps=cfg.attack.test.eps / 255.,
                preprocessing=test_transform,
                n_examples=cfg.attack.test.n_examples,
                threat_model="Linf" if cfg.attack.test.norm == "linf" else cfg.attack.test.norm
            ))

        return

    # visualization
    if cfg.visualization.tool == "wandb":
        log.info(f"=> Visualization with wandb")
        wandb.init(project=cfg.visualization.project, entity=cfg.visualization.entity, resume=True)
        wandb.run.name = cfg.name

    # trained model dir (FAT phases checkpoint into per-phase subdirectories)
    model_dir = Path(cwd) / cfg.train_test.model_dir
    if cfg.train_test.mode == "fat":
        model_dir = model_dir / cfg.train_test.phase
    model_dir.mkdir(parents=True, exist_ok=True)

    # lr + eps schedule
    eps_schedule = None
    if cfg.train_test.mode == "fat":
        # FAT uses a piecewise-linear per-batch lr interpolated over epochs
        lr_schedule = lambda t: np.interp(t, cfg.train_test.lr_epochs, cfg.train_test.lr_values)
        total_epochs = cfg.train_test.end_epoch
    else:
        total_epochs = cfg.train_test.end_epoch - cfg.train_test.start_epoch
        if cfg.train_test.schedule.lower() == "cosine":
            lr_schedule = CosineLRScheduler(
                optimizer,
                t_initial=total_epochs,
                lr_min=cfg.train_test.min_lr,
                warmup_lr_init=cfg.train_test.warmup_lr,
                warmup_t=cfg.train_test.warmup_epochs,
                cycle_mul=cfg.train_test.lr_cycle_mul,
                cycle_decay=cfg.train_test.lr_cycle_decay,
                cycle_limit=cfg.train_test.lr_cycle_limit,
            )
        elif cfg.train_test.schedule.lower() == "step":
            lr_schedule = StepLRScheduler(
                optimizer,
                decay_t=cfg.train_test.decay_t,
                decay_rate=cfg.train_test.decay_rate,
                warmup_t=cfg.train_test.warmup_epochs,
                warmup_lr_init=cfg.train_test.warmup_lr,
            )
        total_epochs = total_epochs + cfg.train_test.cooldown_epochs

    # eps schedule
    if cfg.attack.train.eps_schedule and cfg.attack.train.eps_schedule.lower() == "linear" and cfg.attack.train.eps_schedule_epochs:
        eps_schedule = make_linear_schedule(1.0 * cfg.attack.train.eps / cfg.dataset.max_color_value, cfg.attack.train.eps_schedule_epochs, cfg.attack.train.zero_eps_epochs)

    ### Train
    for epoch in range(start_epoch, total_epochs):
        # Train one epoch
        train(model, train_loader, optimizer, criterion, lr_schedule, epoch, device, cfg, log, eps_schedule)

        # Test natural accuracy
        prec1 = test_natural(model, test_loader, criterion, device, cfg, log)

        if cfg.visualization.tool == "wandb":
            wandb.log({"Test natural accuracy": prec1}, step=epoch)

        # Save checkpoint based on best natural accuracy
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        model_info = dict(
            tag=cfg.name,
            best_prec1=best_prec1,
            epoch=epoch + 1,
            state_dict=model.state_dict(),
            optimizer=optimizer.state_dict()
        )
        save_checkpoint(model_info, model_dir, is_best)
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
if __name__ == "__main__":
    # Script entry point: hydra composes the config and invokes main().
    main()
|
utils.py
ADDED
|
@@ -0,0 +1,239 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Dict, Tuple, Callable
|
| 2 |
+
import torch
|
| 3 |
+
import os
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from torch.utils.data import DataLoader
|
| 7 |
+
import torchvision.transforms as transforms
|
| 8 |
+
import torchvision.datasets as datasets
|
| 9 |
+
import shutil
|
| 10 |
+
|
| 11 |
+
# PCA eigenvalues/eigenvectors of ImageNet RGB pixel statistics, consumed by
# the Lighting augmentation below. Values match the MadryLab `robustness`
# library's AlexNet-style PCA lighting noise.
IMAGENET_PCA = {
    'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]),
    'eigvec': torch.Tensor([
        [-0.5675, 0.7192, 0.4009],
        [-0.5808, -0.0045, -0.8140],
        [-0.5836, -0.6948, 0.4203],
    ])
}
|
| 19 |
+
|
| 20 |
+
class Lighting(object):
    """
    Lighting noise (see https://git.io/fhBOc)
    https://github.com/MadryLab/robustness/blob/a9541241defd9972e9334bfcdb804f6aefe24dc7/robustness/data_augmentation.py#L18
    """
    def __init__(self, alphastd, eigval, eigvec):
        # alphastd: std-dev of the random PCA coefficients; 0 disables the noise.
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec

    def __call__(self, img):
        # Augmentation disabled: return the input tensor untouched.
        if self.alphastd == 0:
            return img

        # One random coefficient per principal component (same RNG call as the
        # reference implementation, so the random stream is unchanged).
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        scaled = self.eigvec.type_as(img).clone()
        scaled = scaled.mul(alpha.view(1, 3).expand(3, 3))
        scaled = scaled.mul(self.eigval.view(1, 3).expand(3, 3))
        rgb = scaled.sum(1).squeeze()

        # Add the resulting per-channel offset to every pixel.
        return img.add(rgb.view(3, 1, 1).expand_as(img))
|
| 41 |
+
|
| 42 |
+
class PSiLU(nn.Module):
    """Parametric SiLU activation: ``x * sigmoid(alpha * x)`` with learnable alpha."""

    def __init__(self, alpha: float = 1.0, device=None, dtype=None) -> None:
        super().__init__()
        factory_kwargs = {'device': device, 'dtype': dtype}
        # Single learnable scalar, initialised to `alpha`.
        self.alpha = nn.Parameter(torch.empty(1, **factory_kwargs).fill_(alpha))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate = torch.sigmoid(self.alpha * x)
        return x * gate
|
| 50 |
+
|
| 51 |
+
class PSSiLU(nn.Module):
    """Shifted parametric SiLU: ``x * (sigmoid(alpha*x) - beta) / (1 - beta)``.

    Both ``alpha`` and ``beta`` are learnable scalars; ``beta`` is clamped to
    [0, 1] on every forward pass.
    """

    def __init__(self, alpha: float = 1.0, beta: float = 1e-4, device=None, dtype=None) -> None:
        super().__init__()
        factory_kwargs = {'device': device, 'dtype': dtype}
        self.alpha = nn.Parameter(torch.empty(1, **factory_kwargs).fill_(alpha))
        self.beta = nn.Parameter(torch.empty(1, **factory_kwargs).fill_(beta))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Keep beta in [0, 1] (in-place on .data, as in the reference code).
        self.beta.data = torch.clamp(self.beta.data, 0., 1.)
        shifted = torch.sigmoid(self.alpha * x) - self.beta
        # 1e-6 guards against division by zero as beta approaches 1.
        return x * shifted / (1. - self.beta + 1e-6)
|
| 61 |
+
class AverageMeter(object):
    """Tracks the most recent value and a running (count-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        # val: last value seen; sum/count back the running average in avg.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val: float, n: int = 1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
|
| 78 |
+
|
| 79 |
+
def accuracy(output: torch.Tensor, target: torch.Tensor, topk: tuple = (1, 5)) -> List[float]:
    """Computes the accuracy over the k top predictions for the specified values of k"""

    with torch.no_grad():
        batch_size = target.shape[0]
        maxk = max(topk)

        # (batch, maxk) class indices, best first; transpose to (maxk, batch)
        # so hits[:k] selects the top-k rows.
        _, pred = output.topk(k=maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()
        hits = pred.eq(target.view(1, -1).expand_as(pred))

        # Percentage of samples whose label appears in the top-k predictions.
        return [hits[:k].flatten().float().sum(0).mul_(100.0 / batch_size) for k in topk]
|
| 95 |
+
|
| 96 |
+
def configure_optimizers(model: nn.Module) -> List[Dict]:
    """Split model parameters into weight-decayed and non-decayed groups.

    All biases and every parameter of normalization layers and parametric
    activations (PReLU/PSiLU/PSSiLU) are exempt from weight decay.
    Code adapted from https://github.com/karpathy/minGPT/blob/3ed14b2cec0dfdad3f4b2831f2b4a86d11aef150/mingpt/model.py#L136

    Returns:
        Two optimizer param groups: decayed params, and params with
        ``weight_decay=0.0``.
    """
    decay = set()
    no_decay = set()
    # Modules none of whose parameters should be weight-decayed.
    weight_decay_blacklist = (nn.BatchNorm2d, nn.LayerNorm, nn.InstanceNorm2d, nn.PReLU, PSiLU, PSSiLU)
    for mn, m in model.named_modules():
        for pn, p in m.named_parameters(recurse=False):
            full_name = f"{mn}.{pn}" if mn else pn
            if pn.endswith("bias"):
                # all biases are not decayed
                no_decay.add(full_name)
            elif isinstance(m, weight_decay_blacklist):
                # FIX: previously only params literally named "*weight" were
                # exempted, so PSiLU.alpha / PSSiLU.beta were weight-decayed
                # even though their modules are on the blacklist. Exempt every
                # parameter of a blacklisted module.
                no_decay.add(full_name)
            else:
                decay.add(full_name)

    # assert all parameters are considered exactly once
    param_dict = {pn: p for pn, p in model.named_parameters()}
    inter_params = decay & no_decay
    union_params = decay | no_decay
    assert len(inter_params) == 0, f"parameters {str(inter_params)} made it into both decay/no_decay sets!"
    assert len(param_dict.keys() - union_params) == 0, f"parameters {str(param_dict.keys() - union_params)} were not separated into either decay/no_decay set!"

    optim_group = [
        {"params": [param_dict[fpn] for fpn in sorted(decay)]},
        {"params": [param_dict[fpn] for fpn in sorted(no_decay)], "weight_decay": 0.0},
    ]

    return optim_group
|
| 129 |
+
|
| 130 |
+
def get_cifar10(root_dir: str, batch_size: int, workers: int, crop_size: int) -> List[DataLoader]:
    """Build CIFAR-10 train/test DataLoaders.

    The transforms follow the robustness library: https://github.com/MadryLab/robustness/blob/a9541241defd9972e9334bfcdb804f6aefe24dc7/robustness/data_augmentation.py#L68

    FIX: the test loader now uses ``shuffle=False`` (the original shuffled the
    test set), matching ``get_imagenet`` and giving a deterministic
    evaluation order; shuffling during evaluation has no benefit.
    """
    train_transform = transforms.Compose([
        transforms.RandomCrop(crop_size, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(.25, .25, .25),
        transforms.RandomRotation(2),
        transforms.ToTensor(),
    ])

    test_transform = transforms.Compose([
        transforms.Resize(crop_size),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
    ])

    # download=True fetches the dataset on first use.
    train_dataset = datasets.CIFAR10(root=root_dir, train=True, download=True, transform=train_transform)
    test_dataset = datasets.CIFAR10(root=root_dir, train=False, download=True, transform=test_transform)

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=workers, pin_memory=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=workers, pin_memory=True)

    return train_loader, test_loader
|
| 155 |
+
|
| 156 |
+
def get_imagenet(data_dir: str, batch_size: int, workers: int, crop_size: int, color_jitter: float = 0., use_lighting: bool = False, mode: str = "fat") -> List[DataLoader]:
    """Build ImageNet train/test DataLoaders from `data_dir`/train and `data_dir`/val.

    Train pipeline: random resized crop + horizontal flip, optional colour
    jitter, ToTensor, then optional Lighting noise (applied on the tensor).
    Test pipeline mirrors the `robustness` library: center crop + ToTensor,
    with a Resize(256) prepended only in "at_pgd" mode.
    """
    augmentations = [
        transforms.RandomResizedCrop(crop_size),
        transforms.RandomHorizontalFlip(),
    ]
    if color_jitter > 0:
        jitter = float(color_jitter)
        # Same strength for brightness, contrast and saturation.
        augmentations.append(transforms.ColorJitter(jitter, jitter, jitter))
    augmentations.append(transforms.ToTensor())
    if use_lighting:
        # Lighting operates on tensors, so it must come after ToTensor().
        augmentations.append(Lighting(0.05, IMAGENET_PCA['eigval'], IMAGENET_PCA['eigvec']))
    train_transform = transforms.Compose(augmentations)

    # Test Transform is exactly the same as `robustness` library
    eval_augs = []
    if mode == "at_pgd":
        eval_augs.append(transforms.Resize(256))
    eval_augs += [
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
    ]
    test_transform = transforms.Compose(eval_augs)

    train_dataset = datasets.ImageFolder(root=os.path.join(data_dir, 'train'), transform=train_transform)
    test_dataset = datasets.ImageFolder(root=os.path.join(data_dir, 'val'), transform=test_transform)

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=workers, pin_memory=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=workers, pin_memory=True)

    return train_loader, test_loader
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def get_datasets(cfg):
    """Construct train/test DataLoaders for the dataset named in ``cfg.dataset.dataset``.

    Supported datasets: "imagenet" and "cifar10".

    Raises:
        ValueError: for an unsupported dataset name. (FIX: the original
        silently returned ``None``, producing a confusing downstream
        unpacking error instead of a clear message.)
    """
    name = cfg.dataset.dataset
    if name == "imagenet":
        return get_imagenet(
            data_dir=cfg.dataset.data_dir,
            batch_size=cfg.train_test.batch_size,
            workers=cfg.train_test.workers,
            crop_size=cfg.train_test.crop_size,
            color_jitter=cfg.train_test.color_jitter,
            use_lighting=cfg.train_test.lighting,
            mode=cfg.train_test.mode,
        )
    elif name == "cifar10":
        return get_cifar10(
            root_dir=cfg.dataset.data_dir,
            batch_size=cfg.train_test.batch_size,
            workers=cfg.train_test.workers,
            crop_size=cfg.train_test.crop_size,
        )
    raise ValueError(f"Unsupported dataset: {name!r} (expected 'imagenet' or 'cifar10')")
|
| 215 |
+
|
| 216 |
+
def pad_str(msg: str, total_len: int=80) -> str:
    """Center *msg* in a banner of '*' characters of exactly ``total_len`` chars.

    FIX: the original emitted ``rem_len // 2`` stars on both sides, so an odd
    remainder produced a line one character short of ``total_len``; the right
    side now absorbs the odd character. A *msg* longer than ``total_len`` is
    returned unpadded, as before.
    """
    rem_len = max(total_len - len(msg), 0)
    left = rem_len // 2
    return f"{'*' * left}{msg}{'*' * (rem_len - left)}"
|
| 219 |
+
|
| 220 |
+
def save_checkpoint(model_info: dict, filepath: Path, is_best: bool):
    """Persist a checkpoint as ckpt_epoch<N>.pt; mirror it to model_best.pt when best."""
    ckpt_path = filepath / f"ckpt_epoch{model_info['epoch']}.pt"
    torch.save(model_info, ckpt_path)
    if is_best:
        # Keep a stable alias for the best-so-far checkpoint.
        shutil.copyfile(ckpt_path, filepath / "model_best.pt")
|
| 225 |
+
|
| 226 |
+
def compute_total_parameters(model: nn.Module) -> float:
    """Return the total number of parameters in `model`, in millions."""
    n_params = 0
    for param in model.parameters():
        n_params += param.numel()
    return n_params / 1e6
|
| 230 |
+
|
| 231 |
+
# linear scheduling for eps
|
| 232 |
+
def make_linear_schedule(final: float, warmup: int, zero_eps_epochs: int) -> Callable[[int], float]:
    """Build an epoch -> eps schedule: 0 for the first `zero_eps_epochs`
    epochs, then a linear ramp, then `final` from epoch `warmup` onward.

    NOTE(review): the ramp divides by `warmup` rather than
    `warmup - zero_eps_epochs`, so eps jumps to `final` at `step == warmup`
    instead of reaching it linearly — preserved as-is; confirm intended.
    """
    def linear_schedule(step: int) -> float:
        if step < zero_eps_epochs:
            return 0.0
        ramped = (step - zero_eps_epochs) / warmup * final
        return ramped if step < warmup else final
    return linear_schedule
|