# temp_backup/main_online_cifar10.py
# (uploaded via huggingface_hub, commit 9e56db5)
# Copyright (c) Alibaba Group
import argparse
import torch
import torchvision.datasets as datasets
import torch.nn.functional as F
import clip
import os
import math
import numpy as np
from torchvision.datasets import MNIST, CIFAR10
from datetime import datetime
import logging
from MAPLS.mapls import mapls
from MAPLS.common import lsc
# Log every message to both a timestamped file under logs/ and stdout.
os.makedirs("logs", exist_ok=True)  # BUGFIX: FileHandler raises FileNotFoundError if logs/ is missing
log_filename = os.path.join("logs", f"onzeta_eval_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.log")
logging.basicConfig(
    level=logging.INFO,
    format='%(message)s',
    handlers=[
        logging.FileHandler(log_filename),
        logging.StreamHandler()
    ]
)

# CLIP backbones this script accepts for --arch.
model_names = ['RN50', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px']

parser = argparse.ArgumentParser(description='OnZeta for CIFAR-10')  # BUGFIX: description said ImageNet
parser.add_argument('--data_path', default='./CIFAR10_TEST', type=str,
                    help='dataset path')
parser.add_argument('-a', '--arch', metavar='ARCH', default='ViT-B/16',
                    choices=model_names,
                    help='model architecture: ' +
                    ' | '.join(model_names) +
                    ' (default: ViT-B/16)')  # BUGFIX: help claimed the default was RN50
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
                    help='number of data loading workers (default: 8)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256)')
# OnZeta hyper-parameters: softmax temperatures, step-size constants,
# class-balance target, vision/text mixing weight, and number of shuffles.
parser.add_argument('--tau_t', default=0.01, type=float,
                    help='temperature for the text-proxy softmax')
parser.add_argument('--tau_i', default=0.04, type=float,
                    help='temperature for the vision-proxy softmax')
parser.add_argument('--cw', default=0.5, type=float,
                    help='step-size constant for the vision proxy w')
parser.add_argument('--cr', default=20, type=float,
                    help='step-size constant for the dual variable rho')
parser.add_argument('--alpha', default=0, type=float,
                    help='class-balance mass subtracted in the dual update')
parser.add_argument('--beta', default=0.4, type=float,
                    help='max weight of the vision prediction (overridden per run by main)')
parser.add_argument('--repeat', default=5, type=int,
                    help='number of random shuffles to average over')

# Run on GPU when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
def main(beta):
    """Run OnZeta online zero-shot transfer on the CIFAR-10 test set.

    Encodes the test images with CLIP, builds a zero-shot text classifier,
    then streams the (shuffled) images through the online update of the
    vision proxy ``w`` and the dual variable ``rho``, evaluating three
    predictors: the online text labels, the OnZeta text/vision combination,
    and a MAPLS label-shift-corrected version of the combination.

    Args:
        beta: maximum mixing weight given to the vision prediction
              (ramped up as ``beta * sqrt((i+1)/n)`` over the stream).
    """
    args = parser.parse_args()
    logging.info(args)
    lam = 1  # regularization weight passed to the MAPLS EM algorithm
    args.beta = beta
    logging.info("the beta is {}".format(beta))

    cifar10_classes = [
        'airplane',
        'automobile',
        'bird',
        'cat',
        'deer',
        'dog',
        'frog',
        'horse',
        'ship',
        'truck',
    ]
    cifar10_templates = [
        'a photo of a {}.',
        'a blurry photo of a {}.',
        'a black and white photo of a {}.',
        'a low contrast photo of a {}.',
        'a high contrast photo of a {}.',
        'a bad photo of a {}.',
        'a good photo of a {}.',
        'a photo of a small {}.',
        'a photo of a big {}.',
        'a photo of the {}.',
        'a blurry photo of the {}.',
        'a black and white photo of the {}.',
        'a low contrast photo of the {}.',
        'a high contrast photo of the {}.',
        'a bad photo of the {}.',
        'a good photo of the {}.',
        'a photo of the small {}.',
        'a photo of the big {}.',
    ]

    logging.info('load pre-trained model')
    model, preprocess = clip.load(args.arch)
    model = model.to(device)  # BUGFIX: was .cuda(), which crashes on CPU-only hosts
    model.eval()

    logging.info('load data')
    cifar10 = CIFAR10(root=os.path.expanduser("~/.cache"), download=True, train=False)

    # Encode every test image once; features are L2-normalized so that
    # `feat @ classifier` is a cosine similarity.
    with torch.no_grad():
        image_feat = []
        image_label = []
        for images, target in cifar10:
            images = preprocess(images).unsqueeze(0).to(device)
            feat = model.encode_image(images)
            feat /= feat.norm()
            image_feat.append(feat)
            image_label.append(target)
    # Each entry is (1, D); concatenating on dim 0 yields the (n, D) matrix
    # the original built via stack(dim=1) + squeeze.
    image_feat = torch.cat(image_feat, dim=0).to(device).float()
    image_label = torch.tensor(image_label, dtype=torch.long).to(device)
    n = len(image_label)

    logging.info('obtain text proxy')
    text_classifier = zeroshot_classifier(clip, model, cifar10_classes, cifar10_templates)
    text_classifier = text_classifier.float()
    logits_t = image_feat @ text_classifier
    acc1, acc5 = accuracy(logits_t, image_label, topk=(1, 5))
    top1 = (acc1 / n) * 100
    logging.info(f'accuracy with text proxy: {top1:.2f}')

    logging.info('online zero-shot transfer: repeat {} times'.format(args.repeat))
    num_class = len(torch.unique(image_label))
    acc_onzeta = torch.zeros(args.repeat, device=device)
    acc_onlab = torch.zeros(args.repeat, device=device)
    acc_ls = torch.zeros(args.repeat, device=device)

    for rep in range(args.repeat):  # renamed from `iter` (shadowed the builtin)
        idx = torch.randperm(n, device=device)
        combo_label = torch.zeros(n, num_class, device=device)
        text_label = torch.zeros(n, num_class, device=device)
        w = text_classifier.clone()  # online vision proxy, initialized from the text proxy
        rho = torch.zeros(num_class, device=device)  # dual variable for class balance

        for i in range(n):
            lr = args.cw / math.sqrt(i + 1)
            rlr = args.cr / math.sqrt(i + 1)
            # Vision weight ramps from ~0 to args.beta over the stream.
            beta_i = args.beta * math.sqrt((i + 1) / n)
            x = image_feat[idx[i], :]
            # Text prediction, reweighted by exp(rho) to enforce class balance.
            tlabel = F.softmax(x @ text_classifier / args.tau_t, dim=0)
            tlabel = tlabel * torch.exp(rho)
            tlabel /= torch.sum(tlabel)
            rho -= rlr * (tlabel - args.alpha / num_class)
            rho[rho < 0] = 0  # project the dual back onto the nonnegative orthant
            text_label[i, :] = tlabel
            vision_label = F.softmax(x @ w / args.tau_i, dim=0)
            combo_label[i, :] = beta_i * vision_label + (1 - beta_i) * tlabel
            # Gradient step on w toward the text label, then re-normalize columns.
            grad = torch.outer(x, vision_label - tlabel)
            w -= (lr / args.tau_i) * grad
            w = F.normalize(w, dim=0)

        acc1, acc5 = accuracy(text_label, image_label[idx], topk=(1, 5))
        acc_onlab[rep] = (acc1 / n) * 100
        acc1, acc5 = accuracy(combo_label, image_label[idx], topk=(1, 5))
        acc_onzeta[rep] = (acc1 / n) * 100

        # MAPLS: EM-estimate the class prior, then apply label-shift correction.
        pz = np.full(len(cifar10_classes), 1.0 / len(cifar10_classes))
        qy = mapls(combo_label, pz=pz, qy_mode="soft", max_iter=100, lam=lam)  # FIXME why return nan
        prior_ratio = np.array(qy) / np.array(pz)  # renamed: original clobbered `w`
        # BUGFIX: combo_label_cpu was only assigned when is_cuda was True,
        # causing a NameError on CPU-only runs.
        combo_label_cpu = combo_label.cpu() if combo_label.is_cuda else combo_label
        qy_probs = lsc(combo_label_cpu, 1.0 / prior_ratio)
        acc1_ls, acc5_ls = accuracy(qy_probs, image_label[idx], topk=(1, 5))
        acc_ls[rep] = (acc1_ls / n) * 100

    logging.info('mean acc of onlab is: {:.2f}'.format(torch.mean(acc_onlab)))
    logging.info('mean acc of onzeta is: {:.2f}'.format(torch.mean(acc_onzeta)))
    logging.info('mean acc of MAPLS is: {:.2f}'.format(torch.mean(acc_ls)))
def zeroshot_classifier(clip, model, classnames, templates):
    """Build CLIP zero-shot weights: one unit-norm mean text embedding per class.

    For each class name, every prompt template is filled in and encoded;
    the per-prompt embeddings are normalized, averaged, and re-normalized.
    Returns a (D, num_classes) tensor on the GPU.
    """
    with torch.no_grad():
        per_class_weights = []
        for name in classnames:
            prompts = [template.format(name) for template in templates]
            tokens = clip.tokenize(prompts).cuda()
            embeddings = model.encode_text(tokens)
            embeddings = embeddings / embeddings.norm(dim=-1, keepdim=True)
            mean_embedding = embeddings.mean(dim=0)
            per_class_weights.append(mean_embedding / mean_embedding.norm())
        return torch.stack(per_class_weights, dim=1).cuda()
def accuracy(output, target, topk=(1,)):
    """Count correct predictions at each cutoff in `topk`.

    Args:
        output: (N, C) score/logit tensor.
        target: (N,) tensor of ground-truth class indices.
        topk: cutoffs k; a sample counts as correct at k when its true
              class is among the k highest-scoring classes.

    Returns:
        A list of floats, one raw correct-count per cutoff (not a rate).
    """
    kmax = max(topk)
    _, pred = output.topk(kmax, 1, True, True)
    pred = pred.t().cpu()
    target = target.cpu()
    hits = pred.eq(target.view(1, -1).expand_as(pred))
    counts = []
    for k in topk:
        counts.append(float(hits[:k].reshape(-1).float().sum().item()))
    return counts
if __name__ == '__main__':
    # Sweep the mixing weight beta from 1.0 down to 0.1 in steps of 0.1.
    # i / 10 is correctly-rounded float division, so each value is exactly
    # equal to the corresponding decimal literal.
    for beta in (i / 10 for i in range(10, 0, -1)):
        main(beta)