| |
| import argparse |
| import torch |
| import torchvision.datasets as datasets |
| import torch.nn.functional as F |
| import clip |
| import os |
| import math |
| import numpy as np |
| import copy |
| from PIL import Image |
| import torchvision.transforms as transforms |
| |
| import numpy as np |
| from PIL import Image, ImageOps, ImageEnhance |
|
|
| from MAPLS.mapls import mapls |
| from MAPLS.common import lsc |
|
|
| from tpt.custom_clip import get_coop |
| from tpt.cocoop import get_cocoop |
| from tpt.tpt_utils import * |
|
|
model_names = ['RN50', 'ViT-B/32', 'ViT-B/16', 'ViT-L/14', 'ViT-L/14@336px']
parser = argparse.ArgumentParser(description='OnZeta for ImageNet')
# --- data / CLIP backbone ---
parser.add_argument('--data_path', default='/home/li325/space_mlai/pengxiao_space/dataset/ImageNet/', type=str,
                    help='dataset path')
parser.add_argument('-a', '--arch', metavar='ARCH', default='RN50',
                    choices=model_names,
                    help='model architecture: ' +
                         ' | '.join(model_names) +
                         ' (default: RN50)')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
                    help='number of data loading workers (default: 8)')
parser.add_argument('-b', '--batch-size', default=1, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256)')
# --- OnZeta hyper-parameters ---
# NOTE(review): --tau_t is never read in this file; --tau_i is the softmax
# temperature for the image-proxy logits. Confirm tau_t is not dead.
parser.add_argument('--tau_t', default=0.01, type=float)
parser.add_argument('--tau_i', default=0.04, type=float)
# cw / cr: step-size constants for the vision-proxy (w) and the
# class-prior dual variable (rho) online updates in main().
parser.add_argument('--cw', default=0.5, type=float)
parser.add_argument('--cr', default=20, type=float)
parser.add_argument('--alpha', default=1, type=float)
parser.add_argument('--beta', default=0.8, type=float)
parser.add_argument('--repeat', default=1, type=int)


# --- dataset selection (TPT-style) ---
parser.add_argument('--test_sets', type=str, default='I', help='test dataset (multiple datasets split by slash)')
parser.add_argument('--dataset_mode', type=str, default='test', help='which split to use: train/val/test')
parser.add_argument('--resolution', default=224, type=int, help='CLIP image resolution')


# --- test-time prompt tuning (TPT / CoOp / CoCoOp) ---
parser.add_argument('--lr', '--learning-rate', default=5e-3, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('-p', '--print-freq', default=200, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--gpu', default=0, type=int,
                    help='GPU id to use.')
# NOTE(review): the boolean options below have no type=; any value supplied
# on the command line arrives as a non-empty string, which is truthy, so
# these flags cannot actually be turned off from the CLI.
parser.add_argument('--tpt', default=True, help='run test-time prompt tuning')
parser.add_argument('--selection_p', default=0.1, type=float, help='confidence selection percentile')
parser.add_argument('--tta_steps', default=1, type=int, help='test-time-adapt steps')
parser.add_argument('--n_ctx', default=4, type=int, help='number of tunable tokens')
parser.add_argument('--ctx_init', default=None, type=str, help='init tunable prompts')
parser.add_argument('--cocoop', default=True, help="use cocoop's output as prompt initialization")
parser.add_argument('--load',
                    default="/home/li325/space_mlai/pengxiao_space/OnZeta/tpt/pretrained_cocoop/rn50_ep50_16shots/nctx4_cscFalse_ctpend/seed1/prompt_learner/model.pth.tar-50",
                    type=str, help='path to a pre-trained coop/cocoop')
parser.add_argument('--seed', type=int, default=0)
|
|
|
|
# Newer torchvision exposes resampling filters via InterpolationMode;
# fall back to the PIL constant on older versions.
try:
    from torchvision.transforms import InterpolationMode
    BICUBIC = InterpolationMode.BICUBIC
except ImportError:
    BICUBIC = Image.BICUBIC


# Output canvas size used by the affine augmentation ops below.
IMAGE_SIZE = 224


# CLIP's RGB normalization statistics (mean/std used by OpenAI CLIP).
normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
                                 std=[0.26862954, 0.26130258, 0.27577711])
|
|
| |
def get_preaugment():
    """Return the random crop + horizontal-flip transform applied to every
    image before any AugMix branch operations."""
    crop = transforms.RandomResizedCrop(224)
    flip = transforms.RandomHorizontalFlip()
    return transforms.Compose([crop, flip])
|
|
def augmix(image, preprocess, aug_list, severity=1):
    """Produce one (possibly AugMix-mixed) preprocessed view of a PIL image.

    When ``aug_list`` is empty, this is just random crop/flip followed by
    ``preprocess``. Otherwise three augmentation branches are blended with
    Dirichlet weights and combined with the clean view via a Beta factor.
    The RNG call order matches the reference implementation exactly.
    """
    x_orig = get_preaugment()(image)
    x_processed = preprocess(x_orig)
    if not aug_list:
        return x_processed
    # Branch weights (Dirichlet) and clean/augmented blend factor (Beta).
    w = np.float32(np.random.dirichlet([1.0, 1.0, 1.0]))
    m = np.float32(np.random.beta(1.0, 1.0))

    mix = torch.zeros_like(x_processed)
    for branch in range(3):
        x_aug = x_orig.copy()
        depth = np.random.randint(1, 4)
        for _ in range(depth):
            op = np.random.choice(aug_list)
            x_aug = op(x_aug, severity)
        mix += w[branch] * preprocess(x_aug)
    return m * x_processed + (1 - m) * mix
|
|
def int_parameter(level, maxval):
    """Scale ``maxval`` by ``level``/10 and truncate to an integer.

    Args:
        level: operation level, expected in [0, PARAMETER_MAX] where
            PARAMETER_MAX is 10.
        maxval: maximum value the operation can take.

    Returns:
        ``int(level * maxval / 10)``.
    """
    scaled = level * maxval / 10
    return int(scaled)
|
|
|
|
def float_parameter(level, maxval):
    """Scale ``maxval`` by ``level``/10 as a float.

    Args:
        level: operation level, expected in [0, PARAMETER_MAX] where
            PARAMETER_MAX is 10.
        maxval: maximum value the operation can take.

    Returns:
        ``level * maxval / 10`` as a float.
    """
    return maxval * float(level) / 10.
|
|
|
|
def sample_level(n):
    """Draw a random augmentation level uniformly from [0.1, n)."""
    return np.random.uniform(0.1, n)
|
|
|
|
def autocontrast(pil_img, _):
    """Maximize image contrast; the severity argument is ignored."""
    result = ImageOps.autocontrast(pil_img)
    return result
|
|
|
|
def equalize(pil_img, _):
    """Equalize the image histogram; the severity argument is ignored."""
    result = ImageOps.equalize(pil_img)
    return result
|
|
|
|
def posterize(pil_img, level):
    """Reduce bit depth; higher level keeps fewer bits (4 down to 0)."""
    bits_removed = int_parameter(sample_level(level), 4)
    return ImageOps.posterize(pil_img, 4 - bits_removed)
|
|
|
|
def rotate(pil_img, level):
    """Rotate by up to ±30 degrees, magnitude sampled from ``level``."""
    magnitude = int_parameter(sample_level(level), 30)
    # Flip the sign with probability 1/2 (RNG order preserved).
    angle = -magnitude if np.random.uniform() > 0.5 else magnitude
    return pil_img.rotate(angle, resample=Image.BILINEAR)
|
|
|
|
def solarize(pil_img, level):
    """Invert pixels above a threshold; higher level lowers the threshold."""
    threshold_drop = int_parameter(sample_level(level), 256)
    return ImageOps.solarize(pil_img, 256 - threshold_drop)
|
|
|
|
def shear_x(pil_img, level):
    """Shear horizontally by a random factor up to ±0.3."""
    amount = float_parameter(sample_level(level), 0.3)
    if np.random.uniform() > 0.5:
        amount = -amount
    coeffs = (1, amount, 0, 0, 1, 0)
    return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE), Image.AFFINE, coeffs,
                             resample=Image.BILINEAR)
|
|
|
|
def shear_y(pil_img, level):
    """Shear vertically by a random factor up to ±0.3."""
    amount = float_parameter(sample_level(level), 0.3)
    if np.random.uniform() > 0.5:
        amount = -amount
    coeffs = (1, 0, 0, amount, 1, 0)
    return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE), Image.AFFINE, coeffs,
                             resample=Image.BILINEAR)
|
|
|
|
def translate_x(pil_img, level):
    """Translate horizontally by up to ±IMAGE_SIZE/3 pixels."""
    shift = int_parameter(sample_level(level), IMAGE_SIZE / 3)
    if np.random.random() > 0.5:
        shift = -shift
    coeffs = (1, 0, shift, 0, 1, 0)
    return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE), Image.AFFINE, coeffs,
                             resample=Image.BILINEAR)
|
|
|
|
def translate_y(pil_img, level):
    """Translate vertically by up to ±IMAGE_SIZE/3 pixels."""
    shift = int_parameter(sample_level(level), IMAGE_SIZE / 3)
    if np.random.random() > 0.5:
        shift = -shift
    coeffs = (1, 0, 0, 0, 1, shift)
    return pil_img.transform((IMAGE_SIZE, IMAGE_SIZE), Image.AFFINE, coeffs,
                             resample=Image.BILINEAR)
|
|
|
|
| |
def color(pil_img, level):
    """Adjust color saturation by a random factor in [0.1, 1.9]."""
    factor = float_parameter(sample_level(level), 1.8) + 0.1
    return ImageEnhance.Color(pil_img).enhance(factor)
|
|
|
|
| |
def contrast(pil_img, level):
    """Adjust contrast by a random factor in [0.1, 1.9]."""
    factor = float_parameter(sample_level(level), 1.8) + 0.1
    return ImageEnhance.Contrast(pil_img).enhance(factor)
|
|
|
|
| |
def brightness(pil_img, level):
    """Adjust brightness by a random factor in [0.1, 1.9]."""
    factor = float_parameter(sample_level(level), 1.8) + 0.1
    return ImageEnhance.Brightness(pil_img).enhance(factor)
|
|
|
|
| |
def sharpness(pil_img, level):
    """Adjust sharpness by a random factor in [0.1, 1.9]."""
    factor = float_parameter(sample_level(level), 1.8) + 0.1
    return ImageEnhance.Sharpness(pil_img).enhance(factor)
|
|
|
|
# AugMix operation pool: geometric and histogram ops only. The color /
# contrast / brightness / sharpness ops defined above are not included
# in this list.
augmentations = [
    autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
    translate_x, translate_y
]
|
|
class AugMixAugmenter(object):
    """Produce one base view plus ``n_views`` augmented views of an image.

    Args:
        base_transform: deterministic resize/crop applied to the base view.
        preprocess: tensor conversion + normalization applied to every view.
        n_views: number of extra augmented views returned per call.
        augmix: when True, the extra views use the AugMix operation pool;
            otherwise they only get the random crop + flip inside ``augmix``.
        severity: severity level forwarded to each augmentation op.
    """
    def __init__(self, base_transform, preprocess, n_views=2, augmix=False,
                 severity=1):
        self.base_transform = base_transform
        self.preprocess = preprocess
        self.n_views = n_views
        if augmix:
            # BUG FIX: `augmentations` is the list defined above in this
            # module; the original `augmentations.augmentations` raised
            # AttributeError whenever augmix=True (leftover from when
            # `augmentations` was an imported module).
            self.aug_list = augmentations
        else:
            self.aug_list = []
        self.severity = severity

    def __call__(self, x):
        """Return ``[base_view] + n_views`` augmented tensors for image ``x``."""
        image = self.preprocess(self.base_transform(x))
        views = [augmix(x, self.preprocess, self.aug_list, self.severity)
                 for _ in range(self.n_views)]
        return [image] + views
|
|
def load_model_weight(load_path, model, device, args):
    """Load a CoOp/CoCoOp checkpoint into ``model`` in place.

    Drops the cached ``token_prefix``/``token_suffix`` buffers (they are
    rebuilt for the current class-name set), records the checkpoint epoch
    on ``args.start_epoch``, and falls back to loading only the prompt
    generator when the full state dict does not match the model.

    Args:
        load_path: checkpoint file path; prints a message and does nothing
            if the file does not exist.
        model: module whose parameters are updated in place.
        device: map_location for torch.load; 'cpu' skips moving best_acc1.
        args: namespace that receives ``start_epoch``.
    """
    if not os.path.isfile(load_path):
        print("=> no checkpoint found at '{}'".format(load_path))
        return

    print("=> loading checkpoint '{}'".format(load_path))
    checkpoint = torch.load(load_path, map_location=device)
    state_dict = checkpoint['state_dict']
    # Prompt token buffers depend on the class names used at training time;
    # drop them so they are rebuilt for the current classes.
    for stale_key in ("token_prefix", "token_suffix"):
        state_dict.pop(stale_key, None)

    args.start_epoch = checkpoint['epoch']
    try:
        best_acc1 = checkpoint['best_acc1']
    except KeyError:
        best_acc1 = torch.tensor(0)
    # BUG FIX: the original used `device is not 'cpu'` — an identity
    # comparison against a string literal, whose result is
    # implementation-defined (and a SyntaxWarning on modern Python).
    if device != 'cpu':
        best_acc1 = best_acc1.to(device)
    try:
        model.load_state_dict(state_dict)
    except Exception:
        # Checkpoint holds only the prompt generator; load it partially.
        model.prompt_generator.load_state_dict(state_dict, strict=False)
    print("=> loaded checkpoint '{}' (epoch {})"
          .format(load_path, checkpoint['epoch']))
    del checkpoint
    torch.cuda.empty_cache()
|
|
def main(lam):
    """Run OnZeta online zero-shot transfer on the ImageNet validation set.

    Pipeline: (1) pre-extract L2-normalized CLIP features for the whole
    val split, (2) build a zero-shot text classifier from class-name
    prompts, (3) stream the data, updating per-sample text labels (with a
    class-prior dual variable rho), online vision proxies w, and their
    combination, (4) post-process accumulated predictions with the MAPLS
    label-shift solver and report all three accuracies.

    Args:
        lam: lambda regularization weight forwarded to ``mapls``.
    """
    args = parser.parse_args()
    print(args)
    # OpenAI's cleaned-up ImageNet-1k class names.
    # NOTE(review): presumably ordered to match torchvision ImageFolder
    # class indices of the standard val split — verify against the loader.
    imagenet_classes = ["tench", "goldfish", "great white shark", "tiger shark", "hammerhead shark", "electric ray",
                        "stingray", "rooster", "hen", "ostrich", "brambling", "goldfinch", "house finch", "junco",
                        "indigo bunting", "American robin", "bulbul", "jay", "magpie", "chickadee", "American dipper",
                        "kite (bird of prey)", "bald eagle", "vulture", "great grey owl", "fire salamander",
                        "smooth newt", "newt", "spotted salamander", "axolotl", "American bullfrog", "tree frog",
                        "tailed frog", "loggerhead sea turtle", "leatherback sea turtle", "mud turtle", "terrapin",
                        "box turtle", "banded gecko", "green iguana", "Carolina anole",
                        "desert grassland whiptail lizard", "agama", "frilled-necked lizard", "alligator lizard",
                        "Gila monster", "European green lizard", "chameleon", "Komodo dragon", "Nile crocodile",
                        "American alligator", "triceratops", "worm snake", "ring-necked snake",
                        "eastern hog-nosed snake", "smooth green snake", "kingsnake", "garter snake", "water snake",
                        "vine snake", "night snake", "boa constrictor", "African rock python", "Indian cobra",
                        "green mamba", "sea snake", "Saharan horned viper", "eastern diamondback rattlesnake",
                        "sidewinder rattlesnake", "trilobite", "harvestman", "scorpion", "yellow garden spider",
                        "barn spider", "European garden spider", "southern black widow", "tarantula", "wolf spider",
                        "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse", "prairie grouse", "peafowl",
                        "quail", "partridge", "african grey parrot", "macaw", "sulphur-crested cockatoo", "lorikeet",
                        "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "duck",
                        "red-breasted merganser", "goose", "black swan", "tusker", "echidna", "platypus", "wallaby",
                        "koala", "wombat", "jellyfish", "sea anemone", "brain coral", "flatworm", "nematode", "conch",
                        "snail", "slug", "sea slug", "chiton", "chambered nautilus", "Dungeness crab", "rock crab",
                        "fiddler crab", "red king crab", "American lobster", "spiny lobster", "crayfish", "hermit crab",
                        "isopod", "white stork", "black stork", "spoonbill", "flamingo", "little blue heron",
                        "great egret", "bittern bird", "crane bird", "limpkin", "common gallinule", "American coot",
                        "bustard", "ruddy turnstone", "dunlin", "common redshank", "dowitcher", "oystercatcher",
                        "pelican", "king penguin", "albatross", "grey whale", "killer whale", "dugong", "sea lion",
                        "Chihuahua", "Japanese Chin", "Maltese", "Pekingese", "Shih Tzu", "King Charles Spaniel",
                        "Papillon", "toy terrier", "Rhodesian Ridgeback", "Afghan Hound", "Basset Hound", "Beagle",
                        "Bloodhound", "Bluetick Coonhound", "Black and Tan Coonhound", "Treeing Walker Coonhound",
                        "English foxhound", "Redbone Coonhound", "borzoi", "Irish Wolfhound", "Italian Greyhound",
                        "Whippet", "Ibizan Hound", "Norwegian Elkhound", "Otterhound", "Saluki", "Scottish Deerhound",
                        "Weimaraner", "Staffordshire Bull Terrier", "American Staffordshire Terrier",
                        "Bedlington Terrier", "Border Terrier", "Kerry Blue Terrier", "Irish Terrier",
                        "Norfolk Terrier", "Norwich Terrier", "Yorkshire Terrier", "Wire Fox Terrier",
                        "Lakeland Terrier", "Sealyham Terrier", "Airedale Terrier", "Cairn Terrier",
                        "Australian Terrier", "Dandie Dinmont Terrier", "Boston Terrier", "Miniature Schnauzer",
                        "Giant Schnauzer", "Standard Schnauzer", "Scottish Terrier", "Tibetan Terrier",
                        "Australian Silky Terrier", "Soft-coated Wheaten Terrier", "West Highland White Terrier",
                        "Lhasa Apso", "Flat-Coated Retriever", "Curly-coated Retriever", "Golden Retriever",
                        "Labrador Retriever", "Chesapeake Bay Retriever", "German Shorthaired Pointer", "Vizsla",
                        "English Setter", "Irish Setter", "Gordon Setter", "Brittany dog", "Clumber Spaniel",
                        "English Springer Spaniel", "Welsh Springer Spaniel", "Cocker Spaniel", "Sussex Spaniel",
                        "Irish Water Spaniel", "Kuvasz", "Schipperke", "Groenendael dog", "Malinois", "Briard",
                        "Australian Kelpie", "Komondor", "Old English Sheepdog", "Shetland Sheepdog", "collie",
                        "Border Collie", "Bouvier des Flandres dog", "Rottweiler", "German Shepherd Dog", "Dobermann",
                        "Miniature Pinscher", "Greater Swiss Mountain Dog", "Bernese Mountain Dog",
                        "Appenzeller Sennenhund", "Entlebucher Sennenhund", "Boxer", "Bullmastiff", "Tibetan Mastiff",
                        "French Bulldog", "Great Dane", "St. Bernard", "husky", "Alaskan Malamute", "Siberian Husky",
                        "Dalmatian", "Affenpinscher", "Basenji", "pug", "Leonberger", "Newfoundland dog",
                        "Great Pyrenees dog", "Samoyed", "Pomeranian", "Chow Chow", "Keeshond", "brussels griffon",
                        "Pembroke Welsh Corgi", "Cardigan Welsh Corgi", "Toy Poodle", "Miniature Poodle",
                        "Standard Poodle", "Mexican hairless dog (xoloitzcuintli)", "grey wolf", "Alaskan tundra wolf",
                        "red wolf or maned wolf", "coyote", "dingo", "dhole", "African wild dog", "hyena", "red fox",
                        "kit fox", "Arctic fox", "grey fox", "tabby cat", "tiger cat", "Persian cat", "Siamese cat",
                        "Egyptian Mau", "cougar", "lynx", "leopard", "snow leopard", "jaguar", "lion", "tiger",
                        "cheetah", "brown bear", "American black bear", "polar bear", "sloth bear", "mongoose",
                        "meerkat", "tiger beetle", "ladybug", "ground beetle", "longhorn beetle", "leaf beetle",
                        "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant", "grasshopper",
                        "cricket insect", "stick insect", "cockroach", "praying mantis", "cicada", "leafhopper",
                        "lacewing", "dragonfly", "damselfly", "red admiral butterfly", "ringlet butterfly",
                        "monarch butterfly", "small white butterfly", "sulphur butterfly", "gossamer-winged butterfly",
                        "starfish", "sea urchin", "sea cucumber", "cottontail rabbit", "hare", "Angora rabbit",
                        "hamster", "porcupine", "fox squirrel", "marmot", "beaver", "guinea pig", "common sorrel horse",
                        "zebra", "pig", "wild boar", "warthog", "hippopotamus", "ox", "water buffalo", "bison",
                        "ram (adult male sheep)", "bighorn sheep", "Alpine ibex", "hartebeest", "impala (antelope)",
                        "gazelle", "arabian camel", "llama", "weasel", "mink", "European polecat",
                        "black-footed ferret", "otter", "skunk", "badger", "armadillo", "three-toed sloth", "orangutan",
                        "gorilla", "chimpanzee", "gibbon", "siamang", "guenon", "patas monkey", "baboon", "macaque",
                        "langur", "black-and-white colobus", "proboscis monkey", "marmoset", "white-headed capuchin",
                        "howler monkey", "titi monkey", "Geoffroy's spider monkey", "common squirrel monkey",
                        "ring-tailed lemur", "indri", "Asian elephant", "African bush elephant", "red panda",
                        "giant panda", "snoek fish", "eel", "silver salmon", "rock beauty fish", "clownfish",
                        "sturgeon", "gar fish", "lionfish", "pufferfish", "abacus", "abaya", "academic gown",
                        "accordion", "acoustic guitar", "aircraft carrier", "airliner", "airship", "altar", "ambulance",
                        "amphibious vehicle", "analog clock", "apiary", "apron", "trash can", "assault rifle",
                        "backpack", "bakery", "balance beam", "balloon", "ballpoint pen", "Band-Aid", "banjo",
                        "baluster / handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel",
                        "wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "swimming cap", "bath towel",
                        "bathtub", "station wagon", "lighthouse", "beaker", "military hat (bearskin or shako)",
                        "beer bottle", "beer glass", "bell tower", "baby bib", "tandem bicycle", "bikini",
                        "ring binder", "binoculars", "birdhouse", "boathouse", "bobsleigh", "bolo tie", "poke bonnet",
                        "bookcase", "bookstore", "bottle cap", "hunting bow", "bow tie", "brass memorial plaque", "bra",
                        "breakwater", "breastplate", "broom", "bucket", "buckle", "bulletproof vest",
                        "high-speed train", "butcher shop", "taxicab", "cauldron", "candle", "cannon", "canoe",
                        "can opener", "cardigan", "car mirror", "carousel", "tool kit", "cardboard box / carton",
                        "car wheel", "automated teller machine", "cassette", "cassette player", "castle", "catamaran",
                        "CD player", "cello", "mobile phone", "chain", "chain-link fence", "chain mail", "chainsaw",
                        "storage chest", "chiffonier", "bell or wind chime", "china cabinet", "Christmas stocking",
                        "church", "movie theater", "cleaver", "cliff dwelling", "cloak", "clogs", "cocktail shaker",
                        "coffee mug", "coffeemaker", "spiral or coil", "combination lock", "computer keyboard",
                        "candy store", "container ship", "convertible", "corkscrew", "cornet", "cowboy boot",
                        "cowboy hat", "cradle", "construction crane", "crash helmet", "crate", "infant bed",
                        "Crock Pot", "croquet ball", "crutch", "cuirass", "dam", "desk", "desktop computer",
                        "rotary dial telephone", "diaper", "digital clock", "digital watch", "dining table",
                        "dishcloth", "dishwasher", "disc brake", "dock", "dog sled", "dome", "doormat", "drilling rig",
                        "drum", "drumstick", "dumbbell", "Dutch oven", "electric fan", "electric guitar",
                        "electric locomotive", "entertainment center", "envelope", "espresso machine", "face powder",
                        "feather boa", "filing cabinet", "fireboat", "fire truck", "fire screen", "flagpole", "flute",
                        "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster bed",
                        "freight car", "French horn", "frying pan", "fur coat", "garbage truck",
                        "gas mask or respirator", "gas pump", "goblet", "go-kart", "golf ball", "golf cart", "gondola",
                        "gong", "gown", "grand piano", "greenhouse", "radiator grille", "grocery store", "guillotine",
                        "hair clip", "hair spray", "half-track", "hammer", "hamper", "hair dryer", "hand-held computer",
                        "handkerchief", "hard disk drive", "harmonica", "harp", "combine harvester", "hatchet",
                        "holster", "home theater", "honeycomb", "hook", "hoop skirt", "gymnastic horizontal bar",
                        "horse-drawn vehicle", "hourglass", "iPod", "clothes iron", "carved pumpkin", "jeans", "jeep",
                        "T-shirt", "jigsaw puzzle", "rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat",
                        "ladle", "lampshade", "laptop computer", "lawn mower", "lens cap", "letter opener", "library",
                        "lifeboat", "lighter", "limousine", "ocean liner", "lipstick", "slip-on shoe", "lotion",
                        "music speaker", "loupe magnifying glass", "sawmill", "magnetic compass", "messenger bag",
                        "mailbox", "tights", "one-piece bathing suit", "manhole cover", "maraca", "marimba", "mask",
                        "matchstick", "maypole", "maze", "measuring cup", "medicine cabinet", "megalith", "microphone",
                        "microwave oven", "military uniform", "milk can", "minibus", "miniskirt", "minivan", "missile",
                        "mitten", "mixing bowl", "mobile home", "ford model t", "modem", "monastery", "monitor",
                        "moped", "mortar and pestle", "graduation cap", "mosque", "mosquito net", "vespa",
                        "mountain bike", "tent", "computer mouse", "mousetrap", "moving van", "muzzle", "metal nail",
                        "neck brace", "necklace", "baby pacifier", "notebook computer", "obelisk", "oboe", "ocarina",
                        "odometer", "oil filter", "pipe organ", "oscilloscope", "overskirt", "bullock cart",
                        "oxygen mask", "product packet / packaging", "paddle", "paddle wheel", "padlock", "paintbrush",
                        "pajamas", "palace", "pan flute", "paper towel", "parachute", "parallel bars", "park bench",
                        "parking meter", "railroad car", "patio", "payphone", "pedestal", "pencil case",
                        "pencil sharpener", "perfume", "Petri dish", "photocopier", "plectrum", "Pickelhaube",
                        "picket fence", "pickup truck", "pier", "piggy bank", "pill bottle", "pillow", "ping-pong ball",
                        "pinwheel", "pirate ship", "drink pitcher", "block plane", "planetarium", "plastic bag",
                        "plate rack", "farm plow", "plunger", "Polaroid camera", "pole", "police van", "poncho",
                        "pool table", "soda bottle", "plant pot", "potter's wheel", "power drill", "prayer rug",
                        "printer", "prison", "missile", "projector", "hockey puck", "punching bag", "purse", "quill",
                        "quilt", "race car", "racket", "radiator", "radio", "radio telescope", "rain barrel",
                        "recreational vehicle", "fishing casting reel", "reflex camera", "refrigerator",
                        "remote control", "restaurant", "revolver", "rifle", "rocking chair", "rotisserie", "eraser",
                        "rugby ball", "ruler measuring stick", "sneaker", "safe", "safety pin", "salt shaker", "sandal",
                        "sarong", "saxophone", "scabbard", "weighing scale", "school bus", "schooner", "scoreboard",
                        "CRT monitor", "screw", "screwdriver", "seat belt", "sewing machine", "shield", "shoe store",
                        "shoji screen / room divider", "shopping basket", "shopping cart", "shovel", "shower cap",
                        "shower curtain", "ski", "balaclava ski mask", "sleeping bag", "slide rule", "sliding door",
                        "slot machine", "snorkel", "snowmobile", "snowplow", "soap dispenser", "soccer ball", "sock",
                        "solar thermal collector", "sombrero", "soup bowl", "keyboard space bar", "space heater",
                        "space shuttle", "spatula", "motorboat", "spider web", "spindle", "sports car", "spotlight",
                        "stage", "steam locomotive", "through arch bridge", "steel drum", "stethoscope", "scarf",
                        "stone wall", "stopwatch", "stove", "strainer", "tram", "stretcher", "couch", "stupa",
                        "submarine", "suit", "sundial", "sunglasses", "sunglasses", "sunscreen", "suspension bridge",
                        "mop", "sweatshirt", "swim trunks / shorts", "swing", "electrical switch", "syringe",
                        "table lamp", "tank", "tape player", "teapot", "teddy bear", "television", "tennis ball",
                        "thatched roof", "front curtain", "thimble", "threshing machine", "throne", "tile roof",
                        "toaster", "tobacco shop", "toilet seat", "torch", "totem pole", "tow truck", "toy store",
                        "tractor", "semi-trailer truck", "tray", "trench coat", "tricycle", "trimaran", "tripod",
                        "triumphal arch", "trolleybus", "trombone", "hot tub", "turnstile", "typewriter keyboard",
                        "umbrella", "unicycle", "upright piano", "vacuum cleaner", "vase", "vaulted or arched ceiling",
                        "velvet fabric", "vending machine", "vestment", "viaduct", "violin", "volleyball",
                        "waffle iron", "wall clock", "wallet", "wardrobe", "military aircraft", "sink",
                        "washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle",
                        "hair wig", "window screen", "window shade", "Windsor tie", "wine bottle", "airplane wing",
                        "wok", "wooden spoon", "wool", "split-rail fence", "shipwreck", "sailboat", "yurt", "website",
                        "comic book", "crossword", "traffic or street sign", "traffic light", "dust jacket", "menu",
                        "plate", "guacamole", "consomme", "hot pot", "trifle", "ice cream", "popsicle", "baguette",
                        "bagel", "pretzel", "cheeseburger", "hot dog", "mashed potatoes", "cabbage", "broccoli",
                        "cauliflower", "zucchini", "spaghetti squash", "acorn squash", "butternut squash", "cucumber",
                        "artichoke", "bell pepper", "cardoon", "mushroom", "Granny Smith apple", "strawberry", "orange",
                        "lemon", "fig", "pineapple", "banana", "jackfruit", "cherimoya (custard apple)", "pomegranate",
                        "hay", "carbonara", "chocolate syrup", "dough", "meatloaf", "pizza", "pot pie", "burrito",
                        "red wine", "espresso", "tea cup", "eggnog", "mountain", "bubble", "cliff", "coral reef",
                        "geyser", "lakeshore", "promontory", "sandbar", "beach", "valley", "volcano", "baseball player",
                        "bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper", "corn", "acorn",
                        "rose hip", "horse chestnut seed", "coral fungus", "agaric", "gyromitra", "stinkhorn mushroom",
                        "earth star fungus", "hen of the woods mushroom", "bolete", "corn cob", "toilet paper"]

    # NOTE(review): despite the name, only a single prompt template is
    # used here (not the 7-template ensemble).
    imagenet_7_templates = [
        'a photo of a {}.',
    ]

    print('load pre-trained model')
    model, preprocess = clip.load(args.arch)
    model = model.cuda()
    model.eval()

    print('load data')
    valdir = os.path.join(args.data_path, 'val')
    base_transform = transforms.Compose([
        transforms.Resize(args.resolution, interpolation=BICUBIC),
        transforms.CenterCrop(args.resolution)])
    # `preprocess` from clip.load is intentionally replaced here.
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        normalize])
    data_transform = AugMixAugmenter(base_transform, preprocess, n_views=args.batch_size - 1,
                                     augmix=False)
    val_set = datasets.ImageFolder(valdir, transform=data_transform)
    # Loader is NOT shuffled; sample order is the ImageFolder order.
    loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, num_workers=args.workers)
    # Pre-extract L2-normalized CLIP image features for the whole val set.
    with torch.no_grad():
        image_feat = []
        image_label = []
        image_list = []  # NOTE(review): never used afterwards.
        for i, (images, target) in enumerate(loader):
            images = images[0].cuda()  # base (un-augmented) view only
            target = target.cuda()
            image_features = model.encode_image(images)
            image_feat.append(F.normalize(image_features, dim=1))
            image_label.append(target)

        image_feat = torch.cat(image_feat, dim=0)
        image_label = torch.cat(image_label, dim=0)
        n = len(image_label)
        image_feat = image_feat.float()

    print('obtain text proxy')
    # Zero-shot text classifier: (dim, num_classes), columns L2-normalized.
    text_classifier = zeroshot_classifier(clip, model, imagenet_classes, imagenet_7_templates)
    text_classifier = text_classifier.float()
    logits_t = image_feat @ text_classifier
    acc1, acc5 = accuracy(logits_t, image_label, topk=(1, 5))
    top1 = (acc1 / n) * 100
    print(f'accuracy with text proxy: {top1:.2f}')

    print('online zero-shot transfer: repeat {} times'.format(args.repeat))
    num_class = len(torch.unique(image_label))
    acc_onzeta = torch.zeros(args.repeat).cuda()
    acc_onlab = torch.zeros(args.repeat).cuda()
    acc_ls = torch.zeros(args.repeat).cuda()

    # --- build the prompt-tuning model (CoCoOp or CoOp) ---
    if args.cocoop:
        model = get_cocoop(args.arch, args.test_sets, 'cpu', args.n_ctx)
        assert args.load is not None
        load_model_weight(args.load, model, 'cpu', args)
        model_state = copy.deepcopy(model.state_dict())
    else:
        model = get_coop(args.arch, args.test_sets, args.gpu, args.n_ctx, args.ctx_init)
        if args.load is not None:
            print("Use pre-trained soft prompt (CoOp) as initialization")
            pretrained_ctx = torch.load(args.load)['state_dict']['ctx']
            assert pretrained_ctx.size()[0] == args.n_ctx
            with torch.no_grad():
                model.prompt_learner.ctx.copy_(pretrained_ctx)
                model.prompt_learner.ctx_init_state = pretrained_ctx
        model_state = None
    # Freeze everything except the tunable prompt parameters.
    for name, param in model.named_parameters():
        if not args.cocoop:
            if "prompt_learner" not in name:
                param.requires_grad_(False)
        else:
            if "text_encoder" not in name:
                param.requires_grad_(False)
    model = model.cuda(args.gpu)
    if args.cocoop:
        optimizer = None
        optim_state = None
    else:
        trainable_param = model.prompt_learner.parameters()
        optimizer = torch.optim.AdamW(trainable_param, args.lr)
        optim_state = copy.deepcopy(optimizer.state_dict())

    scaler = torch.cuda.amp.GradScaler(init_scale=1000)

    # Same transforms again; `loader` defined above is reused below.
    base_transform = transforms.Compose([
        transforms.Resize(args.resolution, interpolation=BICUBIC),
        transforms.CenterCrop(args.resolution)])
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        normalize])
    data_transform = AugMixAugmenter(base_transform, preprocess, n_views=args.batch_size - 1,
                                     augmix=False)
    batchsize = 1

    classnames_all = imagenet_classes
    classnames = []
    classnames = classnames_all

    if args.cocoop:
        model.prompt_generator.reset_classnames(classnames, args.arch)
        model = model.cpu()
        model_state = model.state_dict()
        model = model.cuda(args.gpu)
    else:
        model.reset_classnames(classnames, args.arch)

    model.eval()
    if not args.cocoop:
        with torch.no_grad():
            model.reset()
    import time
    end = time.time()  # NOTE(review): never read afterwards.

    for iter in range(args.repeat):  # NOTE(review): shadows builtin `iter`.
        # Random arrival order for the online algorithm.
        idx = torch.randperm(n).cuda()
        combo_label = torch.zeros(n, num_class).cuda()
        text_label = torch.zeros(n, num_class).cuda()
        w = text_classifier.clone()          # online vision proxies
        rho = torch.zeros(num_class).cuda()  # dual variable for class prior

        for i, data in enumerate(loader):
            img, label = data
            device = torch.device('cuda:0')
            model = model.to(device).half()
            img = img[0].to(device).half()
            # Decaying step sizes for proxy (lr) and dual (rlr) updates,
            # and growing vision/text mixing weight beta.
            lr = args.cw / math.sqrt(i + 1)
            rlr = args.cr / math.sqrt(i + 1)
            beta = args.beta * math.sqrt((i + 1) / n)
            x = image_feat[idx[i], :]
            # NOTE(review): `img` comes from the unshuffled loader at
            # position i, while `x` is the pre-extracted feature of the
            # permuted sample idx[i]; tlabel (from img) and x therefore
            # describe different images — confirm this is intended.

            # Test-time prompt tuning on the current sample.
            if not args.cocoop:
                if args.tta_steps > 0:
                    with torch.no_grad():
                        model.reset()
                optimizer.load_state_dict(optim_state)
                test_time_tuning(model, x, optimizer, scaler, args)
            else:
                with torch.no_grad():
                    with torch.cuda.amp.autocast():
                        image_feature, pgen_ctx = model.gen_ctx(img, args.tpt)
                optimizer = None
                pgen_ctx = pgen_ctx.to(dtype=torch.float16)
                image_feature = image_feature.to(dtype=torch.float16)
                pgen_ctx = test_time_tuning(model, (image_feature, pgen_ctx), optimizer, scaler, args)

            # Text-side prediction, reweighted by the online class prior.
            with torch.no_grad():
                with torch.cuda.amp.autocast():
                    if args.cocoop:
                        tlabel = model((image_feature, pgen_ctx))
                    else:
                        tlabel = model(img)

                tlabel = tlabel.squeeze(0)

                tlabel = tlabel * torch.exp(rho)
                tlabel /= torch.sum(tlabel)
                # Projected gradient step on rho toward alpha/num_class prior.
                rho -= rlr * (tlabel - args.alpha / num_class)
                rho[rho < 0] = 0
                text_label[i, :] = tlabel

                # Vision-side prediction from the online proxies.
                vision_label = F.softmax(x @ w / args.tau_i, dim=0)
                combo_label[i, :] = beta * vision_label + (1 - beta) * tlabel

                # Online proxy update (gradient of CE between the two views),
                # then re-normalize columns.
                grad = torch.outer(x, vision_label - tlabel)
                w -= (lr / args.tau_i) * grad
                w = F.normalize(w, dim=0)
        acc1, acc5 = accuracy(text_label, image_label[idx], topk=(1, 5))
        acc_onlab[iter] = (acc1 / n) * 100
        acc1, acc5 = accuracy(combo_label, image_label[idx], topk=(1, 5))

        # MAPLS label-shift correction over the accumulated soft labels
        # (uniform source prior pz).
        pz = np.full(len(imagenet_classes), 1.0 / len(imagenet_classes))
        qy = mapls(combo_label, pz = pz, qy_mode = "soft", max_iter = 100, lam = lam)

        # Importance weights between estimated target and source priors.
        # NOTE(review): rebinds `w` (the proxies); safe only because w is
        # re-initialized at the top of each repeat.
        w = np.array(qy) / np.array(pz)
        # NOTE(review): combo_label_cpu is only bound when combo_label is
        # on CUDA (always true here); the next line would raise NameError
        # on a CPU run.
        if combo_label.is_cuda:
            combo_label_cpu = combo_label.cpu()
        qy_probs = lsc(combo_label_cpu, 1.0/w)
        qy_probs = torch.from_numpy(qy_probs)
        acc1_ls, acc5_ls = accuracy(qy_probs, image_label[idx], topk=(1, 5))

        acc_onzeta[iter] = (acc1 / n) * 100
        acc_ls[iter] = (acc1_ls / n) * 100
    print('mean acc of onlab is: {:.2f}'.format(torch.mean(acc_onlab)))
    print('mean acc of onzeta is: {:.2f}'.format(torch.mean(acc_onzeta)))
    print('mean acc of MAPLS is: {:.2f}'.format(torch.mean(acc_ls)))
|
|
|
|
def zeroshot_classifier(clip, model, classnames, templates):
    """Build a zero-shot text classifier from class-name prompts.

    For each class, every template is filled with the class name, encoded
    with CLIP's text encoder, L2-normalized, averaged, and re-normalized.

    Returns:
        A (embedding_dim, num_classes) CUDA tensor whose columns are the
        L2-normalized class proxies.
    """
    with torch.no_grad():
        weights = []
        for name in classnames:
            prompts = clip.tokenize([t.format(name) for t in templates]).cuda()
            emb = model.encode_text(prompts)
            emb = emb / emb.norm(dim=-1, keepdim=True)
            mean_emb = emb.mean(dim=0)
            weights.append(mean_emb / mean_emb.norm())
        return torch.stack(weights, dim=1).cuda()
|
|
|
|
def accuracy(output, target, topk=(1,)):
    """Count correct top-k predictions for each k in ``topk``.

    Returns raw correct counts (floats), not percentages; callers divide
    by the number of samples themselves.
    """
    maxk = max(topk)
    top_idx = output.topk(maxk, 1, True, True)[1]
    pred = top_idx.t().cpu()
    labels = target.cpu().view(1, -1).expand_as(pred)
    hits = pred.eq(labels)
    return [float(hits[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy())
            for k in topk]
|
|
|
|
if __name__ == '__main__':
    # Sweep over MAPLS lambda values (a single value by default).
    for lam in [0.6]:
        main(lam)
|
|
|
|