Python torchvision.models.alexnet() Examples
The following are 30 code examples of torchvision.models.alexnet(). You can go to the original project or source file by following the links above each example, or check out all other available functions and classes of the torchvision.models module.
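Before the project-specific examples, here is a minimal sketch of the basic call: load a pretrained AlexNet, switch it to evaluation mode, and run a forward pass. The random input tensor and the argmax lookup are illustrative only and are not taken from any project below; newer torchvision releases replace the pretrained argument with weights, while the examples on this page use the older keyword.

import torch
import torchvision.models as models

# Load AlexNet with ImageNet weights (older torchvision API, as used in the examples below).
model = models.alexnet(pretrained=True)
model.eval()

# Dummy batch of one 3x224x224 image; real inputs should be normalized with the
# ImageNet mean/std, as several of the examples below demonstrate.
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)  # shape: (1, 1000)
print(logits.argmax(dim=1).item())  # predicted ImageNet class index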
Example #1
Source File: test_attack_BlendedUniformNoiseAttack.py From perceptron-benchmark with Apache License 2.0 | 6 votes |
def test_untargeted_AlexNet(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.alexnet(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial
Example #2
Source File: __main__.py From gandissect with MIT License | 6 votes |
def test_dissection():
    verbose_progress(True)
    from torchvision.models import alexnet
    from torchvision import transforms
    model = InstrumentedModel(alexnet(pretrained=True))
    model.eval()
    # Load an alexnet
    model.retain_layers([
        ('features.0', 'conv1'),
        ('features.3', 'conv2'),
        ('features.6', 'conv3'),
        ('features.8', 'conv4'),
        ('features.10', 'conv5')])
    # load broden dataset
    bds = BrodenDataset('dataset/broden',
                        transform=transforms.Compose([
                            transforms.ToTensor(),
                            transforms.Normalize(IMAGE_MEAN, IMAGE_STDEV)]),
                        size=100)
    # run dissect
    dissect('dissect/test', model, bds, examples_per_unit=10)
Example #3
Source File: inference.py From fine-tuning.pytorch with MIT License | 6 votes |
def getNetwork(args):
    if (args.net_type == 'alexnet'):
        net = models.alexnet(pretrained=args.finetune)
        file_name = 'alexnet'
    elif (args.net_type == 'vggnet'):
        if (args.depth == 16):
            net = models.vgg16(pretrained=args.finetune)
        file_name = 'vgg-%s' % (args.depth)
    elif (args.net_type == 'inception'):
        # Note: torchvision exposes this model as inception_v3
        net = models.inception_v3(pretrained=args.finetune)
        file_name = 'inception-v3'
    elif (args.net_type == 'resnet'):
        net = resnet(args.finetune, args.depth)
        file_name = 'resnet-%s' % (args.depth)
    else:
        print('Error : Network should be either [VGGNet / ResNet]')
        sys.exit(1)
    return net, file_name
Example #4
Source File: cnn_benchmarks.py From stacks-usecase with Apache License 2.0 | 6 votes |
def select(self, model_name=None):
    """select models to be run"""
    logging.info("Run details")
    logging.info("=" * 71)
    models = [
        self.alexnet,
        self.resnet18,
        self.resnet50,
        self.vgg16,
        self.squeezenet,
    ]
    if model_name:
        self.models = [
            model
            for model in models
            for name in model_name
            if name == model.name
        ]
    logging.info("Selected model(s) :: ")
    for m in self.models:
        logging.info("%s ------------- Batchsize :: %s " % (m.name, m.batch))
    logging.info("=" * 71)
Example #5
Source File: evaluate.py From Robust-Lane-Detection-and-Tracking with MIT License | 6 votes |
def __init__(self, feature, hidden_unit, D_in, D_out):
    """
    In the constructor we instantiate two nn.Linear modules and
    assign them as member variables.
    """
    super(cnn_lstm, self).__init__()
    self.model_ft = models.alexnet(pretrained=True)
    # print (model_ft)
    self.num_ftrs = self.model_ft.classifier[6].in_features
    self.feature_model = list(self.model_ft.classifier.children())
    self.feature_model.pop()
    self.feature_model.pop()
    # feature_model.append(nn.Linear(num_ftrs, 3))
    self.feature_model.append(nn.Linear(self.num_ftrs, 1046))
    # self.feature_model.append(nn.Linear(self.num_ftrs, 524))
    self.feature_model.append(nn.Linear(1046, 100))
    # self.feature_model.append(nn.Linear(524, 100))
    self.model_ft.classifier = nn.Sequential(*self.feature_model)
    self.rnn = nn.LSTM(feature, hidden_unit, batch_first=True).cuda()
    self.linear = torch.nn.Linear(D_in, D_out).cuda()
Example #6
Source File: run_py_node.py From Robust-Lane-Detection-and-Tracking with MIT License | 6 votes |
def __init__(self, feature, hidden_unit, D_in, D_out):
    """
    In the constructor we instantiate two nn.Linear modules and
    assign them as member variables.
    """
    super(cnn_lstm, self).__init__()
    self.model_ft = models.alexnet(pretrained=True)
    # print (model_ft)
    self.num_ftrs = self.model_ft.classifier[6].in_features
    self.feature_model = list(self.model_ft.classifier.children())
    self.feature_model.pop()
    self.feature_model.pop()
    # feature_model.append(nn.Linear(num_ftrs, 3))
    self.feature_model.append(nn.Linear(self.num_ftrs, 1046))
    self.feature_model.append(nn.Linear(1046, 100))
    self.model_ft.classifier = nn.Sequential(*self.feature_model)
    self.rnn = nn.LSTM(feature, hidden_unit, batch_first=True).cuda()
    self.linear = torch.nn.Linear(D_in, D_out).cuda()
Example #7
Source File: pretrained_networks.py From PerceptualSimilarity with BSD 2-Clause "Simplified" License | 6 votes |
def __init__(self, requires_grad=False, pretrained=True):
    super(alexnet, self).__init__()
    alexnet_pretrained_features = tv.alexnet(pretrained=pretrained).features
    self.slice1 = torch.nn.Sequential()
    self.slice2 = torch.nn.Sequential()
    self.slice3 = torch.nn.Sequential()
    self.slice4 = torch.nn.Sequential()
    self.slice5 = torch.nn.Sequential()
    self.N_slices = 5
    for x in range(2):
        self.slice1.add_module(str(x), alexnet_pretrained_features[x])
    for x in range(2, 5):
        self.slice2.add_module(str(x), alexnet_pretrained_features[x])
    for x in range(5, 8):
        self.slice3.add_module(str(x), alexnet_pretrained_features[x])
    for x in range(8, 10):
        self.slice4.add_module(str(x), alexnet_pretrained_features[x])
    for x in range(10, 12):
        self.slice5.add_module(str(x), alexnet_pretrained_features[x])
    if not requires_grad:
        for param in self.parameters():
            param.requires_grad = False
Example #8
Source File: pretrained_networks.py From TecoGAN with Apache License 2.0 | 6 votes |
def __init__(self, requires_grad=False, pretrained=True):
    super(alexnet, self).__init__()
    alexnet_pretrained_features = models.alexnet(pretrained=pretrained).features
    self.slice1 = torch.nn.Sequential()
    self.slice2 = torch.nn.Sequential()
    self.slice3 = torch.nn.Sequential()
    self.slice4 = torch.nn.Sequential()
    self.slice5 = torch.nn.Sequential()
    self.N_slices = 5
    for x in range(2):
        self.slice1.add_module(str(x), alexnet_pretrained_features[x])
    for x in range(2, 5):
        self.slice2.add_module(str(x), alexnet_pretrained_features[x])
    for x in range(5, 8):
        self.slice3.add_module(str(x), alexnet_pretrained_features[x])
    for x in range(8, 10):
        self.slice4.add_module(str(x), alexnet_pretrained_features[x])
    for x in range(10, 12):
        self.slice5.add_module(str(x), alexnet_pretrained_features[x])
    if not requires_grad:
        for param in self.parameters():
            param.requires_grad = False
Example #9
Source File: pretrained_networks.py From SMIT with MIT License | 6 votes |
def __init__(self, requires_grad=False, pretrained=True):
    super(alexnet, self).__init__()
    alexnet_pretrained_features = models.alexnet(
        pretrained=pretrained).features
    self.slice1 = torch.nn.Sequential()
    self.slice2 = torch.nn.Sequential()
    self.slice3 = torch.nn.Sequential()
    self.slice4 = torch.nn.Sequential()
    self.slice5 = torch.nn.Sequential()
    self.N_slices = 5
    for x in range(2):
        self.slice1.add_module(str(x), alexnet_pretrained_features[x])
    for x in range(2, 5):
        self.slice2.add_module(str(x), alexnet_pretrained_features[x])
    for x in range(5, 8):
        self.slice3.add_module(str(x), alexnet_pretrained_features[x])
    for x in range(8, 10):
        self.slice4.add_module(str(x), alexnet_pretrained_features[x])
    for x in range(10, 12):
        self.slice5.add_module(str(x), alexnet_pretrained_features[x])
    if not requires_grad:
        for param in self.parameters():
            param.requires_grad = False
Example #10
Source File: test_attack_MotionBlurAttack.py From perceptron-benchmark with Apache License 2.0 | 6 votes |
def test_untargeted_AlexNet(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.alexnet(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial
Example #11
Source File: test_attack_AdditiveUniformNoiseAttack.py From perceptron-benchmark with Apache License 2.0 | 6 votes |
def test_untargeted_AlexNet(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.alexnet(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial
Example #12
Source File: test_attack_AdditiveGaussianNoiseAttack.py From perceptron-benchmark with Apache License 2.0 | 6 votes |
def test_untargeted_AlexNet(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.alexnet(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial
Example #13
Source File: test_attack_Gaussian_blur.py From perceptron-benchmark with Apache License 2.0 | 6 votes |
def test_untargeted_AlexNet(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.alexnet(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial
Example #14
Source File: extractor.py From DeepSpectrum with GNU General Public License v3.0 | 6 votes |
def __init__(self, images, model_key, layer, batch_size=256):
    super().__init__(images, batch_size)
    self.models = {
        "alexnet": models.alexnet,
        "squeezenet": models.squeezenet1_1,
        "googlenet": models.googlenet
    }
    self.preprocessors = {
        "alexnet": self.__preprocess_alexnet,
        "squeezenet": self.__preprocess_squeezenet,
        "googlenet": self.__preprocess_googlenet
    }
    self.batch_size = batch_size
    self.layer = layer
    self.model_key = model_key
    self.model, self.feature_layer, self.output_size = self.__build_model(
        layer)
Example #15
Source File: img_to_vec.py From img2vec with MIT License | 5 votes |
def _get_model_and_layer(self, model_name, layer):
    """ Internal method for getting layer from model
    :param model_name: model name such as 'resnet-18'
    :param layer: layer as a string for resnet-18 or int for alexnet
    :returns: pytorch model, selected layer
    """
    if model_name == 'resnet-18':
        model = models.resnet18(pretrained=True)
        if layer == 'default':
            layer = model._modules.get('avgpool')
            self.layer_output_size = 512
        else:
            layer = model._modules.get(layer)
        return model, layer
    elif model_name == 'alexnet':
        model = models.alexnet(pretrained=True)
        if layer == 'default':
            layer = model.classifier[-2]
            self.layer_output_size = 4096
        else:
            layer = model.classifier[-layer]
        return model, layer
    else:
        raise KeyError('Model %s was not found' % model_name)
Example #16
Source File: test_torchvision_models.py From pytorch-cnn-finetune with MIT License | 5 votes |
def test_alexnet_model_with_default_classifier(input_var):
    original_model = torchvision_models.alexnet(pretrained=True)
    original_model(input_var)
    finetune_model = make_model(
        'alexnet',
        num_classes=1000,
        use_original_classifier=True,
        input_size=(224, 224),
        pretrained=True,
    )
    assert_equal_model_outputs(input_var, original_model, finetune_model)
Example #17
Source File: test_torchvision_models.py From pytorch-cnn-finetune with MIT License | 5 votes |
def test_alexnet_model_with_another_input_size(input_var, pool):
    model = make_model(
        'alexnet',
        num_classes=1000,
        input_size=(256, 256),
        pool=pool,
        pretrained=True,
    )
    model(input_var)
Example #18
Source File: torchvision_models.py From models-comparison.pytorch with BSD 3-Clause "New" or "Revised" License | 5 votes |
def alexnet(num_classes=1000, pretrained='imagenet'):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
    """
    # https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
    model = models.alexnet(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['alexnet'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_alexnet(model)
    return model

###############################################################
# DenseNets
Example #19
Source File: extractor.py From DeepSpectrum with GNU General Public License v3.0 | 5 votes |
def __build_model(self, layer):
    assert (self.model_key in self.models), \
        f"Invalid model for pytorch extractor. Available models: {self.models}"
    base_model = self.models[self.model_key](pretrained=True)
    base_model.eval()
    if self.model_key == "alexnet":
        log.debug(f'Layout of base model: \n{base_model}')
        layers = {"fc6": -5, "fc7": -2}
        assert (layer in layers), \
            f"Invalid layer key. Available layers: {layers.keys}"
        feature_layer = base_model.classifier[layers[layer]]
        return base_model, feature_layer, (4096, )
    elif self.model_key == "squeezenet":
        log.info(
            'Disregarding user choice of feature layer: '
            'Only one layer is currently available for squeezenet.')
        base_model = torch.nn.Sequential(
            base_model.features,
            torch.nn.AdaptiveAvgPool2d(output_size=(2, 2)))
        feature_layer = base_model[-1]
        log.debug(f'Layout of model: \n{base_model}')
        return base_model, feature_layer, (512, 2, 2)
    elif self.model_key == "googlenet":
        layers = {"avgpool": base_model.avgpool, "fc": base_model.fc}
        assert (layer in layers), \
            f"Invalid layer key. Available layers: {layers.keys}"
        feature_layer = layers[layer]
        log.debug(f'Layout of model: \n{base_model}')
        return base_model, feature_layer, (1024, 1, 1)
    else:
        pass
Example #20
Source File: basenet.py From MCD_DA with MIT License | 5 votes |
def __init__(self, num_classes=12):
    super(Classifier, self).__init__()
    model_ft = models.alexnet(pretrained=False)
    mod = list(model_ft.classifier.children())
    mod.pop()
    mod.append(nn.Linear(4096, num_classes))
    self.classifier = nn.Sequential(*mod)
Example #21
Source File: basenet.py From MCD_DA with MIT License | 5 votes |
def __init__(self):
    super(AlexNet_office, self).__init__()
    model_ft = models.alexnet(pretrained=True)
    mod = list(model_ft.features.children())
    self.features = model_ft.features  # nn.Sequential(*mod)
    mod = list(model_ft.classifier.children())
    mod.pop()
    print(mod)
    self.classifier = nn.Sequential(*mod)
Example #22
Source File: basenet.py From MCD_DA with MIT License | 5 votes |
def __init__(self):
    super(AlexNet, self).__init__()
    model_ft = models.alexnet(pretrained=True)
    mod = list(model_ft.features.children())
    self.features = model_ft.features  # nn.Sequential(*mod)
    print(self.features[0])
    # mod = list(model_ft.classifier.children())
    # mod.pop()
    # self.classifier = nn.Sequential(*mod)
Example #23
Source File: main.py From gradcam.pytorch with MIT License | 5 votes |
def getNetwork(args):
    if (args.net_type == 'alexnet'):
        net = models.alexnet(pretrained=args.finetune)
        file_name = 'alexnet'
    elif (args.net_type == 'vggnet'):
        if (args.depth == 11):
            net = models.vgg11(pretrained=args.finetune)
        elif (args.depth == 13):
            net = models.vgg13(pretrained=args.finetune)
        elif (args.depth == 16):
            net = models.vgg16(pretrained=args.finetune)
        elif (args.depth == 19):
            net = models.vgg19(pretrained=args.finetune)
        else:
            print('Error : VGGnet should have depth of either [11, 13, 16, 19]')
            sys.exit(1)
        file_name = 'vgg-%s' % (args.depth)
    elif (args.net_type == 'resnet'):
        net = resnet(args.finetune, args.depth)
        file_name = 'resnet-%s' % (args.depth)
    else:
        print('Error : Network should be either [alexnet / vggnet / resnet / densenet]')
        sys.exit(1)
    return net, file_name
Example #24
Source File: alexnet-extraction.py From DEMUD with Apache License 2.0 | 5 votes |
def usage():
    print("usage: python alexnet-extraction.py dataset_dir out_dir layer batch")
    sys.exit(1)
Example #25
Source File: misc_functions.py From pytorch-cnn-visualizations with MIT License | 5 votes |
def get_example_params(example_index):
    """
    Gets used variables for almost all visualizations, like the image, model etc.

    Args:
        example_index (int): Image id to use from examples

    returns:
        original_image (numpy arr): Original image read from the file
        prep_img (numpy_arr): Processed image
        target_class (int): Target class for the image
        file_name_to_export (string): File name to export the visualizations
        pretrained_model(Pytorch model): Model to use for the operations
    """
    # Pick one of the examples
    example_list = (('../input_images/snake.jpg', 56),
                    ('../input_images/cat_dog.png', 243),
                    ('../input_images/spider.png', 72))
    img_path = example_list[example_index][0]
    target_class = example_list[example_index][1]
    file_name_to_export = img_path[img_path.rfind('/')+1:img_path.rfind('.')]
    # Read image
    original_image = Image.open(img_path).convert('RGB')
    # Process image
    prep_img = preprocess_image(original_image)
    # Define model
    pretrained_model = models.alexnet(pretrained=True)
    return (original_image,
            prep_img,
            target_class,
            file_name_to_export,
            pretrained_model)
Example #26
Source File: main.py From fine-tuning.pytorch with MIT License | 5 votes |
def getNetwork(args):
    if (args.net_type == 'alexnet'):
        net = models.alexnet(pretrained=args.finetune)
        file_name = 'alexnet'
    elif (args.net_type == 'vggnet'):
        if (args.depth == 11):
            net = models.vgg11(pretrained=args.finetune)
        elif (args.depth == 13):
            net = models.vgg13(pretrained=args.finetune)
        elif (args.depth == 16):
            net = models.vgg16(pretrained=args.finetune)
        elif (args.depth == 19):
            net = models.vgg19(pretrained=args.finetune)
        else:
            print('Error : VGGnet should have depth of either [11, 13, 16, 19]')
            sys.exit(1)
        file_name = 'vgg-%s' % (args.depth)
    elif (args.net_type == 'squeezenet'):
        net = models.squeezenet1_0(pretrained=args.finetune)
        file_name = 'squeeze'
    elif (args.net_type == 'resnet'):
        net = resnet(args.finetune, args.depth)
        file_name = 'resnet-%s' % (args.depth)
    elif (args.net_type == 'inception'):
        net = pretrainedmodels.inceptionv3(num_classes=1000, pretrained='imagenet')
        file_name = 'inception-v3'
    elif (args.net_type == 'xception'):
        net = pretrainedmodels.xception(num_classes=1000, pretrained='imagenet')
        file_name = 'xception'
    else:
        print('Error : Network should be either [alexnet / squeezenet / vggnet / resnet]')
        sys.exit(1)
    return net, file_name
Example #27
Source File: misc.py From LightNet with MIT License | 5 votes |
def get_params(example_index):
    """
    Gets used variables for almost all visualizations, like the image, model etc.

    Args:
        example_index (int): Image id to use from examples

    returns:
        original_image (numpy arr): Original image read from the file
        prep_img (numpy_arr): Processed image
        target_class (int): Target class for the image
        file_name_to_export (string): File name to export the visualizations
        pretrained_model(Pytorch model): Model to use for the operations
    """
    # Pick one of the examples
    example_list = [['../input_images/snake.jpg', 56],
                    ['../input_images/cat_dog.png', 243],
                    ['../input_images/spider.png', 72]]
    selected_example = example_index
    img_path = example_list[selected_example][0]
    target_class = example_list[selected_example][1]
    file_name_to_export = img_path[img_path.rfind('/')+1:img_path.rfind('.')]
    # Read image
    original_image = cv2.imread(img_path, 1)
    # Process image
    prep_img = preprocess_image(original_image)
    # Define model
    pretrained_model = models.alexnet(pretrained=True)
    return (original_image, prep_img, target_class, file_name_to_export,
            pretrained_model)
Example #28
Source File: backbone.py From transferlearning with MIT License | 5 votes |
def __init__(self):
    super(AlexNetFc, self).__init__()
    model_alexnet = models.alexnet(pretrained=True)
    self.features = model_alexnet.features
    self.classifier = nn.Sequential()
    for i in range(6):
        self.classifier.add_module(
            "classifier" + str(i), model_alexnet.classifier[i])
    self.__in_features = model_alexnet.classifier[6].in_features
Example #29
Source File: torchvision_models.py From pretrained-models.pytorch with BSD 3-Clause "New" or "Revised" License | 5 votes |
def alexnet(num_classes=1000, pretrained='imagenet'):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
    """
    # https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
    model = models.alexnet(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['alexnet'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_alexnet(model)
    return model

###############################################################
# DenseNets
Example #30
Source File: test_gradient_ascent.py From flashtorch with MIT License | 5 votes |
def model():
    return models.alexnet().features