Python torchvision.models.squeezenet1_0() Examples

The following are 7 code examples of torchvision.models.squeezenet1_0(). You can go to the original project or source file by following the links above each example. You may also want to check out all other available functions and classes of the module torchvision.models.
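Before the project-specific examples, here is a minimal standalone sketch of calling torchvision.models.squeezenet1_0() directly. It uses the same legacy pretrained= argument as the examples below; the 224x224 input size and the note about ImageNet normalization are standard assumptions, not taken from any of these projects.

import torch
import torchvision.models as models

# Load SqueezeNet 1.0 with ImageNet weights and switch to inference mode.
model = models.squeezenet1_0(pretrained=True)
model.eval()

# A random tensor stands in for a real image; real inputs should be 3x224x224,
# scaled to [0, 1] and normalized with the ImageNet mean/std.
dummy = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    logits = model(dummy)
print(logits.shape)  # torch.Size([1, 1000]) -- one score per ImageNet class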
Example #1
Source File: torchvision_models.py    From pretorched-x with MIT License
def squeezenet1_0(num_classes=1000, pretrained='imagenet'):
    r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
    accuracy with 50x fewer parameters and <0.5MB model size"
    <https://arxiv.org/abs/1602.07360>`_ paper.
    """
    # Build the raw torchvision model, then attach weights via the module-level helpers
    # (pretrained_settings, load_pretrained, modify_squeezenets) defined in this file.
    model = models.squeezenet1_0(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['squeezenet1_0'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_squeezenets(model)
    return model
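The helpers used above (pretrained_settings, load_pretrained, modify_squeezenets) are defined elsewhere in the same torchvision_models.py. As a hedged sketch only, a loader like load_pretrained typically checks the class count and copies a downloaded checkpoint into the model; the function name and the 'url'/'num_classes' keys below are assumptions for illustration, not the project's actual definition.

import torch.utils.model_zoo as model_zoo

def load_pretrained_sketch(model, num_classes, settings):
    # The checkpoint was trained for a fixed head size, so reject mismatches early.
    assert num_classes == settings['num_classes'], \
        'num_classes should be {}, but is {}'.format(settings['num_classes'], num_classes)
    # Download (and cache) the checkpoint, then copy its weights into the model.
    model.load_state_dict(model_zoo.load_url(settings['url']))
    return model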
Example #2
Source File: tools.py    From perceptron-benchmark with Apache License 2.0
def _load_pytorch_model(model_name, summary):
    import torchvision.models as models
    switcher = {
        'alexnet': lambda: models.alexnet(pretrained=True).eval(),
        "vgg11": lambda: models.vgg11(pretrained=True).eval(),
        "vgg11_bn": lambda: models.vgg11_bn(pretrained=True).eval(),
        "vgg13": lambda: models.vgg13(pretrained=True).eval(),
        "vgg13_bn": lambda: models.vgg13_bn(pretrained=True).eval(),
        "vgg16": lambda: models.vgg16(pretrained=True).eval(),
        "vgg16_bn": lambda: models.vgg16_bn(pretrained=True).eval(),
        "vgg19": lambda: models.vgg19(pretrained=True).eval(),
        "vgg19_bn": lambda: models.vgg19_bn(pretrained=True).eval(),
        "resnet18": lambda: models.resnet18(pretrained=True).eval(),
        "resnet34": lambda: models.resnet34(pretrained=True).eval(),
        "resnet50": lambda: models.resnet50(pretrained=True).eval(),
        "resnet101": lambda: models.resnet101(pretrained=True).eval(),
        "resnet152": lambda: models.resnet152(pretrained=True).eval(),
        "squeezenet1_0": lambda: models.squeezenet1_0(pretrained=True).eval(),
        "squeezenet1_1": lambda: models.squeezenet1_1(pretrained=True).eval(),
        "densenet121": lambda: models.densenet121(pretrained=True).eval(),
        "densenet161": lambda: models.densenet161(pretrained=True).eval(),
        "densenet201": lambda: models.densenet201(pretrained=True).eval(),
        "inception_v3": lambda: models.inception_v3(pretrained=True).eval(),
    }

    _load_model = switcher.get(model_name, None)
    _model = _load_model()  # raises TypeError if model_name is not a key in switcher
    import torch
    if torch.cuda.is_available():
        _model = _model.cuda()
    from perceptron.models.classification.pytorch import PyTorchModel as ClsPyTorchModel
    import numpy as np
    # Standard ImageNet normalization, broadcast over CHW tensors in [0, 1].
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    pmodel = ClsPyTorchModel(_model, bounds=(0, 1), num_classes=1000,
                             preprocessing=(mean, std))
    return pmodel
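The (mean, std) pair passed as preprocessing above is the standard ImageNet normalization, reshaped so it broadcasts over CHW tensors in [0, 1]; presumably the wrapper applies (x - mean) / std before the forward pass. A plain-NumPy sketch of that step, independent of the perceptron API:

import numpy as np

mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))

x = np.random.rand(3, 224, 224)      # stand-in for an image scaled to [0, 1]
x_normalized = (x - mean) / std      # what the network actually sees
print(x_normalized.shape)            # (3, 224, 224)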
Example #3
Source File: tools.py    From perceptron-benchmark with Apache License 2.0
def load_pytorch_model(model_name):
    import torchvision.models as models
    switcher = {
        'alexnet': lambda: models.alexnet(pretrained=True).eval(),
        "vgg11": lambda: models.vgg11(pretrained=True).eval(),
        "vgg11_bn": lambda: models.vgg11_bn(pretrained=True).eval(),
        "vgg13": lambda: models.vgg13(pretrained=True).eval(),
        "vgg13_bn": lambda: models.vgg13_bn(pretrained=True).eval(),
        "vgg16": lambda: models.vgg16(pretrained=True).eval(),
        "vgg16_bn": lambda: models.vgg16_bn(pretrained=True).eval(),
        "vgg19": lambda: models.vgg19(pretrained=True).eval(),
        "vgg19_bn": lambda: models.vgg19_bn(pretrained=True).eval(),
        "resnet18": lambda: models.resnet18(pretrained=True).eval(),
        "resnet34": lambda: models.resnet34(pretrained=True).eval(),
        "resnet50": lambda: models.resnet50(pretrained=True).eval(),
        "resnet101": lambda: models.resnet101(pretrained=True).eval(),
        "resnet152": lambda: models.resnet152(pretrained=True).eval(),
        "squeezenet1_0": lambda: models.squeezenet1_0(pretrained=True).eval(),
        "squeezenet1_1": lambda: models.squeezenet1_1(pretrained=True).eval(),
        "densenet121": lambda: models.densenet121(pretrained=True).eval(),
        "densenet161": lambda: models.densenet161(pretrained=True).eval(),
        "densenet201": lambda: models.densenet201(pretrained=True).eval(),
        "inception_v3": lambda: models.inception_v3(pretrained=True).eval(),
    }

    _load_model = switcher.get(model_name, None)
    _model = _load_model()
    return _model 
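One caveat with the dispatch above: switcher.get(model_name, None) returns None for an unknown name, so the subsequent _load_model() call fails with a TypeError. A hedged defensive variant, reduced to the SqueezeNet entries for brevity:

import torchvision.models as models

def load_pytorch_model_safe(model_name):
    switcher = {
        'squeezenet1_0': lambda: models.squeezenet1_0(pretrained=True).eval(),
        'squeezenet1_1': lambda: models.squeezenet1_1(pretrained=True).eval(),
    }
    builder = switcher.get(model_name)
    if builder is None:
        raise ValueError('Unsupported model name: {!r}'.format(model_name))
    return builder()

# model = load_pytorch_model_safe('squeezenet1_0')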
Example #4
Source File: torchvision_models.py    From pretrained-models.pytorch with BSD 3-Clause "New" or "Revised" License
def squeezenet1_0(num_classes=1000, pretrained='imagenet'):
    r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
    accuracy with 50x fewer parameters and <0.5MB model size"
    <https://arxiv.org/abs/1602.07360>`_ paper.
    """
    model = models.squeezenet1_0(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['squeezenet1_0'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_squeezenets(model)
    return model 
Example #5
Source File: main.py    From fine-tuning.pytorch with MIT License
def getNetwork(args):
    if (args.net_type == 'alexnet'):
        net = models.alexnet(pretrained=args.finetune)
        file_name = 'alexnet'
    elif (args.net_type == 'vggnet'):
        if(args.depth == 11):
            net = models.vgg11(pretrained=args.finetune)
        elif(args.depth == 13):
            net = models.vgg13(pretrained=args.finetune)
        elif(args.depth == 16):
            net = models.vgg16(pretrained=args.finetune)
        elif(args.depth == 19):
            net = models.vgg19(pretrained=args.finetune)
        else:
            print('Error : VGGnet should have depth of either [11, 13, 16, 19]')
            sys.exit(1)
        file_name = 'vgg-%s' %(args.depth)
    elif (args.net_type == 'squeezenet'):
        net = models.squeezenet1_0(pretrained=args.finetune)
        file_name = 'squeeze'
    elif (args.net_type == 'resnet'):
        net = resnet(args.finetune, args.depth)
        file_name = 'resnet-%s' %(args.depth)
    elif (args.net_type == 'inception'):
        net = pretrainedmodels.inceptionv3(num_classes=1000, pretrained='imagenet')
        file_name = 'inception-v3'
    elif (args.net_type == 'xception'):
        net = pretrainedmodels.xception(num_classes=1000, pretrained='imagenet')
        file_name = 'xception'
    else:
        print('Error : Network should be either [alexnet / squeezenet / vggnet / resnet]')
        sys.exit(1)

    return net, file_name 
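A hedged usage sketch for getNetwork(): it expects an argparse-style namespace with net_type, depth, and finetune fields. The exact command-line flags defined elsewhere in main.py are not shown on this page, so the namespace below is an assumption for illustration only.

from types import SimpleNamespace

args = SimpleNamespace(net_type='squeezenet', depth=0, finetune=True)
# net, file_name = getNetwork(args)   # would return (models.squeezenet1_0(...), 'squeeze')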
Example #6
Source File: torchvision_models.py    From models-comparison.pytorch with BSD 3-Clause "New" or "Revised" License
def squeezenet1_0(num_classes=1000, pretrained='imagenet'):
    r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
    accuracy with 50x fewer parameters and <0.5MB model size"
    <https://arxiv.org/abs/1602.07360>`_ paper.
    """
    model = models.squeezenet1_0(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['squeezenet1_0'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_squeezenets(model)
    return model 
Example #7
Source File: squeezenet.py    From pytorch-lighthead with MIT License
def _init_modules(self):
    if self.version == '1_0':
        squeezenet = models.squeezenet1_0()
        self.clip = -2
    elif self.version == '1_1':
        squeezenet = models.squeezenet1_1()
        self.clip = -5
    if self.pretrained:
        print("Loading pretrained weights from %s" %(self.model_path))
        if torch.cuda.is_available():
            state_dict = torch.load(self.model_path)
        else:
            state_dict = torch.load(self.model_path, map_location=lambda storage, loc: storage)
        squeezenet.load_state_dict({k: v for k, v in state_dict.items() if k in squeezenet.state_dict()})

    squeezenet.classifier = nn.Sequential(*list(squeezenet.classifier._modules.values())[:-1])

    # not using the last maxpool layer
    if self.lighthead:
        self.RCNN_base = nn.Sequential(*list(squeezenet.features._modules.values())[:self.clip])
    else:
        self.RCNN_base = nn.Sequential(*list(squeezenet.features._modules.values()))

    # Fix Layers
    for layer in range(len(self.RCNN_base)):
        for p in self.RCNN_base[layer].parameters():
            p.requires_grad = False

    # self.RCNN_base = _RCNN_base(vgg.features, self.classes, self.dout_base_model)
    if self.lighthead:
        self.lighthead_base = nn.Sequential(*list(squeezenet.features._modules.values())[self.clip + 1:])
        self.RCNN_top = nn.Sequential(nn.Linear(490 * 7 * 7, 2048), nn.ReLU(inplace=True))
    else:
        self.RCNN_top = squeezenet.classifier

    d_in = 2048 if self.lighthead else 512

    # not using the last maxpool layer
    self.RCNN_cls_score = nn.Linear(d_in, self.n_classes)

    if self.class_agnostic:
        self.RCNN_bbox_pred = nn.Linear(d_in, 4)
    else:
        self.RCNN_bbox_pred = nn.Linear(d_in, 4 * self.n_classes)
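A related pattern, shown as a hedged sketch rather than as part of the example above: when squeezenet1_0 is reused as a plain classifier with a different label set, the usual approach is to replace only the final 1x1 convolution in model.classifier (index 1 in torchvision's layout) and keep the pretrained features. The class count below is an assumption.

import torch
import torch.nn as nn
import torchvision.models as models

num_classes = 10                                   # assumption: your own label count
model = models.squeezenet1_0(pretrained=True)
model.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=1)
model.num_classes = num_classes                    # keep the bookkeeping attribute consistent

with torch.no_grad():
    out = model(torch.rand(1, 3, 224, 224))
print(out.shape)                                   # torch.Size([1, 10])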