Python torchvision.models.__dict__ Examples

The following are 30 code examples of torchvision.models.__dict__, the mapping through which torchvision exposes its model constructors by name. Each example comes from an open-source project; the source file and license are noted above it. You may also want to check out the other available functions and classes of the module torchvision.models.
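
Every example below relies on the same idiom: torchvision.models exposes each model constructor as a module attribute, so models.__dict__[name] looks a constructor up by its string name and calling the result instantiates the network. A minimal, self-contained sketch of that pattern (the helper name build_model and the 'resnet18' default are illustrative, not taken from any project below):

import torchvision.models as models

def build_model(arch, pretrained=False):
    """Instantiate a torchvision model from its string name."""
    if arch not in models.__dict__:
        raise ValueError("Unknown torchvision architecture: '{}'".format(arch))
    # models.__dict__[arch] is the constructor itself, e.g. models.resnet18.
    # Newer torchvision releases prefer the weights= argument over pretrained=.
    return models.__dict__[arch](pretrained=pretrained)

model = build_model('resnet18')                      # randomly initialized
model_pt = build_model('resnet18', pretrained=True)  # ImageNet weights
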
Example #1
Source File: model.py    From VSE-C with MIT License
def get_cnn(self, arch, pretrained):
        """Load a pretrained CNN and parallelize over GPUs
        """
        if pretrained:
            print("=> using pre-trained model '{}'".format(arch))
            model = models.__dict__[arch](pretrained=True)
        else:
            print("=> creating model '{}'".format(arch))
            model = models.__dict__[arch]()

        if arch.startswith('alexnet') or arch.startswith('vgg'):
            model.features = nn.DataParallel(model.features)
            model.cuda()
        else:
            model = nn.DataParallel(model).cuda()

        return model 
Example #2
Source File: utils.py    From actor-observer with GNU General Public License v3.0
def generic_load(arch, pretrained, weights, args):
    if arch in tmodels.__dict__:  # torchvision models
        if pretrained:
            print("=> using pre-trained model '{}'".format(arch))
            model = tmodels.__dict__[arch](pretrained=True)
            model = model.cuda()
        else:
            print("=> creating model '{}'".format(arch))
            model = tmodels.__dict__[arch]()
    else:  # defined as script in this directory
        model = importlib.import_module('.' + arch, package='models')
        model = model.__dict__[arch](args)

    if not weights == '':
        print('loading pretrained-weights from {}'.format(weights))
        chkpoint = torch.load(weights)
        if isinstance(chkpoint, dict) and 'state_dict' in chkpoint:
            chkpoint = chkpoint['state_dict']
        load_partial_state(model, chkpoint)
    return model 
Example #3
Source File: main.py    From elastic with BSD 3-Clause "New" or "Revised" License
def initialize_model(
    arch: str, lr: float, momentum: float, weight_decay: float, device_id: int
):
    print(f"=> creating model: {arch}")
    model = models.__dict__[arch]()
    # For multiprocessing distributed, DistributedDataParallel constructor
    # should always set the single device scope, otherwise,
    # DistributedDataParallel will use all available devices.
    model.cuda(device_id)
    cudnn.benchmark = True
    model = DistributedDataParallel(model, device_ids=[device_id])
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(device_id)
    optimizer = SGD(
        model.parameters(), lr, momentum=momentum, weight_decay=weight_decay
    )
    return model, criterion, optimizer 
Example #4
Source File: imagenet.py    From pytorch-lightning with Apache License 2.0
def __init__(self,
                 arch,
                 pretrained,
                 lr: float,
                 momentum: float,
                 weight_decay: int,
                 data_path: str,
                 batch_size: int, **kwargs):
        """
        TODO: add docstring here
        """
        super().__init__()
        self.arch = arch
        self.pretrained = pretrained
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.data_path = data_path
        self.batch_size = batch_size
        self.model = models.__dict__[self.arch](pretrained=self.pretrained) 
Example #5
Source File: imagenet_models.py    From imagenet with BSD 3-Clause "New" or "Revised" License
def build_model(self):
		# Create model from scratch or use a pretrained one
		print("=> using model '{}'".format(self._arch))
		self._model = models.__dict__[self._arch](num_classes=len(self._labels))
		print("=> loading checkpoint '{}'".format(self._ckp))
		if self._cuda:
			checkpoint = torch.load(self._ckp)
		else:
			# Load GPU model on CPU
			checkpoint = torch.load(self._ckp, map_location=lambda storage, loc: storage)
		# Load weights
		self._model.load_state_dict(checkpoint['state_dict'])

		if self._cuda:
			self._model.cuda()
		else:
			self._model.cpu()


	# Preprocess Images to be ImageNet-compliant 
Example #6
Source File: test_model_CAM.py    From moments_models with BSD 2-Clause "Simplified" License
def load_model(modelID, categories):
    if modelID == 1:
        model_name = 'resnet50_imagenetpretrained_moments'
        weight_file = 'moments_RGB_resnet50_imagenetpretrained.pth.tar'
        if not os.access(weight_file, os.W_OK):
            weight_url = 'http://moments.csail.mit.edu/moments_models/' + weight_file
            os.system('wget ' + weight_url)

        model = models.__dict__['resnet50'](num_classes=len(categories))

        useGPU = 0
        if useGPU == 1:
            checkpoint = torch.load(weight_file)
        else:
            checkpoint = torch.load(weight_file, map_location=lambda storage, loc: storage) # allow cpu

        state_dict = {str.replace(k,'module.',''): v for k,v in checkpoint['state_dict'].items()}
        model.load_state_dict(state_dict)

    model.eval()
    # hook the feature extractor
    features_names = ['layer4','avgpool'] # this is the last conv layer of the resnet
    for name in features_names:
        model._modules.get(name).register_forward_hook(hook_feature)
    return model 
Example #7
Source File: model.py    From vsepp with Apache License 2.0
def get_cnn(self, arch, pretrained):
        """Load a pretrained CNN and parallelize over GPUs
        """
        if pretrained:
            print("=> using pre-trained model '{}'".format(arch))
            model = models.__dict__[arch](pretrained=True)
        else:
            print("=> creating model '{}'".format(arch))
            model = models.__dict__[arch]()

        if arch.startswith('alexnet') or arch.startswith('vgg'):
            model.features = nn.DataParallel(model.features)
            model.cuda()
        else:
            model = nn.DataParallel(model).cuda()

        return model 
Example #8
Source File: extractor.py    From MMdnn with MIT License
def download(cls, architecture, path="./"):
        if cls.sanity_check(architecture):
            architecture_file = path + "imagenet_{}.pth".format(architecture)
            if not os.path.exists(architecture_file):
                kwargs = {}
                if architecture == 'inception_v3':
                    kwargs['transform_input'] = False
                model = models.__dict__[architecture](pretrained=True, **kwargs)
                torch.save(model, architecture_file)
                print("PyTorch pretrained model is saved as [{}].".format(architecture_file))
            else:
                print("File [{}] existed!".format(architecture_file))

            return architecture_file

        else:
            return None 
Example #9
Source File: test_model.py    From moments_models with BSD 2-Clause "Simplified" License
def load_model(modelID, categories):
    if modelID == 1:
        weight_file = 'moments_RGB_resnet50_imagenetpretrained.pth.tar'
        if not os.access(weight_file, os.W_OK):
            weight_url = 'http://moments.csail.mit.edu/moments_models/' + weight_file
            os.system('wget ' + weight_url)
        model = models.__dict__['resnet50'](num_classes=len(categories))

        useGPU = 0
        if useGPU == 1:
            checkpoint = torch.load(weight_file)
        else:
            checkpoint = torch.load(weight_file, map_location=lambda storage,
                                    loc: storage)  # allow cpu

        state_dict = {str.replace(str(k), 'module.', ''): v for k, v in checkpoint['state_dict'].items()}
        model.load_state_dict(state_dict)

    model.eval()
    return model 
Example #10
Source File: test.py    From coord-conv-pytorch with GNU General Public License v3.0
def test_coordConvNet(input_image):

    print('- CoordConvNet')

    device = input_image.device

    import torchvision.models as models

    vgg16 = models.__dict__['vgg16'](pretrained=False)

    print('VGG16 :\n', vgg16)

    vgg16 = CoordConvNet(vgg16, with_r=True)

    print('CoordVGG16 :\n', vgg16)

    vgg16 = vgg16.to(device)

    output = vgg16(input_image)

    print('Input Size  : ', input_image.size())
    print('Output Size : ', [i.size() for i in output])

    print('- CoordConvNet: OK!') 
Example #11
Source File: main.py    From smooth-topk with MIT License
def get_model(args):
    parse_model(args)

    if args.dataset == 'imagenet':
        model = torch_models.__dict__[args.model]()
        args.model_name = args.model
    elif args.basic_model:
        model = cifar_models.BasicConvNet(args.dataset, args.planes)
        args.model_name = 'convnet_{}'.format(args.planes)
    else:
        model = cifar_models.DenseNet3(args.depth, args.num_classes, args.growth)
        args.model_name = 'densenet_{}_{}'.format(args.depth, args.growth)

    # Print the number of model parameters
    nparams = sum([p.data.nelement() for p in model.parameters()])
    print('Number of model parameters: \t {}'.format(nparams))

    return model 
Example #12
Source File: model.py    From CAMP_iccv19 with Apache License 2.0
def get_cnn(self, arch, pretrained, fusion):
        """Load a pretrained CNN and parallelize over GPUs
        """
        if arch == "resnet152":
            if pretrained:
                print("=> using pre-trained model '{}'".format(arch))
                model = resnet152(pretrained=True, fusion=fusion)
            else:
                print("=> creating model '{}'".format(arch))
                model = resnet152(pretrained=False, fusion=fusion)
        
        else:
            if pretrained:
                print("=> using pre-trained model '{}'".format(arch))
                model = models.__dict__[arch](pretrained=True)
            else:
                print("=> creating model '{}'".format(arch))
                model = models.__dict__[arch]()
    
        return model 
Example #13
Source File: cpu_convnet_benchmark.py    From benchmark with BSD 3-Clause "New" or "Revised" License
def setupRun(self, state, arg):
        arch, sizes = arg[("arch", "size")]
        batch_size, c, h, w = sizes[0], sizes[1], sizes[2], sizes[3]
        batch_size = 1 if arg.single_batch_size else batch_size

        data_ = torch.randn(batch_size, c, h, w)
        target_ = torch.arange(1, batch_size + 1).long()
        state.net = models.__dict__[
            arch
        ]()  # no need to load pre-trained weights for dummy data

        state.optimizer = optim.SGD(state.net.parameters(), lr=0.01)
        state.criterion = nn.CrossEntropyLoss()

        state.net.eval()

        state.data, state.target = Variable(data_), Variable(target_)

        state.steps = 0
        state.time_fwd = 0
        state.time_bwd = 0
        state.time_upt = 0 
Example #14
Source File: main.py    From PyTorch with MIT License
def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__) 
Example #15
Source File: arch.py    From reseg-pytorch with GNU General Public License v3.0
def __init__(self, usegpu=True):
        super(CNN, self).__init__()

        self.model = models.__dict__['vgg16'](pretrained=True)
        self.model = nn.Sequential(*list(self.model.children())[0]) #TODO resnet50 :-5
        self.model = nn.Sequential(*list(self.model.children())[:16]) 
Example #16
Source File: dataparallel.py    From pytorch-distributed with MIT License
def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__) 
Example #17
Source File: model.py    From 3DKeypoints-DA with GNU General Public License v3.0
def getModel(args):
  # create model
  if args.pretrained:
    print("=> using pre-trained model '{}'".format(args.arch))
    model = models.__dict__[args.arch](pretrained=True)
    if args.arch.startswith('resnet'):
      if '18' in args.arch:
        model.fc = nn.Linear(512 * 1, ref.J * 3)
      else :
        model.fc = nn.Linear(512 * 4, ref.J * 3)
      print('reset classifier')
    if args.arch.startswith('densenet'):
      if '161' in args.arch:
        model.classifier = nn.Linear(2208, ref.J * 3)
      elif '201' in args.arch:
        model.classifier = nn.Linear(1920, ref.J * 3)
      else:
        model.classifier = nn.Linear(1024, ref.J * 3)
    if args.arch.startswith('alex'):
      feature_model = list(model.classifier.children())
      feature_model.pop()
      feature_model.append(nn.Linear(4096, ref.J * 3))
      model.classifier = nn.Sequential(*feature_model)
  else:
    print("=> creating model '{}'".format(args.arch))
    model = models.__dict__[args.arch](num_classes = ref.J * 3)

  model = torch.nn.DataParallel(model).cuda()
        
  if args.loadModel:
    if os.path.isfile(args.loadModel):
      print("=> loading model '{}'".format(args.loadModel))
      checkpoint = torch.load(args.loadModel)
      model.load_state_dict(checkpoint['state_dict'])
    else:
      raise Exception("=> no model found at '{}'".format(args.loadModel))
  
  return model 
Example #18
Source File: ImageModels.py    From DAVEnet-pytorch with BSD 3-Clause "New" or "Revised" License
def __init__(self, embedding_dim=1024, pretrained=False):
        super(VGG16, self).__init__()
        seed_model = imagemodels.__dict__['vgg16'](pretrained=pretrained).features
        seed_model = nn.Sequential(*list(seed_model.children())[:-1]) # remove final maxpool
        last_layer_index = len(list(seed_model.children()))
        seed_model.add_module(str(last_layer_index),
            nn.Conv2d(512, embedding_dim, kernel_size=(3,3), stride=(1,1), padding=(1,1)))
        self.image_model = seed_model 
Example #19
Source File: main.py    From elastic with BSD 3-Clause "New" or "Revised" License
def __str__(self):
        fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
        return fmtstr.format(**self.__dict__) 
Example #20
Source File: main.py    From online-normalization with BSD 3-Clause "New" or "Revised" License
def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__) 
Example #21
Source File: resnet.py    From Dense-CoAttention-Network with MIT License
def __init__(self, name, is_freeze=True):
		assert name in ["resnet50", "resnet101", "resnet152"], "Invalid CNN type: {:s}".format(name)
		super(ResNet, self).__init__()

		self.model = models.__dict__[name](pretrained=True)
		delattr(self.model, "fc")
		delattr(self.model, "avgpool")
		
		if is_freeze:
			print("Freezing %s ..." % name)
			for param in self.model.parameters():
				param.requires_grad_(requires_grad=False) 
Example #22
Source File: drnet.py    From SpatialSense with BSD 2-Clause "Simplified" License
def __init__(self, phrase_encoder, feature_dim, num_layers=3, backbone='resnet18'):
        super(DRNet, self).__init__()
    
        self.phrase_encoder = phrase_encoder
        self.num_layers = num_layers

        self.appr_module = models.__dict__[backbone](pretrained=True)
        self.appr_module.fc = nn.Linear(512, 256)

        self.pos_module = nn.Sequential(OrderedDict([
            ('conv1_p', nn.Conv2d(2, 32, 5, 2, 2)),
            ('batchnorm1_p', nn.BatchNorm2d(32)),
            ('relu1_p', nn.ReLU()),
            ('conv2_p', nn.Conv2d(32, 64, 3, 1, 1)),
            ('batchnorm2_p', nn.BatchNorm2d(64)),
            ('relu2_p', nn.ReLU()),
            ('maxpool2_p', nn.MaxPool2d(2)),
            ('hg', Hourglass(8, 64)), 
            ('batchnorm_p', nn.BatchNorm2d(64)),
            ('relu_p', nn.ReLU()),
            ('maxpool_p', nn.MaxPool2d(2)),
            ('conv3_p', nn.Conv2d(64, 256, 4)),
            ('batchnorm3_p', nn.BatchNorm2d(256)),
        ]))

        self.PhiR_0 = nn.Linear(512, feature_dim)
        self.batchnorm = nn.BatchNorm1d(feature_dim)

        self.PhiA = nn.Linear(300, feature_dim)
        self.PhiB = nn.Linear(300, feature_dim)
        self.PhiR = nn.Linear(feature_dim, feature_dim)
    
        self.fc = nn.Linear(feature_dim, 9) 
Example #23
Source File: vtranse.py    From SpatialSense with BSD 2-Clause "Simplified" License
def __init__(self, phrase_encoder, visual_feature_size, predicate_embedding_dim, backbone='resnet18'):
        super(VtransE, self).__init__()

        self.visual_feature_size = visual_feature_size
        self.phrase_encoder = phrase_encoder

        self.backbone = models.__dict__[backbone](pretrained=True)
        self.backbone = nn.Sequential(self.backbone.conv1,
                                    self.backbone.bn1,
                                    self.backbone.relu,
                                    self.backbone.maxpool,
                                    self.backbone.layer1,
                                    self.backbone.layer2,
                                    self.backbone.layer3,
                                    self.backbone.layer4)


        self.scale_factor = nn.Parameter(torch.Tensor(3))
        nn.init.uniform_(self.scale_factor)
        
        self.linear1 = nn.Linear(visual_feature_size * visual_feature_size * 512, visual_feature_size * visual_feature_size * 64)
        self.batchnorm1 = nn.BatchNorm1d(visual_feature_size * visual_feature_size * 64)
        self.linear2 = nn.Linear(visual_feature_size * visual_feature_size * 512, visual_feature_size * visual_feature_size * 64)
        self.batchnorm2 = nn.BatchNorm1d(visual_feature_size * visual_feature_size * 64)

        feature_dim = 300 + 4 + visual_feature_size * visual_feature_size * 64
        self.W_o = nn.Linear(feature_dim, predicate_embedding_dim)
        self.W_s = nn.Linear(feature_dim, predicate_embedding_dim)
        self.W_p = nn.Linear(predicate_embedding_dim, 9) 
Example #24
Source File: utils.py    From PyVideoResearch with GNU General Public License v3.0
def generic_load(arch, pretrained, weights, args):
    if arch in tmodels.__dict__:  # torchvision models
        if pretrained:
            print("=> using pre-trained model '{}'".format(arch))
            model = tmodels.__dict__[arch](pretrained=True)
            model = model.cuda()
        else:
            print("=> creating model '{}'".format(arch))
            model = tmodels.__dict__[arch]()
    else:  # defined as script in bases
        model = case_getattr(import_module('models.bases.' + arch), arch).get(args)
        if not weights == '':
            print('loading pretrained-weights from {}'.format(weights))
            model.load_state_dict(torch.load(weights))
    return model 
Example #25
Source File: Resnet_ins101.py    From RANet with Apache License 2.0
def get_imagenet_model(type, pretrained=True):
    if type == 'vgg16':
        return models.__dict__['vgg16'](pretrained=pretrained)
    if type == 'resnet50':
        return models.__dict__['resnet50'](pretrained=pretrained)
    if type == 'resnet101':
        return models.__dict__['resnet101'](pretrained=pretrained)
    if type == 'resnet_ins101':
        return models.__dict__['resnet_ins101'](pretrained=pretrained)
    if type == 'resnet_ins_RGBM101':
        return models.__dict__['resnet_ins_RGBM101'](pretrained=pretrained)
    else:
        print('available types: ')
        print(model_names)
        raise ValueError('unsupported model type: {}'.format(type))
Example #26
Source File: RANet_Model_imagenet.py    From RANet with Apache License 2.0
def get_imagenet_model(type, pretrained=True):
    if type == 'resnet101':
        return models.__dict__['resnet101'](pretrained=pretrained)
    if type == 'resnet_ins101':
        return resnet_ins101(pretrained=pretrained)
    else:
        print('available types: ')
        print(model_names)
        raise ValueError('unsupported model type: {}'.format(type))
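
Examples #25 and #26 print a model_names list that is defined elsewhere in the RANet code. A common way to build such a list, shown here as a sketch of the idiom used by the PyTorch ImageNet reference script rather than the exact RANet code, is to filter models.__dict__ down to its lowercase callables:

import torchvision.models as models

# Keep only lowercase, non-dunder, callable entries: those are the model constructors.
model_names = sorted(
    name for name in models.__dict__
    if name.islower() and not name.startswith('__')
    and callable(models.__dict__[name])
)
print(model_names)  # e.g. ['alexnet', 'densenet121', ..., 'vgg19_bn']
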
Example #27
Source File: models.py    From moments_models with BSD 2-Clause "Simplified" License
def resnet50(num_classes=339, pretrained=True):
    model = models.__dict__['resnet50'](num_classes=num_classes)
    if pretrained:
        model.load_state_dict(load_checkpoint(weights['resnet50']))
    model = modify_resnets(model)
    return model 
Example #28
Source File: main.py    From Teacher-free-Knowledge-Distillation with MIT License
def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__) 
Example #29
Source File: feature_extractor.py    From imsearch with Apache License 2.0
def get_extractor(arch='resnet50'):
    model_ft = models.__dict__[arch](pretrained=True)
    extractor = nn.Sequential(*list(model_ft.children())[:-1])
    return extractor 
Example #30
Source File: teaser.py    From ignite with BSD 3-Clause "New" or "Revised" License
def get_model(name):
    if name in models.__dict__:
        fn = models.__dict__[name]
    else:
        raise RuntimeError("Unknown model name {}".format(name))

    return fn(num_classes=10)