Python pretrainedmodels.__dict__() Examples
The following are 30 code examples of pretrainedmodels.__dict__(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the pretrainedmodels module.
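Every example below uses the same lookup pattern: pretrainedmodels.__dict__ is the module's attribute dictionary, so indexing it with a model name returns that architecture's constructor. A minimal sketch of the pattern (the model name and the pretrained='imagenet' setting are just illustrative choices):

import pretrainedmodels

model_name = 'resnet50'  # any name listed in pretrainedmodels.model_names works here

# The __dict__ lookup returns the factory function for the architecture;
# pretrained='imagenet' loads ImageNet weights, pretrained=None skips them.
model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')
model.eval()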
Example #1
Source File: encoder.py From pytorch-segmentation with MIT License | 6 votes |
def se_net(name, pretrained=False):
    if name in ['se_resnet50', 'se_resnet101', 'se_resnet152',
                'se_resnext50_32x4d', 'se_resnext101_32x4d', 'senet154']:
        pretrained = 'imagenet' if pretrained else None
        senet = pretrainedmodels.__dict__[name](num_classes=1000, pretrained=pretrained)
    else:
        return NotImplemented

    layer0 = senet.layer0
    layer1 = senet.layer1
    layer2 = senet.layer2
    layer3 = senet.layer3
    layer4 = senet.layer4

    layer0.out_channels = senet.layer1[0].conv1.in_channels
    layer1.out_channels = senet.layer1[-1].conv3.out_channels
    layer2.out_channels = senet.layer2[-1].conv3.out_channels
    layer3.out_channels = senet.layer3[-1].conv3.out_channels
    layer4.out_channels = senet.layer4[-1].conv3.out_channels

    return [layer0, layer1, layer2, layer3, layer4]
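A rough usage sketch (my addition, not from the pytorch-segmentation project): the five stages returned by se_net above can be chained into a single encoder. The 3x224x224 input shape is an assumption based on the ImageNet defaults of these models, and se_net plus its pretrainedmodels/nn imports are assumed to be in scope.

import torch
import torch.nn as nn

stages = se_net('se_resnext50_32x4d', pretrained=False)  # pretrained=False avoids downloading weights
encoder = nn.Sequential(*stages)

x = torch.randn(1, 3, 224, 224)  # assumed ImageNet-style input
with torch.no_grad():
    features = encoder(x)
print(features.shape)  # expected: torch.Size([1, 2048, 7, 7])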
Example #2
Source File: encoder.py From pytorch-segmentation with MIT License | 6 votes |
def resnext(name, pretrained=False):
    if name in ['resnext101_32x4d', 'resnext101_64x4d']:
        pretrained = 'imagenet' if pretrained else None
        resnext = pretrainedmodels.__dict__[name](num_classes=1000, pretrained=pretrained)
    else:
        return NotImplemented

    layer0 = nn.Sequential(resnext.features[0], resnext.features[1],
                           resnext.features[2], resnext.features[3])
    layer1 = resnext.features[4]
    layer2 = resnext.features[5]
    layer3 = resnext.features[6]
    layer4 = resnext.features[7]

    layer0.out_channels = 64
    layer1.out_channels = 256
    layer2.out_channels = 512
    layer3.out_channels = 1024
    layer4.out_channels = 2048

    return [layer0, layer1, layer2, layer3, layer4]
Example #3
Source File: model_factory.py From kaggle-hpa with BSD 2-Clause "Simplified" License | 6 votes |
def get_senet(model_name='se_resnext50', num_classes=28, **_):
    model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')
    conv1 = model.layer0.conv1
    model.layer0.conv1 = nn.Conv2d(in_channels=4,
                                   out_channels=conv1.out_channels,
                                   kernel_size=conv1.kernel_size,
                                   stride=conv1.stride,
                                   padding=conv1.padding,
                                   bias=conv1.bias)

    # copy pretrained weights
    model.layer0.conv1.weight.data[:, :3, :, :] = conv1.weight.data
    model.layer0.conv1.weight.data[:, 3:, :, :] = conv1.weight.data[:, :1, :, :]

    model.avgpool = nn.AdaptiveAvgPool2d(1)
    in_features = model.last_linear.in_features
    model.last_linear = nn.Linear(in_features, num_classes)
    return model
Example #4
Source File: model_factory.py From kaggle-hpa with BSD 2-Clause "Simplified" License | 6 votes |
def get_resnet18(num_classes=28, **_):
    model_name = 'resnet18'
    model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')
    conv1 = model.conv1
    model.conv1 = nn.Conv2d(in_channels=4,
                            out_channels=conv1.out_channels,
                            kernel_size=conv1.kernel_size,
                            stride=conv1.stride,
                            padding=conv1.padding,
                            bias=conv1.bias)

    # copy pretrained weights
    model.conv1.weight.data[:, :3, :, :] = conv1.weight.data
    model.conv1.weight.data[:, 3:, :, :] = conv1.weight.data[:, :1, :, :]

    model.avgpool = nn.AdaptiveAvgPool2d(1)
    in_features = model.last_linear.in_features
    model.last_linear = nn.Linear(in_features, num_classes)
    return model
Example #5
Source File: model_factory.py From kaggle-hpa with BSD 2-Clause "Simplified" License | 6 votes |
def get_resnet34(num_classes=28, **_):
    model_name = 'resnet34'
    model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')
    conv1 = model.conv1
    model.conv1 = nn.Conv2d(in_channels=4,
                            out_channels=conv1.out_channels,
                            kernel_size=conv1.kernel_size,
                            stride=conv1.stride,
                            padding=conv1.padding,
                            bias=conv1.bias)

    # copy pretrained weights
    model.conv1.weight.data[:, :3, :, :] = conv1.weight.data
    model.conv1.weight.data[:, 3:, :, :] = conv1.weight.data[:, :1, :, :]

    model.avgpool = nn.AdaptiveAvgPool2d(1)
    in_features = model.last_linear.in_features
    model.last_linear = nn.Linear(in_features, num_classes)
    return model
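The three factories above share one idea: rebuild the first convolution with in_channels=4, re-use the pretrained RGB filters for the first three channels, and copy the first channel's filters into the fourth. A small smoke test (my addition, not from kaggle-hpa; the batch size and 224x224 resolution are illustrative assumptions, and pretrained='imagenet' means the ImageNet weights are downloaded on first use):

import torch

model = get_resnet18(num_classes=28)
model.eval()

x = torch.randn(2, 4, 224, 224)  # assumed batch of 4-channel images
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # expected: torch.Size([2, 28])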
Example #6
Source File: encoders.py From open-solution-ship-detection with MIT License | 6 votes |
def __init__(self, encoder_depth, pretrained='imagenet', pool0=False):
    super().__init__()

    if encoder_depth == 50:
        self.encoder = pretrainedmodels.__dict__['se_resnext50_32x4d'](num_classes=1000, pretrained=pretrained)
    elif encoder_depth == 101:
        self.encoder = pretrainedmodels.__dict__['se_resnext101_32x4d'](num_classes=1000, pretrained=pretrained)
    else:
        raise NotImplementedError('only 50, 101 version of Resnet are implemented')

    if pool0:
        self.conv1 = nn.Sequential(self.encoder.layer0.conv1,
                                   self.encoder.layer0.bn1,
                                   self.encoder.layer0.relu1,
                                   self.encoder.layer0.pool)
    else:
        self.conv1 = nn.Sequential(self.encoder.layer0.conv1,
                                   self.encoder.layer0.bn1,
                                   self.encoder.layer0.relu1)

    self.encoder2 = self.encoder.layer1
    self.encoder3 = self.encoder.layer2
    self.encoder4 = self.encoder.layer3
    self.encoder5 = self.encoder.layer4
Example #7
Source File: netlib.py From Deep-Metric-Learning-Baselines with Apache License 2.0 | 6 votes |
def __init__(self, opt, list_style=False, no_norm=False):
    super(ResNet50, self).__init__()

    self.pars = opt

    if not opt.not_pretrained:
        print('Getting pretrained weights...')
        self.model = ptm.__dict__['resnet50'](num_classes=1000, pretrained='imagenet')
        print('Done.')
    else:
        print('Not utilizing pretrained weights!')
        self.model = ptm.__dict__['resnet50'](num_classes=1000, pretrained=None)

    for module in filter(lambda m: type(m) == nn.BatchNorm2d, self.model.modules()):
        module.eval()
        module.train = lambda _: None

    self.model.last_linear = torch.nn.Linear(self.model.last_linear.in_features, opt.embed_dim)

    self.layer_blocks = nn.ModuleList([self.model.layer1, self.model.layer2,
                                       self.model.layer3, self.model.layer4])
Example #8
Source File: encoders.py From open-solution-salt-identification with MIT License | 6 votes |
def __init__(self, encoder_depth, pretrained='imagenet', pool0=False):
    super().__init__()

    if encoder_depth == 50:
        self.encoder = pretrainedmodels.__dict__['se_resnext50_32x4d'](num_classes=1000, pretrained=pretrained)
    elif encoder_depth == 101:
        self.encoder = pretrainedmodels.__dict__['se_resnext101_32x4d'](num_classes=1000, pretrained=pretrained)
    else:
        raise NotImplementedError('only 50, 101 version of Resnet are implemented')

    if pool0:
        self.conv1 = nn.Sequential(self.encoder.layer0.conv1,
                                   self.encoder.layer0.bn1,
                                   self.encoder.layer0.relu1,
                                   self.encoder.layer0.pool0)
    else:
        self.conv1 = nn.Sequential(self.encoder.layer0.conv1,
                                   self.encoder.layer0.bn1,
                                   self.encoder.layer0.relu1)

    self.encoder2 = self.encoder.layer1
    self.encoder3 = self.encoder.layer2
    self.encoder4 = self.encoder.layer3
    self.encoder5 = self.encoder.layer4
Example #9
Source File: etc.py From kaggle-human-protein-atlas-image-classification with Apache License 2.0 | 5 votes |
def __init__(self, pre=True):
    super().__init__()
    self.encoder = pretrainedmodels.__dict__['polynet'](num_classes=1000, pretrained='imagenet')

    conv = nn.Conv2d(4, 32, kernel_size=3, stride=2, bias=False)
    if pre:
        w = self.encoder.stem.conv1[0].conv.weight
        conv.weight = nn.Parameter(torch.cat((w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])), dim=1))
    self.encoder.stem.conv1[0].conv = conv

    self.last_linear = nn.Linear(2048, num_class())
Example #10
Source File: etc.py From kaggle-human-protein-atlas-image-classification with Apache License 2.0 | 5 votes |
def __init__(self, pre=True):
    super().__init__()
    self.encoder = pretrainedmodels.__dict__['senet154'](num_classes=1000, pretrained='imagenet')

    conv = nn.Conv2d(4, 64, kernel_size=3, stride=2, bias=False)
    if pre:
        w = self.encoder.layer0[0].weight
        conv.weight = nn.Parameter(torch.cat((w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])), dim=1))
    self.encoder.layer0[0] = conv

    self.last_linear = nn.Linear(2048, num_class())
Example #11
Source File: models.py From isic2019 with MIT License | 5 votes |
def Resnet50(config):
    return pretrainedmodels.__dict__['resnet50'](num_classes=1000, pretrained='imagenet')
Example #12
Source File: inception.py From kaggle-human-protein-atlas-image-classification with Apache License 2.0 | 5 votes |
def __init__(self, pre=True):
    super().__init__()
    self.encoder = pretrainedmodels.__dict__['inceptionv4'](num_classes=1000, pretrained='imagenet')

    conv1 = BasicConv2d(4, 32, kernel_size=3, stride=2)
    if pre:
        w = self.encoder.features[0].conv.weight
        conv1.conv.weight = nn.Parameter(torch.cat((w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])), dim=1))
    self.encoder.features[0].conv = conv1

    self.last_linear = nn.Linear(1536, num_class())
Example #13
Source File: basenet.py From pytorch-saltnet with MIT License | 5 votes |
def resnext(name, pretrained):
    import pretrainedmodels

    if name in ['resnext101_32x4d', 'resnext101_64x4d']:
        imagenet_pretrained = 'imagenet' if pretrained == 'imagenet' else None
        resnext = pretrainedmodels.__dict__[name](num_classes=1000, pretrained=imagenet_pretrained)
    else:
        return NotImplemented

    resnext_features = resnext.features
    layer0 = [resnext_features[i] for i in range(4)]
    layer0 = nn.Sequential(*layer0)
    layer0.out_channels = layer0[-1].out_channels = 64
    layer1 = resnext_features[4]
    layer1.out_channels = layer1[-1].out_channels = 256
    layer2 = resnext_features[5]
    layer2.out_channels = layer2[-1].out_channels = 512
    layer3 = resnext_features[6]
    layer3.out_channels = layer3[-1].out_channels = 1024
    layer4 = resnext_features[7]
    layer4.out_channels = layer4[-1].out_channels = 2048

    n_pretrained = 5 if imagenet_pretrained else 0
    return [layer0, layer1, layer2, layer3, layer4], True, n_pretrained
Example #14
Source File: basenet.py From pytorch-saltnet with MIT License | 5 votes |
def se_net(name, pretrained):
    import pretrainedmodels

    if name in ['se_resnet50', 'se_resnet101', 'se_resnet152',
                'se_resnext50_32x4d', 'se_resnext101_32x4d', 'senet154']:
        imagenet_pretrained = 'imagenet' if pretrained == 'imagenet' else None
        senet = pretrainedmodels.__dict__[name](num_classes=1000, pretrained=imagenet_pretrained)
    else:
        return NotImplemented

    layer0 = replace_bn_in_sequential(senet.layer0)
    block = senet.layer1[0].__class__
    layer1 = replace_bn_in_sequential(senet.layer1, block=block)
    layer1.out_channels = layer1[-1].out_channels = senet.layer1[-1].conv3.out_channels
    layer0.out_channels = layer0[-1].out_channels = senet.layer1[0].conv1.in_channels
    layer2 = replace_bn_in_sequential(senet.layer2, block=block)
    layer2.out_channels = layer2[-1].out_channels = senet.layer2[-1].conv3.out_channels
    layer3 = replace_bn_in_sequential(senet.layer3, block=block)
    layer3.out_channels = layer3[-1].out_channels = senet.layer3[-1].conv3.out_channels
    layer4 = replace_bn_in_sequential(senet.layer4, block=block)
    layer4.out_channels = layer4[-1].out_channels = senet.layer4[-1].conv3.out_channels

    n_pretrained = 5 if imagenet_pretrained else 0
    return [layer0, layer1, layer2, layer3, layer4], True, n_pretrained
Example #15
Source File: test_pm_imagenet.py From pretrained-models.pytorch with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_pm_imagenet(model_name, pretrained):
    if set_grad_enabled:
        set_grad_enabled(False)

    print('test_pm_imagenet("{}")'.format(model_name))
    net = pm.__dict__[model_name](num_classes=1000, pretrained=pretrained)
    net.eval()

    tensor = utils.TransformImage(net)(img)
    tensor = tensor.unsqueeze(0)
    x = Variable(tensor, requires_grad=False)

    out_logits = net(x)
    if 'squeezenet' in model_name:
        # Conv2d without view at the end
        assert out_logits.shape == torch.Size([1, 1000, 1, 1])
        return
    assert out_logits.shape == torch.Size([1, 1000])

    out_feats = net.features(x)
    out_logits_2 = net.logits(out_feats)
    assert equal(out_logits, out_logits_2)

    if 'dpn' in model_name:
        # Conv2d instead of Linear
        return

    net.last_linear = nn.Linear(net.last_linear.in_features, 10)
    out_logits_3 = net.logits(out_feats)
    assert out_logits_3.shape == torch.Size([1, 10])

    if set_grad_enabled:
        set_grad_enabled(True)
Example #16
Source File: test_torch_save.py From pretrained-models.pytorch with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_torch_save(model_name, pretrained, tmp_path):
    print('test_torch_save("{}")'.format(model_name))
    net = pm.__dict__[model_name](num_classes=1000, pretrained=pretrained)
    tmp_file = tmp_path / '{}.pkl'.format(model_name)
    torch.save(net, tmp_file.open('wb'))
    tmp_file.unlink()
Example #17
Source File: classification.py From open-solution-ship-detection with MIT License | 5 votes |
def __init__(self, pretrained):
    super().__init__()
    self.features = pretrainedmodels.__dict__['densenet201'](num_classes=1000, pretrained=pretrained)
    self.classifier = nn.Linear(in_features=1000, out_features=2)
Example #18
Source File: encoders.py From open-solution-ship-detection with MIT License | 5 votes |
def __init__(self, encoder_depth, pretrained='imagenet', pool0=False):
    super().__init__()

    if encoder_depth == 50:
        self.encoder = pretrainedmodels.__dict__['se_resnet50'](num_classes=1000, pretrained=pretrained)
    elif encoder_depth == 101:
        self.encoder = pretrainedmodels.__dict__['se_resnet101'](num_classes=1000, pretrained=pretrained)
    elif encoder_depth == 152:
        self.encoder = pretrainedmodels.__dict__['se_resnet152'](num_classes=1000, pretrained=pretrained)
    else:
        raise NotImplementedError('only 50, 101, 152 version of Resnet are implemented')

    if pool0:
        self.conv1 = nn.Sequential(self.encoder.layer0.conv1,
                                   self.encoder.layer0.bn1,
                                   self.encoder.layer0.relu1,
                                   self.encoder.layer0.pool0)
    else:
        self.conv1 = nn.Sequential(self.encoder.layer0.conv1,
                                   self.encoder.layer0.bn1,
                                   self.encoder.layer0.relu1)

    self.encoder2 = self.encoder.layer1
    self.encoder3 = self.encoder.layer2
    self.encoder4 = self.encoder.layer3
    self.encoder5 = self.encoder.layer4
Example #19
Source File: encoders.py From open-solution-ship-detection with MIT License | 5 votes |
def __init__(self, encoder_depth, pretrained='imagenet', pool0=False):
    super().__init__()

    if encoder_depth == 121:
        self.encoder = pretrainedmodels.__dict__['densenet121'](num_classes=1000, pretrained=pretrained)
    elif encoder_depth == 161:
        self.encoder = pretrainedmodels.__dict__['densenet161'](num_classes=1000, pretrained=pretrained)
    elif encoder_depth == 169:
        self.encoder = pretrainedmodels.__dict__['densenet169'](num_classes=1000, pretrained=pretrained)
    elif encoder_depth == 201:
        self.encoder = pretrainedmodels.__dict__['densenet201'](num_classes=1000, pretrained=pretrained)
    else:
        raise NotImplementedError('only 121, 161, 169, 201 version of Densenet are implemented')

    if pool0:
        self.conv1 = nn.Sequential(self.encoder.features.conv0,
                                   self.encoder.features.norm0,
                                   self.encoder.features.relu0,
                                   self.encoder.features.pool0)
    else:
        self.conv1 = nn.Sequential(self.encoder.features.conv0,
                                   self.encoder.features.norm0,
                                   self.encoder.features.relu0)

    self.encoder2 = self.encoder.features.denseblock1
    self.transition1 = self.encoder.features.transition1
    self.encoder3 = self.encoder.features.denseblock2
    self.transition2 = self.encoder.features.transition2
    self.encoder4 = self.encoder.features.denseblock3
    self.transition3 = self.encoder.features.transition3
    self.encoder5 = self.encoder.features.denseblock4
Example #20
Source File: pretrained.py From mlcomp with Apache License 2.0 | 5 votes |
def __init__(self, variant, num_classes, pretrained=True, activation=None):
    super().__init__()

    params = {'num_classes': 1000}
    if not pretrained:
        params['pretrained'] = None
    model = pretrainedmodels.__dict__[variant](**params)
    self.model = model

    linear = self.model.last_linear
    if isinstance(linear, nn.Linear):
        self.model.last_linear = nn.Linear(
            model.last_linear.in_features,
            num_classes
        )
        self.model.last_linear.in_channels = linear.in_features
    elif isinstance(linear, nn.Conv2d):
        self.model.last_linear = nn.Conv2d(
            linear.in_channels,
            num_classes,
            kernel_size=linear.kernel_size,
            bias=True
        )
        self.model.last_linear.in_features = linear.in_channels

    self.model.last_linear = nn.Sequential(
        LambdaLayer(lambda x: x.unsqueeze_(0)),
        nn.AdaptiveAvgPool1d(self.model.last_linear.in_channels),
        LambdaLayer(lambda x: x.squeeze_(0).view(x.size(0), -1)),
        self.model.last_linear
    )

    if callable(activation) or activation is None:
        self.activation = activation
    elif activation == 'softmax':
        self.activation = nn.Softmax(dim=1)
    elif activation == 'sigmoid':
        self.activation = nn.Sigmoid()
    else:
        raise ValueError(
            'Activation should be "sigmoid"/"softmax"/callable/None')
Example #21
Source File: compute_computational_complexity.py From models-comparison.pytorch with BSD 3-Clause "New" or "Revised" License | 5 votes |
def main():
    args = parser.parse_args()

    try:
        with open(args.save) as fp:
            model_info = json.load(fp)
    except:
        model_info = {}

    for m in model_names:
        if not m in model_info.keys():
            # create model
            print("=> creating model '{}'".format(m))
            if args.pretrained.lower() not in ['false', 'none', 'not', 'no', '0']:
                print("=> using pre-trained parameters '{}'".format(args.pretrained))
                model = pretrainedmodels.__dict__[m](num_classes=1000, pretrained=args.pretrained)
            else:
                model = pretrainedmodels.__dict__[m]()

            cudnn.benchmark = True

            scale = 0.875
            print('Images transformed from size {} to {}'.format(
                int(round(max(model.input_size) / scale)),
                model.input_size))

            model = model.cuda().eval()
            model = utils.add_flops_counting_methods(model)
            model.start_flops_count()

            with torch.no_grad():
                _ = model(torch.randn(args.batch_size, *model.input_size).cuda(non_blocking=True))

            summary, n_params = utils.summary(model.input_size, model)
            model_info[m] = (model.compute_average_flops_cost() / 1e9 / 2, n_params.item())

            with open(args.save, 'w') as fp:
                json.dump(model_info, fp)
Example #22
Source File: featurizer.py From classification_metric_learning with Apache License 2.0 | 5 votes |
def resnet50(output_dim):
    """
    resnet50 variant with `output_dim` embedding output size.
    """
    basemodel = pretrainedmodels.__dict__["resnet50"](num_classes=1000)

    model = nn.Sequential(
        basemodel.conv1,
        basemodel.bn1,
        basemodel.relu,
        basemodel.maxpool,
        basemodel.layer1,
        basemodel.layer2,
        basemodel.layer3,
        basemodel.layer4
    )
    model.name = "resnet50"

    featurizer = EmbeddedFeatureWrapper(feature=model,
                                        input_dim=2048,
                                        output_dim=output_dim)
    featurizer.input_space = basemodel.input_space
    featurizer.input_range = basemodel.input_range
    featurizer.input_size = basemodel.input_size
    featurizer.std = basemodel.std
    featurizer.mean = basemodel.mean

    return featurizer
Example #23
Source File: models.py From isic2019 with MIT License | 5 votes |
def dpn92(config):
    return pretrainedmodels.__dict__['dpn92'](num_classes=1000, pretrained='imagenet+5k')
Example #24
Source File: models.py From isic2019 with MIT License | 5 votes |
def se_resnext50(config):
    return pretrainedmodels.__dict__['se_resnext50_32x4d'](num_classes=1000, pretrained='imagenet')
Example #25
Source File: models.py From isic2019 with MIT License | 5 votes |
def se_resnext101(config):
    return pretrainedmodels.__dict__['se_resnext101_32x4d'](num_classes=1000, pretrained='imagenet')
Example #26
Source File: models.py From isic2019 with MIT License | 5 votes |
def se_resnet50(config):
    return pretrainedmodels.__dict__['se_resnet50'](num_classes=1000, pretrained='imagenet')
Example #27
Source File: models.py From isic2019 with MIT License | 5 votes |
def se_resnet101(config):
    return pretrainedmodels.__dict__['se_resnet101'](num_classes=1000, pretrained='imagenet')
Example #28
Source File: models.py From isic2019 with MIT License | 5 votes |
def resnext101(config):
    return pretrainedmodels.__dict__['resnext101_32x4d'](num_classes=1000, pretrained='imagenet')
Example #29
Source File: models.py From isic2019 with MIT License | 5 votes |
def resnext101_64(config):
    return pretrainedmodels.__dict__['resnext101_64x4d'](num_classes=1000, pretrained='imagenet')
Example #30
Source File: models.py From isic2019 with MIT License | 5 votes |
def senet154(config):
    return pretrainedmodels.__dict__['senet154'](num_classes=1000, pretrained='imagenet')