Python torchvision.models.resnet50() Examples

The following are 30 code examples of torchvision.models.resnet50(), drawn from open-source projects. Each example is preceded by the source file and project it comes from, so you can follow up in the original repository. You may also want to check out the other available functions and classes of the torchvision.models module.
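Before the project-specific examples, here is a minimal standalone sketch of the basic torchvision.models.resnet50() workflow: load ImageNet-pretrained weights, normalize an input with the standard ImageNet statistics, and run a forward pass. The random input below is a placeholder, not data from any of the projects listed here.

import torch
import torchvision.models as models
import torchvision.transforms as transforms

# Build a ResNet-50 with ImageNet-pretrained weights and switch to eval mode.
model = models.resnet50(pretrained=True).eval()

# Standard ImageNet normalization (resize/crop steps omitted for brevity).
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

# Dummy batch containing one random 3x224x224 image.
x = normalize(torch.rand(3, 224, 224)).unsqueeze(0)

with torch.no_grad():
    logits = model(x)  # shape: (1, 1000)
print(logits.argmax(dim=1))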
Example #1
Source File: resnet_encoder.py    From packnet-sfm with MIT License
def __init__(self, num_layers, pretrained, num_input_images=1):
        super(ResnetEncoder, self).__init__()

        self.num_ch_enc = np.array([64, 64, 128, 256, 512])

        resnets = {18: models.resnet18,
                   34: models.resnet34,
                   50: models.resnet50,
                   101: models.resnet101,
                   152: models.resnet152}

        if num_layers not in resnets:
            raise ValueError("{} is not a valid number of resnet layers".format(num_layers))

        if num_input_images > 1:
            self.encoder = resnet_multiimage_input(num_layers, pretrained, num_input_images)
        else:
            self.encoder = resnets[num_layers](pretrained)

        if num_layers > 34:
            self.num_ch_enc[1:] *= 4 
Example #2
Source File: croppingModel.py    From Grid-Anchor-based-Image-Cropping-Pytorch with MIT License
def __init__(self, alignsize = 8, reddim = 32, loadweight = True, model = None, downsample = 4):
        super(crop_model_multi_scale_shared, self).__init__()

        if model == 'shufflenetv2':
            self.Feat_ext = shufflenetv2_base(loadweight,downsample)
            self.DimRed = nn.Conv2d(812, reddim, kernel_size=1, padding=0)
        elif model == 'mobilenetv2':
            self.Feat_ext = mobilenetv2_base(loadweight,downsample)
            self.DimRed = nn.Conv2d(448, reddim, kernel_size=1, padding=0)
        elif model == 'vgg16':
            self.Feat_ext = vgg_base(loadweight,downsample)
            self.DimRed = nn.Conv2d(1536, reddim, kernel_size=1, padding=0)
        elif model == 'resnet50':
            self.Feat_ext = resnet50_base(loadweight,downsample)
            self.DimRed = nn.Conv2d(3584, reddim, kernel_size=1, padding=0)

        self.downsample2 = nn.UpsamplingBilinear2d(scale_factor=1.0/2.0)
        self.upsample2 = nn.UpsamplingBilinear2d(scale_factor=2.0)
        self.RoIAlign = RoIAlignAvg(alignsize, alignsize, 1.0/2**downsample)
        self.RoDAlign = RoDAlignAvg(alignsize, alignsize, 1.0/2**downsample)
        self.FC_layers = fc_layers(reddim*2, alignsize) 
Example #3
Source File: test_attack_AdditiveUniformNoiseAttack.py    From perceptron-benchmark with Apache License 2.0
def test_untargeted_resnet50(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.resnet50(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial 
Example #4
Source File: main.py    From Grad-CAM.pytorch with Apache License 2.0
def get_net(net_name, weight_path=None):
    """
    根据网络名称获取模型
    :param net_name: 网络名称
    :param weight_path: 与训练权重路径
    :return:
    """
    pretrain = weight_path is None  # 没有指定权重路径,则加载默认的预训练权重
    if net_name in ['vgg', 'vgg16']:
        net = models.vgg16(pretrained=pretrain)
    elif net_name == 'vgg19':
        net = models.vgg19(pretrained=pretrain)
    elif net_name in ['resnet', 'resnet50']:
        net = models.resnet50(pretrained=pretrain)
    elif net_name == 'resnet101':
        net = models.resnet101(pretrained=pretrain)
    elif net_name in ['densenet', 'densenet121']:
        net = models.densenet121(pretrained=pretrain)
    elif net_name in ['inception']:
        net = models.inception_v3(pretrained=pretrain)
    elif net_name in ['mobilenet_v2']:
        net = models.mobilenet_v2(pretrained=pretrain)
    elif net_name in ['shufflenet_v2']:
        net = models.shufflenet_v2_x1_0(pretrained=pretrain)
    else:
        raise ValueError('invalid network name:{}'.format(net_name))
    # load the weight parameters from the specified path
    if weight_path is not None and net_name.startswith('densenet'):
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
        state_dict = torch.load(weight_path)
        for key in list(state_dict.keys()):
            res = pattern.match(key)
            if res:
                new_key = res.group(1) + res.group(2)
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
        net.load_state_dict(state_dict)
    elif weight_path is not None:
        net.load_state_dict(torch.load(weight_path))
    return net 
Example #5
Source File: basenet.py    From MCD_DA with MIT License
def __init__(self,option = 'resnet18',pret=True):
        super(ResFreeze, self).__init__()
        self.dim = 2048*2*2
        if option == 'resnet18':
            model_ft = models.resnet18(pretrained=pret)
            self.dim = 512
        if option == 'resnet50':
            model_ft = models.resnet50(pretrained=pret)
        if option == 'resnet101':
            model_ft = models.resnet101(pretrained=pret)
        if option == 'resnet152':
            model_ft = models.resnet152(pretrained=pret)
        if option == 'resnet200':
            model_ft = Res200()
        self.conv1 = model_ft.conv1
        self.bn0 = model_ft.bn1
        self.relu = model_ft.relu
        self.maxpool = model_ft.maxpool
        self.layer1 = model_ft.layer1
        self.layer2 = model_ft.layer2
        self.layer3 = model_ft.layer3
        self.layer4 = model_ft.layer4
        self.avgpool = model_ft.avgpool 
Example #6
Source File: models.py    From ACME with GNU General Public License v3.0
def __init__(self):
        super(ImageEmbedding, self).__init__()
        
        resnet = models.resnet50(pretrained=True)
        modules = list(resnet.children())[:-1]  # we do not use the last fc layer.
        self.visionMLP = nn.Sequential(*modules)

        self.visual_embedding = nn.Sequential(
            nn.Linear(opts.imfeatDim, opts.embDim),
            nn.Tanh(),
        )

        self.semantic_branch = nn.Linear(opts.embDim, opts.numClasses)

        self.fc_visual = nn.Sequential(
            nn.Linear(opts.embDim, opts.embDim),
            nn.BatchNorm1d(opts.embDim),
            nn.Tanh(),
        ) 
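Several of the examples here (this one, Example #27, and others) reuse the same idiom: keep every ResNet-50 module except the final fully connected layer and treat the rest as a 2048-dimensional feature extractor. A minimal generic sketch of that idiom, independent of any particular project:

import torch
import torch.nn as nn
import torchvision.models as models

resnet = models.resnet50(pretrained=True)
# Drop the final fc layer; after the average pool the output is (N, 2048, 1, 1).
feature_extractor = nn.Sequential(*list(resnet.children())[:-1]).eval()

with torch.no_grad():
    features = feature_extractor(torch.rand(1, 3, 224, 224))
features = features.flatten(1)  # shape: (1, 2048)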
Example #7
Source File: resnet_encoder.py    From DF-VO with MIT License
def __init__(self, num_layers, pretrained, num_input_images=1):
        super(ResnetEncoder, self).__init__()

        self.num_ch_enc = np.array([64, 64, 128, 256, 512])

        resnets = {18: models.resnet18,
                   34: models.resnet34,
                   50: models.resnet50,
                   101: models.resnet101,
                   152: models.resnet152}

        if num_layers not in resnets:
            raise ValueError("{} is not a valid number of resnet layers".format(num_layers))

        if num_input_images > 1:
            self.encoder = resnet_multiimage_input(num_layers, pretrained, num_input_images)
        else:
            self.encoder = resnets[num_layers](pretrained)

        if num_layers > 34:
            self.num_ch_enc[1:] *= 4 
Example #8
Source File: pretrained_networks.py    From PerceptualSimilarity with BSD 2-Clause "Simplified" License
def __init__(self, requires_grad=False, pretrained=True, num=18):
        super(resnet, self).__init__()
        if(num==18):
            self.net = tv.resnet18(pretrained=pretrained)
        elif(num==34):
            self.net = tv.resnet34(pretrained=pretrained)
        elif(num==50):
            self.net = tv.resnet50(pretrained=pretrained)
        elif(num==101):
            self.net = tv.resnet101(pretrained=pretrained)
        elif(num==152):
            self.net = tv.resnet152(pretrained=pretrained)
        self.N_slices = 5

        self.conv1 = self.net.conv1
        self.bn1 = self.net.bn1
        self.relu = self.net.relu
        self.maxpool = self.net.maxpool
        self.layer1 = self.net.layer1
        self.layer2 = self.net.layer2
        self.layer3 = self.net.layer3
        self.layer4 = self.net.layer4 
Example #9
Source File: tools.py    From perceptron-benchmark with Apache License 2.0
def get_image_format(framework_name, model_name):
    """Return the correct input range and shape for target framework and model"""
    special_shape = {'pytorch':{'inception_v3': (299, 299)},
                     'keras': {'xception': (299, 299),
                               'inception_v3':(299, 299),
                               'yolo_v3': (416, 416),
                               'ssd300': (300, 300)}}
    special_bound = {'keras':{'vgg16':(0, 255),
                              'vgg19':(0, 255),
                              'resnet50':(0, 255),
                              'ssd300': (0, 255)},
                     'cloud': {'aip_antiporn': (0, 255),
                               'google_safesearch': (0, 255),
                               'google_objectdetection': (0, 255)}}
    default_shape = (224, 224)
    default_bound = (0, 1)
    if special_shape.get(framework_name, None):
        if special_shape[framework_name].get(model_name, None):
            default_shape = special_shape[framework_name][model_name]
    if special_bound.get(framework_name, None):
        if special_bound[framework_name].get(model_name, None):
            default_bound = special_bound[framework_name][model_name]
    return {'shape': default_shape, 'bounds': default_bound} 
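For reference, calling get_image_format as defined above behaves roughly as follows (a usage sketch inferred from the defaults in the code, not taken from the repository's tests):

# ResNet-50 under PyTorch matches no special case, so the defaults apply.
get_image_format('pytorch', 'resnet50')      # -> {'shape': (224, 224), 'bounds': (0, 1)}
# Inception v3 has a special input shape registered for the PyTorch framework.
get_image_format('pytorch', 'inception_v3')  # -> {'shape': (299, 299), 'bounds': (0, 1)}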
Example #10
Source File: test_attack_BlendedUniformNoiseAttack.py    From perceptron-benchmark with Apache License 2.0
def test_untargeted_resnet50(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.resnet50(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial 
Example #11
Source File: basenet.py    From MCD_DA with MIT License
def __init__(self,option = 'resnet18',pret=True):
        super(ResBase, self).__init__()
        self.dim = 2048
        if option == 'resnet18':
            model_ft = models.resnet18(pretrained=pret)
            self.dim = 512
        if option == 'resnet50':
            model_ft = models.resnet50(pretrained=pret)
        if option == 'resnet101':
            model_ft = models.resnet101(pretrained=pret)
        if option == 'resnet152':
            model_ft = models.resnet152(pretrained=pret)
        if option == 'resnet200':
            model_ft = Res200()
        if option == 'resnetnext':
            model_ft = ResNeXt(layer_num=101)
        mod = list(model_ft.children())
        mod.pop()
        #self.model_ft =model_ft
        self.features = nn.Sequential(*mod) 
Example #12
Source File: test_attack_Gaussian_blur.py    From perceptron-benchmark with Apache License 2.0
def test_untargeted_resnet50(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.resnet50(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial 
Example #13
Source File: test_attack_SaltAndPepperNoiseAttack.py    From perceptron-benchmark with Apache License 2.0
def test_untargeted_resnet50(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.resnet50(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial 
Example #14
Source File: test_attack_MotionBlurAttack.py    From perceptron-benchmark with Apache License 2.0
def test_untargeted_resnet50(image, label=None):
    import torch
    import torchvision.models as models
    from perceptron.models.classification import PyTorchModel
    mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    model_pyt = models.resnet50(pretrained=True).eval()
    if torch.cuda.is_available():
        model_pyt = model_pyt.cuda()
    model = PyTorchModel(
        model_pyt, bounds=(0, 1), num_classes=1000, preprocessing=(mean, std))
    print(np.argmax(model.predictions(image)))
    attack = Attack(model, criterion=Misclassification())
    adversarial_obj = attack(image, label, unpack=False, epsilons=10000)
    distance = adversarial_obj.distance
    adversarial = adversarial_obj.image
    return distance, adversarial 
Example #15
Source File: featureModels.py    From multi-modal-regression with MIT License
def __init__(self, model_type='resnet50', layer_type='layer4'):
		super().__init__()
		# get model
		if model_type == 'resnet50':
			original_model = models.resnet50(pretrained=True)
		elif model_type == 'resnet101':
			original_model = models.resnet101(pretrained=True)
		else:
			raise NameError('Unknown model_type passed')
		# get requisite layer
		if layer_type == 'layer2':
			num_layers = 6
			pool_size = 28
		elif layer_type == 'layer3':
			num_layers = 7
			pool_size = 14
		elif layer_type == 'layer4':
			num_layers = 8
			pool_size = 7
		else:
			raise NameError('Unknown layer_type passed')
		self.features = nn.Sequential(*list(original_model.children())[:num_layers])
		self.avgpool = nn.AvgPool2d(pool_size, stride=1) 
Example #16
Source File: croppingModel.py    From Grid-Anchor-based-Image-Cropping-Pytorch with MIT License
def __init__(self, alignsize = 8, reddim = 32, loadweight = True, model = None, downsample = 4):
        super(crop_model_multi_scale_shared, self).__init__()

        if model == 'shufflenetv2':
            self.Feat_ext = shufflenetv2_base(loadweight,downsample)
            self.DimRed = nn.Conv2d(812, reddim, kernel_size=1, padding=0)
        elif model == 'mobilenetv2':
            self.Feat_ext = mobilenetv2_base(loadweight,downsample)
            self.DimRed = nn.Conv2d(448, reddim, kernel_size=1, padding=0)
        elif model == 'vgg16':
            self.Feat_ext = vgg_base(loadweight,downsample)
            self.DimRed = nn.Conv2d(1536, reddim, kernel_size=1, padding=0)
        elif model == 'resnet50':
            self.Feat_ext = resnet50_base(loadweight,downsample)
            self.DimRed = nn.Conv2d(3584, reddim, kernel_size=1, padding=0)

        self.downsample2 = nn.UpsamplingBilinear2d(scale_factor=1.0/2.0)
        self.upsample2 = nn.UpsamplingBilinear2d(scale_factor=2.0)
        self.RoIAlign = RoIAlignAvg(alignsize, alignsize, 1.0/2**downsample)
        self.RoDAlign = RoDAlignAvg(alignsize, alignsize, 1.0/2**downsample)
        self.FC_layers = fc_layers(reddim*2, alignsize) 
Example #17
Source File: models.py    From STE-NVAN with MIT License
def __init__(self,non_layers=[0,1,1,1],stripes=[16,16,16,16],non_type='normal',temporal=None):
        super(Resnet50_NL,self).__init__()
        original = models.resnet50(pretrained=True).state_dict()
        if non_type == 'normal':
            self.backbone = res.ResNet_Video_nonlocal(last_stride=1,non_layers=non_layers)
        elif non_type == 'stripe':
            self.backbone = res.ResNet_Video_nonlocal_stripe(last_stride = 1, non_layers=non_layers, stripes=stripes)
        elif non_type == 'hr':
            self.backbone = res.ResNet_Video_nonlocal_hr(last_stride = 1, non_layers=non_layers, stripes=stripes)
        elif non_type == 'stripe_hr':
            self.backbone = res.ResNet_Video_nonlocal_stripe_hr(last_stride = 1, non_layers=non_layers, stripes=stripes)
        for key in original:
            if key.find('fc') != -1:
                continue
            self.backbone.state_dict()[key].copy_(original[key])
        del original

        self.temporal = temporal
        if self.temporal == 'Done':
            self.avgpool = nn.AdaptiveAvgPool3d(1) 
Example #18
Source File: cnn_benchmarks.py    From stacks-usecase with Apache License 2.0
def select(self, model_name=None):
        """select models to be run"""
        logging.info("Run details")
        logging.info("=" * 71)
        models = [
            self.alexnet,
            self.resnet18,
            self.resnet50,
            self.vgg16,
            self.squeezenet,
        ]
        if model_name:
            self.models = [
                model for model in models for name in model_name if name == model.name
            ]
        logging.info("Selected model(s) :: ")
        for m in self.models:
            logging.info("%s ------------- Batchsize :: %s " % (m.name, m.batch))
        logging.info("=" * 71) 
Example #19
Source File: models.py    From eSPGAN with MIT License
def __init__(self, class_num):
        super(ft_net, self).__init__()
        model_ft = models.resnet50(pretrained=True)
        # avg pooling to global pooling
        model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1))

        num_ftrs = model_ft.fc.in_features
        add_block = []
        num_bottleneck = 512
        add_block += [nn.Linear(num_ftrs, num_bottleneck)]
        add_block += [nn.BatchNorm1d(num_bottleneck)]
        add_block += [nn.LeakyReLU(0.1)]
        add_block += [nn.Dropout(p=0.5)]  #default dropout rate 0.5
        add_block = nn.Sequential(*add_block)
        add_block.apply(weights_init_kaiming)
        model_ft.fc = add_block
        self.model = model_ft

        classifier = []
        classifier += [nn.Linear(num_bottleneck, class_num)]
        classifier = nn.Sequential(*classifier)
        classifier.apply(weights_init_classifier)
        self.classifier = classifier 
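The example above builds a custom bottleneck head before fine-tuning; the simplest version of that idea is to swap ResNet-50's 2048-to-1000 fc layer for a new linear layer sized to the target dataset. A generic sketch (num_classes is a placeholder, not a value taken from the eSPGAN project):

import torch.nn as nn
import torchvision.models as models

num_classes = 751  # placeholder class count for the target dataset
model = models.resnet50(pretrained=True)
model.avgpool = nn.AdaptiveAvgPool2d((1, 1))              # global pooling, as in the example above
model.fc = nn.Linear(model.fc.in_features, num_classes)   # replace the ImageNet classifier head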
Example #20
Source File: resnet_model.py    From celeb-detection-oss with Mozilla Public License 2.0
def build_resnet_50(self):
        model = models.resnet50(num_classes=self.num_classes)
        model_with_bottleneck = AdaptResNetBottleneck(model, self.embedding_size, self.num_classes)

        model_with_loss = ResNetCenterLoss(
            model_with_bottleneck,
            self.num_classes,
            self.embedding_size,
            center_loss_weight=.0,
            use_cuda=self.use_cuda
        )

        model_with_loss = model_with_loss.cuda() if self.use_cuda else model_with_loss.cpu()
        model_with_loss.train(False)

        if self.use_cuda:
            checkpoint = torch.load(self.weights_path)
        else:
            checkpoint = torch.load(self.weights_path, map_location=lambda storage, loc: storage)

        model_with_loss.load_state_dict(checkpoint['model_state'])

        return model_with_loss 
Example #21
Source File: model.py    From Adversarial_Metric_Attack with MIT License
def __init__(self, class_num):
        super(ft_net, self).__init__()
        model_ft = models.resnet50(pretrained=True)

        # avg pooling to global pooling
        model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1))

        add_block = []
        num_bottleneck = 2048

        add_block += [nn.BatchNorm1d(num_bottleneck)]

        add_block = nn.Sequential(*add_block)
        add_block.apply(weights_init_kaiming)
        model_ft.fc = add_block
        self.model = model_ft

        self.fc0 = nn.Linear(num_bottleneck, class_num, bias = True)
        init.normal(self.fc0.weight.data, std=0.001)
        if hasattr(self.fc0.bias, 'data'):
            init.constant(self.fc0.bias.data, 0.0) 
Example #22
Source File: model.py    From Beyond-Binary-Supervision-CVPR19 with MIT License
def __init__(self,embedding_size, pretrained=True, is_norm=False):
        super(PoseModel_Resnet50, self).__init__()

        self.model = resnet50(pretrained)
        self.is_norm = is_norm

        self.embedding_size = embedding_size
        self.num_ftrs = self.model.fc.in_features
        self.model.fc1 = nn.Linear(self.num_ftrs, 512)
        self.model.fc2 = nn.Linear(512, self.embedding_size)
        self.model.fc = nn.Linear(self.num_ftrs, self.embedding_size)
        self.not_training = [self.model.conv1]

        self._initialize_weights() 
Example #23
Source File: resnet.py    From VisualizingNDF with MIT License
def Hybridmodel(num_output):
    # the default feature extractor is un-modified resnet50
    return Hybrid(HierRes, [6,8,12,5], num_classes=num_output)
Example #24
Source File: amnet_model.py    From AMNet with MIT License
def __init__(self):
        super(ResNet50FC, self).__init__()
        self.core_cnn = models.resnet50(pretrained=True)
        self.D = 1024
        return 
Example #25
Source File: resnet.py    From deeplabv3 with MIT License
def __init__(self, num_layers):
        super(ResNet_Bottleneck_OS16, self).__init__()

        if num_layers == 50:
            resnet = models.resnet50()
            # load pretrained model:
            resnet.load_state_dict(torch.load("/root/deeplabv3/pretrained_models/resnet/resnet50-19c8e357.pth"))
            # remove fully connected layer, avg pool and layer5:
            self.resnet = nn.Sequential(*list(resnet.children())[:-3])

            print ("pretrained resnet, 50")
        elif num_layers == 101:
            resnet = models.resnet101()
            # load pretrained model:
            resnet.load_state_dict(torch.load("/root/deeplabv3/pretrained_models/resnet/resnet101-5d3b4d8f.pth"))
            # remove fully connected layer, avg pool and layer5:
            self.resnet = nn.Sequential(*list(resnet.children())[:-3])

            print ("pretrained resnet, 101")
        elif num_layers == 152:
            resnet = models.resnet152()
            # load pretrained model:
            resnet.load_state_dict(torch.load("/root/deeplabv3/pretrained_models/resnet/resnet152-b121ed2d.pth"))
            # remove fully connected layer, avg pool and layer5:
            self.resnet = nn.Sequential(*list(resnet.children())[:-3])

            print ("pretrained resnet, 152")
        else:
            raise Exception("num_layers must be in {50, 101, 152}!")

        self.layer5 = make_layer(Bottleneck, in_channels=4*256, channels=512, num_blocks=3, stride=1, dilation=2) 
Example #26
Source File: pyt_common.py    From advex-uar with Apache License 2.0
def get_imagenet_model(resnet_size, nb_classes):
    size_to_model = {
        18: models.resnet18,
        34: models.resnet34,
        50: models.resnet50,
        101: models.resnet101,
        152: models.resnet152
    }
    return size_to_model[resnet_size](num_classes=nb_classes) 
Example #27
Source File: trijoint.py    From im2recipe-Pytorch with MIT License
def __init__(self):
        super(im2recipe, self).__init__()
        if opts.preModel=='resNet50':
        
            resnet = models.resnet50(pretrained=True)
            modules = list(resnet.children())[:-1]  # we do not use the last fc layer.
            self.visionMLP = nn.Sequential(*modules)

            self.visual_embedding = nn.Sequential(
                nn.Linear(opts.imfeatDim, opts.embDim),
                nn.Tanh(),
            )
            
            self.recipe_embedding = nn.Sequential(
                nn.Linear(opts.irnnDim*2 + opts.srnnDim, opts.embDim, opts.embDim),
                nn.Tanh(),
            )

        else:
            raise Exception('Only resNet50 model is implemented.') 

        self.stRNN_     = stRNN()
        self.ingRNN_    = ingRNN()
        self.table      = TableModule()
 
        if opts.semantic_reg:
            self.semantic_branch = nn.Linear(opts.embDim, opts.numClasses) 
Example #28
Source File: FCN_GCN.py    From Global_Convolutional_Network with MIT License
def __init__(self, num_classes):
        self.num_classes = num_classes #21 in paper
        super(FCN_GCN, self).__init__()
        
        resnet = models.resnet50(pretrained=True)
        
        self.conv1 = resnet.conv1 # 7x7,64, stride=2 (output_size=112x112)
        self.bn0 = resnet.bn1 #BatchNorm2d(64)
        self.relu = resnet.relu
        self.maxpool = resnet.maxpool # maxpool /2 (kernel_size=3, stride=2, padding=1)
        
        self.layer1 = resnet.layer1 #res-2 o/p = 56x56,256
        self.layer2 = resnet.layer2 #res-3 o/p = 28x28,512
        self.layer3 = resnet.layer3 #res-4 o/p = 14x14,1024
        self.layer4 = resnet.layer4 #res-5 o/p = 7x7,2048
        
        self.gcn1 = GCN(256,self.num_classes,55) #gcn_i after layer-1
        self.gnc2 = GCN(512,self.num_classes,27)
        self.gcn3 = GCN(1024,self.num_classes,13)
        self.gcn4 = GCN(2048,self.num_classes,7)

        self.br1 = BR(num_classes)
        self.br2 = BR(num_classes)
        self.br3 = BR(num_classes)
        self.br4 = BR(num_classes)
        self.br5 = BR(num_classes)
        self.br6 = BR(num_classes)
        self.br7 = BR(num_classes)
        self.br8 = BR(num_classes)
        self.br9 = BR(num_classes) 
Example #29
Source File: build_model.py    From Global_Convolutional_Network with MIT License
def __init__(self, num_classes):     
        super(FCN_GCN, self).__init__()
        self.num_classes = num_classes #21 in paper

        resnet = models.resnet50(pretrained=True)
        
        self.conv1 = resnet.conv1 # 7x7,64, stride=2
        self.bn0 = resnet.bn1 #BatchNorm2d(64)
        self.relu = resnet.relu
        # self.maxpool = resnet.maxpool # maxpool /2 (kernel_size=3, stride=2, padding=1)
        self.layer1 = nn.Sequential(resnet.maxpool, resnet.layer1) #res-2 o/p = 56x56,256
        self.layer2 = resnet.layer2 #res-3 o/p = 28x28,512
        self.layer3 = resnet.layer3 #res-4 o/p = 14x14,1024
        self.layer4 = resnet.layer4 #res-5 o/p = 7x7,2048
        
        self.gcn1 = GCN(256,self.num_classes) #gcn_i after layer-1
        self.gcn2 = GCN(512,self.num_classes)
        self.gcn3 = GCN(1024,self.num_classes)
        self.gcn4 = GCN(2048,self.num_classes)

        self.br1 = BR(num_classes)
        self.br2 = BR(num_classes)
        self.br3 = BR(num_classes)
        self.br4 = BR(num_classes)
        self.br5 = BR(num_classes)
        self.br6 = BR(num_classes)
        self.br7 = BR(num_classes)
        self.br8 = BR(num_classes)
        self.br9 = BR(num_classes) 
Example #30
Source File: models.py    From nni with MIT License
def create_resnet(layers):
    if layers == 34:
        return resnet34(pretrained=True), 512
    elif layers == 50:
        return resnet50(pretrained=True), 2048
    elif layers == 101:
        return resnet101(pretrained=True), 2048
    elif layers == 152:
        return resnet152(pretrained=True), 2048
    else:
        raise NotImplementedError('only 34, 50, 101, 152 version of Resnet are implemented')