Python torchvision.models.vgg.vgg16() Examples

The following are 18 code examples of torchvision.models.vgg.vgg16(), drawn from open-source projects. The project and source file for each example are listed above its code, so you can follow them back to their original context. You may also want to check out all available functions/classes of the module torchvision.models.vgg, or try the search function.
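Before the project examples, here is a minimal usage sketch (not taken from any of the projects below; the input tensor and printed shapes are purely illustrative). vgg16() constructs the standard VGG-16 architecture, and pretrained=True additionally downloads the ImageNet weights.

import torch
from torchvision.models import vgg

model = vgg.vgg16(pretrained=True)  # ImageNet weights are downloaded on first use
model.eval()

with torch.no_grad():
    # model.features is the convolutional trunk; for a 224x224 input it yields
    # a 512x7x7 feature map. The full model flattens this and runs it through
    # model.classifier to produce 1000-way ImageNet logits.
    feats = model.features(torch.randn(1, 3, 224, 224))
    logits = model(torch.randn(1, 3, 224, 224))

print(feats.shape)   # torch.Size([1, 512, 7, 7])
print(logits.shape)  # torch.Size([1, 1000])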
Example #1
Source File: solver.py    From super-resolution with Apache License 2.0 (6 votes)
def build_model(self):
        self.netG = Generator(n_residual_blocks=self.num_residuals, upsample_factor=self.upscale_factor, base_filter=64, num_channel=1).to(self.device)
        self.netD = Discriminator(base_filter=64, num_channel=1).to(self.device)
        self.feature_extractor = vgg16(pretrained=True)
        self.netG.weight_init(mean=0.0, std=0.2)
        self.netD.weight_init(mean=0.0, std=0.2)
        self.criterionG = nn.MSELoss()
        self.criterionD = nn.BCELoss()
        torch.manual_seed(self.seed)

        if self.GPU_IN_USE:
            torch.cuda.manual_seed(self.seed)
            self.feature_extractor.cuda()
            cudnn.benchmark = True
            self.criterionG.cuda()
            self.criterionD.cuda()

        self.optimizerG = optim.Adam(self.netG.parameters(), lr=self.lr, betas=(0.9, 0.999))
        self.optimizerD = optim.SGD(self.netD.parameters(), lr=self.lr / 100, momentum=0.9, nesterov=True)
        self.schedulerG = optim.lr_scheduler.MultiStepLR(self.optimizerG, milestones=[50, 75, 100], gamma=0.5)  # lr decay for the generator
        self.schedulerD = optim.lr_scheduler.MultiStepLR(self.optimizerD, milestones=[50, 75, 100], gamma=0.5)  # lr decay for the discriminator
Example #2
Source File: model_factory.py    From DMS with MIT License (5 votes)
def vgg16(*args, **kwargs):
    pretrained = False
    if 'pretrained' in kwargs:
        pretrained = kwargs['pretrained']
        kwargs['pretrained'] = False
    base_vgg = vgg(*args, **kwargs)
    conv_fc6 = nn.Conv2d(in_channels=512,
                         out_channels=4096,
                         kernel_size=7,
                         padding=3)

    conv_fc7 = nn.Conv2d(in_channels=4096,
                         out_channels=4096,
                         kernel_size=1,
                         padding=0)

    conv_fc8 = nn.Conv2d(in_channels=4096,
                         out_channels=2688,
                         kernel_size=1,
                         padding=0)

    fconv_layers = []
    for layer in (conv_fc6, conv_fc7, conv_fc8):
        fconv_layers += [layer, nn.ReLU(), nn.Dropout(p=0.2)]
    base_vgg = list(base_vgg.children())[:-1]
    base_vgg += fconv_layers
    model = nn.Sequential(*base_vgg)
    if pretrained:
        state_dict = model.state_dict()
        pretrained_state = model_zoo.load_url(VGG16_URL)
        for layer_name in pretrained_state:
            if layer_name in state_dict:
                state_dict[layer_name] = pretrained_state[layer_name]
        model.load_state_dict(state_dict)
    return model 
Example #3
Source File: object_detector.py    From KERN with MIT License (5 votes)
def load_vgg(use_dropout=True, use_relu=True, use_linear=True, pretrained=True):
    model = vgg16(pretrained=pretrained)
    del model.features._modules['30']  # Get rid of the maxpool
    del model.classifier._modules['6']  # Get rid of class layer
    if not use_dropout:
        del model.classifier._modules['5']  # Get rid of dropout
        if not use_relu:
            del model.classifier._modules['4']  # Get rid of relu activation
            if not use_linear:
                del model.classifier._modules['3']  # Get rid of linear layer
    return model 
Example #4
Source File: resnet.py    From KERN with MIT License (5 votes)
def vgg_fc(relu_end=True, linear_end=True):
    model = vgg16(pretrained=True)
    vfc = model.classifier
    del vfc._modules['6'] # Get rid of linear layer
    del vfc._modules['5'] # Get rid of linear layer
    if not relu_end:
        del vfc._modules['4'] # Get rid of linear layer
        if not linear_end:
            del vfc._modules['3']
    return vfc 
Example #5
Source File: test_model_speedup.py    From nni with MIT License (5 votes)
def test_speedup_vgg16(self):
        prune_model_l1(vgg16())
        model = vgg16()
        model.train()
        ms = ModelSpeedup(model, torch.randn(2, 3, 32, 32), MASK_FILE)
        ms.speedup_model()

        orig_model = vgg16()
        assert model.training
        assert model.features[2].out_channels == int(orig_model.features[2].out_channels * SPARSITY)
        assert model.classifier[0].in_features == int(orig_model.classifier[0].in_features * SPARSITY) 
Example #6
Source File: object_detector.py    From VCTree-Scene-Graph-Generation with MIT License (5 votes)
def load_vgg(use_dropout=False, use_relu=True, use_linear=True, pretrained=True):
    model = vgg16(pretrained=pretrained)
    del model.features._modules['30']  # Get rid of the maxpool
    del model.classifier._modules['6']  # Get rid of class layer
    if not use_dropout:
        del model.classifier._modules['5']  # Get rid of dropout
        if not use_relu:
            del model.classifier._modules['4']  # Get rid of relu activation
            if not use_linear:
                del model.classifier._modules['3']  # Get rid of linear layer
    return model 
Example #7
Source File: resnet.py    From VCTree-Scene-Graph-Generation with MIT License (5 votes)
def vgg_fc(relu_end=True, linear_end=True):
    model = vgg16(pretrained=True)
    vfc = model.classifier
    del vfc._modules['6'] # Get rid of linear layer
    del vfc._modules['5'] # Get rid of linear layer
    if not relu_end:
        del vfc._modules['4'] # Get rid of linear layer
        if not linear_end:
            del vfc._modules['3']
    return vfc 
Example #8
Source File: loss.py    From SRGAN with MIT License (5 votes)
def __init__(self):
        super(GeneratorLoss, self).__init__()
        vgg = vgg16(pretrained=True)
        loss_network = nn.Sequential(*list(vgg.features)[:31]).eval()
        for param in loss_network.parameters():
            param.requires_grad = False
        self.loss_network = loss_network
        self.mse_loss = nn.MSELoss()
        self.tv_loss = TVLoss() 
Example #9
Source File: vggnet.py    From nn-transfer with MIT License (5 votes)
def vggnet_pytorch():
    return vgg.vgg16() 
Example #10
Source File: object_detector.py    From neural-motifs with MIT License (5 votes)
def load_vgg(use_dropout=True, use_relu=True, use_linear=True, pretrained=True):
    model = vgg16(pretrained=pretrained)
    del model.features._modules['30']  # Get rid of the maxpool
    del model.classifier._modules['6']  # Get rid of class layer
    if not use_dropout:
        del model.classifier._modules['5']  # Get rid of dropout
        if not use_relu:
            del model.classifier._modules['4']  # Get rid of relu activation
            if not use_linear:
                del model.classifier._modules['3']  # Get rid of linear layer
    return model 
Example #11
Source File: resnet.py    From neural-motifs with MIT License (5 votes)
def vgg_fc(relu_end=True, linear_end=True):
    model = vgg16(pretrained=True)
    vfc = model.classifier
    del vfc._modules['6'] # Get rid of linear layer
    del vfc._modules['5'] # Get rid of linear layer
    if not relu_end:
        del vfc._modules['4'] # Get rid of linear layer
        if not linear_end:
            del vfc._modules['3']
    return vfc 
Example #12
Source File: vgg.py    From convNet.pytorch with MIT License (5 votes)
def vgg(**config):
    dataset = config.pop('dataset', 'imagenet')
    depth = config.pop('depth', 16)
    bn = config.pop('bn', True)

    if dataset == 'imagenet':
        config.setdefault('num_classes', 1000)
        if depth == 11:
            if bn is False:
                return vgg11(pretrained=False, **config)
            else:
                return vgg11_bn(pretrained=False, **config)
        if depth == 13:
            if bn is False:
                return vgg13(pretrained=False, **config)
            else:
                return vgg13_bn(pretrained=False, **config)
        if depth == 16:
            if bn is False:
                return vgg16(pretrained=False, **config)
            else:
                return vgg16_bn(pretrained=False, **config)
        if depth == 19:
            if bn is False:
                return vgg19(pretrained=False, **config)
            else:
                return vgg19_bn(pretrained=False, **config)
    elif dataset == 'cifar10':
        config.setdefault('num_classes', 10)
    elif dataset == 'cifar100':
        config.setdefault('num_classes', 100)
    config.setdefault('batch_norm', bn)
    return VGG(model_name[depth], **config) 
Example #13
Source File: loss.py    From real-world-sr with MIT License (5 votes)
def __init__(self):
        super(PerceptualLossVGG16, self).__init__()
        vgg = vgg16(pretrained=True)
        loss_network = nn.Sequential(*list(vgg.features)[:31]).eval()
        for param in loss_network.parameters():
            param.requires_grad = False
        self.loss_network = loss_network
        self.mse_loss = nn.MSELoss() 
Example #14
Source File: loss.py    From iSeeBetter with MIT License (5 votes)
def __init__(self):
        super(GeneratorLoss, self).__init__()
        vgg = vgg16(pretrained=True)
        loss_network = nn.Sequential(*list(vgg.features)[:31]).eval()
        for param in loss_network.parameters():
            param.requires_grad = False
        self.loss_network = loss_network
        self.mse_loss = nn.MSELoss()
        self.tv_loss = TVLoss() 
Example #15
Source File: vggnet.py    From nn-transfer with MIT License (4 votes)
def vggnet_keras():

    # Block 1
    img_input = Input((3, 224, 224))
    x = Conv2D(64, (3, 3), activation='relu',
               padding='same', name='features.0')(img_input)
    x = Conv2D(64, (3, 3), activation='relu',
               padding='same', name='features.2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu',
               padding='same', name='features.5')(x)
    x = Conv2D(128, (3, 3), activation='relu',
               padding='same', name='features.7')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu',
               padding='same', name='features.10')(x)
    x = Conv2D(256, (3, 3), activation='relu',
               padding='same', name='features.12')(x)
    x = Conv2D(256, (3, 3), activation='relu',
               padding='same', name='features.14')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.17')(x)
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.19')(x)
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.21')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.24')(x)
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.26')(x)
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.28')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='classifier.0')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, activation='relu', name='classifier.3')(x)
    x = Dropout(0.5)(x)
    x = Dense(1000, activation=None, name='classifier.6')(x)

    # Create model.
    model = Model(img_input, x, name='vgg16')

    return model 
Example #16
Source File: integration_test.py    From baal with Apache License 2.0 (4 votes)
def test_calibration_integration():
    transform_pipeline = Compose([Resize((64, 64)), ToTensor()])
    cifar10_train = DummyDataset(transform_pipeline)
    cifar10_test = DummyDataset(transform_pipeline)

    # We don't create a separate train set for calibration, since the goal here
    # is not actually to calibrate the model.
    al_dataset = ActiveLearningDataset(cifar10_train,
                                       pool_specifics={'transform': transform_pipeline})
    al_dataset.label_randomly(10)
    use_cuda = False
    model = vgg.vgg16(pretrained=False,
                      num_classes=10)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.0005)

    wrapper = ModelWrapper(model, criterion)
    calibrator = DirichletCalibrator(wrapper=wrapper, num_classes=10,
                                     lr=0.001, reg_factor=0.01)


    for step in range(2):
        wrapper.train_on_dataset(al_dataset, optimizer=optimizer,
                                 batch_size=10, epoch=1,
                                 use_cuda=use_cuda, workers=0)

        wrapper.test_on_dataset(cifar10_test, batch_size=10,
                                use_cuda=use_cuda, workers=0)


        before_calib_param = list(map(lambda x: x.clone(), wrapper.model.parameters()))

        calibrator.calibrate(al_dataset, cifar10_test,
                            batch_size=10, epoch=5,
                            use_cuda=use_cuda, double_fit=False, workers=0)

        after_calib_param = list(map(lambda x: x.clone(), model.parameters()))


        assert all([np.allclose(i.detach(), j.detach())
                    for i, j in zip(before_calib_param, after_calib_param)])

        assert len(list(wrapper.model.modules())) < len(list(calibrator.calibrated_model.modules())) 
Example #17
Source File: integration_test.py    From baal with Apache License 2.0 (4 votes)
def test_integration():
    transform_pipeline = Compose([Resize((64, 64)), ToTensor()])
    cifar10_train = DummyDataset(transform_pipeline)
    cifar10_test = DummyDataset(transform_pipeline)

    al_dataset = ActiveLearningDataset(cifar10_train,
                                       pool_specifics={'transform': transform_pipeline})
    al_dataset.label_randomly(10)

    use_cuda = False
    model = vgg.vgg16(pretrained=False,
                      num_classes=10)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.0005)

    # We can now use BaaL to create the active learning loop.

    model = ModelWrapper(model, criterion)
    # We create an ActiveLearningLoop that will automatically label the most uncertain samples.
    # In this case, we use the widely used BALD heuristic.

    active_loop = ActiveLearningLoop(al_dataset,
                                     model.predict_on_dataset,
                                     heuristic=heuristics.BALD(),
                                     ndata_to_label=10,
                                     batch_size=10,
                                     iterations=10,
                                     use_cuda=use_cuda,
                                     workers=4)

    # We're all set!
    num_steps = 10
    for step in range(num_steps):
        old_param = list(map(lambda x: x.clone(), model.model.parameters()))
        model.train_on_dataset(al_dataset, optimizer=optimizer, batch_size=10,
                               epoch=5, use_cuda=use_cuda, workers=2)
        model.test_on_dataset(cifar10_test, batch_size=10, use_cuda=use_cuda,
                              workers=2)

        if not active_loop.step():
            break
        new_param = list(map(lambda x: x.clone(), model.model.parameters()))
        assert any([not np.allclose(i.detach(), j.detach())
                    for i, j in zip(old_param, new_param)])
    assert step == 4  # 10 initial labels + (4 * 10) newly labelled = 50, so the loop stops at iteration 4 
Example #18
Source File: vgg_loss.py    From sigmanet with MIT License (4 votes)
def __init__(
            self,
            layer_name_mapping=None,
            normalize=True,
            device='cuda',
            vgg_model=None,
            full=False,
            inplace=False,
            distance=2,
    ):
        super(VGGLoss, self).__init__()
        self.layer_name_mapping = layer_name_mapping
        if self.layer_name_mapping is None:
            self.layer_name_mapping = {
                '0': 'conv1_0',
                # '1': 'relu1_0',
                '2': "conv1_1",
                # '3': 'relu1_1',
                '7': "conv2_2",
                # '8': "relu2_2",
                '14': "conv3_3",
                # '15': "relu3_3",
                '21': "conv4_3",
                # '22': "relu4_3",  # <- gradient is strangely huge... turn off for now
            }

        self.normalize = normalize
        self.device = device
        self.full = full
        if distance == 1:
            self.distance = F.l1_loss
        else:
            self.distance = F.mse_loss

        if vgg_model is None:
            if inplace:
                vgg_model = vgg.vgg16(pretrained=True)
            else:
                vgg_model = modified_vgg.vgg16(pretrained=True)

        vgg_model.to(self.device)
        vgg_model.eval()

        self.vgg_layers = vgg_model.features
        del vgg_model

        # ImageNet channel-wise normalization statistics
        self.mean_t = torch.tensor([0.485, 0.456, 0.406], dtype=torch.float32)
        self.std_t = torch.tensor([0.229, 0.224, 0.225], dtype=torch.float32)
        self.mean_t = self.mean_t.view(1, 3, 1, 1).to(self.device)
        self.std_t = self.std_t.view(1, 3, 1, 1).to(self.device)