Python pretrainedmodels.utils() Examples

The following are 6 code examples of pretrainedmodels.utils(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module pretrainedmodels, or try the search function.
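For context, pretrainedmodels.utils provides small helpers for loading an image and preprocessing it to match a model's expected input size, mean, and std. A minimal sketch of the typical pattern (the architecture name and image path are placeholders, not taken from the examples below):

import torch
import pretrainedmodels
import pretrainedmodels.utils as utils

model = pretrainedmodels.__dict__['resnet18'](num_classes=1000, pretrained='imagenet')
model.eval()

load_img = utils.LoadImage()          # reads an image file into PIL format
tf_img = utils.TransformImage(model)  # resize/crop/normalize using the model's own settings

input_data = tf_img(load_img('cat.jpg')).unsqueeze(0)  # 1 x 3 x H x W; 'cat.jpg' is a placeholder
with torch.no_grad():
    output = model(input_data)        # logits of shape (1, 1000)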
Example #1
Source File: compute_computational_complexity.py    From models-comparison.pytorch with BSD 3-Clause "New" or "Revised" License
def main():
	args = parser.parse_args()
	
	try:
		with open(args.save) as fp:
			model_info = json.load(fp)
	except (IOError, ValueError):  # no readable results file yet; start fresh
		model_info = {}

	for m in model_names:
		if m not in model_info:
		
			# create model
			print("=> creating model '{}'".format(m))
			if args.pretrained.lower() not in ['false', 'none', 'not', 'no', '0']:
				print("=> using pre-trained parameters '{}'".format(args.pretrained))
				model = pretrainedmodels.__dict__[m](num_classes=1000,
					pretrained=args.pretrained)
			else:
				model = pretrainedmodels.__dict__[m]()

			cudnn.benchmark = True

			scale = 0.875

			print('Images transformed from size {} to {}'.format(
				int(round(max(model.input_size) / scale)),
				model.input_size))
			
			model = model.cuda().eval()
			model = utils.add_flops_counting_methods(model)
			model.start_flops_count()
			
			with torch.no_grad():
				_ = model(torch.randn(args.batch_size, *model.input_size).cuda(non_blocking=True))
			
			summary, n_params = utils.summary(model.input_size, model)
			model_info[m] = (model.compute_average_flops_cost() / 1e9 / 2, n_params.item())

			with open(args.save, 'w') as fp:
				json.dump(model_info, fp) 
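The FLOPs helpers used above (add_flops_counting_methods, start_flops_count, compute_average_flops_cost, summary) appear to come from the project's own utils module rather than from pretrainedmodels.utils in the upstream library. Stripped to its core, the counting pattern is roughly: attach the counters, run one forward pass, then read the accumulated cost (dummy_batch stands in for any correctly sized input tensor):

model = utils.add_flops_counting_methods(model)  # attach counting hooks to the model
model.start_flops_count()                        # reset and enable the counters
with torch.no_grad():
    model(dummy_batch)                           # a single forward pass accumulates the counts
gflops = model.compute_average_flops_cost() / 1e9 / 2  # divide by 2 to report multiply-accumulates, as in the example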
Example #2
Source File: imagenet_logits.py    From pretorched-x with MIT License
def main():
    global args
    args = parser.parse_args()

    # Load Model
    model = pretrainedmodels.__dict__[args.arch](num_classes=1000,
                                            pretrained='imagenet')
    model.eval()

    path_img = args.path_img
    # Load and Transform one input image
    load_img = utils.LoadImage()
    tf_img = utils.TransformImage(model)

    input_data = load_img(args.path_img) # 3x400x225
    input_data = tf_img(input_data)      # 3x299x299
    input_data = input_data.unsqueeze(0) # 1x3x299x299
    input = torch.autograd.Variable(input_data)

    # Load Imagenet Synsets
    with open('../data/imagenet_synsets.txt', 'r') as f:
        synsets = f.readlines()

    # len(synsets)==1001
    # synsets[0] == background
    synsets = [x.strip() for x in synsets]
    splits = [line.split(' ') for line in synsets]
    key_to_classname = {spl[0]:' '.join(spl[1:]) for spl in splits}

    with open('../data/imagenet_classes.txt', 'r') as f:
        class_id_to_key = f.readlines()

    class_id_to_key = [x.strip() for x in class_id_to_key]

    # Make predictions
    output = model(input) # size(1, 1000)
    max, argmax = output.data.squeeze().max(0)
    class_id = argmax[0]
    class_key = class_id_to_key[class_id]
    classname = key_to_classname[class_key]

    print("'{}': '{}' is a '{}'".format(args.arch, path_img, classname)) 
Example #3
Source File: voc2007_extract.py    From pretorched-x with MIT License
def main():
    global args
    args = parser.parse_args()
    print('\nCUDA status: {}'.format(args.cuda))

    print('\nLoad pretrained model on Imagenet')
    model = pretrainedmodels.__dict__[args.arch](num_classes=1000, pretrained='imagenet')
    model.eval()
    if args.cuda:
        model.cuda()

    features_size = model.last_linear.in_features
    model.last_linear = pretrainedmodels.utils.Identity() # Trick to get inputs (features) from last_linear

    print('\nLoad datasets')
    tf_img = pretrainedmodels.utils.TransformImage(model)
    train_set = pretrainedmodels.datasets.Voc2007Classification(args.dir_datasets, 'train', transform=tf_img)
    val_set = pretrainedmodels.datasets.Voc2007Classification(args.dir_datasets, 'val', transform=tf_img)
    test_set = pretrainedmodels.datasets.Voc2007Classification(args.dir_datasets, 'test', transform=tf_img)

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=False, num_workers=2)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=2)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=2)

    print('\nLoad features')
    dir_features = os.path.join(args.dir_outputs, 'data/{}'.format(args.arch))
    path_train_data = '{}/{}set.pth'.format(dir_features, 'train')
    path_val_data = '{}/{}set.pth'.format(dir_features, 'val')
    path_test_data = '{}/{}set.pth'.format(dir_features, 'test')

    features = {}
    targets = {}
    features['train'], targets['train'] = extract_features_targets(model, features_size, train_loader, path_train_data, args.cuda)
    features['val'], targets['val'] = extract_features_targets(model, features_size, val_loader, path_val_data, args.cuda)
    features['test'], targets['test'] = extract_features_targets(model, features_size, test_loader, path_test_data, args.cuda)
    features['trainval'] = torch.cat([features['train'], features['val']], 0)
    targets['trainval'] = torch.cat([targets['train'], targets['val']], 0)

    print('\nTrain Support Vector Machines')
    if args.train_split == 'train' and args.test_split == 'val':
        print('\nHyperparameter search: train multilabel classifiers (one-versus-all) on train/val')
    elif args.train_split == 'trainval' and args.test_split == 'test':
        print('\nEvaluation: train a multilabel classifier on trainval/test')
    else:
        raise ValueError('Trying to train on {} and eval on {}'.format(args.train_split, args.test_split))

    train_multilabel(features, targets, train_set.classes, args.train_split, args.test_split, C=args.C) 
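The pretrainedmodels.utils.Identity replacement above is the usual way to turn a classifier into a feature extractor with this library: the module simply returns its input, so the model's output becomes the pooled features that would have fed last_linear. A minimal sketch (batch stands in for any correctly sized input tensor):

features_size = model.last_linear.in_features         # dimensionality of the penultimate features
model.last_linear = pretrainedmodels.utils.Identity()

with torch.no_grad():
    feats = model(batch)   # shape (batch_size, features_size) instead of (batch_size, 1000)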
Example #4
Source File: imagenet_logits.py    From pretrained-models.pytorch with BSD 3-Clause "New" or "Revised" License
def main():
    global args
    args = parser.parse_args()

    for arch in args.arch:
        # Load Model
        model = pretrainedmodels.__dict__[arch](num_classes=1000,
                                                pretrained='imagenet')
        model.eval()

        path_img = args.path_img
        # Load and Transform one input image
        load_img = utils.LoadImage()
        tf_img = utils.TransformImage(model)

        input_data = load_img(args.path_img) # 3x400x225
        input_data = tf_img(input_data)      # 3x299x299
        input_data = input_data.unsqueeze(0) # 1x3x299x299
        input = torch.autograd.Variable(input_data)

        # Load Imagenet Synsets
        with open('data/imagenet_synsets.txt', 'r') as f:
            synsets = f.readlines()

        # len(synsets)==1001
        # synsets[0] == background
        synsets = [x.strip() for x in synsets]
        splits = [line.split(' ') for line in synsets]
        key_to_classname = {spl[0]:' '.join(spl[1:]) for spl in splits}

        with open('data/imagenet_classes.txt', 'r') as f:
            class_id_to_key = f.readlines()

        class_id_to_key = [x.strip() for x in class_id_to_key]

        # Make predictions
        output = model(input) # size(1, 1000)
        max, argmax = output.data.squeeze().max(0)
        class_id = argmax[0]
        class_key = class_id_to_key[class_id]
        classname = key_to_classname[class_key]

        print("'{}': '{}' is a '{}'".format(arch, path_img, classname)) 
Example #5
Source File: voc2007_extract.py    From pretrained-models.pytorch with BSD 3-Clause "New" or "Revised" License
def main():
    global args
    args = parser.parse_args()
    print('\nCUDA status: {}'.format(args.cuda))

    print('\nLoad pretrained model on Imagenet')
    model = pretrainedmodels.__dict__[args.arch](num_classes=1000, pretrained='imagenet')
    model.eval()
    if args.cuda:
        model.cuda()

    features_size = model.last_linear.in_features
    model.last_linear = pretrainedmodels.utils.Identity() # Trick to get inputs (features) from last_linear

    print('\nLoad datasets')
    tf_img = pretrainedmodels.utils.TransformImage(model)
    train_set = pretrainedmodels.datasets.Voc2007Classification(args.dir_datasets, 'train', transform=tf_img)
    val_set = pretrainedmodels.datasets.Voc2007Classification(args.dir_datasets, 'val', transform=tf_img)
    test_set = pretrainedmodels.datasets.Voc2007Classification(args.dir_datasets, 'test', transform=tf_img)

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=False, num_workers=2)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=2)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=2)

    print('\nLoad features')
    dir_features = os.path.join(args.dir_outputs, 'data/{}'.format(args.arch))
    path_train_data = '{}/{}set.pth'.format(dir_features, 'train')
    path_val_data = '{}/{}set.pth'.format(dir_features, 'val')
    path_test_data = '{}/{}set.pth'.format(dir_features, 'test')

    features = {}
    targets = {}
    features['train'], targets['train'] = extract_features_targets(model, features_size, train_loader, path_train_data, args.cuda)
    features['val'], targets['val'] = extract_features_targets(model, features_size, val_loader, path_val_data, args.cuda)
    features['test'], targets['test'] = extract_features_targets(model, features_size, test_loader, path_test_data, args.cuda)
    features['trainval'] = torch.cat([features['train'], features['val']], 0)
    targets['trainval'] = torch.cat([targets['train'], targets['val']], 0)

    print('\nTrain Support Vector Machines')
    if args.train_split == 'train' and args.test_split == 'val':
        print('\nHyperparameter search: train multilabel classifiers (one-versus-all) on train/val')
    elif args.train_split == 'trainval' and args.test_split == 'test':
        print('\nEvaluation: train a multilabel classifier on trainval/test')
    else:
        raise ValueError('Trying to train on {} and eval on {}'.format(args.train_split, args.test_split))

    train_multilabel(features, targets, train_set.classes, args.train_split, args.test_split, C=args.C) 
Example #6
Source File: compute_accuracy_rate.py    From models-comparison.pytorch with BSD 3-Clause "New" or "Revised" License
def main():
	args = parser.parse_args()

	try:
		with open(args.save) as fp:
			model_info = json.load(fp)
	except (IOError, ValueError):  # no readable results file yet; start fresh
		model_info = {}

	for m in model_names:
		if m not in model_info:

			# create model
			print("=> creating model '{}'".format(m))
			if args.pretrained.lower() not in ['false', 'none', 'not', 'no', '0']:
				print("=> using pre-trained parameters '{}'".format(args.pretrained))
				model = pretrainedmodels.__dict__[m](num_classes=1000,
					pretrained=args.pretrained)
			else:
				model = pretrainedmodels.__dict__[m]()

			cudnn.benchmark = True

			# Data loading code
			valdir = os.path.join(args.data, 'val')

			# if 'scale' in pretrainedmodels.pretrained_settings[args.arch][args.pretrained]:
			#	 scale = pretrainedmodels.pretrained_settings[args.arch][args.pretrained]['scale']
			# else:
			#	 scale = 0.875
			scale = 0.875

			print('Images transformed from size {} to {}'.format(
				int(round(max(model.input_size) / scale)),
				model.input_size))

			val_tf = pretrainedmodels.utils.TransformImage(model, scale=scale)

			val_loader = torch.utils.data.DataLoader(
				datasets.ImageFolder(valdir, val_tf),
				batch_size=args.batch_size, shuffle=False,
				num_workers=args.workers, pin_memory=True)

			model = model.cuda()

			top1, top5 = validate(val_loader, model)
			model_info[m] = (top1, top5)
	
			with open(args.save, 'w') as fp:
				json.dump(model_info, fp)
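As the print statement in this example suggests, passing scale to pretrainedmodels.utils.TransformImage controls how much larger the resize is than the final center crop. A minimal sketch, assuming a model with input_size (3, 224, 224):

val_tf = pretrainedmodels.utils.TransformImage(model, scale=0.875)
# roughly: resize so the shorter edge is about max(input_size) / 0.875 (224 -> 256),
# then center-crop to input_size and normalize with the model's mean and std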