Python models.load_model() Examples
The following are 4 code examples of models.load_model(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the models module, or try the search function.
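In all four examples below, load_model() turns a network name such as 'ResidualGRUNet' or cfg.CONST.NETWORK_CLASS into a class that is then instantiated. A minimal, self-contained sketch of such a name-based loader follows; the importlib/getattr approach and the stand-in names ('collections', 'OrderedDict', load_class) are illustrative assumptions, not the exact 3D-R2N2 implementation.

import importlib

# Minimal sketch of a name-based loader: import a module, then fetch the
# requested class from it by name. A real loader would point at the project's
# models package; 'collections'/'OrderedDict' below are only stand-ins.
def load_class(module_path, class_name):
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

NetClass = load_class('collections', 'OrderedDict')   # stand-in for load_model('ResidualGRUNet')
net = NetClass()                                       # instantiate, as the examples do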
Example #1
Source File: demo.py from 3D-R2N2 with MIT License (5 votes)
def main():
    '''Main demo function'''
    # Save prediction into a file named 'prediction.obj' or the given argument
    pred_file_name = sys.argv[1] if len(sys.argv) > 1 else 'prediction.obj'

    # load images
    demo_imgs = load_demo_images()

    # Download and load pretrained weights
    download_model(DEFAULT_WEIGHTS)

    # Use the default network model
    NetClass = load_model('ResidualGRUNet')

    # Define a network and a solver. Solver provides a wrapper for the test function.
    net = NetClass(compute_grad=False)  # instantiate a network
    net.load(DEFAULT_WEIGHTS)           # load downloaded weights
    solver = Solver(net)                # instantiate a solver

    # Run the network
    voxel_prediction, _ = solver.test_output(demo_imgs)

    # Save the prediction to an OBJ file (mesh file).
    voxel2obj(pred_file_name, voxel_prediction[0, :, 1, :, :] > cfg.TEST.VOXEL_THRESH)

    # Use meshlab or other mesh viewers to visualize the prediction.
    # For Ubuntu>=14.04, you can install meshlab using
    # `sudo apt-get install meshlab`
    if cmd_exists('meshlab'):
        call(['meshlab', pred_file_name])
    else:
        print('Meshlab not found: please use visualization of your choice to view %s' %
              pred_file_name)
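A note on the indexing into voxel_prediction: the comparison against cfg.TEST.VOXEL_THRESH turns a probability volume into a boolean occupancy grid that voxel2obj can mesh. The NumPy sketch below mimics that step; the (batch, depth, 2, height, width) shape and the 0.4 threshold are assumptions for illustration, not values read from the project's config.

import numpy as np

# Stand-in for the output of solver.test_output(demo_imgs); the shape and the
# 0.4 threshold are assumed for illustration only.
voxel_prediction = np.random.rand(1, 32, 2, 32, 32)
occupancy = voxel_prediction[0, :, 1, :, :] > 0.4   # analogous to > cfg.TEST.VOXEL_THRESH
print(occupancy.shape, occupancy.dtype)             # (32, 32, 32) bool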
Example #2
Source File: demo.py from 3D-R2N2-PyTorch with MIT License (5 votes)
def main():
    '''Main demo function'''
    # Save prediction into a file named 'prediction.obj' or the given argument
    pred_file_name = sys.argv[1] if len(sys.argv) > 1 else 'prediction.obj'

    # load images
    demo_imgs = load_demo_images()

    # Use the default network model
    NetClass = load_model('ResidualGRUNet')

    # Define a network and a solver. Solver provides a wrapper for the test function.
    net = NetClass()  # instantiate a network
    if torch.cuda.is_available():
        net.cuda()
    net.eval()

    solver = Solver(net)  # instantiate a solver
    solver.load(DEFAULT_WEIGHTS)

    # Run the network
    voxel_prediction, _ = solver.test_output(demo_imgs)
    voxel_prediction = voxel_prediction.detach().cpu().numpy()

    # Save the prediction to an OBJ file (mesh file).
    voxel2obj(pred_file_name, voxel_prediction[0, 1] > cfg.TEST.VOXEL_THRESH)

    # Use meshlab or other mesh viewers to visualize the prediction.
    # For Ubuntu>=14.04, you can install meshlab using
    # `sudo apt-get install meshlab`
    if cmd_exists('meshlab'):
        call(['meshlab', pred_file_name])
    else:
        print('Meshlab not found: please use visualization of your choice to view %s' %
              pred_file_name)
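Unlike the Theano version above, this example switches the network to eval() mode before running it and moves the result back to the CPU with detach().cpu().numpy(). The tiny, self-contained snippet below illustrates the standard PyTorch inference pattern the demo relies on; the Linear layer is a stand-in for ResidualGRUNet, not part of the project.

import torch
import torch.nn as nn

net = nn.Linear(4, 2)            # stand-in model
net.eval()                       # disable dropout/batch-norm training behaviour
with torch.no_grad():            # no autograd graph is built during inference
    out = net(torch.randn(1, 4))
print(out.detach().cpu().numpy().shape)   # (1, 2)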
Example #3
Source File: train_net.py from 3D-R2N2 with MIT License (4 votes)
def train_net():
    '''Main training function'''
    # Set up the model and the solver
    NetClass = load_model(cfg.CONST.NETWORK_CLASS)
    print('Network definition: \n')
    print(inspect.getsource(NetClass.network_definition))
    net = NetClass()

    # Check that single view reconstruction net is not used for multi view
    # reconstruction.
    if net.is_x_tensor4 and cfg.CONST.N_VIEWS > 1:
        raise ValueError('Do not set the config.CONST.N_VIEWS > 1 when using' \
                         'single-view reconstruction network')

    # Generate the solver
    solver = Solver(net)

    # Prefetching data processes
    #
    # Create worker and data queue for data processing. For training data, use
    # multiple processes to speed up the loading. For validation data, use 1
    # since the queue will be popped every TRAIN.NUM_VALIDATION_ITERATIONS.
    global train_queue, val_queue, train_processes, val_processes
    train_queue = Queue(cfg.QUEUE_SIZE)
    val_queue = Queue(cfg.QUEUE_SIZE)

    train_processes = make_data_processes(
        train_queue,
        category_model_id_pair(dataset_portion=cfg.TRAIN.DATASET_PORTION),
        cfg.TRAIN.NUM_WORKER,
        repeat=True)
    val_processes = make_data_processes(
        val_queue,
        category_model_id_pair(dataset_portion=cfg.TEST.DATASET_PORTION),
        1,
        repeat=True,
        train=False)

    # Train the network
    solver.train(train_queue, val_queue)

    # Cleanup the processes and the queue.
    kill_processes(train_queue, train_processes)
    kill_processes(val_queue, val_processes)
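The prefetching comments are the heart of this example: worker processes keep a bounded queue filled with batches while the training loop drains it, and the workers are killed once training ends. The sketch below is a simplified, hypothetical version of that pattern; load_batch() is a placeholder, and 3D-R2N2's make_data_processes()/kill_processes() wrap a similar but more elaborate mechanism.

import multiprocessing as mp
import time

def load_batch(model_id):
    time.sleep(0.01)                     # pretend to read and voxelize data from disk
    return ('batch-for', model_id)

def worker(queue, ids, repeat=True):
    # Keep pushing batches into the bounded queue; put() blocks when it is full.
    while True:
        for model_id in ids:
            queue.put(load_batch(model_id))
        if not repeat:
            break

if __name__ == '__main__':
    queue = mp.Queue(maxsize=8)          # bounded, like Queue(cfg.QUEUE_SIZE)
    procs = [mp.Process(target=worker, args=(queue, range(4)), daemon=True)
             for _ in range(2)]
    for p in procs:
        p.start()
    for _ in range(10):                  # stand-in for the training loop
        print(queue.get())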
Example #4
Source File: train_net.py from 3D-R2N2-PyTorch with MIT License (4 votes)
def train_net():
    '''Main training function'''
    # Set up the model and the solver
    NetClass = load_model(cfg.CONST.NETWORK_CLASS)
    net = NetClass()
    print('\nNetwork definition: ')
    print(net)

    # Check that single view reconstruction net is not used for multi view
    # reconstruction.
    if net.is_x_tensor4 and cfg.CONST.N_VIEWS > 1:
        raise ValueError('Do not set the config.CONST.N_VIEWS > 1 when using' \
                         'single-view reconstruction network')

    # Prefetching data processes
    #
    # Create worker and data queue for data processing. For training data, use
    # multiple processes to speed up the loading. For validation data, use 1
    # since the queue will be popped every TRAIN.NUM_VALIDATION_ITERATIONS.
    train_dataset = ShapeNetDataset(cfg.TRAIN.DATASET_PORTION)
    train_collate_fn = ShapeNetCollateFn()
    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=cfg.CONST.BATCH_SIZE,
        shuffle=True,
        num_workers=cfg.TRAIN.NUM_WORKER,
        collate_fn=train_collate_fn,
        pin_memory=True)

    val_dataset = ShapeNetDataset(cfg.TEST.DATASET_PORTION)
    val_collate_fn = ShapeNetCollateFn(train=False)
    val_loader = DataLoader(
        dataset=val_dataset,
        batch_size=cfg.CONST.BATCH_SIZE,
        shuffle=True,
        num_workers=1,
        collate_fn=val_collate_fn,
        pin_memory=True)

    net.cuda()

    # Generate the solver
    solver = Solver(net)

    # Train the network
    solver.train(train_loader, val_loader)
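ShapeNetDataset and ShapeNetCollateFn are project-specific classes that read rendered images and voxel grids from disk. To make the DataLoader configuration above concrete, the self-contained sketch below substitutes a tiny random dataset and a trivial collate function; everything prefixed Toy, plus the tensor shapes, is a placeholder rather than part of 3D-R2N2-PyTorch.

import torch
from torch.utils.data import Dataset, DataLoader

class ToyDataset(Dataset):
    '''Random (image, voxel-grid) pairs standing in for ShapeNet samples.'''
    def __len__(self):
        return 100
    def __getitem__(self, idx):
        return torch.randn(3, 32, 32), torch.randint(0, 2, (32, 32, 32))

def toy_collate(batch):
    # Stack per-sample tensors into batched tensors, like ShapeNetCollateFn.
    images, voxels = zip(*batch)
    return torch.stack(images), torch.stack(voxels)

loader = DataLoader(ToyDataset(), batch_size=8, shuffle=True,
                    num_workers=0,                        # 0 keeps the sketch self-contained
                    collate_fn=toy_collate,
                    pin_memory=torch.cuda.is_available())
images, voxels = next(iter(loader))
print(images.shape, voxels.shape)   # torch.Size([8, 3, 32, 32]) torch.Size([8, 32, 32, 32])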