Python nn.DataParallel() Examples
The following are 30 code examples of nn.DataParallel(), taken from open-source projects. Each example notes the source file and project it comes from. You may also want to check out the other available functions and classes of the nn module.
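For orientation, here is a minimal sketch of the standard torch.nn.DataParallel usage pattern that the examples below build on (the mynn.DataParallel they call is a project-specific wrapper); the model and file name are illustrative, not taken from any of the projects:

import torch
import torch.nn as nn

# A small illustrative model (not from any of the projects below).
model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))

if torch.cuda.is_available():
    model = model.cuda()
    if torch.cuda.device_count() > 1:
        # Replicate the model across all visible GPUs; each forward pass
        # splits the input batch along dim 0 and gathers the outputs.
        model = nn.DataParallel(model)

# When saving, unwrap .module so the state_dict keys are not prefixed
# with 'module.' -- the same unwrapping the examples below perform.
to_save = model.module if isinstance(model, nn.DataParallel) else model
torch.save(to_save.state_dict(), 'model.pth')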
Example #1
Source File: net.py From PANet with MIT License
def save_ckpt(output_dir, args, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_{}_{}.pth'.format(args.epoch, args.step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    # TODO: (maybe) Do not save redundant shared params
    # model_state_dict = model.state_dict()
    torch.save({
        'epoch': args.epoch,
        'step': args.step,
        'iters_per_epoch': args.iters_per_epoch,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
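For completeness, a hypothetical counterpart that loads such a checkpoint back (load_ckpt_example is an illustrative name, not part of PANet); it assumes model and optimizer have already been constructed with matching architectures:

import torch

def load_ckpt_example(path, model, optimizer):
    # Load onto the CPU first; parameters are copied onto the model's
    # existing devices when load_state_dict runs.
    checkpoint = torch.load(path, map_location='cpu')
    # Unwrap a DataParallel wrapper, if present, so the keys line up.
    target = model.module if hasattr(model, 'module') else model
    target.load_state_dict(checkpoint['model'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    return checkpoint['epoch'], checkpoint['step']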
Example #2
Source File: train_net_step.py From DIoU-pytorch-detectron with GNU General Public License v3.0
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    model_state_dict = model.state_dict()
    torch.save({
        'step': step,
        'train_size': train_size,
        'batch_size': args.batch_size,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #3
Source File: test_engine.py From DIoU-pytorch-detectron with GNU General Public License v3.0
def initialize_model_from_cfg(args, gpu_id=0):
    """Initialize a model from the global cfg. Loads test-time weights and
    set to evaluation mode.
    """
    model = model_builder.Generalized_RCNN()
    model.eval()

    if args.cuda:
        model.cuda()

    if args.load_ckpt:
        load_name = args.load_ckpt
        logger.info("loading checkpoint %s", load_name)
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(model, checkpoint['model'])

    if args.load_detectron:
        logger.info("loading detectron weights %s", args.load_detectron)
        load_detectron_weight(model, args.load_detectron)

    model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)

    return model
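The mynn.DataParallel used above is a project-specific subclass from the Detectron.pytorch family of codebases; its cpu_keywords and minibatch arguments are not part of the stock torch.nn.DataParallel API. A rough sketch of the equivalent test-time wrapping with the stock class (function and parameter names here are illustrative):

import torch.nn as nn

def wrap_for_inference(model, gpu_id=0):
    # Stock DataParallel scatters every tensor input across the listed GPUs;
    # there is no cpu_keywords/minibatch equivalent here.
    model = model.cuda(gpu_id)
    model = nn.DataParallel(model, device_ids=[gpu_id], output_device=gpu_id)
    model.eval()
    return model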
Example #4
Source File: net.py From DIoU-pytorch-detectron with GNU General Public License v3.0
def save_ckpt(output_dir, args, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_{}_{}.pth'.format(args.epoch, args.step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    # TODO: (maybe) Do not save redundant shared params
    # model_state_dict = model.state_dict()
    torch.save({
        'epoch': args.epoch,
        'step': args.step,
        'iters_per_epoch': args.iters_per_epoch,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #5
Source File: train_net_step.py From detectron-self-train with MIT License
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    model_state_dict = model.state_dict()
    torch.save({
        'step': step,
        'train_size': train_size,
        'batch_size': args.batch_size,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #6
Source File: test_engine.py From detectron-self-train with MIT License
def initialize_model_from_cfg(args, gpu_id=0):
    """Initialize a model from the global cfg. Loads test-time weights and
    set to evaluation mode.
    """
    model = model_builder.Generalized_RCNN()
    model.eval()

    if args.cuda:
        model.cuda()

    if args.load_ckpt:
        load_name = args.load_ckpt
        logger.info("loading checkpoint %s", load_name)
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(model, checkpoint['model'])

    if args.load_detectron:
        logger.info("loading detectron weights %s", args.load_detectron)
        load_detectron_weight(model, args.load_detectron)

    model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)

    return model
Example #7
Source File: net.py From detectron-self-train with MIT License
def save_ckpt(output_dir, args, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_{}_{}.pth'.format(args.epoch, args.step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    # TODO: (maybe) Do not save redundant shared params
    # model_state_dict = model.state_dict()
    torch.save({
        'epoch': args.epoch,
        'step': args.step,
        'iters_per_epoch': args.iters_per_epoch,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #8
Source File: train_net_step.py From Large-Scale-VRD.pytorch with MIT License
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    model_state_dict = model.state_dict()
    torch.save({
        'step': step,
        'train_size': train_size,
        'batch_size': args.batch_size,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #9
Source File: train_net_step_rel.py From Large-Scale-VRD.pytorch with MIT License
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    model_state_dict = model.state_dict()
    torch.save({
        'step': step,
        'train_size': train_size,
        'batch_size': args.batch_size,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #10
Source File: test_engine_rel.py From Large-Scale-VRD.pytorch with MIT License
def initialize_model_from_cfg(args, gpu_id=0):
    """Initialize a model from the global cfg. Loads test-time weights and
    set to evaluation mode.
    """
    model = model_builder_rel.Generalized_RCNN()
    model.eval()

    if args.cuda:
        model.cuda()

    if args.load_ckpt:
        load_name = args.load_ckpt
        logger.info("loading checkpoint %s", load_name)
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(model, checkpoint['model'])

    if args.load_detectron:
        logger.info("loading detectron weights %s", args.load_detectron)
        load_detectron_weight(model, args.load_detectron)

    model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)

    return model
Example #11
Source File: test_engine.py From Large-Scale-VRD.pytorch with MIT License
def initialize_model_from_cfg(args, gpu_id=0):
    """Initialize a model from the global cfg. Loads test-time weights and
    set to evaluation mode.
    """
    model = model_builder.Generalized_RCNN()
    model.eval()

    if args.cuda:
        model.cuda()

    if args.load_ckpt:
        load_name = args.load_ckpt
        logger.info("loading checkpoint %s", load_name)
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(model, checkpoint['model'])

    if args.load_detectron:
        logger.info("loading detectron weights %s", args.load_detectron)
        load_detectron_weight(model, args.load_detectron)

    model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)

    return model
Example #12
Source File: net.py From Large-Scale-VRD.pytorch with MIT License
def save_ckpt(output_dir, args, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_{}_{}.pth'.format(args.epoch, args.step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    # TODO: (maybe) Do not save redundant shared params
    # model_state_dict = model.state_dict()
    torch.save({
        'epoch': args.epoch,
        'step': args.step,
        'iters_per_epoch': args.iters_per_epoch,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #13
Source File: train_net_step.py From PMFNet with MIT License
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    model_state_dict = model.state_dict()
    torch.save({
        'step': step,
        'train_size': train_size,
        'batch_size': args.batch_size,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #14
Source File: test_engine.py From PMFNet with MIT License
def initialize_model_from_cfg(args, gpu_id=0):
    """Initialize a model from the global cfg. Loads test-time weights and
    set to evaluation mode.
    """
    model = Generalized_RCNN()
    model.eval()

    if args.cuda:
        model.cuda()

    if args.load_ckpt:
        load_name = args.load_ckpt
        logger.info("loading checkpoint %s", load_name)
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(model, checkpoint['model'])
        # model.load_state_dict(checkpoint['model'])

    if args.load_detectron:
        logger.info("loading detectron weights %s", args.load_detectron)
        load_detectron_weight(model, args.load_detectron)

    model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)

    return model
Example #15
Source File: train_net_step.py From PANet with MIT License
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    model_state_dict = model.state_dict()
    torch.save({
        'step': step,
        'train_size': train_size,
        'batch_size': args.batch_size,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #16
Source File: test_engine.py From PANet with MIT License
def initialize_model_from_cfg(args, gpu_id=0):
    """Initialize a model from the global cfg. Loads test-time weights and
    set to evaluation mode.
    """
    model = model_builder.Generalized_RCNN()
    model.eval()

    if args.cuda:
        model.cuda()

    if args.load_ckpt:
        load_name = args.load_ckpt
        logger.info("loading checkpoint %s", load_name)
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(model, checkpoint['model'])

    if args.load_detectron:
        logger.info("loading detectron weights %s", args.load_detectron)
        load_detectron_weight(model, args.load_detectron)

    model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)

    return model
Example #17
Source File: train_net_step.py From pcl.pytorch with MIT License
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    model_state_dict = model.state_dict()
    torch.save({
        'step': step,
        'train_size': train_size,
        'batch_size': args.batch_size,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #18
Source File: test_engine.py From Detectron.pytorch with MIT License
def initialize_model_from_cfg(args, gpu_id=0):
    """Initialize a model from the global cfg. Loads test-time weights and
    set to evaluation mode.
    """
    model = model_builder.Generalized_RCNN()
    model.eval()

    if args.cuda:
        model.cuda()

    if args.load_ckpt:
        load_name = args.load_ckpt
        logger.info("loading checkpoint %s", load_name)
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(model, checkpoint['model'])

    if args.load_detectron:
        logger.info("loading detectron weights %s", args.load_detectron)
        load_detectron_weight(model, args.load_detectron)

    model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)

    return model
Example #19
Source File: train_net_step.py From Detectron.pytorch with MIT License
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    model_state_dict = model.state_dict()
    torch.save({
        'step': step,
        'train_size': train_size,
        'batch_size': args.batch_size,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #20
Source File: net.py From FPN-Pytorch with MIT License
def save_ckpt(output_dir, args, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_{}_{}.pth'.format(args.epoch, args.step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    # TODO: (maybe) Do not save redundant shared params
    # model_state_dict = model.state_dict()
    torch.save({
        'epoch': args.epoch,
        'step': args.step,
        'iters_per_epoch': args.iters_per_epoch,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #21
Source File: test_engine.py From FPN-Pytorch with MIT License
def initialize_model_from_cfg(args, gpu_id=0):
    """Initialize a model from the global cfg. Loads test-time weights and
    set to evaluation mode.
    """
    model = model_builder.Generalized_RCNN()
    model.eval()

    if args.cuda:
        model.cuda()

    if args.load_ckpt:
        load_name = args.load_ckpt
        logger.info("loading checkpoint %s", load_name)
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(model, checkpoint['model'])

    if args.load_detectron:
        logger.info("loading detectron weights %s", args.load_detectron)
        load_detectron_weight(model, args.load_detectron)

    model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)

    return model
Example #22
Source File: train_net_step_win.py From FPN-Pytorch with MIT License
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    model_state_dict = model.state_dict()
    torch.save({
        'step': step,
        'train_size': train_size,
        'batch_size': args.batch_size,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #23
Source File: train_net_step.py From FPN-Pytorch with MIT License
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    model_state_dict = model.state_dict()
    torch.save({
        'step': step,
        'train_size': train_size,
        'batch_size': args.batch_size,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #24
Source File: net.py From pcl.pytorch with MIT License
def save_ckpt(output_dir, args, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_{}_{}.pth'.format(args.epoch, args.step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    # TODO: (maybe) Do not save redundant shared params
    # model_state_dict = model.state_dict()
    torch.save({
        'epoch': args.epoch,
        'step': args.step,
        'iters_per_epoch': args.iters_per_epoch,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #25
Source File: test_engine.py From pcl.pytorch with MIT License
def initialize_model_from_cfg(args, gpu_id=0):
    """Initialize a model from the global cfg. Loads test-time weights and
    set to evaluation mode.
    """
    model = model_builder.Generalized_RCNN()
    model.eval()

    if args.cuda:
        model.cuda()

    if args.load_ckpt:
        load_name = args.load_ckpt
        logger.info("loading checkpoint %s", load_name)
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(model, checkpoint['model'])

    if args.load_detectron:
        logger.info("loading detectron weights %s", args.load_detectron)
        load_detectron_weight(model, args.load_detectron)

    model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)

    return model
Example #26
Source File: net.py From Detectron.pytorch with MIT License
def save_ckpt(output_dir, args, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_{}_{}.pth'.format(args.epoch, args.step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    # TODO: (maybe) Do not save redundant shared params
    # model_state_dict = model.state_dict()
    torch.save({
        'epoch': args.epoch,
        'step': args.step,
        'iters_per_epoch': args.iters_per_epoch,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #27
Source File: net.py From Detectron.pytorch with MIT License
def save_ckpt(output_dir, args, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_{}_{}.pth'.format(args.epoch, args.step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    # TODO: (maybe) Do not save redundant shared params
    # model_state_dict = model.state_dict()
    torch.save({
        'epoch': args.epoch,
        'step': args.step,
        'iters_per_epoch': args.iters_per_epoch,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #28
Source File: test_engine.py From Detectron.pytorch with MIT License
def initialize_model_from_cfg(args, gpu_id=0):
    """Initialize a model from the global cfg. Loads test-time weights and
    set to evaluation mode.
    """
    model = model_builder.Generalized_RCNN()
    model.eval()

    if args.cuda:
        model.cuda()

    if args.load_ckpt:
        load_name = args.load_ckpt
        logger.info("loading checkpoint %s", load_name)
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(model, checkpoint['model'])

    if args.load_detectron:
        logger.info("loading detectron weights %s", args.load_detectron)
        load_detectron_weight(model, args.load_detectron)

    model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)

    return model
Example #29
Source File: train_net_step.py From Detectron.pytorch with MIT License
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    model_state_dict = model.state_dict()
    torch.save({
        'step': step,
        'train_size': train_size,
        'batch_size': args.batch_size,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)
Example #30
Source File: net.py From Context-aware-ZSR with MIT License
def save_ckpt(output_dir, args, model, optimizer):
    """Save checkpoint"""
    if args.no_save:
        return
    ckpt_dir = os.path.join(output_dir, 'ckpt')
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    save_name = os.path.join(ckpt_dir, 'model_{}_{}.pth'.format(args.epoch, args.step))
    if isinstance(model, mynn.DataParallel):
        model = model.module
    # TODO: (maybe) Do not save redundant shared params
    # model_state_dict = model.state_dict()
    torch.save({
        'epoch': args.epoch,
        'step': args.step,
        'iters_per_epoch': args.iters_per_epoch,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict()}, save_name)
    logger.info('save model: %s', save_name)