Python caffe.proto.caffe_pb2.SolverParameter() Examples
The following are 19 code examples of caffe.proto.caffe_pb2.SolverParameter(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module caffe.proto.caffe_pb2, or try the search function.
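Most of the examples below follow the same pattern: construct an empty SolverParameter message, fill in its fields (either directly in Python or by parsing a solver prototxt with google.protobuf.text_format), and hand the result to a Caffe solver. The following is a minimal sketch of that round trip, assuming caffe and protobuf are installed; the file names and field values are placeholders, not taken from any example below.

# Minimal sketch: build a SolverParameter in Python, serialize it to
# prototxt text, and parse it back. 'solver.prototxt', 'train.prototxt'
# and the hyperparameter values are placeholders.
from caffe.proto import caffe_pb2
from google.protobuf import text_format

solver_param = caffe_pb2.SolverParameter()
solver_param.train_net = 'train.prototxt'
solver_param.base_lr = 0.001
solver_param.lr_policy = 'step'
solver_param.gamma = 0.1
solver_param.stepsize = 20000
solver_param.max_iter = 100000
solver_param.snapshot_prefix = 'snapshots/model'
solver_param.solver_mode = caffe_pb2.SolverParameter.GPU

# Write the message out as a human-readable solver prototxt ...
with open('solver.prototxt', 'w') as f:
    f.write(text_format.MessageToString(solver_param))

# ... and parse it back into a fresh SolverParameter, as the
# SolverWrapper examples below do.
parsed = caffe_pb2.SolverParameter()
with open('solver.prototxt', 'r') as f:
    text_format.Merge(f.read(), parsed)
print(parsed.snapshot_prefix)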
Example #1
Source File: train.py From tripletloss with MIT License | 6 votes |
def __init__(self, solver_prototxt, output_dir, pretrained_model=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    caffe.set_mode_gpu()
    caffe.set_device(0)

    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print ('Loading pretrained model '
               'weights from {:s}').format(pretrained_model)
        self.solver.net.copy_from(pretrained_model)

    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)
Example #2
Source File: train.py From triplet with MIT License | 6 votes |
def __init__(self, solver, output_dir, pretrained_model=None, gpu_id=0, data=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)

    self.solver = caffe.SGDSolver(solver)
    if pretrained_model is not None:
        print(('Loading pretrained model '
               'weights from {:s}').format(pretrained_model))
        self.solver.net.copy_from(pretrained_model)

    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)

    self.solver.net.layers[0].set_data(data)
Example #3
Source File: train.py From dpl with MIT License | 6 votes |
def __init__(self, solver_prototxt, roidb, output_dir, pretrained_model=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    print 'done'

    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print ('Loading pretrained model '
               'weights from {:s}').format(pretrained_model)
        self.solver.net.copy_from(pretrained_model)

    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)

    self.solver.net.layers[0].set_roidb(roidb)
Example #4
Source File: train.py From WPAL-network with GNU General Public License v3.0 | 6 votes |
def __init__(self, solver_prototxt, db, output_dir, do_flip, snapshot_path=None):
    """Initialize the SolverWrapper."""
    self._output_dir = output_dir

    self._solver = caffe.SGDSolver(solver_prototxt)

    self._solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as f:
        pb2.text_format.Merge(f.read(), self._solver_param)

    infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
             if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
    self._snapshot_prefix = self._solver_param.snapshot_prefix + infix + '_iter_'

    if snapshot_path is not None:
        print ('Loading snapshot weights from {:s}').format(snapshot_path)
        self._solver.net.copy_from(snapshot_path)
        snapshot_path = snapshot_path.split('/')[-1]
        if snapshot_path.startswith(self._snapshot_prefix):
            print 'Warning! Existing snapshots may be overriden by new snapshots!'

    self._db = db
    self._solver.net.layers[0].set_db(self._db, do_flip)
Example #5
Source File: train.py From dilation with MIT License | 6 votes |
def make_solver(options):
    solver = caffe_pb2.SolverParameter()
    solver.train_net = options.train_net
    if options.test_net is not None:
        solver.test_net.append(options.test_net)
        solver.test_iter.append(50)
        solver.test_interval = 100
    solver.base_lr = options.lr
    solver.lr_policy = "step"
    solver.gamma = 0.1
    solver.stepsize = 100000
    solver.display = 5
    solver.max_iter = 400000
    solver.momentum = options.momentum
    solver.weight_decay = 0.0005
    solver.regularization_type = 'L2'
    solver.snapshot = 2000
    solver.solver_mode = solver.GPU
    solver.iter_size = options.iter_size
    solver.snapshot_format = solver.BINARYPROTO
    solver.type = 'SGD'
    solver.snapshot_prefix = options.snapshot_prefix
    return solver
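make_solver() only builds the SolverParameter message in memory; to train with it, the message would typically be serialized to a text prototxt and handed to a Caffe solver. A hedged usage sketch follows; 'options', the output file name, and the use of caffe.get_solver here are assumptions, not part of the dilation code shown above.

# Hypothetical usage of make_solver(): write the message as text and load it.
from google.protobuf import text_format
import caffe

solver_param = make_solver(options)  # 'options' as parsed elsewhere in the script
with open('solver.prototxt', 'w') as f:
    f.write(text_format.MessageToString(solver_param))
solver = caffe.get_solver('solver.prototxt')  # then solver.step(...) or solver.solve()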
Example #6
Source File: solver.py From barrista with MIT License | 6 votes |
def Get_caffe_solver_instance(cls, solver_parameter_dict, net):
    """Get a caffe solver object."""
    # now we actually create a instance of the solver
    solver_message = _caffe_pb2.SolverParameter(**solver_parameter_dict)
    messagestr = _gprototext.MessageToString(solver_message)
    with _NamedTemporaryFile(mode='w+b', suffix='.prototxt') as tmpfile:
        tmpfile.write(bytes(messagestr.encode('utf-8')))
        tmpfile.flush()
        try:
            # Newer version of caffe with full solver init support.
            return cls.Get_caffe_solver_class(
                solver_parameter_dict['solver_type'])._caffe_solver_class(
                    tmpfile.name, net, _caffe._caffe.NetVec(), True)
        except TypeError:
            # Fallback for older, patched versions.
            return cls.Get_caffe_solver_class(
                solver_parameter_dict['solver_type'])._caffe_solver_class(
                    tmpfile.name, net)
    raise Exception('could not initialize solver class')
Example #7
Source File: train.py From SubCNN with MIT License | 6 votes |
def __init__(self, solver_prototxt, roidb, output_dir, pretrained_model=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    print 'Computing bounding-box regression targets...'
    if cfg.TRAIN.BBOX_REG:
        if cfg.IS_RPN:
            self.bbox_means, self.bbox_stds = gdl_roidb.add_bbox_regression_targets(roidb)
        else:
            self.bbox_means, self.bbox_stds = rdl_roidb.add_bbox_regression_targets(roidb)
    print 'done'

    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print ('Loading pretrained model '
               'weights from {:s}').format(pretrained_model)
        self.solver.net.copy_from(pretrained_model)

    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)

    self.solver.net.layers[0].set_roidb(roidb)
Example #8
Source File: solver_proto_generator.py From phocnet with BSD 3-Clause "New" or "Revised" License | 5 votes |
def generate_solver_proto(**kwargs):
    sp = caffe_pb2.SolverParameter()
    for k, v in kwargs.iteritems():
        if not hasattr(sp, k):
            raise ValueError('The argument \'%s\' is not part of the Caffe solver parameters!' % k)
        elif v is not None:
            elem = getattr(sp, k)
            if type(elem) == RepeatedScalarFieldContainer:
                elem.append(v)
            elif k == 'solver_mode':
                setattr(sp, k, sp.SolverMode.DESCRIPTOR.values_by_name[v].number)
            else:
                setattr(sp, k, v)
    return sp
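Because the fields are set by reflection, any SolverParameter field can be passed as a keyword argument: solver_mode is given by its enum name, repeated fields get a single value appended, and everything else is assigned directly. A hypothetical call is shown below; all values are placeholders.

# Hypothetical usage of generate_solver_proto(); the field values are placeholders.
sp = generate_solver_proto(train_net='train.prototxt', base_lr=0.0001,
                           max_iter=80000, solver_mode='GPU', test_iter=500)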
Example #9
Source File: train.py From py-R-FCN with MIT License | 5 votes |
def __init__(self, solver_prototxt, roidb, output_dir, pretrained_model=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    if (cfg.TRAIN.HAS_RPN and cfg.TRAIN.BBOX_REG and
            cfg.TRAIN.BBOX_NORMALIZE_TARGETS):
        # RPN can only use precomputed normalization because there are no
        # fixed statistics to compute a priori
        assert cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED

    if cfg.TRAIN.BBOX_REG:
        print 'Computing bounding-box regression targets...'
        self.bbox_means, self.bbox_stds = \
            rdl_roidb.add_bbox_regression_targets(roidb)
        print 'done'

    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print ('Loading pretrained model '
               'weights from {:s}').format(pretrained_model)
        self.solver.net.copy_from(pretrained_model)

    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)

    self.solver.net.layers[0].set_roidb(roidb)
Example #10
Source File: proto_file.py From Sensor-Specific-Hyperspectral-Image-Feature-Learning with Apache License 2.0 | 5 votes |
def train_solver(conf):
    s = caffe_pb2.SolverParameter()

    # Set a seed for reproducible experiments:
    # this controls for randomization in training.
    #s.random_seed = 0xCAFFE

    # Specify locations of the train and (maybe) test networks.
    s.train_net = conf.train_net_file
    s.test_net.append(conf.test_net_file)

    s.test_interval = 10000  # Test after every 10000 training iterations.
    s.test_iter.append(1)    # Test on 1 batch each time we test.

    s.max_iter = conf.max_iter  # no. of times to update the net (training iterations)
    # s.max_iter = 50000        # no. of times to update the net (training iterations)

    s.type = "AdaGrad"
    s.gamma = 0.1
    s.base_lr = 0.01
    s.weight_decay = 5e-4
    s.lr_policy = 'multistep'
    s.display = 10000
    s.snapshot = 10000
    s.snapshot_prefix = conf.snapshot_prefix
    #s.stepvalue.append(1000000)
    #s.stepvalue.append(300000)
    s.solver_mode = caffe_pb2.SolverParameter.GPU
    s.device_id = 1  # will use the second GPU card
    s.snapshot_format = 0  # 0 is HDF5, 1 is binary
    return s
Example #11
Source File: train.py From oicr with MIT License | 5 votes |
def __init__(self, solver_prototxt, roidb, output_dir, pretrained_model=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print ('Loading pretrained model '
               'weights from {:s}').format(pretrained_model)
        self.solver.net.copy_from(pretrained_model)

    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as f:
        text_format.Merge(f.read(), self.solver_param)

    self.solver.net.layers[0].set_roidb(roidb)
Example #12
Source File: train.py From caffe-faster-rcnn-resnet-fpn with MIT License | 5 votes |
def __init__(self, solver_prototxt, roidb, output_dir, pretrained_model=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    if (cfg.TRAIN.HAS_RPN and cfg.TRAIN.BBOX_REG and
            cfg.TRAIN.BBOX_NORMALIZE_TARGETS):
        # RPN can only use precomputed normalization because there are no
        # fixed statistics to compute a priori
        assert cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED

    if cfg.TRAIN.BBOX_REG:
        print 'Computing bounding-box regression targets...'
        self.bbox_means, self.bbox_stds = \
            rdl_roidb.add_bbox_regression_targets(roidb)
        print 'done'

    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print ('Loading pretrained model '
               'weights from {:s}').format(pretrained_model)
        self.solver.net.copy_from(pretrained_model)

    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)

    self.solver.net.layers[0].set_roidb(roidb)
Example #13
Source File: config.py From face-magnet with Apache License 2.0 | 5 votes |
def get_snapshot_prefix(solver_path):
    solver_param = caffe_pb2.SolverParameter()
    with open(solver_path, 'rt') as f:
        text_format.Merge(f.read(), solver_param)
    return solver_param.snapshot_prefix
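One likely use of this helper is reconstructing the file names of snapshots the solver has written, since Caffe names them '<snapshot_prefix>_iter_<N>.caffemodel'. A short sketch follows; the solver path and iteration count are placeholders, not taken from the face-magnet code.

# Hypothetical usage: derive a snapshot file name from the solver's prefix.
prefix = get_snapshot_prefix('models/solver.prototxt')
caffemodel = '{}_iter_{:d}.caffemodel'.format(prefix, 10000)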
Example #14
Source File: train.py From uai-sdk with Apache License 2.0 | 5 votes |
def __init__(self, solver_prototxt, roidb, output_dir, pretrained_model=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    if (cfg.TRAIN.HAS_RPN and cfg.TRAIN.BBOX_REG and
            cfg.TRAIN.BBOX_NORMALIZE_TARGETS):
        # RPN can only use precomputed normalization because there are no
        # fixed statistics to compute a priori
        assert cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED

    if cfg.TRAIN.BBOX_REG:
        print 'Computing bounding-box regression targets...'
        self.bbox_means, self.bbox_stds = \
            rdl_roidb.add_bbox_regression_targets(roidb)
        print 'done'

    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print ('Loading pretrained model '
               'weights from {:s}').format(pretrained_model)
        self.solver.net.copy_from(pretrained_model)

    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)

    self.solver.net.layers[0].set_roidb(roidb)
Example #15
Source File: train.py From faster-rcnn-resnet with MIT License | 5 votes |
def __init__(self, solver_prototxt, roidb, output_dir, pretrained_model=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    if (cfg.TRAIN.HAS_RPN and cfg.TRAIN.BBOX_REG and
            cfg.TRAIN.BBOX_NORMALIZE_TARGETS):
        # RPN can only use precomputed normalization because there are no
        # fixed statistics to compute a priori
        assert cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED

    if cfg.TRAIN.BBOX_REG:
        print 'Computing bounding-box regression targets...'
        self.bbox_means, self.bbox_stds = \
            rdl_roidb.add_bbox_regression_targets(roidb)
        print 'done'

    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print ('Loading pretrained model '
               'weights from {:s}').format(pretrained_model)
        self.solver.net.copy_from(pretrained_model)

    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)

    self.solver.net.layers[0].set_roidb(roidb)
Example #16
Source File: train.py From face-py-faster-rcnn with MIT License | 5 votes |
def __init__(self, solver_prototxt, roidb, output_dir, pretrained_model=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    if (cfg.TRAIN.HAS_RPN and cfg.TRAIN.BBOX_REG and
            cfg.TRAIN.BBOX_NORMALIZE_TARGETS):
        # RPN can only use precomputed normalization because there are no
        # fixed statistics to compute a priori
        assert cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED

    if cfg.TRAIN.BBOX_REG:
        print 'Computing bounding-box regression targets...'
        self.bbox_means, self.bbox_stds = \
            rdl_roidb.add_bbox_regression_targets(roidb)
        print 'done'

    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print ('Loading pretrained model '
               'weights from {:s}').format(pretrained_model)
        self.solver.net.copy_from(pretrained_model)

    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)

    self.solver.net.layers[0].set_roidb(roidb)
Example #17
Source File: predictor_caffe.py From visual_dynamics with MIT License | 5 votes |
def add_default_parameters(self, solver_param, val_net=True):
    if not solver_param.train_net:
        train_val_fname = self.get_model_fname('train_val')
        solver_param.train_net = train_val_fname
    if val_net:
        if not solver_param.test_net:
            train_val_fname = self.get_model_fname('train_val')
            solver_param.test_net.append(train_val_fname)
        if not solver_param.test_iter:
            solver_param.test_iter.append(10)
    else:
        del solver_param.test_net[:]
        del solver_param.test_iter[:]
    if not solver_param.solver_type:
        solver_param.solver_type = pb2.SolverParameter.SGD
    if not solver_param.test_interval:
        solver_param.test_interval = 1000
    if not solver_param.base_lr:
        solver_param.base_lr = 0.05
    if not solver_param.lr_policy:
        solver_param.lr_policy = "step"
    if not solver_param.gamma:
        solver_param.gamma = 0.9
    if not solver_param.stepsize:
        solver_param.stepsize = 1000
    if not solver_param.display:
        solver_param.display = 20
    if not solver_param.max_iter:
        solver_param.max_iter = 10000
    if not solver_param.momentum:
        solver_param.momentum = 0.9
    if not solver_param.momentum2:
        solver_param.momentum2 = 0.999
    if not solver_param.weight_decay:
        solver_param.weight_decay = 0.0005
    if not solver_param.snapshot:
        solver_param.snapshot = 1000
    if not solver_param.snapshot_prefix:
        snapshot_prefix = self.get_snapshot_prefix()
        solver_param.snapshot_prefix = snapshot_prefix
    # don't change solver_param.solver_mode
Example #18
Source File: predictor_caffe.py From visual_dynamics with MIT License | 4 votes |
def train(self, train_hdf5_fname, val_hdf5_fname=None, solverstate_fname=None,
          solver_param=None, batch_size=32, visualize_response_maps=False):
    hdf5_txt_fnames = []
    for hdf5_fname in [train_hdf5_fname, val_hdf5_fname]:
        if hdf5_fname is not None:
            head, tail = os.path.split(hdf5_fname)
            root, _ = os.path.splitext(tail)
            hdf5_txt_fname = os.path.join(head, '.' + root + '.txt')
            if not os.path.isfile(hdf5_txt_fname):
                with open(hdf5_txt_fname, 'w') as f:
                    f.write(hdf5_fname + '\n')
            hdf5_txt_fnames.append(hdf5_txt_fname)
        else:
            hdf5_txt_fnames.append(None)
    train_hdf5_txt_fname, val_hdf5_txt_fname = hdf5_txt_fnames

    input_shapes = (self.x_shape, self.u_shape)
    train_net_param, weight_fillers = self.net_func(input_shapes, train_hdf5_txt_fname, batch_size, self.net_name, phase=caffe.TRAIN)
    if val_hdf5_fname is not None:
        val_net_param, _ = self.net_func(input_shapes, val_hdf5_txt_fname, batch_size, self.net_name, phase=caffe.TEST)

    self.train_val_net_param = train_net_param
    if val_hdf5_fname is not None:
        layers = [layer for layer in self.train_val_net_param.layer]
        # remove layers except for data layers
        for layer in layers:
            if 'Data' not in layer.type:
                self.train_val_net_param.layer.remove(layer)
        # add data layers from validation net_caffe
        self.train_val_net_param.layer.extend([layer for layer in val_net_param.layer if 'Data' in layer.type])
        # add back the layers that are not data layers
        self.train_val_net_param.layer.extend([layer for layer in layers if 'Data' not in layer.type])
    self.train_val_net_param = net_caffe.train_val_net(self.train_val_net_param)
    train_val_fname = self.get_model_fname('train_val')
    with open(train_val_fname, 'w') as f:
        f.write(str(self.train_val_net_param))

    if solver_param is None:
        solver_param = pb2.SolverParameter()
    self.add_default_parameters(solver_param, val_net=val_hdf5_fname is not None)
    solver_fname = self.get_model_fname('solver')
    with open(solver_fname, 'w') as f:
        f.write(str(solver_param))

    solver = caffe.get_solver(solver_fname)
    self.set_weight_fillers(solver.net.params, weight_fillers)
    for param_name, param in self.params.items():
        for blob, solver_blob in zip(param, solver.net.params[param_name]):
            solver_blob.data[...] = blob.data
    if solverstate_fname is not None:
        if not solverstate_fname.endswith('.solverstate'):
            solverstate_fname = self.get_snapshot_prefix() + '_iter_' + solverstate_fname + '.solverstate'
        solver.restore(solverstate_fname)
    self.solve(solver, solver_param, visualize_response_maps=visualize_response_maps)

    for param_name, param in self.params.items():
        for blob, solver_blob in zip(param, solver.net.params[param_name]):
            blob.data[...] = solver_blob.data
    self.train_net = solver.net
    if val_hdf5_fname is not None:
        self.val_net = solver.test_nets[0]
Example #19
Source File: solver.py From faster-rcnn-scenarios with MIT License | 4 votes |
def generate(self, net):
    if net == None:
        raise Exception("Net not provided!")
    self.setScenario(net.scenarios_dir, net.scenario)
    self.stage = net.stage
    self.net_type = net.network_type

    s = caffe_pb2.SolverParameter()

    # Specify locations of the train and (maybe) test networks.
    s.train_net = net.path()

    s.lr_policy = self.lr_policy

    # The number of iterations over which to average the gradient.
    # Effectively boosts the training batch size by the given factor, without
    # affecting memory utilization.
    s.iter_size = 1

    # Solve using the stochastic gradient descent (SGD) algorithm.
    # Other choices include 'Adam' and 'RMSProp'.
    s.type = self.solver_type

    # Set the initial learning rate for SGD.
    s.base_lr = self.base_lr

    # Set `lr_policy` to define how the learning rate changes during training.
    # Here, we 'step' the learning rate by multiplying it by a factor `gamma`
    # every `stepsize` iterations.
    s.gamma = self.gamma
    s.stepsize = self.step_size

    # Set other SGD hyperparameters. Setting a non-zero `momentum` takes a
    # weighted average of the current gradient and previous gradients to make
    # learning more stable. L2 weight decay regularizes learning, to help prevent
    # the model from overfitting.
    s.momentum = self.momentum
    s.weight_decay = self.weight_decay

    # Display the current training loss and accuracy every `display` iterations.
    s.display = self.display

    # Snapshots are files used to store networks we've trained.
    # A value of 0 disables periodic snapshotting during training.
    s.snapshot = 0

    # Train on the GPU. Using the CPU to train large networks is very slow.
    s.solver_mode = caffe_pb2.SolverParameter.GPU

    s.snapshot_prefix = self.snapshot_prefix

    return self.save(s)