Python chainer.optimizers.MomentumSGD() Examples
The following are 30 code examples of chainer.optimizers.MomentumSGD(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module chainer.optimizers, or try the search function.
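All of the snippets below share the same basic workflow: construct the optimizer, bind it to a model with setup(), optionally register hooks such as weight decay, and call update(). As a minimal, self-contained sketch of that workflow (the toy model and hyperparameter values here are illustrative assumptions, not taken from any of the projects below):

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import optimizers

# A toy model; any chainer.Link is bound to the optimizer the same way.
model = L.Linear(3, 1)

# Construct MomentumSGD and attach it to the model's parameters.
optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model)

# Optional: L2 regularization via a weight-decay hook.
optimizer.add_hook(chainer.optimizer.WeightDecay(1e-4))

# One update step: pass a loss function for the optimizer to evaluate.
x = np.random.rand(8, 3).astype(np.float32)
t = np.random.rand(8, 1).astype(np.float32)
optimizer.update(lambda: F.mean_squared_error(model(x), t))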
Example #1
Source File: train_utils.py From chainer-segnet with MIT License
def get_optimizer(opt, lr=None, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1, beta2=adam_beta2,
            eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')

    # The first model as the master model
    if opt == 'MomentumSGD':
        optimizer.decay = weight_decay

    return optimizer
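A hedged usage sketch for the factory above (the argument values and the model variable are hypothetical). Note that, unlike Example #15 below, this variant only stores the decay value on the optimizer and leaves setup() to the caller:

# Hypothetical call site; lr and weight_decay values are illustrative.
optimizer = get_optimizer('MomentumSGD', lr=0.01, weight_decay=1e-4)
optimizer.setup(model)  # this factory does not call setup() itself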
Example #2
Source File: test_segnet.py From chainer-segnet with MIT License
def test_remove_link(self):
    opt = optimizers.MomentumSGD(lr=0.01)

    # Update each depth
    for depth in six.moves.range(1, self.n_encdec + 1):
        model = segnet.SegNet(self.n_encdec, self.n_classes,
                              self.x_shape[1], self.n_mid)
        model = segnet.SegNetLoss(
            model, class_weight=None, train_depth=depth)
        opt.setup(model)

        # Deregister non-target links from opt
        if depth > 1:
            model.predictor.remove_link('conv_cls')
        for d in range(1, self.n_encdec + 1):
            if d != depth:
                model.predictor.remove_link('encdec{}'.format(d))

        for name, link in model.namedparams():
            if depth > 1:
                self.assertTrue('encdec{}'.format(depth) in name)
            else:
                self.assertTrue('encdec{}'.format(depth) in name
                                or 'conv_cls' in name)
Example #3
Source File: nutszebra_optimizer.py From neural_architecture_search_with_reinforcement_learning_appendix_a with MIT License
def __init__(self, model=None, lr=0.1, momentum=0.9, schedule=(250, 375),
             weight_decay=1.0e-4):
    super(OptimizerResnetOfResnet, self).__init__(model)
    self.lr = lr
    self.momentum = momentum
    self.schedule = schedule
    self.weight_decay = weight_decay
    all_links = OptimizerStochasticDepth._find(model)
    optimizer_set = []
    for link in all_links:
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay = chainer.optimizer.WeightDecay(self.weight_decay)
        optimizer.setup(link[0])
        optimizer.add_hook(weight_decay)
        optimizer_set.append(optimizer)
    self.optimizer_set = optimizer_set
    self.all_links = all_links
Example #4
Source File: nutszebra_optimizer.py From neural_architecture_search_with_reinforcement_learning_appendix_a with MIT License
def __init__(self, model=None, lr=0.1, momentum=0.9, schedule=(250, 375),
             weight_decay=1.0e-4):
    super(OptimizerStochasticDepth, self).__init__(model)
    self.lr = lr
    self.momentum = momentum
    self.schedule = schedule
    self.weight_decay = weight_decay
    all_links = OptimizerStochasticDepth._find(model)
    optimizer_set = []
    for link in all_links:
        optimizer = optimizers.MomentumSGD(lr, momentum)
        weight_decay = chainer.optimizer.WeightDecay(self.weight_decay)
        optimizer.setup(link[0])
        optimizer.add_hook(weight_decay)
        optimizer_set.append(optimizer)
    self.optimizer_set = optimizer_set
    self.all_links = all_links
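Examples #3 and #4 create one MomentumSGD instance per sub-link rather than a single optimizer for the whole model. A plausible driver loop for such an optimizer set might look like the following sketch; this is an assumption about intent, not code from the repository:

# Assumed usage: gradients are computed once for the whole model, then
# each per-link optimizer applies the already-accumulated gradients.
model.cleargrads()
loss = model(x, t)
loss.backward()
for optimizer in self.optimizer_set:
    optimizer.update()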
Example #5
Source File: nutszebra_optimizer.py From neural_architecture_search_with_reinforcement_learning_appendix_a with MIT License
def __init__(self, model=None, lr=0.01, momentum=0.9, schedule=(150, 225),
             weight_decay=5.0e-4):
    super(OptimizerPReLUNet, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.schedule = schedule
    self.lr = lr
    self.momentum = momentum
Example #6
Source File: nutszebra_optimizer.py From neural_architecture_search_with_reinforcement_learning_appendix_a with MIT License
def __init__(self, model=None, lr=0.045, momentum=0.9, weight_decay=1.0e-5,
             period=2):
    super(OptimizerXception, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.lr = lr
    self.momentum = momentum
    self.weight_decay = weight_decay
    self.period = int(period)
Example #7
Source File: nutszebra_optimizer.py From neural_architecture_search_with_reinforcement_learning_appendix_a with MIT License
def __init__(self, model=None, lr=0.0015, momentum=0.9, weight_decay=2.0e-4):
    super(OptimizerGooglenet, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.lr = lr
    self.momentum = momentum
    self.weight_decay = weight_decay
Example #8
Source File: nutszebra_optimizer.py From neural_architecture_search_with_reinforcement_learning_appendix_a with MIT License
def __init__(self, model=None, lr=0.1, momentum=0.9, weight_decay=1.0e-4,
             schedule=(int(1.0e5 / (50000. / 128)), )):
    super(OptimizerNetworkInNetwork, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.lr = lr
    self.momentum = momentum
    self.weight_decay = weight_decay
    self.schedule = schedule
Example #9
Source File: nutszebra_optimizer.py From neural_architecture_search_with_reinforcement_learning_appendix_a with MIT License
def __init__(self, model=None, lr=0.045, momentum=0.9, weight_decay=4.0e-5):
    super(OptimizerGooglenetV2, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.lr = lr
    self.momentum = momentum
    self.weight_decay = weight_decay
Example #10
Source File: nutszebra_optimizer.py From neural_architecture_search_with_reinforcement_learning_appendix_a with MIT License
def __init__(self, model=None, lr=0.1, momentum=0.9, weight_decay=5.0e-4,
             schedule=(150, 225)):
    super(OptimizerResNext, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.schedule = schedule
    self.lr = lr
    self.momentum = momentum
    self.weight_decay = weight_decay
Example #11
Source File: nutszebra_optimizer.py From neural_architecture_search_with_reinforcement_learning_appendix_a with MIT License
def __init__(self, model=None, lr=0.02, momentum=0.9,
             schedule=(150, 225, 300, 375)):
    super(OptimizerFractalNet, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    optimizer.setup(self.model)
    self.optimizer = optimizer
    self.schedule = schedule
    self.lr = lr
    self.momentum = momentum
Example #12
Source File: nutszebra_optimizer.py From neural_architecture_search_with_reinforcement_learning_appendix_a with MIT License
def __init__(self, model=None, schedule=(196, 224), lr=0.1, momentum=0.9,
             weight_decay=1.0e-4):
    super(OptimizerSwapout, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.schedule = schedule
    self.lr = lr
    self.momentum = momentum
    self.weight_decay = weight_decay
Example #13
Source File: nutszebra_optimizer.py From neural_architecture_search_with_reinforcement_learning_appendix_a with MIT License
def __init__(self, model=None, schedule=(42, 62), lr=0.1, momentum=0.9,
             weight_decay=1.0e-4):
    super(OptimizerResnetInResnet, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.schedule = schedule
    self.lr = lr
    self.momentum = momentum
    self.weight_decay = weight_decay
Example #14
Source File: nutszebra_optimizer.py From neural_architecture_search_with_reinforcement_learning_appendix_a with MIT License
def __init__(self, model=None, schedule=(150, 175), lr=0.1, momentum=0.9,
             weight_decay=1.0e-4):
    super(OptimizerAppendixA, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.schedule = schedule
    self.lr = lr
    self.momentum = momentum
    self.weight_decay = weight_decay
Example #15
Source File: train.py From deeppose with GNU General Public License v2.0
def get_optimizer(model, opt, lr, adam_alpha=None, adam_beta1=None,
                  adam_beta2=None, adam_eps=None, weight_decay=None,
                  resume_opt=None):
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=lr, momentum=0.9)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(
            alpha=adam_alpha, beta1=adam_beta1, beta2=adam_beta2,
            eps=adam_eps)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=lr)
    elif opt == 'RMSprop':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        raise Exception('No optimizer is selected')

    # The first model as the master model
    optimizer.setup(model)

    if opt == 'MomentumSGD':
        optimizer.add_hook(
            chainer.optimizer.WeightDecay(weight_decay))

    if resume_opt is not None:
        serializers.load_npz(resume_opt, optimizer)

    return optimizer
Example #16
Source File: task.py From cloud-ml-sdk with Apache License 2.0
def main():
    # Define train function
    def linear_train(train_data, train_target, n_epochs=200):
        for _ in range(n_epochs):
            output = linear_function(train_data)
            loss = F.mean_squared_error(train_target, output)
            linear_function.zerograds()
            loss.backward()
            optimizer.update()

    # Construct train data
    x = 30 * np.random.rand(1000).astype(np.float32)
    y = 7 * x + 10
    y += 10 * np.random.randn(1000).astype(np.float32)

    linear_function = L.Linear(1, 1)
    x_var = Variable(x.reshape(1000, -1))
    y_var = Variable(y.reshape(1000, -1))

    optimizer = optimizers.MomentumSGD(lr=0.001)
    optimizer.setup(linear_function)

    for i in range(150):
        linear_train(x_var, y_var, n_epochs=20)
        y_pred = linear_function(x_var).data

    slope = linear_function.W.data[0, 0]
    intercept = linear_function.b.data[0]
    print("Final Line: {0:.3}x + {1:.3}".format(slope, intercept))
Example #17
Source File: __init__.py From deel with MIT License
def __init__(self, name="perceptron", layers=(1000, 1000), optimizer=None,
             activation=F.sigmoid):
    Network.__init__(self, name)
    self.layers = {}
    for i in range(len(layers) - 1):
        layer = L.Linear(layers[i], layers[i + 1])
        self.layers['l' + str(i)] = layer
    self.model = Chain(**self.layers)
    if Deel.gpu >= 0:
        self.model = self.model.to_gpu(Deel.gpu)
    self.optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    self.optimizer.setup(self.model)
    self.activation = activation
Example #18
Source File: nutszebra_optimizer.py From googlenet with MIT License
def __init__(self, model=None, lr=0.045, momentum=0.9, weight_decay=1.0e-5,
             period=2):
    super(OptimizerXception, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.lr = lr
    self.momentum = momentum
    self.weight_decay = weight_decay
    self.period = int(period)
Example #19
Source File: invert.py From ssai-cnn with MIT License
def prepare_optimizer(self):
    if self.args.opt == 'MomentumSGD':
        self.opt = optimizers.MomentumSGD(momentum=0.9)
    elif self.args.opt == 'Adam':
        self.opt = optimizers.Adam(alpha=self.args.adam_alpha)
        print('Adam alpha=', self.args.adam_alpha)
    else:
        raise ValueError('Opt should be MomentumSGD or Adam.')
    self.opt.setup(self.x_link)
Example #20
Source File: train.py From ssai-cnn with MIT License
def get_model_optimizer(args):
    model = get_model(args)

    if 'opt' in args:
        # prepare optimizer
        if args.opt == 'MomentumSGD':
            optimizer = optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
        elif args.opt == 'Adam':
            optimizer = optimizers.Adam(alpha=args.alpha)
        elif args.opt == 'AdaGrad':
            optimizer = optimizers.AdaGrad(lr=args.lr)
        else:
            raise Exception('No optimizer is selected')

        optimizer.setup(model)

        if args.opt == 'MomentumSGD':
            optimizer.add_hook(
                chainer.optimizer.WeightDecay(args.weight_decay))

        if args.resume_opt is not None:
            serializers.load_hdf5(args.resume_opt, optimizer)
            args.epoch_offset = int(
                re.search('epoch-([0-9]+)', args.resume_opt).groups()[0])

        return model, optimizer
    else:
        print('No optimizer generated.')
        return model
Example #21
Source File: invert_diff.py From ssai-cnn with MIT License
def prepare_optimizer(self):
    if self.args.opt == 'MomentumSGD':
        self.opt = optimizers.MomentumSGD(momentum=0.9)
    elif self.args.opt == 'Adam':
        self.opt = optimizers.Adam(alpha=self.args.adam_alpha)
        print('Adam alpha=', self.args.adam_alpha)
    else:
        raise ValueError('Opt should be MomentumSGD or Adam.')
    self.opt.setup(self.x_link)
Example #22
Source File: nutszebra_optimizer.py From googlenet with MIT License
def __init__(self, model=None,
             schedule=(int(32000. / (50000. / 128)),
                       int(48000. / (50000. / 128))),
             lr=0.1, momentum=0.9, weight_decay=1.0e-4, warm_up_lr=0.01):
    super(OptimizerResnet, self).__init__(model)
    optimizer = optimizers.MomentumSGD(warm_up_lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.schedule = schedule
    self.lr = lr
    self.warmup_lr = warm_up_lr
    self.momentum = momentum
    self.weight_decay = weight_decay
Example #23
Source File: nutszebra_optimizer.py From googlenet with MIT License
def __init__(self, model=None, schedule=(150, 225), lr=0.1, momentum=0.9,
             weight_decay=1.0e-4):
    super(OptimizerDense, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.schedule = schedule
    self.lr = lr
    self.momentum = momentum
    self.weight_decay = weight_decay
Example #24
Source File: nutszebra_optimizer.py From googlenet with MIT License
def __init__(self, model=None, schedule=(60, 120, 160), lr=0.1, momentum=0.9,
             weight_decay=5.0e-4):
    super(OptimizerWideRes, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.schedule = schedule
    self.lr = lr
    self.momentum = momentum
    self.weight_decay = weight_decay
Example #25
Source File: nutszebra_optimizer.py From googlenet with MIT License
def __init__(self, model=None, schedule=(196, 224), lr=0.1, momentum=0.9,
             weight_decay=1.0e-4):
    super(OptimizerSwapout, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.schedule = schedule
    self.lr = lr
    self.momentum = momentum
    self.weight_decay = weight_decay
Example #26
Source File: chainer_alex.py From mlimages with MIT License
def train(epoch=10, batch_size=32, gpu=False):
    if gpu:
        cuda.check_cuda_available()
    xp = cuda.cupy if gpu else np

    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT,
                      image_property=IMAGE_PROP)

    # make mean image
    if not os.path.isfile(MEAN_IMAGE_FILE):
        print("make mean image...")
        td.make_mean_image(MEAN_IMAGE_FILE)
    else:
        td.mean_image_file = MEAN_IMAGE_FILE

    # train model
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    optimizer.setup(model)

    print("Now our model is {0} classification task.".format(len(label_def)))
    print("begin training the model. epoch:{0} batch size:{1}.".format(epoch, batch_size))

    if gpu:
        model.to_gpu()

    for i in range(epoch):
        print("epoch {0}/{1}: (learning rate={2})".format(i + 1, epoch, optimizer.lr))
        td.shuffle(overwrite=True)

        for x_batch, y_batch in td.generate_batches(batch_size):
            x = chainer.Variable(xp.asarray(x_batch))
            t = chainer.Variable(xp.asarray(y_batch))
            optimizer.update(model, x, t)
            print("loss: {0}, accuracy: {1}".format(
                float(model.loss.data), float(model.accuracy.data)))

        serializers.save_npz(MODEL_FILE, model)
        optimizer.lr *= 0.97
Example #27
Source File: nutszebra_optimizer.py From googlenet with MIT License
def __init__(self, model=None, lr=0.0015, momentum=0.9, weight_decay=2.0e-4):
    super(OptimizerGooglenet, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.lr = lr
    self.momentum = momentum
    self.weight_decay = weight_decay
Example #28
Source File: nutszebra_optimizer.py From googlenet with MIT License
def __init__(self, model=None, lr=0.1, momentum=0.9, weight_decay=1.0e-4,
             schedule=(int(1.0e5 / (50000. / 128)), )):
    super(OptimizerNetworkInNetwork, self).__init__(model)
    optimizer = optimizers.MomentumSGD(lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.lr = lr
    self.momentum = momentum
    self.weight_decay = weight_decay
    self.schedule = schedule
Example #29
Source File: test_optimizers_by_linear_model.py From chainer with MIT License
def create(self):
    return optimizers.MomentumSGD(0.1)
Example #30
Source File: nutszebra_optimizer.py From neural_architecture_search_with_reinforcement_learning_appendix_a with MIT License
def __init__(self, model=None,
             schedule=(int(32000. / (50000. / 128)),
                       int(48000. / (50000. / 128))),
             lr=0.1, momentum=0.9, weight_decay=1.0e-4, warm_up_lr=0.01):
    super(OptimizerResnet, self).__init__(model)
    optimizer = optimizers.MomentumSGD(warm_up_lr, momentum)
    weight_decay = chainer.optimizer.WeightDecay(weight_decay)
    optimizer.setup(self.model)
    optimizer.add_hook(weight_decay)
    self.optimizer = optimizer
    self.schedule = schedule
    self.lr = lr
    self.warmup_lr = warm_up_lr
    self.momentum = momentum
    self.weight_decay = weight_decay
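Most of the nutszebra_optimizer examples above store a schedule of epoch indices next to lr. As a closing note, here is a minimal sketch of how such a schedule is commonly applied, scaling the learning rate down at each scheduled epoch; this is an assumption about the intended use, not code from those repositories:

def decay_lr(optimizer, epoch, schedule, gamma=0.1):
    # Multiply the learning rate by `gamma` whenever `epoch` hits the schedule.
    if epoch in schedule:
        optimizer.lr *= gamma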