Python chainer.optimizers.AdaDelta() Examples
The following are 4 code examples of chainer.optimizers.AdaDelta(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module chainer.optimizers, or try the search function.
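Before the project examples, here is a minimal, self-contained sketch (not taken from the examples below) of how chainer.optimizers.AdaDelta() is typically attached to a model and used for a single update step; the toy model, data shapes, and loss function are illustrative assumptions only.

import numpy as np
import chainer.functions as F
import chainer.links as L
from chainer import optimizers

# A toy linear model standing in for a real network.
model = L.Linear(4, 2)

# AdaDelta takes no learning rate; setup() registers the model's parameters.
optimizer = optimizers.AdaDelta()
optimizer.setup(model)

# One illustrative update step on random data.
x = np.random.rand(8, 4).astype(np.float32)
t = np.zeros(8, dtype=np.int32)
model.cleargrads()
loss = F.softmax_cross_entropy(model(x), t)
loss.backward()
optimizer.update()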
Example #1
Source File: test_hdf5.py | From chainer (MIT License) | 6 votes
def setUp(self):
    # Create a temporary file to serialize into.
    fd, path = tempfile.mkstemp()
    os.close(fd)
    self.temp_file_path = path

    # Build a nested chain: a child with a linear link and a raw parameter,
    # wrapped in a parent chain that adds one more parameter.
    child = link.Chain()
    with child.init_scope():
        child.linear = links.Linear(2, 3)
        child.Wc = chainer.Parameter(shape=(2, 3))
    self.parent = link.Chain()
    with self.parent.init_scope():
        self.parent.child = child
        self.parent.Wp = chainer.Parameter(shape=(2, 3))

    # Attach AdaDelta and run one update on zero gradients to initialize
    # the optimizer's internal state arrays.
    self.optimizer = optimizers.AdaDelta()
    self.optimizer.setup(self.parent)
    self.parent.cleargrads()
    self.optimizer.update()  # init states
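The fixture above only prepares the optimizer; the surrounding test then serializes its state to HDF5. As a rough, hedged sketch of that round trip (assuming h5py is installed, and using a simplified chain rather than the exact fixture), it might look like this:

import os
import tempfile
from chainer import link, links, optimizers, serializers

# Simplified stand-in for the test fixture: a chain with one linear link.
chain = link.Chain()
with chain.init_scope():
    chain.linear = links.Linear(2, 3)

opt = optimizers.AdaDelta()
opt.setup(chain)
chain.cleargrads()
opt.update()  # initialize the optimizer state

fd, path = tempfile.mkstemp()
os.close(fd)
serializers.save_hdf5(path, opt)   # write optimizer state (needs h5py)
serializers.load_hdf5(path, opt)   # read it back into the same optimizer
os.remove(path)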
Example #2
Source File: test_optimizers_by_linear_model.py | From chainer (MIT License) | 5 votes
def create(self):
    return optimizers.AdaDelta(eps=1e-5)
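For context on the eps argument used above: in Chainer, AdaDelta exposes only two hyperparameters, the decay rate rho and the numerical-stability constant eps (to the best of my knowledge the defaults are rho=0.95 and eps=1e-6); there is no learning-rate argument. A small illustrative sketch:

from chainer import optimizers

opt_default = optimizers.AdaDelta()                  # library defaults
opt_tuned = optimizers.AdaDelta(rho=0.9, eps=1e-5)   # explicitly chosen values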
Example #3
Source File: test_npz.py | From chainer (MIT License) | 5 votes
def setUp(self):
    # Choose the serialization target: a real temporary file or an
    # in-memory buffer, depending on the test parameterization.
    if self.file_type == 'filename':
        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.file = path
    elif self.file_type == 'bytesio':
        self.file = six.BytesIO()
    else:
        assert False

    # Build a nested chain: a child with a linear link and a raw parameter,
    # wrapped in a parent chain that adds one more parameter.
    child = link.Chain()
    with child.init_scope():
        child.linear = links.Linear(2, 3)
        child.Wc = chainer.Parameter(shape=(2, 3))
    self.parent = link.Chain()
    with self.parent.init_scope():
        self.parent.child = child
        self.parent.Wp = chainer.Parameter(shape=(2, 3))

    # Attach AdaDelta and run one update on zero gradients to initialize
    # all optimizer state arrays before serialization.
    self.optimizer = optimizers.AdaDelta()
    self.optimizer.setup(self.parent)
    self.parent.cleargrads()
    self.optimizer.update()  # init all states

    # Pick the NumPy writer that matches the compression setting.
    self.savez = numpy.savez_compressed if self.compress else numpy.savez
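This fixture, too, only sets the stage; the test then round-trips the optimizer state through NumPy's NPZ format. A hedged sketch of the 'bytesio' branch with a simplified chain (not the exact test code) could look like this:

import six
from chainer import link, links, optimizers, serializers

# Simplified stand-in for the fixture: a chain with one linear link.
chain = link.Chain()
with chain.init_scope():
    chain.linear = links.Linear(2, 3)

opt = optimizers.AdaDelta()
opt.setup(chain)
chain.cleargrads()
opt.update()  # initialize the optimizer state

buf = six.BytesIO()
serializers.save_npz(buf, opt, compression=True)  # compression picks savez_compressed over savez
buf.seek(0)
serializers.load_npz(buf, opt)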
Example #4
Source File: chainer_backend.py | From Chimp (Apache License 2.0) | 5 votes
def set_params(self, params):
    # Read training hyperparameters, falling back to defaults.
    self.gpu = params.get('gpu', False)
    self.learning_rate = params.get('learning_rate', 0.00025)
    self.decay_rate = params.get('decay_rate', 0.95)
    self.discount = params.get('discount', 0.95)
    self.clip_err = params.get('clip_err', False)
    self.target_net_update = params.get('target_net_update', 10000)
    self.double_DQN = params.get('double_DQN', False)

    # Set up the requested gradient update algorithm.
    opt = params.get('optim_name', 'ADAM')
    if opt == 'RMSprop':
        self.optimizer = optimizers.RMSprop(lr=self.learning_rate,
                                            alpha=self.decay_rate)
    elif opt == 'ADADELTA':
        print("Supplied learning rate not used with ADADELTA gradient update method")
        self.optimizer = optimizers.AdaDelta()
    elif opt == 'ADAM':
        self.optimizer = optimizers.Adam(alpha=self.learning_rate)
    elif opt == 'SGD':
        self.optimizer = optimizers.SGD(lr=self.learning_rate)
    else:
        print('The requested optimizer is not supported!!!')
        exit()

    # Optionally clip gradients to limit the update magnitude.
    if self.clip_err is not False:
        self.optimizer.add_hook(chainer.optimizer.GradientClipping(self.clip_err))

    self.optim_name = params['optim_name']
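As a side note on the ADADELTA branch above: AdaDelta ignores the supplied learning rate because the method adapts its own per-parameter step sizes, and a gradient-clipping hook can only be added after setup() has registered a target. A short hedged sketch (the model and the threshold 1.0 are illustrative assumptions):

import chainer
import chainer.links as L
from chainer import optimizers

model = L.Linear(4, 2)            # illustrative stand-in for the DQN network
opt = optimizers.AdaDelta()
opt.setup(model)                  # setup must come before add_hook
opt.add_hook(chainer.optimizer.GradientClipping(1.0))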