Python numpy.zeros_like() Examples
The following are 30 code examples of numpy.zeros_like(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the numpy module, or try the search function.
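numpy.zeros_like(a) returns a new array of zeros with the same shape and dtype as a, which makes it a common way to allocate accumulators, gradients, and masks that mirror an existing array. A minimal illustration before the project examples:

import numpy as np

x = np.arange(6, dtype=np.float64).reshape(2, 3)
z = np.zeros_like(x)      # same shape and dtype as x, filled with zeros
print(z.shape, z.dtype)   # (2, 3) float64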
Example #1
Source File: dqn_utils.py From cs294-112_hws with MIT License
def _encode_observation(self, idx):
    end_idx = idx + 1  # make noninclusive
    start_idx = end_idx - self.frame_history_len
    # this checks if we are using low-dimensional observations, such as RAM
    # state, in which case we just directly return the latest RAM.
    if len(self.obs.shape) == 2:
        return self.obs[end_idx-1]
    # if there weren't enough frames ever in the buffer for context
    if start_idx < 0 and self.num_in_buffer != self.size:
        start_idx = 0
    for idx in range(start_idx, end_idx - 1):
        if self.done[idx % self.size]:
            start_idx = idx + 1
    missing_context = self.frame_history_len - (end_idx - start_idx)
    # if zero padding is needed for missing context
    # or we are on the boundary of the buffer
    if start_idx < 0 or missing_context > 0:
        frames = [np.zeros_like(self.obs[0]) for _ in range(missing_context)]
        for idx in range(start_idx, end_idx):
            frames.append(self.obs[idx % self.size])
        return np.concatenate(frames, 2)
    else:
        # this optimization has the potential to save about 30% compute time \o/
        img_h, img_w = self.obs.shape[1], self.obs.shape[2]
        return self.obs[start_idx:end_idx].transpose(1, 2, 0, 3).reshape(img_h, img_w, -1)
Example #2
Source File: test.py From cvpr2018-hnd with MIT License
def count_super(p, m, counters, preds, labels, label_to_ch):
    for l in np.unique(labels):
        preds_l = preds[labels == l]
        # in -> known
        if label_to_ch[l]:
            acc = np.zeros_like(preds_l, dtype=bool)
            for c in label_to_ch[l]:
                if p == 0:
                    counters['data'][m][c] += preds_l.shape[0]
                acc |= (preds_l == c)
            acc_sum = acc.sum()
            for c in label_to_ch[l]:
                counters['acc'][p,m][c] += acc_sum
        # out -> novel
        else:
            if p == 0:
                counters['data'][m][-1] += preds_l.shape[0]
            acc_sum = (preds_l < 0).sum()
            counters['acc'][p,m][-1] += acc_sum
Example #3
Source File: test_optimizer.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def update(self, index, weight, grad, state):
    self._update_count(index)
    wd = self._get_wd(index)
    lr = self._get_lr(index)
    num_rows = weight.shape[0]
    dn, n = state
    for row in range(num_rows):
        all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))
        if all_zeros and self.lazy_update:
            continue
        grad[row] = grad[row] * self.rescale_grad
        if self.clip_gradient is not None:
            mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])
        # update dn, n
        dn[row] += grad[row] - (mx.nd.sqrt(n[row] + grad[row] * grad[row]) - mx.nd.sqrt(n[row])) * weight[row] / lr
        n[row] += grad[row] * grad[row]
        # update weight
        weight[row] = (mx.nd.sign(dn[row]) * self.lamda1 - dn[row]) / \
                      ((self.beta + mx.nd.sqrt(n[row])) / lr + wd) * (mx.nd.abs(dn[row]) > self.lamda1)
Example #4
Source File: depth.py From Depth-Map-Prediction with GNU General Public License v3.0
def _depth_montage(depths):
    if depths.ndim == 4:
        assert depths.shape[1] == 1
        depths = depths[:,0,:,:]
    #depths = imgutil.scale_values(depths, min=-2.5, max=2.5)
    #depths = map(imgutil.scale_values, depths)
    masks = []
    for i in xrange(len(depths)):
        x = depths[i]
        mask = x != x.min()
        masks.append(mask)
        x = x[mask]
        if len(x) == 0:
            d = np.zeros_like(depths[i])
        else:
            d = imgutil.scale_values(depths[i], min=x.min(), max=x.max())
        depths[i] = d
    depths = plt.cm.jet(depths)[...,:3]
    for i in xrange(len(depths)):
        for c in xrange(3):
            depths[i, :, :, c][masks[i] == 0] = 0.2
    return imgutil.montage(depths, border=1)
Example #5
Source File: seq2seq_class.py From Deep_Learning_Weather_Forecasting with Apache License 2.0
def evaluate(self, data_input_obs, data_input_ruitu, data_labels, data_ids, data_time, each_station_display=False):
    all_loss = []
    for i in range(10):  # iterate over each station. (sample_ind, timestep, stationID, features)
        #batch_placeholders = np.zeros_like(data_labels[:,:,i,:])
        val_loss = self.model.evaluate(x=[data_input_obs[:,:,i,:], data_input_ruitu[:,:,i,:], data_ids[:,:,i], data_time],
                                       y=[data_labels[:,:,i,:]],
                                       verbose=False)
        all_loss.append(val_loss)
        if each_station_display:
            print('\tFor station 9000{}, val loss: {}'.format(i+1, val_loss))
    self.current_mean_val_loss = np.mean(all_loss)
    print('Mean val loss:', self.current_mean_val_loss)
    self.val_loss_list.append(self.current_mean_val_loss)
Example #6
Source File: normalizations.py From pytorch-mri-segmentation-3D with MIT License
def mapLandmarksVec(p, s, m):
    p_1, p_2 = p[0], p[1]
    s_1, s_2 = s[0], s[1]
    new_val = np.zeros_like(p_1)
    same_inds = (p_1 == p_2)
    if np.sum(same_inds):
        print('Fix this')
        sys.exit()
    #Change with this if I encounter bug
    #new_val[same_inds] = s_1[same_inds].reshape(-1)
    #new_val[np.inverse(same_inds)] = (((m - p_1) * ((s_2 - s_1) / (p_2 - p_1))) + s_1).reshape(-1)
    #sys.exit()
    #new_val = ((m - p_1) * ((s_2 - s_1) / (p_2 - p_1))) + s_1
    return ((m-p_1) / (p_2-p_1) * (s_2 - s_1)) + s_1
Example #7
Source File: blocks.py From spinn with MIT License
def RMSprop(cost, params, lr=0.001, rho=0.9, epsilon=1e-6, grads=None):
    # From:
    # https://github.com/Newmu/Theano-Tutorials/blob/master/4_modern_net.py
    if grads is None:
        grads = T.grad(cost=cost, wrt=params)
    assert len(grads) == len(params)

    updates = []
    for p, g in zip(params, grads):
        acc = theano.shared(np.zeros_like(p.get_value(), dtype=np.float32),
                            name="%s/rms/acc" % p.name)
        acc_new = rho * acc + (1 - rho) * g ** 2
        gradient_scaling = T.sqrt(acc_new + epsilon)
        g = g / gradient_scaling
        updates.append((acc, acc_new))
        updates.append((p, p - lr * g))
    return updates
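The accumulator rule above is independent of Theano. As a rough sketch of the same update in plain NumPy (the toy quadratic loss below is a hypothetical stand-in, not code from the spinn project):

import numpy as np

def rmsprop_step(p, g, acc, lr=0.001, rho=0.9, epsilon=1e-6):
    # running average of squared gradients, as in acc_new above
    acc = rho * acc + (1 - rho) * g ** 2
    # scale the raw gradient by the root of the accumulator
    return p - lr * g / np.sqrt(acc + epsilon), acc

p = np.array([1.0, -2.0])
acc = np.zeros_like(p)  # same zero initialization as the shared variable above
for _ in range(1000):
    g = 2 * p           # gradient of the toy loss sum(p**2)
    p, acc = rmsprop_step(p, g, acc)
print(p)                # drifts toward [0, 0]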
Example #8
Source File: gradient.py From deep-learning-note with MIT License
def numerical_gradient(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)

    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)

        x[idx] = tmp_val  # restore the original value
        it.iternext()

    return grad
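Assuming numerical_gradient from the snippet above is in scope, a quick sanity check against a function with a known analytic gradient looks like this (the quadratic is a hypothetical test function, not part of the original repository):

import numpy as np

def f(x):
    return np.sum(x ** 2)  # analytic gradient is 2*x

x = np.array([3.0, 4.0])
print(numerical_gradient(f, x))  # approximately [6. 8.]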
Example #9
Source File: competition_model_class.py From Deep_Learning_Weather_Forecasting with Apache License 2.0
def evaluate(self, data_input_obs, data_input_ruitu, data_labels, data_ids, data_time, each_station_display=False):
    all_loss = []
    for i in range(10):  # iterate over each station. (sample_ind, timestep, stationID, features)
        #batch_placeholders = np.zeros_like(data_labels[:,:,i,:])
        val_loss = self.model.evaluate(x=[data_input_obs[:,:,i,:], data_input_ruitu[:,:,i,:], data_ids[:,:,i], data_time],
                                       y=[data_labels[:,:,i,:]],
                                       verbose=False)
        all_loss.append(val_loss)
        if each_station_display:
            print('\tFor station 9000{}, val MLE loss: {}'.format(i+1, val_loss))
    self.current_mean_val_loss = np.mean(all_loss)
    print('Mean val MLE loss:', self.current_mean_val_loss)
    self.val_loss_list.append(self.current_mean_val_loss)
Example #10
Source File: gradient.py From deep-learning-note with MIT License
def _numerical_gradient_1d(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)

    for idx in range(x.size):
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)

        x[idx] = tmp_val  # restore the original value

    return grad
Example #11
Source File: utils.py From contextualbandits with BSD 2-Clause "Simplified" License
def _filter_arm_data(self, X, a, r, choice):
    if self.assume_un:
        this_choice = (a == choice)
        arms_w_rew = (r > 0.)
        yclass = np.where(arms_w_rew & (~this_choice), np.zeros_like(r), r)
        this_choice = this_choice | arms_w_rew
        yclass = yclass[this_choice]
    else:
        this_choice = (a == choice)
        yclass = r[this_choice]

    ## Note: don't filter X here as in many cases it won't end up used
    return yclass, this_choice

### TODO: these parallelizations probably shouldn't use sharedmem,
### but they still need to somehow modify the random states
Example #12
Source File: trpo_agent.py From pytorch-trpo with MIT License
def conjugate_gradient(self, b):
    """
    Returns F^(-1)b where F is the Hessian of the KL divergence
    """
    p = b.clone().data
    r = b.clone().data
    x = np.zeros_like(b.data.cpu().numpy())
    rdotr = r.double().dot(r.double())
    for _ in xrange(self.cg_iters):
        z = self.hessian_vector_product(Variable(p)).squeeze(0)
        v = rdotr / p.double().dot(z.double())
        x += v * p.cpu().numpy()
        r -= v * z
        newrdotr = r.double().dot(r.double())
        mu = newrdotr / rdotr
        p = r + mu * p
        rdotr = newrdotr
        if rdotr < self.residual_tol:
            break
    return x
Example #13
Source File: 7_gradient.py From deep-learning-note with MIT License
def numerical_gradient(f, x):
    h = 1e-4
    grad = np.zeros_like(x)
    for idx in range(x.size):
        tmp_val = x[idx]
        # compute f(x+h)
        x[idx] = tmp_val + h
        fxh1 = f(x)
        # compute f(x-h)
        x[idx] = tmp_val - h
        fxh2 = f(x)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val
    return grad

# gradient descent
Example #14
Source File: trust_region.py From DOTA_models with Apache License 2.0
def conjugate_gradient(f_Ax, b, cg_iters=10, residual_tol=1e-10):
    p = b.copy()
    r = b.copy()
    x = np.zeros_like(b)
    rdotr = r.dot(r)
    for i in xrange(cg_iters):
        z = f_Ax(p)
        v = rdotr / p.dot(z)
        x += v * p
        r -= v * z
        newrdotr = r.dot(r)
        mu = newrdotr / rdotr
        p = r + mu * p
        rdotr = newrdotr
        if rdotr < residual_tol:
            break
    return x
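Assuming the conjugate_gradient function above is in scope (with range substituted for the Python 2 xrange), a small symmetric positive-definite system shows it agreeing with a direct solve; the matrix here is a hypothetical test case, not data from DOTA_models:

import numpy as np

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])  # symmetric positive definite
b = np.array([1.0, 2.0])

x = conjugate_gradient(lambda p: A.dot(p), b)
print(x, np.linalg.solve(A, b))  # the two should agree closely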
Example #15
Source File: optimization.py From Att-ChemdNER with Apache License 2.0
def sgdmomentum(self, cost, params, constraints={}, lr=0.01, consider_constant=None, momentum=0.):
    """
    Stochastic gradient descent with momentum. Momentum has to be in [0, 1)
    """
    # Check that the momentum is a correct value
    assert 0 <= momentum < 1

    lr = theano.shared(np.float32(lr).astype(floatX))
    momentum = theano.shared(np.float32(momentum).astype(floatX))

    gradients = self.get_gradients(cost, params)
    velocities = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

    updates = []
    for param, gradient, velocity in zip(params, gradients, velocities):
        new_velocity = momentum * velocity - lr * gradient
        updates.append((velocity, new_velocity))
        new_p = param + new_velocity
        # apply constraints
        if param in constraints:
            c = constraints[param]
            new_p = c(new_p)
        updates.append((param, new_p))
    return updates
Example #16
Source File: util.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    TODO
    :param probs_neg:
    :param probs_pos:
    :param plot:
    :return:
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score
Example #17
Source File: util.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    TODO
    :param densities_pos:
    :param densities_neg:
    :param uncerts_pos:
    :param uncerts_neg:
    :return:
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr
Example #18
Source File: cnn_train.py From cgp-cnn with MIT License
def data_augmentation(self, x_train):
    _, c, h, w = x_train.shape
    pad_h = h + 2 * self.pad_size
    pad_w = w + 2 * self.pad_size
    aug_data = np.zeros_like(x_train)
    for i, x in enumerate(x_train):
        pad_img = np.zeros((c, pad_h, pad_w))
        pad_img[:, self.pad_size:h+self.pad_size, self.pad_size:w+self.pad_size] = x

        # Randomly crop and horizontally flip the image
        top = np.random.randint(0, pad_h - h + 1)
        left = np.random.randint(0, pad_w - w + 1)
        bottom = top + h
        right = left + w
        if np.random.randint(0, 2):
            pad_img = pad_img[:, :, ::-1]
        aug_data[i] = pad_img[:, top:bottom, left:right]

    return aug_data
Example #19
Source File: optimization.py From Att-ChemdNER with Apache License 2.0
def rmsprop(self, cost, params, lr=0.001, rho=0.9, eps=1e-6, consider_constant=None):
    """
    RMSProp.
    """
    lr = theano.shared(np.float32(lr).astype(floatX))

    gradients = self.get_gradients(cost, params, consider_constant)
    accumulators = [theano.shared(np.zeros_like(p.get_value()).astype(np.float32)) for p in params]

    updates = []
    for param, gradient, accumulator in zip(params, gradients, accumulators):
        new_accumulator = rho * accumulator + (1 - rho) * gradient ** 2
        updates.append((accumulator, new_accumulator))
        new_param = param - lr * gradient / T.sqrt(new_accumulator + eps)
        updates.append((param, new_param))
    return updates
Example #20
Source File: optimization.py From Att-ChemdNER with Apache License 2.0
def adadelta(self, cost, params, rho=0.95, epsilon=1e-6, consider_constant=None):
    """
    Adadelta. Based on:
    http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf
    """
    rho = theano.shared(np.float32(rho).astype(floatX))
    epsilon = theano.shared(np.float32(epsilon).astype(floatX))

    gradients = self.get_gradients(cost, params, consider_constant)
    accu_gradients = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]
    accu_deltas = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

    updates = []
    for param, gradient, accu_gradient, accu_delta in zip(params, gradients, accu_gradients, accu_deltas):
        new_accu_gradient = rho * accu_gradient + (1. - rho) * gradient ** 2.
        delta_x = - T.sqrt((accu_delta + epsilon) / (new_accu_gradient + epsilon)) * gradient
        new_accu_delta = rho * accu_delta + (1. - rho) * delta_x ** 2.
        updates.append((accu_gradient, new_accu_gradient))
        updates.append((accu_delta, new_accu_delta))
        updates.append((param, param + delta_x))
    return updates
Example #21
Source File: optimization.py From Att-ChemdNER with Apache License 2.0
def adagrad(self, cost, params, lr=1.0, epsilon=1e-6, consider_constant=None):
    """
    Adagrad. Based on http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf
    """
    lr = theano.shared(np.float32(lr).astype(floatX))
    epsilon = theano.shared(np.float32(epsilon).astype(floatX))

    gradients = self.get_gradients(cost, params, consider_constant)
    gsums = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

    updates = []
    for param, gradient, gsum in zip(params, gradients, gsums):
        new_gsum = gsum + gradient ** 2.
        updates.append((gsum, new_gsum))
        updates.append((param, param - lr * gradient / (T.sqrt(gsum + epsilon))))
    return updates
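For readers without a Theano installation, the Adagrad rule above reduces to a few lines of NumPy. This is a minimal sketch on a hypothetical quadratic loss, not code from Att-ChemdNER:

import numpy as np

lr, epsilon = 1.0, 1e-6
p = np.array([1.0, -2.0])
gsum = np.zeros_like(p)  # per-parameter sum of squared gradients
for _ in range(100):
    g = 2 * p            # gradient of the toy loss sum(p**2)
    gsum += g ** 2
    p -= lr * g / np.sqrt(gsum + epsilon)
print(p)                 # approaches [0, 0]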
Example #22
Source File: minibatch_sampler_test.py From object_detector_app with MIT License
def test_subsample_indicator_when_num_samples_is_zero(self):
    np_indicator = [True, False, True, False, True, True, False]
    indicator = tf.constant(np_indicator)
    samples_none = minibatch_sampler.MinibatchSampler.subsample_indicator(
        indicator, 0)
    with self.test_session() as sess:
        samples_none_out = sess.run(samples_none)
        self.assertAllEqual(
            np.zeros_like(samples_none_out, dtype=bool),
            samples_none_out)
Example #23
Source File: Forecaster.py From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def split_hyper_linear(self, hyper):
    '''
    split hyper and derive c
    '''
    c0, slope, sigma, trans = \
        hyper[0], hyper[1:1+self.n_pop], hyper[1+self.n_pop:1+2*self.n_pop], hyper[1+2*self.n_pop:]

    c = np.zeros_like(slope)
    c[0] = c0
    for i in range(1, self.n_pop):
        c[i] = c[i-1] + trans[i-1]*(slope[i-1]-slope[i])

    return c, slope, sigma, trans
Example #24
Source File: Forecaster.py From EXOSIMS with BSD 3-Clause "New" or "Revised" License
def piece_linear(self, hyper, M, prob_R):
    '''
    model: straight line
    '''
    c, slope, sigma, trans = self.split_hyper_linear(hyper)
    R = np.zeros_like(M)
    for i in range(4):
        ind = self.indicate(M, trans, i)
        mu = c[i] + M[ind]*slope[i]
        R[ind] = norm.ppf(prob_R[ind], mu, sigma[i])
    return R

# # Unused functions
# def classification(self, logm, trans):
#     '''
#     classify as four worlds
#     '''
#     count = np.zeros(4)
#     sample_size = len(logm)
#     for iclass in range(4):
#         for isample in range(sample_size):
#             ind = self.indicate(logm[isample], trans[isample], iclass)
#             count[iclass] = count[iclass] + ind
#     prob = count / np.sum(count) * 100.
#     print 'Terran %(T).1f %%, Neptunian %(N).1f %%, Jovian %(J).1f %%, Star %(S).1f %%' \
#         % {'T': prob[0], 'N': prob[1], 'J': prob[2], 'S': prob[3]}
#     return None
#
# def ProbRGivenM(self, radii, M, hyper):
#     '''
#     p(radii|M)
#     '''
#     c, slope, sigma, trans = self.split_hyper_linear(hyper)
#     prob = np.zeros_like(M)
#     for i in range(4):
#         ind = self.indicate(M, trans, i)
#         mu = c[i] + M[ind]*slope[i]
#         sig = sigma[i]
#         prob[ind] = norm.pdf(radii, mu, sig)
#     prob = prob/np.sum(prob)
#     return prob
Example #25
Source File: cg.py From lirpg with MIT License
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
    """
    Demmel p 312
    """
    p = b.copy()
    r = b.copy()
    x = np.zeros_like(b)
    rdotr = r.dot(r)

    fmtstr = "%10i %10.3g %10.3g"
    titlestr = "%10s %10s %10s"
    if verbose: print(titlestr % ("iter", "residual norm", "soln norm"))

    for i in range(cg_iters):
        if callback is not None:
            callback(x)
        if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x)))
        z = f_Ax(p)
        v = rdotr / p.dot(z)
        x += v*p
        r -= v*z
        newrdotr = r.dot(r)
        mu = newrdotr/rdotr
        p = r + mu*p
        rdotr = newrdotr
        if rdotr < residual_tol:
            break

    if callback is not None:
        callback(x)
    if verbose: print(fmtstr % (i+1, rdotr, np.linalg.norm(x)))  # pylint: disable=W0631
    return x
Example #26
Source File: normalizations.py From pytorch-mri-segmentation-3D with MIT License
def getTransform(img, pc, s, m_p, mean_m):
    z = np.copy(img)
    p, m = getLandmarks(img, pc, m_p)
    # using img, p, m, s, mean_m get the normalized image
    p_1, p_2 = p[0], p[1]
    s_1, s_2 = s[0], s[1]

    # histogram values at locations (pc + landmarks)
    m = [p_1] + list(m) + [p_2]
    # map scale corresponding to these values
    mean_m = [s_1] + list(mean_m) + [s_2]

    new_img = np.zeros_like(img, dtype=np.int64)
    hist_indices = np.zeros_like(img, dtype=np.int64)
    hist_indices = np.copy(new_img)

    for m_ in m:
        hist_indices += (img > m_).astype(int)
    hist_indices = np.clip(hist_indices, 1, len(m) - 1, out=hist_indices)

    indexer_m = lambda v: m[v]
    indexer_mm = lambda v: mean_m[v]
    f_m = np.vectorize(indexer_m)
    f_mm = np.vectorize(indexer_mm)

    new_p_1 = f_m(hist_indices - 1)
    new_p_2 = f_m(hist_indices)
    new_s_1 = f_mm(hist_indices - 1)
    new_s_2 = f_mm(hist_indices)

    new_img = mapLandmarksVec([new_p_1, new_p_2], [new_s_1, new_s_2], img)
    new_img = np.clip(new_img, s_1-1, s_2+1, out=new_img)

    return new_img

#################################################################
Example #27
Source File: test_recorders.py From pywr with GNU General Public License v3.0
def test_event_capture_with_node(self, cyclical_linear_model):
    """ Test Node flow events using a NodeThresholdRecorder """
    m = cyclical_linear_model
    otpt = m.nodes['Output']

    arry = NumpyArrayNodeRecorder(m, otpt)

    # Create the trigger using a threshold parameter
    trigger = NodeThresholdRecorder(m, otpt, 4.0, predicate='>')
    evt_rec = EventRecorder(m, trigger)

    m.run()

    # Ensure there is at least one event
    assert evt_rec.events

    # Build a timeseries of when the events say an event is active
    triggered = np.zeros_like(arry.data, dtype=np.int)
    for evt in evt_rec.events:
        triggered[evt.start.index:evt.end.index, evt.scenario_index.global_id] = 1

        # Check the duration
        td = evt.end.datetime - evt.start.datetime
        assert evt.duration == td.days

    # Test that the volumes in the Storage node during the event periods match
    assert_equal(triggered, arry.data > 4)
Example #28
Source File: saliency.py From OpenCV-Computer-Vision-Projects-with-Python with MIT License
def get_saliency_map(self):
    """Returns a saliency map

    This method generates a saliency map for the image that was
    passed to the class constructor.

    :returns: grayscale saliency map
    """
    if self.need_saliency_map:
        # haven't calculated saliency map for this image yet
        num_channels = 1
        if len(self.frame_orig.shape) == 2:
            # single channel
            sal = self._get_channel_sal_magn(self.frame_small)
        else:
            # multiple channels: consider each channel independently
            sal = np.zeros_like(self.frame_small).astype(np.float32)
            for c in xrange(self.frame_small.shape[2]):
                small = self.frame_small[:, :, c]
                sal[:, :, c] = self._get_channel_sal_magn(small)

            # overall saliency: channel mean
            sal = np.mean(sal, 2)

        # postprocess: blur, square, and normalize
        if self.gauss_kernel is not None:
            sal = cv2.GaussianBlur(sal, self.gauss_kernel, sigmaX=8, sigmaY=0)

        sal = sal**2
        sal = np.float32(sal)/np.max(sal)

        # scale up
        sal = cv2.resize(sal, self.frame_orig.shape[1::-1])

        # store a copy so we do the work only once per frame
        self.saliencyMap = sal
        self.need_saliency_map = False

    return self.saliencyMap
Example #29
Source File: mpi_adam.py From lirpg with MIT License
def update(self, localg, stepsize):
    if self.t % 100 == 0:
        self.check_synced()
    localg = localg.astype('float32')
    globalg = np.zeros_like(localg)
    self.comm.Allreduce(localg, globalg, op=MPI.SUM)
    if self.scale_grad_by_procs:
        globalg /= self.comm.Get_size()

    self.t += 1
    a = stepsize * np.sqrt(1 - self.beta2**self.t)/(1 - self.beta1**self.t)
    self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
    self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
    step = (- a) * self.m / (np.sqrt(self.v) + self.epsilon)
    self.setfromflat(self.getflat() + step)
Example #30
Source File: Gradients.py From sopt with MIT License
def gradients(func, variables):
    grads = np.zeros_like(variables)
    for i in range(len(variables)):
        variables_delta = np.copy(variables)
        variables_delta[i] += gradients_config.delta
        grads[i] = (func(variables_delta) - func(variables)) / gradients_config.delta
    return grads
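Unlike the central differences in the deep-learning-note snippets above, this version uses a one-sided forward difference with a step size taken from sopt's gradients_config. Assuming the function above is pasted into the same file, a usage sketch looks like this; the gradients_config stub and the test function are hypothetical stand-ins, not sopt's real configuration:

import numpy as np

class gradients_config:  # hypothetical stand-in for sopt's gradients_config
    delta = 1e-7         # forward-difference step size

def func(v):
    return np.sum(v ** 2)  # analytic gradient is 2*v

print(gradients(func, np.array([3.0, 4.0])))  # approximately [6. 8.]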