Python numpy.Inf Examples

The following are 29 code examples of numpy.Inf. Note that numpy.Inf is a float constant, not a callable: it is an alias of numpy.inf, which is the preferred spelling (the capitalized Inf alias was removed in NumPy 2.0). You may also want to check out all available functions and classes of the module numpy.
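As a quick orientation, here is a minimal sketch (assuming NumPy < 2.0, where the Inf alias still exists) of the pattern that dominates the examples below: seeding a running "best score" with infinity so that the first real measurement always counts as an improvement.

import numpy as np

assert np.Inf is np.inf  # Inf is merely an alias for the float inf

# Common pattern from the callback examples below: start the best observed
# loss at +inf so any finite validation loss improves on it.
best = np.Inf
for val_loss in [0.9, 0.7, 0.8]:
    if np.less(val_loss, best):
        best = val_loss

print(best)  # 0.7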
Example #1
Source File: callbacks.py From lambda-packs with MIT License

def _reset(self):
    """Resets wait counter and cooldown counter.
    """
    if self.mode not in ['auto', 'min', 'max']:
        warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '
                      'fallback to auto mode.' % (self.mode), RuntimeWarning)
        self.mode = 'auto'
    if (self.mode == 'min' or
            (self.mode == 'auto' and 'acc' not in self.monitor)):
        self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
        self.best = np.Inf
    else:
        self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
        self.best = -np.Inf
    self.cooldown_counter = 0
    self.wait = 0
    self.lr_epsilon = self.min_lr * 1e-4
Example #2
Source File: monotonic_woe_binning.py From Monotonic-WOE-Binning-Algorithm with MIT License

def generate_final_dataset(self):
    if self.sign == False:
        shift_var = 1
        self.bucket = True
    else:
        shift_var = -1
        self.bucket = False

    self.woe_summary[self.column + "_shift"] = self.woe_summary[self.column].shift(shift_var)

    if self.sign == False:
        self.woe_summary.loc[0, self.column + "_shift"] = -np.inf
        self.bins = np.sort(list(self.woe_summary[self.column]) + [np.Inf, -np.Inf])
    else:
        self.woe_summary.loc[len(self.woe_summary) - 1, self.column + "_shift"] = np.inf
        self.bins = np.sort(list(self.woe_summary[self.column]) + [np.Inf, -np.Inf])

    self.woe_summary["labels"] = self.woe_summary.apply(self.generate_bin_labels, axis=1)

    self.dataset["bins"] = pd.cut(self.dataset[self.column], self.bins,
                                  right=self.bucket, precision=0)
    self.dataset["bins"] = self.dataset["bins"].astype(str)
    self.dataset['bins'] = self.dataset['bins'].map(lambda x: x.lstrip('[').rstrip(')'))
Example #3
Source File: earlystopping.py From DropEdge with MIT License

def __init__(self, datasets="tmp", patience=7, fname=None, clean=False, verbose=False):
    """
    Args:
        patience (int): How long to wait after last time validation loss improved.
                        Default: 7
        verbose (bool): If True, prints a message for each validation loss improvement.
                        Default: False
    """
    self.patience = patience
    self.verbose = verbose
    self.counter = 0
    self.best_score = None
    self.early_stop = False
    self.val_loss_min = np.Inf
    timstr = datetime.datetime.now().strftime("%m%d-%H%M%S")
    if fname is None:
        fname = datasets + "-" + timstr + "-" + self._random_str() + ".pt"
    self.fname = os.path.join(folder, fname)
    self.clean = clean
Example #4
Source File: subsortingextractor.py From spikeextractors with MIT License

def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
    start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
    if start_frame is None:
        start_frame = 0
    if end_frame is None:
        end_frame = np.Inf
    original_unit_id = self._original_unit_id_lookup[unit_id]
    sf = self._start_frame + start_frame
    ef = self._start_frame + end_frame
    if sf < self._start_frame:
        sf = self._start_frame
    if ef > self._end_frame:
        ef = self._end_frame
    if ef == np.Inf:
        ef = None
    return self._parent_sorting.get_unit_spike_train(unit_id=original_unit_id,
                                                     start_frame=sf,
                                                     end_frame=ef) - self._start_frame
Example #5
Source File: subsortingextractor.py From spikeextractors with MIT License

def __init__(self, parent_sorting, *, unit_ids=None, renamed_unit_ids=None,
             start_frame=None, end_frame=None):
    SortingExtractor.__init__(self)
    start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
    self._parent_sorting = parent_sorting
    self._unit_ids = unit_ids
    self._renamed_unit_ids = renamed_unit_ids
    self._start_frame = start_frame
    self._end_frame = end_frame
    if self._unit_ids is None:
        self._unit_ids = self._parent_sorting.get_unit_ids()
    if self._renamed_unit_ids is None:
        self._renamed_unit_ids = self._unit_ids
    if self._start_frame is None:
        self._start_frame = 0
    if self._end_frame is None:
        self._end_frame = np.Inf
    self._original_unit_id_lookup = {}
    for i in range(len(self._unit_ids)):
        self._original_unit_id_lookup[self._renamed_unit_ids[i]] = self._unit_ids[i]
    self.copy_unit_properties(parent_sorting, unit_ids=self._renamed_unit_ids)
    self.copy_unit_spike_features(parent_sorting, unit_ids=self._renamed_unit_ids,
                                  start_frame=start_frame, end_frame=end_frame)
    self._kwargs = {'parent_sorting': parent_sorting.make_serialized_dict(),
                    'unit_ids': unit_ids, 'renamed_unit_ids': renamed_unit_ids,
                    'start_frame': start_frame, 'end_frame': end_frame}
Example #6
Source File: dA.py From KitNET-py with MIT License

def __init__(self, params):
    self.params = params

    if self.params.hiddenRatio is not None:
        self.params.n_hidden = int(numpy.ceil(self.params.n_visible * self.params.hiddenRatio))

    # for 0-1 normalization
    self.norm_max = numpy.ones((self.params.n_visible,)) * -numpy.Inf
    self.norm_min = numpy.ones((self.params.n_visible,)) * numpy.Inf
    self.n = 0

    self.rng = numpy.random.RandomState(1234)

    a = 1. / self.params.n_visible
    self.W = numpy.array(self.rng.uniform(  # initialize W uniformly
        low=-a,
        high=a,
        size=(self.params.n_visible, self.params.n_hidden)))

    self.hbias = numpy.zeros(self.params.n_hidden)  # initialize h bias 0
    self.vbias = numpy.zeros(self.params.n_visible)  # initialize v bias 0
    self.W_prime = self.W.T
Example #7
Source File: test_analysis.py From controlpy with GNU General Public License v3.0

def sys_norm_h2_LMI(Acl, Bdisturbance, C):
    # doesn't work very well, if problem poorly scaled Riccati works better.
    # Dullerud p 210
    n = Acl.shape[0]
    X = cvxpy.Semidef(n)
    Y = cvxpy.Semidef(n)

    constraints = [Acl*X + X*Acl.T + Bdisturbance*Bdisturbance.T == -Y,
                   ]

    obj = cvxpy.Minimize(cvxpy.trace(Y))
    prob = cvxpy.Problem(obj, constraints)
    prob.solve()
    eps = 1e-16

    if np.max(np.linalg.eigvals((-Acl*X - X*Acl.T - Bdisturbance*Bdisturbance.T).value)) > -eps:
        print('Acl*X + X*Acl.T + Bdisturbance*Bdisturbance.T is not neg def.')
        return np.Inf

    if np.min(np.linalg.eigvals(X.value)) < eps:
        print('X is not pos def.')
        return np.Inf

    return np.sqrt(np.trace(C*X.value*C.T))
Example #8
Source File: multi_gpu_model_checkpoint.py From squeezedet-keras with MIT License

def __init__(self, filepath, monitor='val_loss', verbose=0,
             save_best_only=False, save_weights_only=False,
             mode='auto', period=1):
    super(ModelCheckpointMultiGPU, self).__init__()
    self.monitor = monitor
    self.verbose = verbose
    self.filepath = filepath
    self.save_best_only = save_best_only
    self.save_weights_only = save_weights_only
    self.period = period
    self.epochs_since_last_save = 0

    if mode not in ['auto', 'min', 'max']:
        warnings.warn('ModelCheckpoint mode %s is unknown, '
                      'fallback to auto mode.' % mode, RuntimeWarning)
        mode = 'auto'

    if mode == 'min':
        self.monitor_op = np.less
        self.best = np.Inf
    elif mode == 'max':
        self.monitor_op = np.greater
        self.best = -np.Inf
    else:
        if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
            self.monitor_op = np.greater
            self.best = -np.Inf
        else:
            self.monitor_op = np.less
            self.best = np.Inf
Example #9
Source File: quadpack.py From Computable with MIT License

def _quad(func, a, b, args, full_output, epsabs, epsrel, limit, points):
    infbounds = 0
    if (b != Inf and a != -Inf):
        pass    # standard integration
    elif (b == Inf and a != -Inf):
        infbounds = 1
        bound = a
    elif (b == Inf and a == -Inf):
        infbounds = 2
        bound = 0   # ignored
    elif (b != Inf and a == -Inf):
        infbounds = -1
        bound = b
    else:
        raise RuntimeError("Infinity comparisons don't work for you.")

    if points is None:
        if infbounds == 0:
            return _quadpack._qagse(func, a, b, args, full_output, epsabs, epsrel, limit)
        else:
            return _quadpack._qagie(func, bound, infbounds, args, full_output, epsabs, epsrel, limit)
    else:
        if infbounds != 0:
            raise ValueError("Infinity inputs cannot be used with break points.")
        else:
            nl = len(points)
            the_points = numpy.zeros((nl+2,), float)
            the_points[:nl] = points
            return _quadpack._qagpe(func, a, b, the_points, args, full_output, epsabs, epsrel, limit)
Example #10
Source File: test_linalg.py From Mastering-Elasticsearch-7.0 with MIT License

def test_axis(self):
    # Vector norms.
    # Compare the use of `axis` with computing the norm of each row
    # or column separately.
    A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
    for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
        expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
        assert_almost_equal(norm(A, ord=order, axis=0), expected0)
        expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])]
        assert_almost_equal(norm(A, ord=order, axis=1), expected1)

    # Matrix norms.
    B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
    nd = B.ndim
    for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
        for axis in itertools.combinations(range(-nd, nd), 2):
            row_axis, col_axis = axis
            if row_axis < 0:
                row_axis += nd
            if col_axis < 0:
                col_axis += nd
            if row_axis == col_axis:
                assert_raises(ValueError, norm, B, ord=order, axis=axis)
            else:
                n = norm(B, ord=order, axis=axis)

                # The logic using k_index only works for nd = 3.
                # This has to be changed if nd is increased.
                k_index = nd - (row_axis + col_axis)
                if row_axis < col_axis:
                    expected = [norm(B[:].take(k, axis=k_index), ord=order)
                                for k in range(B.shape[k_index])]
                else:
                    expected = [norm(B[:].take(k, axis=k_index).T, ord=order)
                                for k in range(B.shape[k_index])]
                assert_almost_equal(n, expected)
Example #11
Source File: hs2sortingextractor.py From spikeextractors with MIT License

def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
    start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
    if start_frame is None:
        start_frame = 0
    if end_frame is None:
        end_frame = np.Inf
    times = self._times[self.get_unit_indices(unit_id)]
    inds = np.where((start_frame <= times) & (times < end_frame))
    return times[inds]
Example #12
Source File: test_quadpack.py From Computable with MIT License

def test_sine_weighted_infinite(self):
    # 5) Sine weighted integral (infinite limits)
    def myfunc(x, a):
        return exp(-x*a)

    a = 4.0
    ome = 3.0
    assert_quad(quad(myfunc, 0, Inf, args=a, weight='sin', wvar=ome),
                ome/(a**2 + ome**2))
Example #13
Source File: test_linalg.py From Mastering-Elasticsearch-7.0 with MIT License

def test_keepdims(self):
    A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)

    allclose_err = 'order {0}, axis = {1}'
    shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}'

    # check the order=None, axis=None case
    expected = norm(A, ord=None, axis=None)
    found = norm(A, ord=None, axis=None, keepdims=True)
    assert_allclose(np.squeeze(found), expected,
                    err_msg=allclose_err.format(None, None))
    expected_shape = (1, 1, 1)
    assert_(found.shape == expected_shape,
            shape_err.format(found.shape, expected_shape, None, None))

    # Vector norms.
    for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
        for k in range(A.ndim):
            expected = norm(A, ord=order, axis=k)
            found = norm(A, ord=order, axis=k, keepdims=True)
            assert_allclose(np.squeeze(found), expected,
                            err_msg=allclose_err.format(order, k))
            expected_shape = list(A.shape)
            expected_shape[k] = 1
            expected_shape = tuple(expected_shape)
            assert_(found.shape == expected_shape,
                    shape_err.format(found.shape, expected_shape, order, k))

    # Matrix norms.
    for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']:
        for k in itertools.permutations(range(A.ndim), 2):
            expected = norm(A, ord=order, axis=k)
            found = norm(A, ord=order, axis=k, keepdims=True)
            assert_allclose(np.squeeze(found), expected,
                            err_msg=allclose_err.format(order, k))
            expected_shape = list(A.shape)
            expected_shape[k[0]] = 1
            expected_shape[k[1]] = 1
            expected_shape = tuple(expected_shape)
            assert_(found.shape == expected_shape,
                    shape_err.format(found.shape, expected_shape, order, k))
Example #14
Source File: model_checkpoint.py From Multi-Label-Text-Classification-for-Chinese with MIT License

def __init__(self, checkpoint_dir, monitor, arch, mode='min', epoch_freq=1,
             best=None, save_best_only=True):
    if isinstance(checkpoint_dir, Path):
        checkpoint_dir = checkpoint_dir
    else:
        checkpoint_dir = Path(checkpoint_dir)
    assert checkpoint_dir.is_dir()
    checkpoint_dir.mkdir(exist_ok=True)
    self.base_path = checkpoint_dir
    self.arch = arch
    self.monitor = monitor
    self.epoch_freq = epoch_freq
    self.save_best_only = save_best_only

    # comparison mode
    if mode == 'min':
        self.monitor_op = np.less
        self.best = np.Inf
    elif mode == 'max':
        self.monitor_op = np.greater
        self.best = -np.Inf
    # mainly used when reloading a model:
    # re-assign the stored best value
    if best:
        self.best = best

    if save_best_only:
        self.model_name = f"BEST_{arch}_MODEL.pth"
Example #15
Source File: numpyextractors.py From spikeextractors with MIT License

def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
    start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
    if start_frame is None:
        start_frame = 0
    if end_frame is None:
        end_frame = np.Inf
    times = self._units[unit_id]['times']
    inds = np.where((start_frame <= times) & (times < end_frame))[0]
    return np.rint(times[inds]).astype(int)
Example #16
Source File: spykingcircusextractors.py From spikeextractors with MIT License

def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
    start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
    if start_frame is None:
        start_frame = 0
    if end_frame is None:
        end_frame = np.Inf
    times = self._spiketrains[self.get_unit_ids().index(unit_id)]
    inds = np.where((start_frame <= times) & (times < end_frame))
    return times[inds]
Example #17
Source File: motor_dashboard.py From gym-electric-motor with MIT License

def __init__(self, update_period=5e-2, visu_period=5, plotted_variables='all', **_):
    """
    Constructor of the dashboard.

    Args:
        plotted_variables: Names of the variables that shall be shown on the dashboard
            | Shortcut: ['all']/['none'] for all/no visualized variables
        update_period: Number of seconds after that the dashboard will be updated
            | Updating with tiny periods leads to very low speed.
        visu_period: Time period shown on the dashboard
    """
    self._update_period = update_period
    self._visu_period = visu_period
    self._plotted_variables = plotted_variables
    self._physical_system = None
    self._figure = None
    plt.ion()
    self._tau = None
    self._update_cycle = None
    self._episode_length = np.Inf
    self.dash_vars = None
    self._referenced_states = None
    self._limits = None
    self._nominal_state = None
    self._observation_space = None
    self._labels = None
    self._plotted_state_index = []
    self._k = 0
    self.initialized = False
    # If available use the Qt5 Backend and the update function to update the plot (faster)
    try:
        matplotlib.use('Qt5Agg')
    # Otherwise stick to the default backend and use the draw function (slower)
    except ImportError:
        warnings.warn('Cannot use Qt5Agg matplotlib backend. Plotting will be slower.')
Example #18
Source File: neuroscopesortingextractor.py From spikeextractors with MIT License

def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
    start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
    if start_frame is None:
        start_frame = 0
    if end_frame is None:
        end_frame = np.Inf
    times = self._spiketrains[self.get_unit_ids().index(unit_id)]
    inds = np.where((start_frame <= times) & (times < end_frame))
    return times[inds]
Example #19
Source File: nwbextractors.py From spikeextractors with MIT License

def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
    start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
    if start_frame is None:
        start_frame = 0
    if end_frame is None:
        end_frame = np.Inf
    check_nwb_install()
    with NWBHDF5IO(self._path, 'r') as io:
        nwbfile = io.read()
        # chosen unit and interval
        times = nwbfile.units['spike_times'][list(nwbfile.units.id[:]).index(unit_id)][:]
        # spike times are measured in samples
        frames = self.time_to_frame(times)
    return frames[(frames > start_frame) & (frames < end_frame)]
Example #20
Source File: callbacks.py From few-shot with MIT License

def _reset(self):
    """Resets wait counter and cooldown counter.
    """
    if (self.mode == 'min' or
            (self.mode == 'auto' and 'acc' not in self.monitor)):
        self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
        self.best = np.Inf
    else:
        self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
        self.best = -np.Inf
    self.cooldown_counter = 0
    self.wait = 0
Example #21
Source File: exdirextractors.py From spikeextractors with MIT License

def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
    start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
    if start_frame is None:
        start_frame = 0
    if end_frame is None:
        end_frame = np.Inf
    times = self._spike_trains[self._unit_ids.index(unit_id)]
    inds = np.where((start_frame <= times) & (times < end_frame))
    return np.rint(times[inds]).astype(int)
Example #22
Source File: shybridextractors.py From spikeextractors with MIT License

def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
    start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
    train = self._spike_clusters[unit_id].get_actual_spike_train().spikes

    if start_frame is None:
        start_frame = 0
    if end_frame is None:
        end_frame = np.Inf

    idxs = np.where((start_frame <= train) & (train < end_frame))
    return train[idxs]
Example #23
Source File: mdaextractors.py From spikeextractors with MIT License

def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
    start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
    if start_frame is None:
        start_frame = 0
    if end_frame is None:
        end_frame = np.Inf
    inds = np.where((self._labels == unit_id) &
                    (start_frame <= self._times) & (self._times < end_frame))
    return np.rint(self._times[inds]).astype(int)
Example #24
Source File: mearecextractors.py From spikeextractors with MIT License

def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
    start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
    if start_frame is None:
        start_frame = 0
    if end_frame is None:
        end_frame = np.Inf
    if self._spike_trains is None:
        self._initialize()
    times = (self._spike_trains[self.get_unit_ids().index(unit_id)].times.rescale('s')
             * self._fs.rescale('Hz')).magnitude
    inds = np.where((start_frame <= times) & (times < end_frame))
    return np.rint(times[inds]).astype(int)
Example #25
Source File: stacked_single_target_hoeffding_tree_regressor.py From scikit-multiflow with BSD 3-Clause "New" or "Revised" License

def _get_predictors_faded_error(self, X):
    """Get the faded error of the leaf corresponding to the passed instance.

    Parameters
    ----------
    X: numpy.ndarray of length equal to the number of features.
        Instance attributes.

    Returns
    -------
    dict (predictor, fmae)
    """
    fmaes = {}
    if self._tree_root is not None:
        found_node = self._tree_root.filter_instance_to_leaf(X, None, -1)
        leaf_node = found_node.node
        if leaf_node is None:
            leaf_node = found_node.parent

        if isinstance(leaf_node, LearningNode):
            fmaes['mean'] = leaf_node.fMAE_M
            fmaes['perceptron'] = leaf_node.fMAE_P
            fmaes['stacked_perceptron'] = leaf_node.fMAE_SP
        else:
            # If the found node is not a learning node, give preference to
            # the mean predictor
            fmaes['mean'] = np.zeros(self._n_targets)
            fmaes['perceptron'] = np.full(self._n_targets, np.Inf)
            fmaes['stacked_perceptron'] = np.full(self._n_targets, np.Inf)

    return fmaes
Example #26
Source File: isoup_tree.py From scikit-multiflow with BSD 3-Clause "New" or "Revised" License

def _get_predictors_faded_error(self, X):
    """Get the faded error of the leaf corresponding to the instance.

    Parameters
    ----------
    X: numpy.ndarray of length equal to the number of features.
        Instance attributes.

    Returns
    -------
    dict (predictor, fmae)
    """
    fmaes = {}
    if self._tree_root is not None:
        found_node = self._tree_root.filter_instance_to_leaf(X, None, -1)
        leaf_node = found_node.node
        if leaf_node is None:
            leaf_node = found_node.parent

        if isinstance(leaf_node, LearningNode):
            fmaes['mean'] = leaf_node.fMAE_M
            fmaes['perceptron'] = leaf_node.fMAE_P
        else:
            # If the found node is not a learning node, give preference to
            # the mean predictor
            fmaes['mean'] = np.zeros(self._n_targets)
            fmaes['perceptron'] = np.full(self._n_targets, np.Inf)

    return fmaes
Example #27
Source File: clustering.py From pycircstat with MIT License

def train(self, alpha):
    """
    Finds the agglomerative clustering on the data alpha

    :param alpha: angles in radians
    :returns: data, cluster ids
    """
    assert len(alpha.shape) == 1, 'Clustering works only for 1d data'
    n = len(alpha)
    cid = np.arange(n, dtype=int)

    nu = n
    while nu > self.numclust:
        mu = np.asarray([descr.mean(alpha[cid == j]) if j in cid else np.Inf
                         for j in range(n)])
        D = np.abs(descr.pairwise_cdiff(mu))
        idx = np.triu_indices(n, 1)
        min = np.nanargmin(D[idx])
        cid[cid == cid[idx[0][min]]] = cid[idx[1][min]]
        nu -= 1

    cid2 = np.empty_like(cid)
    for i, j in enumerate(np.unique(cid)):
        cid2[cid == j] = i

    ucid = np.unique(cid2)
    self.centroids = np.asarray([descr.mean(alpha[cid2 == i]) for i in ucid])
    self.cluster_ids = ucid
    self.r = np.asarray([descr.resultant_vector_length(alpha[cid2 == i]) for i in ucid])

    return alpha, cid2
Example #28
Source File: optimizer.py From vnpy_crypto with MIT License

def _fit_cg(f, score, start_params, fargs, kwargs, disp=True, maxiter=100,
            callback=None, retall=False, full_output=True, hess=None):
    gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)
    norm = kwargs.setdefault('norm', np.Inf)
    epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
    retvals = optimize.fmin_cg(f, start_params, score, gtol=gtol, norm=norm,
                               epsilon=epsilon, maxiter=maxiter,
                               full_output=full_output, disp=disp,
                               retall=retall, callback=callback)
    if full_output:
        if not retall:
            xopt, fopt, fcalls, gcalls, warnflag = retvals
        else:
            xopt, fopt, fcalls, gcalls, warnflag, allvecs = retvals
        converged = not warnflag
        retvals = {'fopt': fopt, 'fcalls': fcalls, 'gcalls': gcalls,
                   'warnflag': warnflag, 'converged': converged}
        if retall:
            retvals.update({'allvecs': allvecs})
    else:
        xopt = retvals
        retvals = None

    return xopt, retvals
Example #29
Source File: utils.py From Att-ChemdNER with Apache License 2.0

def on_train_begin(self):
    self.wait = 0       # Allow instances to be re-used
    self.best = np.Inf if self.monitor_op == np.less else -np.Inf