Python bottleneck.nanmean() Examples

The following are 10 code examples of bottleneck.nanmean(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module bottleneck, or try the search function.
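As a quick, made-up illustration before the project examples: bottleneck.nanmean() averages while ignoring NaNs, either over the flattened array or along a given axis.

>>> import numpy as np
>>> import bottleneck
>>> bottleneck.nanmean(np.array([1.0, 2.0, np.nan, 4.0]))
2.3333333333333335
>>> bottleneck.nanmean(np.array([[1.0, np.nan], [3.0, 4.0]]), axis=0)
array([2., 4.])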
Example #1
Source File: accessors.py    From vectorbt with GNU General Public License v3.0    6 votes
def apply_and_reduce(self, apply_func_nb, reduce_func_nb, *args, **kwargs):
        """See `vectorbt.tseries.nb.apply_and_reduce_nb`.

        `**kwargs` will be passed to `vectorbt.tseries.common.TSArrayWrapper.wrap_reduced`.

        Example:
            ```python-repl
            >>> greater_nb = njit(lambda col, a: a[a > 2])
            >>> mean_nb = njit(lambda col, a: np.nanmean(a))
            >>> print(df.vbt.tseries.apply_and_reduce(greater_nb, mean_nb))
            a    4.0
            b    4.0
            c    3.0
            dtype: float64
            ```"""
        checks.assert_numba_func(apply_func_nb)
        checks.assert_numba_func(reduce_func_nb)

        result = nb.apply_and_reduce_nb(self.to_2d_array(), apply_func_nb, reduce_func_nb, *args)
        return self.wrap_reduced(result, **kwargs) 
Example #2
Source File: oPCA.py    From sima with GNU General Public License v2.0    6 votes
def _method_1(data, num_pcs=None):
    """Compute OPCA when num_observations > num_dimensions."""
    data = np.nan_to_num(data - nanmean(data, axis=0))
    T = data.shape[0]
    corr_offset = np.dot(data[1:].T, data[:-1])
    corr_offset += corr_offset.T
    if num_pcs is None:
        eivals, eivects = eigh(corr_offset)
    else:
        eivals, eivects = eigsh(corr_offset, num_pcs, which='LA')
    eivals = np.real(eivals)
    eivects = np.real(eivects)
    idx = np.argsort(-eivals)  # sort the eigenvectors and eigenvalues
    eivals = old_div(eivals[idx], (2. * (T - 1)))
    eivects = eivects[:, idx]
    return eivals, eivects, np.dot(data, eivects) 
Example #3
Source File: oPCA.py    From sima with GNU General Public License v2.0    6 votes
def _method_2(data, num_pcs=None):
    """Compute OPCA when num_observations <= num_dimensions."""
    data = np.nan_to_num(data - nanmean(data, axis=0))
    T = data.shape[0]
    tmp = np.dot(data, data.T)
    corr_offset = np.zeros(tmp.shape)
    corr_offset[1:] = tmp[:-1]
    corr_offset[:-1] += tmp[1:]
    if num_pcs is None:
        eivals, eivects = eig(corr_offset)
    else:
        eivals, eivects = eigs(corr_offset, num_pcs, which='LR')
    eivals = np.real(eivals)
    eivects = np.real(eivects)
    idx = np.argsort(-eivals)  # sort the eigenvectors and eigenvalues
    eivals = old_div(eivals[idx], (2. * (T - 1)))
    eivects = eivects[:, idx]
    transformed_eivects = np.dot(data.T, eivects)
    for i in range(transformed_eivects.shape[1]):  # normalize the eigenvectors
        transformed_eivects[:, i] /= np.linalg.norm(transformed_eivects[:, i])
    return eivals, transformed_eivects, np.dot(data, transformed_eivects) 
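Both OPCA helpers above begin by centering each column with nanmean (bottleneck's, which is why these snippets appear on this page) and zeroing out whatever remains NaN before building the lagged correlation. A minimal sketch of that centering step with a made-up array:

>>> data = np.array([[1.0, np.nan], [3.0, 2.0], [5.0, 4.0]])
>>> np.nan_to_num(data - nanmean(data, axis=0))
array([[-2.,  0.],
       [ 0., -1.],
       [ 2.,  1.]])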
Example #4
Source File: accessors.py    From vectorbt with GNU General Public License v3.0    5 votes
def rolling_apply(self, window, apply_func_nb, *args, on_matrix=False):
        """See `vectorbt.tseries.nb.rolling_apply_nb` and
        `vectorbt.tseries.nb.rolling_apply_matrix_nb` for `on_matrix=True`.

        Example:
            ```python-repl
            >>> mean_nb = njit(lambda col, i, a: np.nanmean(a))
            >>> print(df.vbt.tseries.rolling_apply(3, mean_nb))
                          a    b         c
            2020-01-01  1.0  5.0  1.000000
            2020-01-02  1.5  4.5  1.500000
            2020-01-03  2.0  4.0  2.000000
            2020-01-04  3.0  3.0  2.333333
            2020-01-05  4.0  2.0  2.000000

            >>> mean_matrix_nb = njit(lambda i, a: np.nanmean(a))
            >>> print(df.vbt.tseries.rolling_apply(3,
            ...     mean_matrix_nb, on_matrix=True))
                               a         b         c
            2020-01-01  2.333333  2.333333  2.333333
            2020-01-02  2.500000  2.500000  2.500000
            2020-01-03  2.666667  2.666667  2.666667
            2020-01-04  2.777778  2.777778  2.777778
            2020-01-05  2.666667  2.666667  2.666667
            ```"""
        checks.assert_numba_func(apply_func_nb)

        if on_matrix:
            result = nb.rolling_apply_matrix_nb(self.to_2d_array(), window, apply_func_nb, *args)
        else:
            result = nb.rolling_apply_nb(self.to_2d_array(), window, apply_func_nb, *args)
        return self.wrap(result) 
Example #5
Source File: accessors.py    From vectorbt with GNU General Public License v3.0    5 votes
def expanding_apply(self, apply_func_nb, *args, on_matrix=False):
        """See `vectorbt.tseries.nb.expanding_apply_nb` and
        `vectorbt.tseries.nb.expanding_apply_matrix_nb` for `on_matrix=True`.

        Example:
            ```python-repl
            >>> mean_nb = njit(lambda col, i, a: np.nanmean(a))
            >>> print(df.vbt.tseries.expanding_apply(mean_nb))
                          a    b    c
            2020-01-01  1.0  5.0  1.0
            2020-01-02  1.5  4.5  1.5
            2020-01-03  2.0  4.0  2.0
            2020-01-04  2.5  3.5  2.0
            2020-01-05  3.0  3.0  1.8

            >>> mean_matrix_nb = njit(lambda i, a: np.nanmean(a))
            >>> print(df.vbt.tseries.expanding_apply(
            ...     mean_matrix_nb, on_matrix=True))
                               a         b         c
            2020-01-01  2.333333  2.333333  2.333333
            2020-01-02  2.500000  2.500000  2.500000
            2020-01-03  2.666667  2.666667  2.666667
            2020-01-04  2.666667  2.666667  2.666667
            2020-01-05  2.600000  2.600000  2.600000
            ```"""
        checks.assert_numba_func(apply_func_nb)

        if on_matrix:
            result = nb.expanding_apply_matrix_nb(self.to_2d_array(), apply_func_nb, *args)
        else:
            result = nb.expanding_apply_nb(self.to_2d_array(), apply_func_nb, *args)
        return self.wrap(result) 
Example #6
Source File: accessors.py    From vectorbt with GNU General Public License v3.0    5 votes
def groupby_apply(self, by, apply_func_nb, *args, on_matrix=False, **kwargs):
        """See `vectorbt.tseries.nb.groupby_apply_nb` and
        `vectorbt.tseries.nb.groupby_apply_matrix_nb` for `on_matrix=True`.

        For `by`, see `pandas.DataFrame.groupby`.

        Example:
            ```python-repl
            >>> mean_nb = njit(lambda col, i, a: np.nanmean(a))
            >>> print(df.vbt.tseries.groupby_apply([1, 1, 2, 2, 3], mean_nb))
                 a    b    c
            1  1.5  4.5  1.5
            2  3.5  2.5  2.5
            3  5.0  1.0  1.0

            >>> mean_matrix_nb = njit(lambda i, a: np.nanmean(a))
            >>> print(df.vbt.tseries.groupby_apply([1, 1, 2, 2, 3],
            ...     mean_matrix_nb, on_matrix=True))
                      a         b         c
            1  2.500000  2.500000  2.500000
            2  2.833333  2.833333  2.833333
            3  2.333333  2.333333  2.333333
            ```"""
        checks.assert_numba_func(apply_func_nb)

        regrouped = self._obj.groupby(by, axis=0, **kwargs)
        groups = Dict()
        for i, (k, v) in enumerate(regrouped.indices.items()):
            groups[i] = np.asarray(v)
        if on_matrix:
            result = nb.groupby_apply_matrix_nb(self.to_2d_array(), groups, apply_func_nb, *args)
        else:
            result = nb.groupby_apply_nb(self.to_2d_array(), groups, apply_func_nb, *args)
        return self.wrap_reduced(result, index=list(regrouped.indices.keys())) 
Example #7
Source File: accessors.py    From vectorbt with GNU General Public License v3.0    5 votes
def resample_apply(self, freq, apply_func_nb, *args, on_matrix=False, **kwargs):
        """See `vectorbt.tseries.nb.groupby_apply_nb` and
        `vectorbt.tseries.nb.groupby_apply_matrix_nb` for `on_matrix=True`.

        For `freq`, see `pandas.DataFrame.resample`.

        Example:
            ```python-repl
            >>> mean_nb = njit(lambda col, i, a: np.nanmean(a))
            >>> print(df.vbt.tseries.resample_apply('2d', mean_nb))
                          a    b    c
            2020-01-01  1.5  4.5  1.5
            2020-01-03  3.5  2.5  2.5
            2020-01-05  5.0  1.0  1.0

            >>> mean_matrix_nb = njit(lambda i, a: np.nanmean(a))
            >>> print(df.vbt.tseries.resample_apply('2d',
            ...     mean_matrix_nb, on_matrix=True))
                               a         b         c
            2020-01-01  2.500000  2.500000  2.500000
            2020-01-03  2.833333  2.833333  2.833333
            2020-01-05  2.333333  2.333333  2.333333
            ```"""
        checks.assert_numba_func(apply_func_nb)

        resampled = self._obj.resample(freq, axis=0, **kwargs)
        groups = Dict()
        for i, (k, v) in enumerate(resampled.indices.items()):
            groups[i] = np.asarray(v)
        if on_matrix:
            result = nb.groupby_apply_matrix_nb(self.to_2d_array(), groups, apply_func_nb, *args)
        else:
            result = nb.groupby_apply_nb(self.to_2d_array(), groups, apply_func_nb, *args)
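        # Align the per-group results to the full resampled index; periods without rows stay NaN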
        result_obj = self.wrap(result, index=list(resampled.indices.keys()))
        resampled_arr = np.full((resampled.ngroups, self.to_2d_array().shape[1]), np.nan)
        resampled_obj = self.wrap(resampled_arr, index=pd.Index(list(resampled.groups.keys()), freq=freq))
        resampled_obj.loc[result_obj.index] = result_obj.values
        return resampled_obj 
Example #8
Source File: accessors.py    From vectorbt with GNU General Public License v3.0    5 votes
def mean(self, **kwargs):
        """Return mean of non-NaN elements."""
        return self.wrap_reduced(nanmean(self.to_2d_array(), axis=0), **kwargs) 
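Here nanmean is bottleneck's, applied column-wise. A made-up illustration of the reduction it performs:

>>> nanmean(np.array([[1.0, np.nan], [3.0, 4.0], [5.0, np.nan]]), axis=0)
array([3., 4.])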
Example #9
Source File: frame_align.py    From sima with GNU General Public License v2.0    5 votes
def shifted_corr(reference, image, displacement):
    """Calculate the correlation between the reference and the image shifted
    by the given displacement.

    Parameters
    ----------
    reference : np.ndarray
    image : np.ndarray
    displacement : np.ndarray

    Returns
    -------
    correlation : float

    """

    ref_cuts = np.maximum(0, displacement)
    ref = reference[ref_cuts[0]:, ref_cuts[1]:, ref_cuts[2]:]
    im_cuts = np.maximum(0, -displacement)
    im = image[im_cuts[0]:, im_cuts[1]:, im_cuts[2]:]
    s = np.minimum(im.shape, ref.shape)
    ref = ref[:s[0], :s[1], :s[2]]
    im = im[:s[0], :s[1], :s[2]]
    ref -= nanmean(ref.reshape(-1, ref.shape[-1]), axis=0)
    ref = np.nan_to_num(ref)
    im -= nanmean(im.reshape(-1, im.shape[-1]), axis=0)
    im = np.nan_to_num(im)
    assert np.all(np.isfinite(ref)) and np.all(np.isfinite(im))
    corr = nanmean(
        [old_div(np.sum(i * r), np.sqrt(np.sum(i * i) * np.sum(r * r))) for
         i, r in zip(np.rollaxis(im, -1), np.rollaxis(ref, -1))])
    return corr 
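A hypothetical usage sketch: with zero displacement and identical frames the correlation is, up to floating-point error, 1.0. Copies are passed because the function subtracts the per-channel means in place.

>>> frame = np.random.rand(16, 16, 2)   # made-up (rows, cols, channels) stack
>>> shifted_corr(frame.copy(), frame.copy(), np.zeros(3, dtype=int))   # ≈ 1.0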
Example #10
Source File: boost_experiment.py    From mHTM with MIT License    4 votes
def _phase3(self):
	"""
	Normal phase 3, but with tracking of the boost changes. Double-commented lines
	are new.
	"""
	
	# Update permanences
	self.p = np.clip(self.p + (self.c_pupdate * self.y[:, 0:1] *
		self.x[self.syn_map] - self.pdec * self.y[:, 0:1]), 0, 1)
	
	if self.disable_boost is False:
		# Update the boosting mechanisms
		if self.global_inhibition:
			min_dc = np.zeros(self.ncolumns)
			min_dc.fill(self.c_mdc * bn.nanmax(self.active_dc))
		else:
			min_dc = self.c_mdc * bn.nanmax(self.neighbors * self.active_dc, 1)
		
		## Save pre-overlap boost info
		boost = list(self.boost)
		
		# Update boost
		self._update_active_duty_cycle()
		self._update_boost(min_dc)
		self._update_overlap_duty_cycle()
	
		## Write out overlap boost changes
		with open(os.path.join(self.out_path, 'overlap_boost.csv'), 'ab') as f:
			writer = csv.writer(f)
			writer.writerow([self.iter, bn.nanmean(boost != self.boost)])
	
		# Boost permanences
		mask = self.overlap_dc < min_dc
		mask.resize(self.ncolumns, 1)
		self.p = np.clip(self.p + self.c_sboost * mask, 0, 1)
	
		## Write out permanence boost info
		with open(os.path.join(self.out_path, 'permanence_boost.csv'), 'ab') \
			as f:
			writer = csv.writer(f)
			writer.writerow([self.iter, bn.nanmean(mask)])
	
	# Trim synapses
	if self.trim is not False:
		self.p[self.p < self.trim] = 0
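Both bn.nanmean() calls above take the mean of a boolean array (boost != self.boost and mask), which is simply the fraction of entries that are True. A made-up illustration (the cast to float just keeps the example explicit; the snippet passes the boolean mask directly):

>>> changed = np.array([True, False, True, True])
>>> bn.nanmean(changed.astype(float))   # fraction of columns whose boost changed
0.75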