Python bottleneck.nanmax() Examples

The following are 11 code examples of bottleneck.nanmax(). They are drawn from open-source projects; the project, source file, and license are listed above each example. You may also want to check out the other functions and classes available in the bottleneck module.
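Before the examples, here is a minimal sketch of what bottleneck.nanmax() does (the array values are illustrative): it behaves like numpy.nanmax, ignoring NaN values when computing the maximum, but is implemented in C and is therefore typically faster on large arrays.

import numpy as np
import bottleneck as bn

a = np.array([[1.0, np.nan, 3.0],
              [np.nan, 5.0, 2.0]])

bn.nanmax(a)          # 5.0 -- the NaNs are ignored
bn.nanmax(a, axis=0)  # array([1., 5., 3.]) -- maximum of each column
bn.nanmax(a, axis=1)  # array([3., 5.]) -- maximum of each row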
Example #1
Source File: ImageView.py    From tf-pose with Apache License 2.0
def quickMinMax(self, data):
        """
        Estimate the min/max values of *data* by subsampling.
        Returns [(min, max), ...] with one item per channel
        """
        while data.size > 1e6:
            ax = np.argmax(data.shape)
            sl = [slice(None)] * data.ndim
            sl[ax] = slice(None, None, 2)
            data = data[sl]
            
        cax = self.axes['c']
        if cax is None:
            return [(float(nanmin(data)), float(nanmax(data)))]
        else:
            return [(float(nanmin(data.take(i, axis=cax))), 
                     float(nanmax(data.take(i, axis=cax)))) for i in range(data.shape[-1])] 
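This method repeatedly halves the longest axis with a stride-2 slice until the array holds at most about one million elements, then computes NaN-ignoring minima and maxima, either once or per channel. Note that newer NumPy releases reject indexing with a plain list of slices, so a standalone version of the same idea needs data[tuple(sl)]. The function name quick_min_max and the max_size parameter below are illustrative, not part of the original class.

import numpy as np
from bottleneck import nanmin, nanmax

def quick_min_max(data, max_size=1e6):
    # Halve the longest axis until the array is small enough to scan cheaply.
    while data.size > max_size:
        ax = np.argmax(data.shape)
        sl = [slice(None)] * data.ndim
        sl[ax] = slice(None, None, 2)
        data = data[tuple(sl)]  # tuple indexing, required by newer NumPy
    # NaN-aware min/max of the subsampled data.
    return float(nanmin(data)), float(nanmax(data))

quick_min_max(np.random.rand(2000, 2000))  # roughly (0.0, 1.0)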
Example #2
Source File: accessors.py    From vectorbt with GNU General Public License v3.0
def reduce_to_array(self, reduce_func_nb, *args, **kwargs):
        """See `vectorbt.tseries.nb.reduce_to_array_nb`.

        `**kwargs` will be passed to `vectorbt.tseries.common.TSArrayWrapper.wrap_reduced`.

        Example:
            ```python-repl
            >>> min_max_nb = njit(lambda col, a: np.array([np.nanmin(a), np.nanmax(a)]))
            >>> print(df.vbt.tseries.reduce_to_array(min_max_nb, index=['min', 'max']))
                   a    b    c
            min  1.0  1.0  1.0
            max  5.0  5.0  3.0
            ```"""
        checks.assert_numba_func(reduce_func_nb)

        result = nb.reduce_to_array_nb(self.to_2d_array(), reduce_func_nb, *args)
        return self.wrap_reduced(result, **kwargs) 
Example #3
Source File: region.py    From mHTM with MIT License
def get_probabilities(self, store=True):
		"""
		Get the probabilities associated with each feature. This technique
		uses the max across probabilities to form the global probabilities.
		This method should be called after fitting the SP.
		
		@param store: If True, the probabilities are stored internally. Set to
		False to reduce memory.
		
		@return: Return the probabilities.
		"""
		
		# Get the probabilities
		prob = np.zeros(self.ninputs)
		for i in xrange(self.ninputs):
			# Find all of the potential synapses for this input
			valid = self.syn_map == i
			
			# Find the max permanence across each of the potential synapses
			try:
				prob[i] = bn.nanmax(self.p[valid])
			except ValueError:
				prob[i] = 0. # Occurs for missing connections
		
		# Store the probabilities
		if store: self.prob = prob
		
		return prob 
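The try/except above guards against inputs that have no potential synapses: like numpy.nanmax, bottleneck's nanmax raises a ValueError when given an empty array, and the method falls back to a probability of 0. A small sketch of that behaviour, with illustrative values:

import numpy as np
import bottleneck as bn

p = np.array([0.2, np.nan, 0.7, 0.1])   # permanences
valid = np.zeros(p.shape, dtype=bool)   # no potential synapses map to this input

try:
    prob = bn.nanmax(p[valid])          # empty selection -> ValueError
except ValueError:
    prob = 0.                           # same fallback as get_probabilities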
Example #4
Source File: region.py    From mHTM with MIT License
def reconstruct_input(self, x=None):
		"""
		Reconstruct the original input using only the stored permanences and
		the set of active columns. The maximization of probabilities approach
		is used. This method must be called after fitting the SP.
		
		@param x: The set of active columns or None if the SP was never fitted.
		"""
		
		# Check input
		if x is None: x = self.column_activations
		if x is None: return None
		
		# Reshape x if needed
		ravel = False
		if len(x.shape) == 1:
			ravel = True
			x = x.reshape(1, x.shape[0])
		
		# Get the input mapping
		imap = [np.where(self.syn_map == i) for i in xrange(self.ninputs)]
		
		# Get the reconstruction
		x2 = np.zeros((x.shape[0], self.ninputs))
		for i, xi in enumerate(x):
			# Mask off permanences not relevant to this input
			y = self.p * xi.reshape(self.ncolumns, 1)
			
			# Remap permanences to input domain
			for j in xrange(self.ninputs):
				# Get the max probability across the current input space
				try:
					x2[i][j] = bn.nanmax(y[imap[j]])
				except ValueError:
					x2[i][j] = 0. # Occurs for missing connections
				
				# Threshold back to {0, 1}
				x2[i][j] = 1 if x2[i][j] >= self.syn_th else 0
		
		return x2 if not ravel else x2.ravel() 
Example #5
Source File: accessors.py    From vectorbt with GNU General Public License v3.0
def max(self, **kwargs):
        """Return max of non-NaN elements."""
        return self.wrap_reduced(nanmax(self.to_2d_array(), axis=0), **kwargs) 
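This accessor reduces the 2D representation column by column: nanmax(..., axis=0) returns one NaN-ignoring maximum per column, and wrap_reduced (vectorbt-specific, omitted below) restores the pandas index. A sketch of just the reduction, with an illustrative DataFrame:

import numpy as np
import pandas as pd
from bottleneck import nanmax

df = pd.DataFrame({'a': [1.0, 4.0, np.nan],
                   'b': [np.nan, 2.0, 3.0]})

nanmax(df.values, axis=0)    # array([4., 3.]) -- one maximum per column
df.max(skipna=True).values   # array([4., 3.]) -- the equivalent pandas reduction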
Example #6
Source File: ImageView.py    From soapy with GNU General Public License v3.0
def quickMinMax(self, data):
        """
        Estimate the min/max values of *data* by subsampling.
        """
        while data.size > 1e6:
            ax = np.argmax(data.shape)
            sl = [slice(None)] * data.ndim
            sl[ax] = slice(None, None, 2)
            data = data[sl]
        return nanmin(data), nanmax(data) 
Example #7
Source File: ImageView.py    From qgisSpaceSyntaxToolkit with GNU General Public License v3.0
def quickMinMax(self, data):
        """
        Estimate the min/max values of *data* by subsampling.
        """
        while data.size > 1e6:
            ax = np.argmax(data.shape)
            sl = [slice(None)] * data.ndim
            sl[ax] = slice(None, None, 2)
            data = data[sl]
        return nanmin(data), nanmax(data) 
Example #8
Source File: __init__.py    From sima with GNU General Public License v2.0
def to8bit(array):
    """Convert an array to 8 bit."""
    return (old_div((255. * array), nanmax(array))).astype('uint8') 
Example #9
Source File: __init__.py    From sima with GNU General Public License v2.0
def to16bit(array):
    """Convert an array to 16 bit."""
    return (old_div((65535. * array), nanmax(array))).astype('uint16') 
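Examples #8 and #9 normalize an array by its NaN-ignoring maximum so that the largest finite value maps to 255 or 65535 before casting to an unsigned integer type; old_div comes from the python-future compatibility layer, and under Python 3 plain division does the same thing. A sketch of the 8-bit case with illustrative data (any NaNs would survive the division and produce unspecified values after the cast):

import numpy as np
from bottleneck import nanmax

def to8bit(array):
    # Scale so the largest finite value becomes 255, then truncate to uint8.
    return (255. * array / nanmax(array)).astype('uint8')

frame = np.array([[0.0, 0.5],
                  [1.0, 2.0]])
to8bit(frame)  # array([[  0,  63], [127, 255]], dtype=uint8)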
Example #10
Source File: boost_experiment.py    From mHTM with MIT License
def _phase3(self):
	"""
	Normal phase 3, but with tracking the boost changes. Double commented lines
	are new.
	"""
	
	# Update permanences
	self.p = np.clip(self.p + (self.c_pupdate * self.y[:, 0:1] *
		self.x[self.syn_map] - self.pdec * self.y[:, 0:1]), 0, 1)
	
	if self.disable_boost is False:
		# Update the boosting mechanisms
		if self.global_inhibition:
			min_dc = np.zeros(self.ncolumns)
			min_dc.fill(self.c_mdc * bn.nanmax(self.active_dc))
		else:
			min_dc = self.c_mdc * bn.nanmax(self.neighbors * self.active_dc, 1)
		
		## Save pre-overlap boost info
		boost = list(self.boost)
		
		# Update boost
		self._update_active_duty_cycle()
		self._update_boost(min_dc)
		self._update_overlap_duty_cycle()
	
		## Write out overlap boost changes
		with open(os.path.join(self.out_path, 'overlap_boost.csv'), 'ab') as f:
			writer = csv.writer(f)
			writer.writerow([self.iter, bn.nanmean(boost != self.boost)])
	
		# Boost permanences
		mask = self.overlap_dc < min_dc
		mask.resize(self.ncolumns, 1)
		self.p = np.clip(self.p + self.c_sboost * mask, 0, 1)
	
		## Write out permanence boost info
		with open(os.path.join(self.out_path, 'permanence_boost.csv'), 'ab') \
			as f:
			writer = csv.writer(f)
			writer.writerow([self.iter, bn.nanmean(mask)])
	
	# Trim synapses
	if self.trim is not False:
		self.p[self.p < self.trim] = 0 
Example #11
Source File: region.py    From mHTM with MIT License
def _phase3(self):
		"""
		Execute phase 3 of the SP region. This phase is used to conduct
		learning.
		
		Note - This should only be called after phase 2 has been called.
		"""
		
		# Notes:
		# 1. logical_not is faster than invert
		# 2. Multiplication is faster than bitwise_and which is faster than
		#    logical_not
		# 3. Slightly different format than original definition
		#    (in the comment) to get even more speed benefits
		"""
		x = self.x[self.syn_map]
		self.p = np.clip(self.p + self.y[:, 0:1] * (x * self.pinc -
			np.logical_not(x) * self.pdec), 0, 1)
		"""
		self.p = np.clip(self.p + (self.c_pupdate * self.y[:, 0:1] *
			self.x[self.syn_map] - self.pdec * self.y[:, 0:1]), 0, 1)
		
		if self.disable_boost is False:
			# Update the boosting mechanisms
			if self.global_inhibition:
				min_dc = np.zeros(self.ncolumns)
				min_dc.fill(self.c_mdc * bn.nanmax(self.active_dc))
			else:
				min_dc = self.c_mdc * bn.nanmax(self.neighbors *
					self.active_dc, 1)
			self._update_active_duty_cycle()
			self._update_boost(min_dc)
			self._update_overlap_duty_cycle()
			
			# Boost permanences
			mask = self.overlap_dc < min_dc
			mask.resize(self.ncolumns, 1)
			self.p = np.clip(self.p + self.c_sboost * mask, 0, 1)
		
		# Trim synapses
		if self.trim is not False:
			self.p[self.p < self.trim] = 0
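In both _phase3 variants the minimum duty-cycle threshold is built from a NaN-ignoring maximum: with global inhibition every column shares one scalar, the largest active duty cycle in the region, while with local inhibition bn.nanmax(..., 1) reduces along axis 1 so each column is measured only against its own neighborhood. A sketch of the two reductions with illustrative values (c_mdc, active_dc, and neighbors are made up for the example):

import numpy as np
import bottleneck as bn

c_mdc = 0.01                             # illustrative minimum duty-cycle fraction
active_dc = np.array([0.2, 0.05, 0.6])   # per-column active duty cycles
neighbors = np.array([[1, 1, 0],         # column 0 sees columns 0 and 1
                      [1, 1, 1],         # column 1 sees every column
                      [0, 1, 1]])        # column 2 sees columns 1 and 2

# Global inhibition: one threshold shared by all columns.
min_dc_global = np.full(3, c_mdc * bn.nanmax(active_dc))     # [0.006, 0.006, 0.006]

# Local inhibition: a per-column threshold over each neighborhood (axis 1).
min_dc_local = c_mdc * bn.nanmax(neighbors * active_dc, 1)   # [0.002, 0.006, 0.006]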